From 46f2d41d9d683e5ce963b1f2bba5cb11f5f4084e Mon Sep 17 00:00:00 2001
From: Bukhtawar Khan
Date: Thu, 27 Jun 2024 05:52:08 +0530
Subject: [PATCH 1/5] Add Ashish Singh as maintainer

Signed-off-by: Bukhtawar Khan
---
 .github/CODEOWNERS | 2 +-
 MAINTAINERS.md | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 5a2d08756c49f..8d69e98220b69 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -24,4 +24,4 @@
 /.github/ @peternied

-/MAINTAINERS.md @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+/MAINTAINERS.md @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah

diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index 91b57a4cbc74e..3298ceb15463c 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -9,6 +9,7 @@ This document contains a list of maintainers in this repo. See [opensearch-proje
 | Anas Alkouz | [anasalkouz](https://github.com/anasalkouz) | Amazon |
 | Andrew Ross | [andrross](https://github.com/andrross) | Amazon |
 | Andriy Redko | [reta](https://github.com/reta) | Aiven |
+| Ashish Singh | [ashking94](https://github.com/ashking94) | Amazon |
 | Bukhtawar Khan | [Bukhtawar](https://github.com/Bukhtawar) | Amazon |
 | Charlotte Henkle | [CEHENKLE](https://github.com/CEHENKLE) | Amazon |
 | Dan Widdis | [dbwiddis](https://github.com/dbwiddis) | Amazon |

From 3901f33649590f4cd2a8b0e92e1e8dc96f250f07 Mon Sep 17 00:00:00 2001
From: Bukhtawar Khan
Date: Mon, 26 Aug 2024 20:16:40 +0530
Subject: [PATCH 2/5] Rebasing changes for sign-off

Signed-off-by: Bukhtawar Khan
---
 .ci/bwcVersions | 2 +
 .ci/java-versions.properties | 3 +-
 .github/CODEOWNERS | 18 +-
 .github/benchmark-configs.json | 171 +
 .github/workflows/add-performance-comment.yml | 28 +
 .github/workflows/assemble.yml | 16 +-
 .github/workflows/benchmark-pull-request.yml | 179 +
 .github/workflows/changelog_verifier.yml | 17 +-
 .github/workflows/dependabot_pr.yml | 7 +
 .github/workflows/maintainer-approval.yml | 2 +-
 .github/workflows/precommit.yml | 16 +-
 .github/workflows/triage.yml | 2 +-
 .github/workflows/version.yml | 2 +-
 CHANGELOG-3.0.md | 1 +
 CHANGELOG.md | 87 +-
 MAINTAINERS.md | 7 +-
 PERFORMANCE_BENCHMARKS.md | 112 +
 README.md | 2 +-
 TRIAGING.md | 10 +-
 .../routing/allocation/RerouteBenchmark.java | 135 +
 .../InternalDistributionBwcSetupPlugin.java | 12 +-
 .../gradle/test/DistroTestPlugin.java | 4 +-
 .../gradle/test/rest/RestResourcesPlugin.java | 56 +-
 .../forbidden/opensearch-all-signatures.txt | 3 +
 .../src/main/resources/minimumCompilerVersion | 2 +-
 buildSrc/version.properties | 22 +-
 client/rest/build.gradle | 67 +-
 .../rest/licenses/httpclient5-5.2.1.jar.sha1 | 1 -
 .../rest/licenses/httpclient5-5.2.3.jar.sha1 | 1 +
 client/rest/licenses/httpcore5-5.2.2.jar.sha1 | 1 -
 client/rest/licenses/httpcore5-5.2.5.jar.sha1 | 1 +
 .../rest/licenses/httpcore5-h2-5.2.2.jar.sha1 | 1 -
 .../rest/licenses/httpcore5-h2-5.2.5.jar.sha1 | 1 +
 .../httpcore5-reactive-5.2.5.jar.sha1 | 1 +
 .../licenses/httpcore5-reactive-LICENSE.txt | 558 +++
 .../licenses/httpcore5-reactive-NOTICE.txt | 8 +
 .../licenses/reactive-streams-1.0.4.jar.sha1 | 1 +
 .../licenses/reactive-streams-LICENSE.txt | 21 +
 .../rest/licenses/reactive-streams-NOTICE.txt | 0
 .../licenses/reactor-core-3.5.20.jar.sha1 | 1 +
 client/rest/licenses/reactor-core-LICENSE.txt | 201 ++
 client/rest/licenses/reactor-core-NOTICE.txt | 0
 .../org/opensearch/client/Cancellable.java | 29 +-
 .../java/org/opensearch/client/Response.java | 73 +-
 .../client/ResponseWarningsExtractor.java | 99 +
 .../org/opensearch/client/RestClient.java | 291 +-
 .../opensearch/client/StreamingRequest.java | 114 +
 .../opensearch/client/StreamingResponse.java | 96 +
 .../http/ReactiveHttpUriRequestProducer.java | 75 +
 .../opensearch/client/RestClientTests.java | 13 +
 .../licenses/httpclient5-5.2.1.jar.sha1 | 1 -
 .../licenses/httpclient5-5.2.3.jar.sha1 | 1 +
 .../sniffer/licenses/httpcore5-5.2.2.jar.sha1 | 1 -
 .../sniffer/licenses/httpcore5-5.2.5.jar.sha1 | 1 +
 .../licenses/jackson-core-2.17.1.jar.sha1 | 1 -
 .../licenses/jackson-core-2.17.2.jar.sha1 | 1 +
 distribution/src/config/opensearch.yml | 4 +
 distribution/tools/plugin-cli/build.gradle | 31 +-
 .../licenses/bc-fips-1.0.2.5.jar.sha1 | 1 -
 .../licenses/bc-fips-2.0.0.jar.sha1 | 1 +
 .../licenses/bcpg-fips-1.0.7.1.jar.sha1 | 1 -
 .../licenses/bcpg-fips-2.0.9.jar.sha1 | 1 +
 .../jackson-annotations-2.17.1.jar.sha1 | 1 -
 .../jackson-annotations-2.17.2.jar.sha1 | 1 +
 .../licenses/jackson-databind-2.17.1.jar.sha1 | 1 -
 .../licenses/jackson-databind-2.17.2.jar.sha1 | 1 +
 gradle/wrapper/gradle-wrapper.jar | Bin 43462 -> 43583 bytes
 gradle/wrapper/gradle-wrapper.properties | 4 +-
 gradlew | 7 +-
 gradlew.bat | 2 +
 .../ApiAnnotationProcessorTests.java | 13 +
 .../processor/InternalApiAnnotated.java | 4 +-
 ...licApiConstructorAnnotatedInternalApi.java | 21 +
 .../licenses/jackson-core-2.17.1.jar.sha1 | 1 -
 .../licenses/jackson-core-2.17.2.jar.sha1 | 1 +
 ...cene-core-9.12.0-snapshot-847316d.jar.sha1 | 1 +
 ...cene-core-9.12.0-snapshot-c896995.jar.sha1 | 1 -
 .../src/main/java/org/opensearch/Version.java | 4 +-
 .../core/xcontent/AbstractXContentParser.java | 2 +-
 .../core/xcontent/filtering/FilterPath.java | 30 +-
 .../xcontent/filtering/FilterPathTests.java | 17 +
 .../main/java/org/opensearch/grok/Grok.java | 122 +-
 .../java/org/opensearch/grok/GrokTests.java | 10 +
 .../secure_sm/ThreadContextPermission.java | 40 +
 libs/task-commons/build.gradle | 25 +
 .../task/commons/clients/TaskListRequest.java | 103 +
 .../commons/clients/TaskManagerClient.java | 69 +
 .../commons/clients/TaskProducerClient.java | 24 +
 .../commons/clients/TaskWorkerClient.java | 38 +
 .../task/commons/clients}/package-info.java | 4 +-
 .../task/commons}/package-info.java | 4 +-
 .../opensearch/task/commons/task/Task.java | 361 ++
 .../opensearch/task/commons/task/TaskId.java | 55 +
 .../task/commons/task/TaskParams.java | 23 +
 .../task/commons/task/TaskStatus.java | 49 +
 .../task/commons/task/TaskType.java | 27 +
 .../task/commons/task}/package-info.java | 4 +-
 .../task/commons/worker/TaskWorker.java | 29 +
 .../task/commons/worker/WorkerNode.java | 96 +
 .../task/commons/worker}/package-info.java | 4 +-
 .../task/commons/mocks/MockTaskParams.java | 25 +
 .../task/commons/task/TaskIdTests.java | 53 +
 .../task/commons/task/TaskTests.java | 147 +
 .../task/commons/worker/WorkerNodeTests.java | 52 +
 .../licenses/jackson-core-2.17.1.jar.sha1 | 1 -
 .../licenses/jackson-core-2.17.2.jar.sha1 | 1 +
 .../jackson-dataformat-cbor-2.17.1.jar.sha1 | 1 -
 .../jackson-dataformat-cbor-2.17.2.jar.sha1 | 1 +
 .../jackson-dataformat-smile-2.17.1.jar.sha1 | 1 -
 .../jackson-dataformat-smile-2.17.2.jar.sha1 | 1 +
 .../jackson-dataformat-yaml-2.17.1.jar.sha1 | 1 -
 .../jackson-dataformat-yaml-2.17.2.jar.sha1 | 1 +
 .../common/CommonAnalysisModulePlugin.java | 3 +
 ...enationCompoundWordTokenFilterFactory.java | 9 +-
 .../common/MinHashTokenFilterFactory.java | 4 +-
 .../common/PersianStemTokenFilterFactory.java | 52 +
 .../common/StemmerTokenFilterFactory.java | 3 +
 .../common/CommonAnalysisFactoryTests.java | 2 +
 .../common/CompoundAnalysisTests.java | 63 +-
 .../common/MinHashFilterFactoryTests.java | 66 +-
 .../opensearch/analysis/common/da_UTF8.xml | 1208 +++++++
 .../test/analysis-common/40_token_filters.yml | 31 +
 .../common/IngestCommonModulePlugin.java | 2 +-
 .../rest-api-spec/test/ingest/70_bulk.yml | 70 +-
 .../jackson-annotations-2.17.1.jar.sha1 | 1 -
 .../jackson-annotations-2.17.2.jar.sha1 | 1 +
 .../licenses/jackson-databind-2.17.1.jar.sha1 | 1 -
 .../licenses/jackson-databind-2.17.2.jar.sha1 | 1 +
 .../ingest/geoip/IngestGeoIpModulePlugin.java | 40 +-
 .../geoip/IngestGeoIpModulePluginTests.java | 95 +
 .../IngestUserAgentModulePlugin.java | 39 +-
 .../IngestUserAgentModulePluginTests.java | 114 +
 ...pressions-9.12.0-snapshot-847316d.jar.sha1 | 1 +
 ...pressions-9.12.0-snapshot-c896995.jar.sha1 | 1 -
 modules/lang-mustache/build.gradle | 2 +-
 .../licenses/compiler-0.9.13.jar.sha1 | 1 -
 .../licenses/compiler-0.9.14.jar.sha1 | 1 +
 modules/lang-painless/build.gradle | 2 +
 .../opensearch/painless/SimplePainlessIT.java | 231 ++
 .../derived_fields/60_derived_field_aggs.yml | 1515 +++++++++
 .../resources/rest-api-spec/test/10_basic.yml | 45 +
 .../SearchPipelineCommonModulePlugin.java | 106 +-
 .../common/SortResponseProcessor.java | 209 ++
 .../common/SplitResponseProcessor.java | 162 +
 ...SearchPipelineCommonModulePluginTests.java | 107 +
 .../common/SortResponseProcessorTests.java | 230 ++
 .../common/SplitResponseProcessorTests.java | 213 ++
 .../test/search_pipeline/80_sort_response.yml | 152 +
 .../netty-buffer-4.1.111.Final.jar.sha1 | 1 -
 .../netty-buffer-4.1.112.Final.jar.sha1 | 1 +
 .../netty-codec-4.1.111.Final.jar.sha1 | 1 -
 .../netty-codec-4.1.112.Final.jar.sha1 | 1 +
 .../netty-codec-http-4.1.111.Final.jar.sha1 | 1 -
 .../netty-codec-http-4.1.112.Final.jar.sha1 | 1 +
 .../netty-codec-http2-4.1.111.Final.jar.sha1 | 1 -
 .../netty-codec-http2-4.1.112.Final.jar.sha1 | 1 +
 .../netty-common-4.1.111.Final.jar.sha1 | 1 -
 .../netty-common-4.1.112.Final.jar.sha1 | 1 +
 .../netty-handler-4.1.111.Final.jar.sha1 | 1 -
 .../netty-handler-4.1.112.Final.jar.sha1 | 1 +
 .../netty-resolver-4.1.111.Final.jar.sha1 | 1 -
 .../netty-resolver-4.1.112.Final.jar.sha1 | 1 +
 .../netty-transport-4.1.111.Final.jar.sha1 | 1 -
 .../netty-transport-4.1.112.Final.jar.sha1 | 1 +
 ...-native-unix-common-4.1.111.Final.jar.sha1 | 1 -
 ...-native-unix-common-4.1.112.Final.jar.sha1 | 1 +
 ...lysis-icu-9.12.0-snapshot-847316d.jar.sha1 | 1 +
 ...lysis-icu-9.12.0-snapshot-c896995.jar.sha1 | 1 -
 ...-kuromoji-9.12.0-snapshot-847316d.jar.sha1 | 1 +
 ...-kuromoji-9.12.0-snapshot-c896995.jar.sha1 | 1 -
 ...ysis-nori-9.12.0-snapshot-847316d.jar.sha1 | 1 +
 ...ysis-nori-9.12.0-snapshot-c896995.jar.sha1 | 1 -
 ...-phonetic-9.12.0-snapshot-847316d.jar.sha1 | 1 +
 ...-phonetic-9.12.0-snapshot-c896995.jar.sha1 | 1 -
 ...s-smartcn-9.12.0-snapshot-847316d.jar.sha1 | 1 +
 ...s-smartcn-9.12.0-snapshot-c896995.jar.sha1 | 1 -
 ...s-stempel-9.12.0-snapshot-847316d.jar.sha1 | 1 +
 ...s-stempel-9.12.0-snapshot-c896995.jar.sha1 | 1 -
 ...orfologik-9.12.0-snapshot-847316d.jar.sha1 | 1 +
 ...orfologik-9.12.0-snapshot-c896995.jar.sha1 | 1 -
 .../cache/store/disk/EhcacheDiskCache.java | 50 +-
 .../store/disk/EhCacheDiskCacheTests.java | 293 ++
 .../jackson-annotations-2.17.1.jar.sha1 | 1 -
 .../jackson-annotations-2.17.2.jar.sha1 | 1 +
 .../licenses/jackson-databind-2.17.1.jar.sha1 | 1 -
 .../licenses/jackson-databind-2.17.2.jar.sha1 | 1 +
 .../jackson-annotations-2.17.1.jar.sha1 | 1 -
 .../jackson-annotations-2.17.2.jar.sha1 | 1 +
 .../licenses/jackson-databind-2.17.1.jar.sha1 | 1 -
 .../licenses/jackson-databind-2.17.2.jar.sha1 | 1 +
 plugins/ingest-attachment/build.gradle | 2 +-
 .../licenses/xz-1.10.jar.sha1 | 1 +
 .../licenses/xz-1.9.jar.sha1 | 1 -
 .../QueryInsightsPluginTransportIT.java | 274 --
 .../plugin/insights/TopQueriesRestIT.java | 107 -
 .../plugin/insights/QueryInsightsPlugin.java | 125 -
 .../insights/core/exporter/DebugExporter.java | 61 -
 .../core/exporter/LocalIndexExporter.java | 113 -
 .../core/exporter/QueryInsightsExporter.java | 26 -
 .../QueryInsightsExporterFactory.java | 143 -
 .../insights/core/exporter/SinkType.java | 66 -
 .../core/listener/QueryInsightsListener.java | 202 --
 .../core/service/QueryInsightsService.java | 283 --
 .../core/service/TopQueriesService.java | 374 --
 .../insights/rules/action/package-info.java | 12 -
 .../rules/action/top_queries/TopQueries.java | 77 -
 .../action/top_queries/TopQueriesAction.java | 32 -
 .../action/top_queries/TopQueriesRequest.java | 62 -
 .../top_queries/TopQueriesResponse.java | 143 -
 .../insights/rules/model/Attribute.java | 82 -
 .../insights/rules/model/MetricType.java | 119 -
 .../rules/model/SearchQueryRecord.java | 183 -
 .../insights/rules/model/package-info.java | 12 -
 .../rules/resthandler/package-info.java | 12 -
 .../top_queries/RestTopQueriesAction.java | 99 -
 .../resthandler/top_queries/package-info.java | 12 -
 .../rules/transport/package-info.java | 12 -
 .../TransportTopQueriesAction.java | 148 -
 .../transport/top_queries/package-info.java | 12 -
 .../settings/QueryInsightsSettings.java | 304 --
 .../insights/QueryInsightsPluginTests.java | 113 -
 .../insights/QueryInsightsTestUtils.java | 205 --
 .../core/exporter/DebugExporterTests.java | 37 -
 .../exporter/LocalIndexExporterTests.java | 99 -
 .../QueryInsightsExporterFactoryTests.java | 89 -
 .../listener/QueryInsightsListenerTests.java | 217 --
 .../service/QueryInsightsServiceTests.java | 65 -
 .../core/service/TopQueriesServiceTests.java | 108 -
 .../top_queries/TopQueriesRequestTests.java | 43 -
 .../top_queries/TopQueriesResponseTests.java | 71 -
 .../action/top_queries/TopQueriesTests.java | 35 -
 .../rules/model/SearchQueryRecordTests.java | 71 -
 .../RestTopQueriesActionTests.java | 70 -
 .../TransportTopQueriesActionTests.java | 85 -
 plugins/repository-azure/build.gradle | 14 +-
 .../licenses/accessors-smart-2.5.0.jar.sha1 | 1 -
 .../licenses/accessors-smart-2.5.1.jar.sha1 | 1 +
 .../licenses/azure-core-1.49.1.jar.sha1 | 1 -
 .../licenses/azure-core-1.51.0.jar.sha1 | 1 +
 .../azure-core-http-netty-1.15.1.jar.sha1 | 1 -
 .../azure-core-http-netty-1.15.3.jar.sha1 | 1 +
 .../azure-storage-common-12.21.2.jar.sha1 | 1 -
 .../azure-storage-common-12.25.1.jar.sha1 | 1 +
 .../licenses/azure-xml-1.0.0.jar.sha1 | 1 -
 .../licenses/azure-xml-1.1.0.jar.sha1 | 1 +
 .../jackson-annotations-2.17.1.jar.sha1 | 1 -
 .../jackson-annotations-2.17.2.jar.sha1 | 1 +
 .../licenses/jackson-databind-2.17.1.jar.sha1 | 1 -
 .../licenses/jackson-databind-2.17.2.jar.sha1 | 1 +
 .../jackson-dataformat-xml-2.17.1.jar.sha1 | 1 -
 .../jackson-dataformat-xml-2.17.2.jar.sha1 | 1 +
 .../jackson-datatype-jsr310-2.17.1.jar.sha1 | 1 -
 .../jackson-datatype-jsr310-2.17.2.jar.sha1 | 1 +
 ...on-module-jaxb-annotations-2.17.1.jar.sha1 | 1 -
 ...on-module-jaxb-annotations-2.17.2.jar.sha1 | 1 +
 .../licenses/json-smart-2.5.0.jar.sha1 | 1 -
 .../licenses/json-smart-2.5.1.jar.sha1 | 1 +
 .../licenses/msal4j-1.15.1.jar.sha1 | 1 -
 .../licenses/msal4j-1.16.2.jar.sha1 | 1 +
 .../netty-codec-dns-4.1.111.Final.jar.sha1 | 1 -
 .../netty-codec-dns-4.1.112.Final.jar.sha1 | 1 +
 .../netty-codec-http2-4.1.111.Final.jar.sha1 | 1 -
 .../netty-codec-http2-4.1.112.Final.jar.sha1 | 1 +
 .../netty-codec-socks-4.1.111.Final.jar.sha1 | 1 -
 .../netty-codec-socks-4.1.112.Final.jar.sha1 | 1 +
 ...netty-handler-proxy-4.1.111.Final.jar.sha1 | 1 -
 ...netty-handler-proxy-4.1.112.Final.jar.sha1 | 1 +
 .../netty-resolver-dns-4.1.111.Final.jar.sha1 | 1 -
 .../netty-resolver-dns-4.1.112.Final.jar.sha1 | 1 +
 ...-native-unix-common-4.1.111.Final.jar.sha1 | 1 -
 ...-native-unix-common-4.1.112.Final.jar.sha1 | 1 +
 .../reactor-netty-core-1.1.20.jar.sha1 | 1 -
 .../reactor-netty-core-1.1.22.jar.sha1 | 1 +
 .../reactor-netty-http-1.1.20.jar.sha1 | 1 -
 .../reactor-netty-http-1.1.22.jar.sha1 | 1 +
 .../azure/AzureStorageService.java | 3 +
 .../plugin-metadata/plugin-security.policy | 1 +
 .../azure/AzureBlobContainerRetriesTests.java | 4 +-
 plugins/repository-hdfs/build.gradle | 19 +-
 .../licenses/avro-1.11.3.jar.sha1 | 1 -
 .../licenses/avro-1.12.0.jar.sha1 | 1 +
 .../licenses/commons-cli-1.8.0.jar.sha1 | 1 -
 .../licenses/commons-cli-1.9.0.jar.sha1 | 1 +
 .../licenses/commons-lang3-3.14.0.jar.sha1 | 1 -
 .../licenses/commons-lang3-3.16.0.jar.sha1 | 1 +
 .../licenses/netty-all-4.1.111.Final.jar.sha1 | 1 -
 .../licenses/netty-all-4.1.112.Final.jar.sha1 | 1 +
 .../jackson-annotations-2.17.1.jar.sha1 | 1 -
 .../jackson-annotations-2.17.2.jar.sha1 | 1 +
 .../licenses/jackson-databind-2.17.1.jar.sha1 | 1 -
 .../licenses/jackson-databind-2.17.2.jar.sha1 | 1 +
 .../netty-buffer-4.1.111.Final.jar.sha1 | 1 -
 .../netty-buffer-4.1.112.Final.jar.sha1 | 1 +
 .../netty-codec-4.1.111.Final.jar.sha1 | 1 -
 .../netty-codec-4.1.112.Final.jar.sha1 | 1 +
 .../netty-codec-http-4.1.111.Final.jar.sha1 | 1 -
 .../netty-codec-http-4.1.112.Final.jar.sha1 | 1 +
 .../netty-codec-http2-4.1.111.Final.jar.sha1 | 1 -
 .../netty-codec-http2-4.1.112.Final.jar.sha1 | 1 +
 .../netty-common-4.1.111.Final.jar.sha1 | 1 -
 .../netty-common-4.1.112.Final.jar.sha1 | 1 +
 .../netty-handler-4.1.111.Final.jar.sha1 | 1 -
 .../netty-handler-4.1.112.Final.jar.sha1 | 1 +
 .../netty-resolver-4.1.111.Final.jar.sha1 | 1 -
 .../netty-resolver-4.1.112.Final.jar.sha1 | 1 +
 .../netty-transport-4.1.111.Final.jar.sha1 | 1 -
 .../netty-transport-4.1.112.Final.jar.sha1 | 1 +
 ...sport-classes-epoll-4.1.111.Final.jar.sha1 | 1 -
 ...sport-classes-epoll-4.1.112.Final.jar.sha1 | 1 +
 ...-native-unix-common-4.1.111.Final.jar.sha1 | 1 -
 ...-native-unix-common-4.1.112.Final.jar.sha1 | 1 +
 plugins/telemetry-otel/build.gradle | 4 +-
 .../opentelemetry-api-1.39.0.jar.sha1 | 1 -
 .../opentelemetry-api-1.41.0.jar.sha1 | 1 +
 ...emetry-api-incubator-1.39.0-alpha.jar.sha1 | 1 -
 ...emetry-api-incubator-1.41.0-alpha.jar.sha1 | 1 +
 .../opentelemetry-context-1.39.0.jar.sha1 | 1 -
 .../opentelemetry-context-1.41.0.jar.sha1 | 1 +
 ...ntelemetry-exporter-common-1.39.0.jar.sha1 | 1 -
 ...ntelemetry-exporter-common-1.41.0.jar.sha1 | 1 +
 ...telemetry-exporter-logging-1.39.0.jar.sha1 | 1 -
 ...telemetry-exporter-logging-1.41.0.jar.sha1 | 1 +
 ...pentelemetry-exporter-otlp-1.39.0.jar.sha1 | 1 -
 ...pentelemetry-exporter-otlp-1.41.0.jar.sha1 | 1 +
 ...metry-exporter-otlp-common-1.39.0.jar.sha1 | 1 -
 ...metry-exporter-otlp-common-1.41.0.jar.sha1 | 1 +
 ...try-exporter-sender-okhttp-1.39.0.jar.sha1 | 1 -
 ...try-exporter-sender-okhttp-1.41.0.jar.sha1 | 1 +
 .../opentelemetry-sdk-1.39.0.jar.sha1 | 1 -
 .../opentelemetry-sdk-1.41.0.jar.sha1 | 1 +
 .../opentelemetry-sdk-common-1.39.0.jar.sha1 | 1 -
 .../opentelemetry-sdk-common-1.41.0.jar.sha1 | 1 +
 .../opentelemetry-sdk-logs-1.39.0.jar.sha1 | 1 -
 .../opentelemetry-sdk-logs-1.41.0.jar.sha1 | 1 +
 .../opentelemetry-sdk-metrics-1.39.0.jar.sha1 | 1 -
 .../opentelemetry-sdk-metrics-1.41.0.jar.sha1 | 1 +
 .../opentelemetry-sdk-trace-1.39.0.jar.sha1 | 1 -
 .../opentelemetry-sdk-trace-1.41.0.jar.sha1 | 1 +
 ...pentelemetry-semconv-1.25.0-alpha.jar.sha1 | 1 -
 ...pentelemetry-semconv-1.27.0-alpha.jar.sha1 | 1 +
 .../netty-buffer-4.1.111.Final.jar.sha1 | 1 -
 .../netty-buffer-4.1.112.Final.jar.sha1 | 1 +
 .../netty-codec-4.1.111.Final.jar.sha1 | 1 -
 .../netty-codec-4.1.112.Final.jar.sha1 | 1 +
 .../netty-codec-http-4.1.111.Final.jar.sha1 | 1 -
 .../netty-codec-http-4.1.112.Final.jar.sha1 | 1 +
 .../netty-common-4.1.111.Final.jar.sha1 | 1 -
 .../netty-common-4.1.112.Final.jar.sha1 | 1 +
 .../netty-handler-4.1.111.Final.jar.sha1 | 1 -
 .../netty-handler-4.1.112.Final.jar.sha1 | 1 +
 .../netty-resolver-4.1.111.Final.jar.sha1 | 1 -
 .../netty-resolver-4.1.112.Final.jar.sha1 | 1 +
 .../netty-transport-4.1.111.Final.jar.sha1 | 1 -
 .../netty-transport-4.1.112.Final.jar.sha1 | 1 +
 plugins/transport-reactor-netty4/build.gradle | 6 +-
 .../netty-buffer-4.1.111.Final.jar.sha1 | 1 -
 .../netty-buffer-4.1.112.Final.jar.sha1 | 1 +
 .../netty-codec-4.1.111.Final.jar.sha1 | 1 -
 .../netty-codec-4.1.112.Final.jar.sha1 | 1 +
 .../netty-codec-dns-4.1.111.Final.jar.sha1 | 1 -
 .../netty-codec-dns-4.1.112.Final.jar.sha1 | 1 +
 .../netty-codec-http-4.1.111.Final.jar.sha1 | 1 -
 .../netty-codec-http-4.1.112.Final.jar.sha1 | 1 +
 .../netty-codec-http2-4.1.111.Final.jar.sha1 | 1 -
 .../netty-codec-http2-4.1.112.Final.jar.sha1 | 1 +
 .../netty-common-4.1.111.Final.jar.sha1 | 1 -
 .../netty-common-4.1.112.Final.jar.sha1 | 1 +
 .../netty-handler-4.1.111.Final.jar.sha1 | 1 -
 .../netty-handler-4.1.112.Final.jar.sha1 | 1 +
 .../netty-resolver-4.1.111.Final.jar.sha1 | 1 -
 .../netty-resolver-4.1.112.Final.jar.sha1 | 1 +
 .../netty-resolver-dns-4.1.111.Final.jar.sha1 | 1 -
 .../netty-resolver-dns-4.1.112.Final.jar.sha1 | 1 +
 .../netty-transport-4.1.111.Final.jar.sha1 | 1 -
 .../netty-transport-4.1.112.Final.jar.sha1 | 1 +
 ...-native-unix-common-4.1.111.Final.jar.sha1 | 1 -
 ...-native-unix-common-4.1.112.Final.jar.sha1 | 1 +
 .../reactor-netty-core-1.1.20.jar.sha1 | 1 -
 .../reactor-netty-core-1.1.22.jar.sha1 | 1 +
 .../reactor-netty-http-1.1.20.jar.sha1 | 1 -
 .../reactor-netty-http-1.1.22.jar.sha1 | 1 +
 .../rest/ReactorNetty4BadRequestIT.java | 115 +
 .../rest/ReactorNetty4HeadBodyIsEmptyIT.java | 204 ++
 .../rest/ReactorNetty4StreamingIT.java | 139 +
 .../rest/ReactorNetty4StreamingStressIT.java | 95 +
 .../ReactorNetty4HttpServerTransport.java | 5 +-
 .../ReactorNetty4NonStreamingHttpChannel.java | 11 +-
 .../ReactorNetty4StreamingHttpChannel.java | 2 +
 ...ReactorNetty4StreamingRequestConsumer.java | 2 +-
 ...eactorNetty4StreamingResponseProducer.java | 6 +-
 .../build.gradle | 7 +-
 .../plugin/wlm/WorkloadManagementPlugin.java | 81 +
 .../wlm/WorkloadManagementPluginModule.java | 31 +
 .../wlm/action/CreateQueryGroupAction.java | 36 +
 .../wlm/action/CreateQueryGroupRequest.java | 82 +
 .../wlm/action/CreateQueryGroupResponse.java | 74 +
 .../wlm/action/DeleteQueryGroupAction.java | 38 +
 .../wlm/action/DeleteQueryGroupRequest.java | 65 +
 .../wlm/action/GetQueryGroupAction.java | 36 +
 .../wlm/action/GetQueryGroupRequest.java | 64 +
 .../wlm/action/GetQueryGroupResponse.java | 82 +
 .../TransportCreateQueryGroupAction.java | 51 +
 .../TransportDeleteQueryGroupAction.java | 91 +
 .../action/TransportGetQueryGroupAction.java | 98 +
 .../plugin/wlm/action/package-info.java | 12 +
 .../opensearch/plugin/wlm/package-info.java | 12 +
 .../wlm/rest/RestCreateQueryGroupAction.java | 72 +
 .../wlm/rest/RestDeleteQueryGroupAction.java | 57 +
 .../wlm/rest/RestGetQueryGroupAction.java | 68 +
 .../plugin/wlm/rest}/package-info.java | 4 +-
 .../service/QueryGroupPersistenceService.java | 273 ++
 .../plugin/wlm/service/package-info.java | 12 +
 .../plugin/wlm/QueryGroupTestUtils.java | 144 +
 .../action/CreateQueryGroupRequestTests.java | 40 +
 .../action/CreateQueryGroupResponseTests.java | 66 +
 .../action/DeleteQueryGroupRequestTests.java | 42 +
 .../wlm/action/GetQueryGroupRequestTests.java | 53 +
 .../action/GetQueryGroupResponseTests.java | 151 +
 .../TransportDeleteQueryGroupActionTests.java | 63 +
 .../TransportGetQueryGroupActionTests.java | 47 +
 .../rest/RestDeleteQueryGroupActionTests.java | 85 +
 .../QueryGroupPersistenceServiceTests.java | 359 ++
 ...rkloadManagementClientYamlTestSuiteIT.java | 52 +
 .../api/create_query_group_context.json | 18 +
 .../api/delete_query_group_context.json | 22 +
 .../api/get_query_group_context.json | 25 +
 .../rest-api-spec/test/wlm/10_query_group.yml | 114 +
 .../opensearch/packaging/util/FileUtils.java | 2 +-
 qa/smoke-test-http/build.gradle | 1 +
 .../http/DanglingIndicesRestIT.java | 4 +-
 .../opensearch/http/HttpSmokeTestCase.java | 7 +-
 .../http/IdentityAuthenticationIT.java | 4 +-
 .../WEB-INF/jboss-deployment-structure.xml | 3 +
 .../opensearch.release-notes-1.3.18.md | 4 +
 .../opensearch.release-notes-2.16.0.md | 92 +
 .../rest-api-spec/api/indices.put_alias.json | 58 +-
 .../test/index/110_strict_allow_templates.yml | 155 +
 .../test/index/115_constant_keyword.yml | 332 ++
 .../test/index/120_field_name.yml | 38 +
 .../10_basic.yml | 58 +
 .../test/indices.put_alias/10_basic.yml | 206 ++
 .../indices.put_mapping/all_path_options.yml | 31 +
 .../search.aggregation/360_date_histogram.yml | 91 +
 .../test/search.aggregation/40_range.yml | 79 +
 .../test/search/190_index_prefix_search.yml | 52 +-
 .../test/search/370_bitmap_filtering.yml | 184 +
 server/build.gradle | 5 +
 server/licenses/RoaringBitmap-1.1.0.jar.sha1 | 1 +
 server/licenses/RoaringBitmap-LICENSE.txt | 202 ++
 server/licenses/RoaringBitmap-NOTICE.txt | 0
 server/licenses/jackson-core-2.17.1.jar.sha1 | 1 -
 server/licenses/jackson-core-2.17.2.jar.sha1 | 1 +
 .../jackson-dataformat-cbor-2.17.1.jar.sha1 | 1 -
 .../jackson-dataformat-cbor-2.17.2.jar.sha1 | 1 +
 .../jackson-dataformat-smile-2.17.1.jar.sha1 | 1 -
 .../jackson-dataformat-smile-2.17.2.jar.sha1 | 1 +
 .../jackson-dataformat-yaml-2.17.1.jar.sha1 | 1 -
 .../jackson-dataformat-yaml-2.17.2.jar.sha1 | 1 +
 ...is-common-9.12.0-snapshot-847316d.jar.sha1 | 1 +
 ...is-common-9.12.0-snapshot-c896995.jar.sha1 | 1 -
 ...rd-codecs-9.12.0-snapshot-847316d.jar.sha1 | 1 +
 ...rd-codecs-9.12.0-snapshot-c896995.jar.sha1 | 1 -
 ...cene-core-9.12.0-snapshot-847316d.jar.sha1 | 1 +
 ...cene-core-9.12.0-snapshot-c896995.jar.sha1 | 1 -
 ...-grouping-9.12.0-snapshot-847316d.jar.sha1 | 1 +
 ...-grouping-9.12.0-snapshot-c896995.jar.sha1 | 1 -
 ...ghlighter-9.12.0-snapshot-847316d.jar.sha1 | 1 +
 ...ghlighter-9.12.0-snapshot-c896995.jar.sha1 | 1 -
 ...cene-join-9.12.0-snapshot-847316d.jar.sha1 | 1 +
 ...cene-join-9.12.0-snapshot-c896995.jar.sha1 | 1 -
 ...ne-memory-9.12.0-snapshot-847316d.jar.sha1 | 1 +
 ...ne-memory-9.12.0-snapshot-c896995.jar.sha1 | 1 -
 ...cene-misc-9.12.0-snapshot-847316d.jar.sha1 | 1 +
 ...cene-misc-9.12.0-snapshot-c896995.jar.sha1 | 1 -
 ...e-queries-9.12.0-snapshot-847316d.jar.sha1 | 1 +
 ...e-queries-9.12.0-snapshot-c896995.jar.sha1 | 1 -
 ...eryparser-9.12.0-snapshot-847316d.jar.sha1 | 1 +
 ...eryparser-9.12.0-snapshot-c896995.jar.sha1 | 1 -
 ...e-sandbox-9.12.0-snapshot-847316d.jar.sha1 | 1 +
 ...e-sandbox-9.12.0-snapshot-c896995.jar.sha1 | 1 -
 ...al-extras-9.12.0-snapshot-847316d.jar.sha1 | 1 +
 ...al-extras-9.12.0-snapshot-c896995.jar.sha1 | 1 -
 ...spatial3d-9.12.0-snapshot-847316d.jar.sha1 | 1 +
 ...spatial3d-9.12.0-snapshot-c896995.jar.sha1 | 1 -
 ...e-suggest-9.12.0-snapshot-847316d.jar.sha1 | 1 +
 ...e-suggest-9.12.0-snapshot-c896995.jar.sha1 | 1 -
 server/licenses/reactor-core-3.5.18.jar.sha1 | 1 -
 server/licenses/reactor-core-3.5.20.jar.sha1 | 1 +
 .../opensearch/action/IndicesRequestIT.java | 29 +-
 .../node/tasks/CancellableTasksIT.java | 4 +-
 .../admin/cluster/stats/ClusterStatsIT.java | 119 +-
 .../admin/indices/create/CreateIndexIT.java | 24 +
 .../indices/create/RemoteCloneIndexIT.java | 5 +-
 .../indices/create/RemoteSplitIndexIT.java | 24 +
 .../gateway/RecoveryFromGatewayIT.java | 280 +-
 .../RemoteClusterStateCleanupManagerIT.java | 135 +
 .../remote/RemoteRoutingTableServiceIT.java | 435 +++
 .../remote/RemoteStatePublicationIT.java | 173 +
 .../index/mapper/StarTreeMapperIT.java | 432 +++
 .../indices/IndicesRequestCacheIT.java | 63 +
 .../indices/recovery/DanglingIndicesIT.java | 4 +-
 .../org/opensearch/ingest/IngestClientIT.java | 81 +
 .../AdmissionForClusterManagerIT.java | 34 +-
 .../MigrationBaseTestCase.java | 43 +
 .../RemoteMigrationIndexMetadataUpdateIT.java | 69 +-
 .../RemoteStoreMigrationTestCase.java | 9 +
 .../RemoteStoreBaseIntegTestCase.java | 19 +-
 .../opensearch/remotestore/RemoteStoreIT.java | 16 +-
 .../RemoteStorePinnedTimestampsIT.java | 86 +
 .../remotestore/RemoteStoreStatsIT.java | 18 +-
 .../remotestore/WritableWarmIT.java | 15 +-
 .../search/query/SearchQueryIT.java | 40 +
 .../opensearch/search/sort/FieldSortIT.java | 46 +-
 .../snapshots/SearchableSnapshotIT.java | 12 +-
 .../Lucene90DocValuesConsumerWrapper.java | 46 +
 .../Lucene90DocValuesProducerWrapper.java | 46 +
 .../SortedNumericDocValuesWriterWrapper.java | 53 +
 .../org/opensearch/action/ActionModule.java | 16 +-
 .../TransportGetDecommissionStateAction.java | 3 +-
 .../health/TransportClusterHealthAction.java | 5 +
 .../node/info/TransportNodesInfoAction.java | 2 +-
 .../node/stats/TransportNodesStatsAction.java | 2 +-
 .../get/TransportGetRepositoriesAction.java | 3 +-
 .../TransportClusterUpdateSettingsAction.java | 51 +-
 .../TransportClusterSearchShardsAction.java | 3 +-
 .../TransportGetWeightedRoutingAction.java | 3 +-
 .../state/TransportClusterStateAction.java | 6 +-
 .../cluster/stats/ClusterStatsIndices.java | 67 +-
 .../stats/ClusterStatsNodeResponse.java | 133 +-
 .../cluster/stats/ClusterStatsRequest.java | 17 +
 .../stats/ClusterStatsRequestBuilder.java | 5 +
 .../stats/TransportClusterStatsAction.java | 12 +-
 .../TransportGetStoredScriptAction.java | 3 +-
 .../TransportPendingClusterTasksAction.java | 5 +
 .../alias/get/TransportGetAliasesAction.java | 3 +-
 .../indices/TransportIndicesExistsAction.java | 3 +-
 .../TransportIndicesShardStoresAction.java | 3 +-
 .../TransportGetComponentTemplateAction.java | 3 +-
 ...sportGetComposableIndexTemplateAction.java | 3 +-
 .../get/TransportGetIndexTemplatesAction.java | 3 +-
 .../TransportSimulateIndexTemplateAction.java | 3 +-
 .../post/TransportSimulateTemplateAction.java | 3 +-
 .../tiering/HotToWarmTieringAction.java | 28 +
 .../tiering/HotToWarmTieringResponse.java | 157 +
 .../tiering/RestWarmTieringAction.java | 61 +
 .../indices/tiering/TieringIndexRequest.java | 195 ++
 .../tiering/TieringValidationResult.java | 83 +
 .../TransportHotToWarmTieringAction.java | 110 +
 .../admin/indices/tiering/package-info.java | 36 +
 .../opensearch/action/bulk/BulkRequest.java | 2 +-
 .../ingest/GetPipelineTransportAction.java | 3 +-
 .../GetSearchPipelineTransportAction.java | 3 +-
 .../SearchQueryAggregationCategorizer.java | 55 -
 .../action/search/SearchQueryCategorizer.java | 85 -
 .../SearchQueryCategorizingVisitor.java | 39 -
 .../action/search/SearchQueryCounters.java | 70 -
 .../action/search/SearchRequestStats.java | 31 +
 .../action/search/SearchShardTask.java | 4 +-
 .../opensearch/action/search/SearchTask.java | 4 +-
 .../SearchTaskRequestOperationsListener.java | 30 +
 .../action/search/TransportSearchAction.java | 34 +-
 .../TransportClusterManagerNodeAction.java | 60 +-
 ...TransportClusterManagerNodeReadAction.java | 23 +-
 .../info/TransportClusterInfoAction.java | 1 +
 .../support/nodes/BaseNodesRequest.java | 16 +
 .../support/nodes/TransportNodesAction.java | 12 +-
 .../action/update/UpdateRequest.java | 4 +-
 .../client/OriginSettingClient.java | 7 +-
 .../client/support/AbstractClient.java | 5 +-
 .../org/opensearch/cluster/ClusterInfo.java | 37 +
 .../org/opensearch/cluster/ClusterModule.java | 3 +
 .../ClusterStateSystemTemplateLoader.java | 108 +
 .../applicationtemplates/SystemTemplate.java | 43 +
 .../SystemTemplateLoader.java | 26 +
 .../SystemTemplateMetadata.java | 89 +
 .../SystemTemplateRepository.java | 37 +
 .../SystemTemplatesPlugin.java | 31 +
 .../SystemTemplatesService.java | 183 +
 .../TemplateRepositoryMetadata.java | 54 +
 .../applicationtemplates/package-info.java | 10 +
 .../coordination/CoordinationState.java | 3 +-
 .../coordination/OpenSearchNodeCommand.java | 6 +-
 .../coordination/PersistedStateStats.java | 4 +
 .../PublicationTransportHandler.java | 1 -
 .../metadata/ComposableIndexTemplate.java | 45 +-
 .../opensearch/cluster/metadata/Context.java | 130 +
 .../cluster/metadata/IndexMetadata.java | 16 +-
 .../opensearch/cluster/metadata/Metadata.java | 85 +-
 .../metadata/MetadataCreateIndexService.java | 8 +-
 .../MetadataIndexTemplateService.java | 119 +-
 .../metadata/MetadataMappingService.java | 12 +-
 .../cluster/metadata/QueryGroup.java | 327 ++
 .../cluster/metadata/QueryGroupMetadata.java | 185 +
 .../routing/IndexShardRoutingTable.java | 18 +-
 .../cluster/routing/OperationRouting.java | 9 +
 .../cluster/routing/RoutingTable.java | 11 +-
 .../routing/RoutingTableIncrementalDiff.java | 130 +
 .../routing/StringKeyDiffProvider.java | 33 +
 .../routing/allocation/AllocationService.java | 5 +-
 .../routing/allocation/ConstraintTypes.java | 2 +-
 .../allocation/ExistingShardsAllocator.java | 7 +-
 .../allocator/BalancedShardsAllocator.java | 35 +-
 .../allocator/LocalShardsBalancer.java | 50 +-
 .../decider/AwarenessAllocationDecider.java | 91 +-
 .../decider/DiskThresholdDecider.java | 45 +-
 ...RemoteStoreMigrationAllocationDecider.java | 34 +-
 .../decider/ShardsLimitAllocationDecider.java | 4 +-
 .../InternalRemoteRoutingTableService.java | 316 +-
 .../remote/NoopRemoteRoutingTableService.java | 55 +-
 .../remote/RemoteRoutingTableService.java | 58 +-
 .../RemoteRoutingTableServiceFactory.java | 5 +-
 .../service/ClusterApplierService.java | 3 +-
 .../cluster/service/MasterService.java | 58 +-
 .../cluster/service/TaskBatcher.java | 28 +-
 .../JsonThrowablePatternConverter.java | 4 +-
 ...actClusterMetadataWriteableBlobEntity.java | 43 +
 .../AbstractRemoteWritableEntityManager.java | 84 +
 .../remote/RemoteWritableEntityManager.java | 47 +
 ...ty.java => RemoteWriteableBlobEntity.java} | 23 +-
 .../RemoteWriteableEntityBlobStore.java} | 38 +-
 .../common/settings/ClusterSettings.java | 29 +-
 .../common/settings/FeatureFlagSettings.java | 4 +-
 .../common/settings/IndexScopedSettings.java | 14 +-
 .../common/util/BatchRunnableExecutor.java | 73 +
 .../common/util/ByteArrayBackedBitset.java | 86 +
 .../opensearch/common/util/FeatureFlags.java | 27 +-
 .../common/util/concurrent/ThreadContext.java | 41 +
 .../util/concurrent/ThreadContextAccess.java | 41 +
 .../util/concurrent/TimeoutAwareRunnable.java | 11 +-
 .../xcontent/JsonToStringXContentParser.java | 122 +-
 .../org/opensearch/env/NodeEnvironment.java | 4 +-
 .../gateway/AsyncShardBatchFetch.java | 8 +
 .../gateway/BaseGatewayShardAllocator.java | 26 +
 .../gateway/PrimaryShardAllocator.java | 2 +-
 .../gateway/PrimaryShardBatchAllocator.java | 8 +-
 .../gateway/ReplicaShardAllocator.java | 7 +-
 .../gateway/ReplicaShardBatchAllocator.java | 15 +-
 .../gateway/ShardsBatchGatewayAllocator.java | 162 +-
 .../remote/ClusterMetadataManifest.java | 80 +-
 .../remote/ClusterStateDiffManifest.java | 115 +-
 .../RemoteClusterStateAttributesManager.java | 76 +-
 .../RemoteClusterStateCleanupManager.java | 32 +-
 .../remote/RemoteClusterStateService.java | 636 ++--
 .../remote/RemoteClusterStateUtils.java | 1 +
 .../remote/RemoteGlobalMetadataManager.java | 96 +-
 .../remote/RemoteIndexMetadataManager.java | 111 +-
 .../gateway/remote/RemoteManifestManager.java | 9 +-
 .../remote/RemotePersistenceStats.java | 11 +
 .../remote/model/RemoteClusterBlocks.java | 4 +-
 .../model/RemoteClusterMetadataManifest.java | 11 +-
 .../model/RemoteClusterStateCustoms.java | 6 +-
 .../model/RemoteCoordinationMetadata.java | 4 +-
 .../remote/model/RemoteCustomMetadata.java | 9 +-
 .../remote/model/RemoteDiscoveryNodes.java | 4 +-
 .../remote/model/RemoteGlobalMetadata.java | 4 +-
 .../RemoteHashesOfConsistentSettings.java | 4 +-
 .../remote/model/RemoteIndexMetadata.java | 4 +-
 .../RemotePersistentSettingsMetadata.java | 4 +-
 .../remote/model/RemotePinnedTimestamps.java | 144 +
 .../model/RemoteRoutingTableBlobStore.java | 118 +
 .../RemoteStorePinnedTimestampsBlobStore.java | 43 +
 .../remote/model/RemoteTemplatesMetadata.java | 4 +-
 .../RemoteTransientSettingsMetadata.java | 4 +-
 .../routingtable/IndexRoutingTableHeader.java | 81 -
 .../routingtable/RemoteIndexRoutingTable.java | 136 +-
 .../routingtable/RemoteRoutingTableDiff.java | 138 +
 .../java/org/opensearch/http/HttpChannel.java | 2 +-
 .../org/opensearch/index/IndexModule.java | 104 +-
 .../org/opensearch/index/IndexService.java | 28 +-
 .../org/opensearch/index/IndexSettings.java | 36 +-
 .../opensearch/index/codec/CodecService.java | 16 +-
 .../composite/CompositeCodecFactory.java | 47 +
 .../composite/CompositeIndexFieldInfo.java | 37 +
 .../codec/composite/CompositeIndexReader.java | 33 +
 .../codec/composite/CompositeIndexValues.java | 21 +
 .../LuceneDocValuesConsumerFactory.java | 50 +
 .../LuceneDocValuesProducerFactory.java | 60 +
 .../composite99/Composite99Codec.java | 57 +
 .../Composite99DocValuesFormat.java | 64 +
 .../Composite99DocValuesReader.java | 90 +
 .../Composite99DocValuesWriter.java | 205 ++
 .../composite/composite99/package-info.java | 12 +
 .../datacube/startree/StarTreeValues.java | 70 +
 .../datacube/startree/package-info.java | 12 +
 .../index/codec/composite/package-info.java | 12 +
 .../CompositeIndexSettings.java | 55 +
 .../CompositeIndexValidator.java | 46 +
 .../datacube/DateDimension.java | 72 +
 .../compositeindex/datacube/Dimension.java | 22 +
 .../datacube/DimensionFactory.java | 99 +
 .../index/compositeindex/datacube/Metric.java | 65 +
 .../compositeindex/datacube/MetricStat.java | 65 +
 .../datacube/NumericDimension.java | 57 +
 .../compositeindex/datacube/package-info.java | 11 +
 .../datacube/startree/StarTreeDocument.java | 34 +
 .../datacube/startree/StarTreeField.java | 94 +
 .../startree/StarTreeFieldConfiguration.java | 108 +
 .../startree/StarTreeIndexSettings.java | 111 +
 .../datacube/startree/StarTreeValidator.java | 94 +
 .../aggregators/CountValueAggregator.java | 72 +
 .../aggregators/MaxValueAggregator.java | 27 +
 .../aggregators/MetricAggregatorInfo.java | 122 +
 .../aggregators/MinValueAggregator.java | 27 +
 .../StatelessDoubleValueAggregator.java | 81 +
 .../aggregators/SumValueAggregator.java | 106 +
 .../startree/aggregators/ValueAggregator.java | 61 +
 .../aggregators/ValueAggregatorFactory.java | 44 +
 .../numerictype/StarTreeNumericType.java | 66 +
 .../StarTreeNumericTypeConverters.java | 58 +
 .../aggregators/numerictype/package-info.java | 14 +
 .../startree/aggregators/package-info.java | 14 +
 .../builder/AbstractDocumentsFileManager.java | 226 ++
 .../startree/builder/BaseStarTreeBuilder.java | 730 ++++
 .../builder/OffHeapStarTreeBuilder.java | 334 ++
 .../builder/OnHeapStarTreeBuilder.java | 283 ++
 .../builder/SegmentDocsFileManager.java | 103 +
 .../startree/builder/StarTreeBuilder.java | 43 +
 .../builder/StarTreeDocsFileManager.java | 294 ++
 .../startree/builder/StarTreesBuilder.java | 136 +
 .../startree/builder/package-info.java | 14 +
 .../datacube/startree/node/StarTreeNode.java | 112 +
 .../datacube/startree/node/package-info.java | 12 +
 .../datacube/startree/package-info.java | 13 +
 .../utils/SequentialDocValuesIterator.java | 90 +
 .../utils/StarTreeDocumentBitSetUtil.java | 57 +
 .../utils/StarTreeDocumentsSorter.java | 66 +
 .../datacube/startree/utils/TreeNode.java | 69 +
 .../datacube/startree/utils/package-info.java | 14 +
 .../index/compositeindex/package-info.java | 13 +
 .../UnsignedLongValuesComparatorSource.java | 8 +-
 .../org/opensearch/index/get/GetResult.java | 10 +
 .../mapper/CompositeDataCubeFieldType.java | 56 +
 .../mapper/CompositeMappedFieldType.java | 82 +
 .../index/mapper/ConstantFieldType.java | 2 +-
 .../mapper/ConstantKeywordFieldMapper.java | 70 +-
 .../index/mapper/DerivedFieldType.java | 75 +-
 .../index/mapper/DocumentParser.java | 178 +-
 .../index/mapper/FlatObjectFieldMapper.java | 9 +-
 .../org/opensearch/index/mapper/Mapper.java | 5 +
 .../index/mapper/MapperService.java | 26 +
 .../index/mapper/NumberFieldMapper.java | 69 +
 .../index/mapper/ObjectDerivedFieldType.java | 31 +-
 .../opensearch/index/mapper/ObjectMapper.java | 111 +-
 .../index/mapper/RootObjectMapper.java | 15 +-
 .../index/mapper/StarTreeMapper.java | 439 +++
 .../mapper/StrictDynamicMappingException.java | 4 +-
 .../index/mapper/TextFieldMapper.java | 28 +-
 .../index/query/ExistsQueryBuilder.java | 10 +-
 .../index/query/NestedQueryBuilder.java | 9 +
 .../index/query/QueryShapeVisitor.java | 86 -
 .../index/query/TermsQueryBuilder.java | 109 +-
 .../remote/RemoteSegmentTransferTracker.java | 4 +-
 .../index/remote/RemoteStoreUtils.java | 127 +-
 .../index/search/QueryParserHelper.java | 2 +-
 .../index/search/stats/SearchStats.java | 24 +-
 .../seqno/GlobalCheckpointSyncAction.java | 3 +-
 .../RetentionLeaseBackgroundSyncAction.java | 3 +-
 .../index/seqno/RetentionLeaseSyncAction.java | 3 +-
 .../index/shard/SearchOperationListener.java | 60 +
 .../org/opensearch/index/shard/ShardPath.java | 4 +-
 .../index/store/FsDirectoryFactory.java | 19 +-
 .../store/RemoteStoreFileDownloader.java | 1 +
 .../store/remote/utils/TransferManager.java | 74 +-
 .../transfer/BlobStoreTransferService.java | 35 +-
 .../org/opensearch/indices/IndicesModule.java | 2 +
 .../indices/IndicesRequestCache.java | 11 +-
 .../opensearch/indices/IndicesService.java | 21 +-
 .../indices/RemoteStoreSettings.java | 34 +
 .../indices/SystemIndexDescriptor.java | 4 +-
 .../indices/SystemIndexRegistry.java | 140 +
 .../org/opensearch/indices/SystemIndices.java | 89 +-
 .../org/opensearch/indices/TermsLookup.java | 26 +-
 .../recovery/RecoverySourceHandler.java | 2 +-
 .../checkpoint/PublishCheckpointAction.java | 3 +-
 .../tiering/TieringRequestValidator.java | 277 ++
 .../indices/tiering/package-info.java | 36 +
 .../ingest/AbstractBatchingProcessor.java | 136 +
 .../org/opensearch/ingest/IngestService.java | 74 +-
 .../main/java/org/opensearch/node/Node.java | 106 +-
 .../RemoteStorePinnedTimestampService.java | 292 ++
 .../plugins/TaskManagerClientPlugin.java | 27 +
 .../opensearch/plugins/TaskWorkerPlugin.java | 36 +
 .../blobstore/BlobStoreRepository.java | 47 +-
 .../blobstore/ChecksumBlobStoreFormat.java | 35 +-
 .../blobstore/ConfigBlobStoreFormat.java | 40 +-
 .../org/opensearch/rest/RestController.java | 3 +-
 .../admin/cluster/RestClusterStatsAction.java | 2 +
 .../admin/cluster/RestNodesInfoAction.java | 2 +-
 .../admin/cluster/RestNodesStatsAction.java | 1 +
 .../indices/RestIndexPutAliasAction.java | 10 +
 .../rest/action/cat/RestNodesAction.java | 8 +-
 .../rest/action/document/RestBulkAction.java | 8 +-
 .../document/RestBulkStreamingAction.java | 48 +-
 .../search/DefaultSearchContext.java | 9 +-
 .../org/opensearch/search/ResourceType.java | 33 +-
 .../bucket/FastFilterRewriteHelper.java | 849 -----
 .../CompositeAggregationFactory.java | 4 +-
 .../bucket/composite/CompositeAggregator.java | 121 +-
 .../filterrewrite/AggregatorBridge.java | 102 +
 .../CompositeAggregatorBridge.java | 36 +
 .../DateHistogramAggregatorBridge.java | 157 +
 .../FilterRewriteOptimizationContext.java | 189 ++
 .../bucket/filterrewrite/Helper.java | 213 ++
 .../filterrewrite/PointTreeTraversal.java | 223 ++
 .../filterrewrite/RangeAggregatorBridge.java | 95 +
 .../bucket/filterrewrite/Ranges.java | 57 +
 .../bucket/filterrewrite/package-info.java | 19 +
 .../AutoDateHistogramAggregator.java | 103 +-
 .../histogram/DateHistogramAggregator.java | 76 +-
 .../bucket/histogram/InternalHistogram.java | 6 +-
 .../bucket/range/RangeAggregator.java | 47 +-
 .../bucket/terms/IncludeExclude.java | 189 +-
 .../support/ValuesSourceConfig.java | 9 +-
 .../support/values/ScriptBytesValues.java | 7 +-
 .../SearchBackpressureService.java | 4 +-
 .../search/internal/ContextIndexSearcher.java | 31 +-
 .../search/lookup/SearchLookup.java | 13 +-
 .../search/lookup/SourceLookup.java | 2 +-
 .../search/query/BitmapDocValuesQuery.java | 152 +
 .../snapshots/SnapshotsService.java | 6 +-
 .../opensearch/snapshots/package-info.java | 2 +-
 .../tasks/TaskCancellationService.java | 2 +-
 .../org/opensearch/tasks/TaskManager.java | 16 +-
 .../transport/RemoteClusterConnection.java | 3 +-
 .../transport/SniffConnectionStrategy.java | 3 +-
 .../org/opensearch/transport/TcpChannel.java | 2 +-
 .../wlm/QueryGroupLevelResourceUsageView.java | 50 +
 .../org/opensearch/wlm/QueryGroupTask.java | 76 +
 ...ueryGroupThreadContextStatePropagator.java | 53 +
 .../wlm/WorkloadManagementSettings.java | 256 ++
 ...orkloadManagementTransportInterceptor.java | 64 +
 .../java/org/opensearch/wlm/package-info.java | 13 +
 ...QueryGroupResourceUsageTrackerService.java | 84 +
 .../opensearch/wlm/tracker/package-info.java | 12 +
 .../services/org.apache.lucene.codecs.Codec | 1 +
 .../org/opensearch/bootstrap/security.policy | 2 +
 .../bootstrap/test-framework.policy | 3 +
 .../action/DynamicActionRegistryTests.java | 5 +
 .../action/RestStatsActionTests.java | 59 +
 .../cluster/node/info/NodeInfoTests.java | 2 +-
 .../node/tasks/CancellableTasksTests.java | 2 +-
 .../cluster/stats/ClusterStatsNodesTests.java | 269 ++
 .../mapping/get/GetMappingsActionTests.java | 227 ++
 .../HotToWarmTieringResponseTests.java | 101 +
 .../tiering/TieringIndexRequestTests.java | 79 +
 .../TransportHotToWarmTieringActionTests.java | 118 +
 .../search/SearchQueryCategorizerTests.java | 245 --
 .../search/SearchRequestStatsTests.java | 35 +
 ...ransportClusterManagerNodeActionTests.java | 84 -
 .../TransportClusterStatsActionTests.java | 165 +
 .../nodes/TransportNodesActionTests.java | 13 +-
 .../nodes/TransportNodesInfoActionTests.java | 131 +
 .../nodes/TransportNodesStatsActionTests.java | 130 +
 .../action/update/UpdateRequestTests.java | 4 +-
 .../cluster/ClusterModuleTests.java | 10 +
 ...ClusterStateSystemTemplateLoaderTests.java | 153 +
 .../SystemTemplatesServiceTests.java | 102 +
 .../coordination/CoordinationStateTests.java | 14 +-
 .../PersistedStateStatsTests.java | 62 +
 .../MetadataCreateIndexServiceTests.java | 27 +
 .../MetadataIndexTemplateServiceTests.java | 517 ++-
 .../cluster/metadata/MetadataTests.java | 27 +
 .../metadata/QueryGroupMetadataTests.java | 93 +
 .../cluster/metadata/QueryGroupTests.java | 181 +
 .../metadata/TemplateUpgradeServiceTests.java | 3 +-
 .../routing/OperationRoutingTests.java | 64 +
 .../routing/RoutingTableDiffTests.java | 325 ++
 .../AllocationConstraintsTests.java | 2 +-
 .../allocation/BalancedSingleShardTests.java | 15 -
 .../DecideAllocateUnassignedTests.java | 154 +
 .../ExistingShardsAllocatorTests.java | 118 +
 .../decider/DiskThresholdDeciderTests.java | 13 -
 ...RemoteRoutingTableServiceFactoryTests.java | 6 +-
 .../RemoteRoutingTableServiceTests.java | 527 +--
 .../cluster/service/MasterServiceTests.java | 285 +-
 .../cluster/service/TaskBatcherTests.java | 3 +-
 .../common/LocalTimeOffsetTests.java | 2 +-
 .../serializer/ICacheKeySerializerTests.java | 7 +-
 ...tractRemoteWritableEntityManagerTests.java | 64 +
 .../util/BatchRunnableExecutorTests.java | 126 +
 .../util/ByteArrayBackedBitsetTests.java | 56 +
 .../util/concurrent/ThreadContextTests.java | 28 +-
 .../JsonToStringXContentParserTests.java | 57 +-
 .../org/opensearch/core/RestStatusTests.java | 6 +-
 .../gateway/GatewayAllocatorTests.java | 108 +
 .../PrimaryShardBatchAllocatorTests.java | 79 +-
 .../ReplicaShardBatchAllocatorTests.java | 99 +
 .../remote/ClusterMetadataManifestTests.java | 102 +-
 ...oteClusterStateAttributesManagerTests.java | 432 +--
 ...RemoteClusterStateCleanupManagerTests.java | 146 +
 .../RemoteClusterStateServiceTests.java | 1520 ++++++++-
 .../remote/RemoteClusterStateTestUtils.java | 227 ++
 .../RemoteGlobalMetadataManagerTests.java | 239 +-
 .../RemoteIndexMetadataManagerTests.java | 198 ++
 .../model/ClusterStateDiffManifestTests.java | 57 +-
 .../model/RemoteCustomMetadataTests.java | 87 +-
 .../model/RemotePinnedTimestampsTests.java | 101 +
 .../RemoteRoutingTableBlobStoreTests.java | 133 +
 .../IndexRoutingTableHeaderTests.java | 32 -
 .../RemoteIndexRoutingTableDiffTests.java | 263 ++
 .../RemoteIndexRoutingTableTests.java | 307 +-
 .../opensearch/index/codec/CodecTests.java | 47 +
 .../LuceneDocValuesConsumerFactoryTests.java | 85 +
 .../LuceneDocValuesProducerFactoryTests.java | 124 +
 ...tedNumericDocValuesWriterWrapperTests.java | 94 +
 .../StarTreeDocValuesFormatTests.java | 176 +
 .../AbstractValueAggregatorTests.java | 77 +
 .../CountValueAggregatorTests.java | 55 +
 .../aggregators/MaxValueAggregatorTests.java | 64 +
 .../MetricAggregatorInfoTests.java | 113 +
 .../aggregators/MinValueAggregatorTests.java | 63 +
 .../StaticValueAggregatorTests.java | 133 +
 .../aggregators/SumValueAggregatorTests.java | 78 +
 .../ValueAggregatorFactoryTests.java | 48 +
 .../builder/AbstractStarTreeBuilderTests.java | 2997 +++++++++++++++++
 .../builder/BaseStarTreeBuilderTests.java | 229 ++
 .../builder/OffHeapStarTreeBuilderTests.java | 26 +
 .../builder/OnHeapStarTreeBuilderTests.java | 24 +
 .../builder/StarTreesBuilderTests.java | 124 +
 .../SequentialDocValuesIteratorTests.java | 131 +
 .../StarTreeDocumentBitSetUtilTests.java | 72 +
 .../utils/StarTreeDocumentsSorterTests.java | 201 ++
 .../opensearch/index/get/GetResultTests.java | 20 +
 .../ConstantKeywordFieldMapperTests.java | 8 +
 .../mapper/ConstantKeywordFieldTypeTests.java | 54 +
 .../index/mapper/CopyToMapperTests.java | 40 +
 .../index/mapper/DerivedFieldTypeTests.java | 69 +
 .../index/mapper/DocumentParserTests.java | 366 ++
 .../index/mapper/NumberFieldTypeTests.java | 34 +
 .../index/mapper/ObjectMapperTests.java | 73 +
 .../index/mapper/StarTreeMapperTests.java | 922 +++++
 .../index/mapper/TextFieldMapperTests.java | 51 +
 .../index/mapper/TextFieldTypeTests.java | 1 +
 .../index/query/NestedQueryBuilderTests.java | 11 +
 .../index/query/QueryShapeVisitorTests.java | 31 -
 .../index/query/TermsQueryBuilderTests.java | 67 +-
 .../RemoteSegmentTransferTrackerTests.java | 25 +-
 .../index/remote/RemoteStoreUtilsTests.java | 139 +
 .../shard/SearchOperationListenerTests.java | 105 +
 .../index/store/FsDirectoryFactoryTests.java | 79 +-
 .../BlobStoreTransferServiceTests.java | 30 +-
 .../indices/IndicesServiceTests.java | 35 +
 .../indices/SystemIndicesTests.java | 161 +-
 .../opensearch/indices/TermsLookupTests.java | 5 +-
 .../HierarchyCircuitBreakerServiceTests.java | 2 +-
 .../tiering/TieringRequestValidatorTests.java | 318 ++
 .../AbstractBatchingProcessorTests.java | 160 +
 .../opensearch/ingest/IngestServiceTests.java | 150 +-
 .../blobstore/BlobStoreRepositoryTests.java | 22 +
 .../opensearch/search/ResourceTypeTests.java | 52 +
 .../search/SearchCancellationTests.java | 4 +
 .../opensearch/search/SearchServiceTests.java | 101 +
 .../histogram/InternalHistogramTests.java | 43 +
 .../bucket/range/RangeAggregatorTests.java | 129 +-
 .../terms/DerivedFieldAggregationTests.java | 146 +
 .../terms}/IncludeExcludeTests.java | 252 +-
 .../support/ValuesSourceConfigTests.java | 39 +
 .../SearchBackpressureServiceTests.java | 12 +-
 .../trackers/NodeDuressTrackersTests.java | 8 +-
 .../internal/ContextIndexSearcherTests.java | 4 +
 .../profile/query/QueryProfilerTests.java | 4 +
 .../query/BitmapDocValuesQueryTests.java | 149 +
 .../search/query/QueryPhaseTests.java | 7 +
 .../search/query/QueryProfilePhaseTests.java | 7 +
 .../snapshots/SnapshotResiliencyTests.java | 3 +-
 ...ContextBasedTracerContextStorageTests.java | 3 +-
 ...QueryGroupLevelResourceUsageViewTests.java | 64 +
 .../opensearch/wlm/QueryGroupTaskTests.java | 44 +
 ...roupThreadContextStatePropagatorTests.java | 30 +
 .../wlm/WorkloadManagementSettingsTests.java | 241 ++
 ...adManagementTransportInterceptorTests.java | 40 +
 ...anagementTransportRequestHandlerTests.java | 75 +
 ...GroupResourceUsageTrackerServiceTests.java | 126 +
 .../org/opensearch/bootstrap/test.policy | 2 +-
 settings.gradle | 2 +-
 test/fixtures/hdfs-fixture/build.gradle | 16 +-
 .../client/RestClientBuilderTestCase.java | 3 +-
 .../cluster/OpenSearchAllocationTestCase.java | 28 +
 .../cluster/routing/TestShardRouting.java | 26 +
 .../FakeThreadPoolClusterManagerService.java | 3 +-
 .../TestSystemTemplatesRepositoryPlugin.java | 72 +
 .../org/opensearch/index/MapperTestUtils.java | 34 +
 .../index/mapper/MapperTestCase.java | 2 +-
 .../analysis/AnalysisFactoryTestCase.java | 2 +-
 .../aggregations/AggregatorTestCase.java | 6 +
 .../test/OpenSearchIntegTestCase.java | 135 +-
 .../test/OpenSearchSingleNodeTestCase.java | 1 +
 .../test/TestClusterStateCustom.java | 68 +
 .../TestShardBatchGatewayAllocator.java | 26 +-
 .../test/rest/OpenSearchRestTestCase.java | 12 +-
 .../transport/nio/MockNioTransport.java | 2 +-
 .../org/opensearch/analysis/common/test1.json | 30 +-
 .../org/opensearch/analysis/common/test1.yml | 18 +
 993 files changed, 48971 insertions(+), 9741 deletions(-)
 create mode 100644 .github/benchmark-configs.json
 create mode 100644 .github/workflows/add-performance-comment.yml
 create mode 100644 .github/workflows/benchmark-pull-request.yml
 create mode 100644 PERFORMANCE_BENCHMARKS.md
 create mode 100644 benchmarks/src/main/java/org/opensearch/benchmark/routing/allocation/RerouteBenchmark.java
 delete mode 100644 client/rest/licenses/httpclient5-5.2.1.jar.sha1
 create mode 100644 client/rest/licenses/httpclient5-5.2.3.jar.sha1
 delete mode 100644 client/rest/licenses/httpcore5-5.2.2.jar.sha1
 create mode 100644 client/rest/licenses/httpcore5-5.2.5.jar.sha1
 delete mode 100644 client/rest/licenses/httpcore5-h2-5.2.2.jar.sha1
 create mode 100644 client/rest/licenses/httpcore5-h2-5.2.5.jar.sha1
 create mode 100644 client/rest/licenses/httpcore5-reactive-5.2.5.jar.sha1
 create mode 100644 client/rest/licenses/httpcore5-reactive-LICENSE.txt
 create mode 100644 client/rest/licenses/httpcore5-reactive-NOTICE.txt
 create mode 100644 client/rest/licenses/reactive-streams-1.0.4.jar.sha1
 create mode 100644 client/rest/licenses/reactive-streams-LICENSE.txt
 create mode 100644 client/rest/licenses/reactive-streams-NOTICE.txt
 create mode 100644 client/rest/licenses/reactor-core-3.5.20.jar.sha1
 create mode 100644 client/rest/licenses/reactor-core-LICENSE.txt
 create mode 100644 client/rest/licenses/reactor-core-NOTICE.txt
 create mode 100644 client/rest/src/main/java/org/opensearch/client/ResponseWarningsExtractor.java
 create mode 100644 client/rest/src/main/java/org/opensearch/client/StreamingRequest.java
 create mode 100644 client/rest/src/main/java/org/opensearch/client/StreamingResponse.java
 create mode 100644 client/rest/src/main/java/org/opensearch/client/http/ReactiveHttpUriRequestProducer.java
 delete mode 100644 client/sniffer/licenses/httpclient5-5.2.1.jar.sha1
 create mode 100644 client/sniffer/licenses/httpclient5-5.2.3.jar.sha1
 delete mode 100644 client/sniffer/licenses/httpcore5-5.2.2.jar.sha1
 create mode 100644 client/sniffer/licenses/httpcore5-5.2.5.jar.sha1
 delete mode 100644 client/sniffer/licenses/jackson-core-2.17.1.jar.sha1
 create mode 100644 client/sniffer/licenses/jackson-core-2.17.2.jar.sha1
 delete mode 100644 distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.5.jar.sha1
 create mode 100644 distribution/tools/plugin-cli/licenses/bc-fips-2.0.0.jar.sha1
 delete mode 100644 distribution/tools/plugin-cli/licenses/bcpg-fips-1.0.7.1.jar.sha1
 create mode 100644 distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.9.jar.sha1
 delete mode 100644 distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.1.jar.sha1
 create mode 100644 distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.2.jar.sha1
 delete mode 100644 distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.1.jar.sha1
 create mode 100644 distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.2.jar.sha1
 create mode 100644 libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiConstructorAnnotatedInternalApi.java
 delete mode 100644 libs/core/licenses/jackson-core-2.17.1.jar.sha1
 create mode 100644 libs/core/licenses/jackson-core-2.17.2.jar.sha1
 create mode 100644 libs/core/licenses/lucene-core-9.12.0-snapshot-847316d.jar.sha1
 delete mode 100644 libs/core/licenses/lucene-core-9.12.0-snapshot-c896995.jar.sha1
 create mode 100644 libs/secure-sm/src/main/java/org/opensearch/secure_sm/ThreadContextPermission.java
 create mode 100644 libs/task-commons/build.gradle
 create mode 100644 libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskListRequest.java
 create mode 100644 libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskManagerClient.java
 create mode 100644 libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskProducerClient.java
 create mode 100644 libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskWorkerClient.java
 rename {plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener => libs/task-commons/src/main/java/org/opensearch/task/commons/clients}/package-info.java (71%)
 rename {plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter => libs/task-commons/src/main/java/org/opensearch/task/commons}/package-info.java (72%)
 create mode 100644 libs/task-commons/src/main/java/org/opensearch/task/commons/task/Task.java
 create mode 100644 libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskId.java
 create mode 100644 libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskParams.java
 create mode 100644 libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskStatus.java
 create mode 100644 libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskType.java
 rename {plugins/query-insights/src/main/java/org/opensearch/plugin/insights => libs/task-commons/src/main/java/org/opensearch/task/commons/task}/package-info.java (73%)
 create mode 100644 libs/task-commons/src/main/java/org/opensearch/task/commons/worker/TaskWorker.java
 create mode 100644 libs/task-commons/src/main/java/org/opensearch/task/commons/worker/WorkerNode.java
 rename {plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings => libs/task-commons/src/main/java/org/opensearch/task/commons/worker}/package-info.java (71%)
 create mode 100644 libs/task-commons/src/test/java/org/opensearch/task/commons/mocks/MockTaskParams.java
 create mode 100644 libs/task-commons/src/test/java/org/opensearch/task/commons/task/TaskIdTests.java
 create mode 100644 libs/task-commons/src/test/java/org/opensearch/task/commons/task/TaskTests.java
 create mode 100644 libs/task-commons/src/test/java/org/opensearch/task/commons/worker/WorkerNodeTests.java
 delete mode 100644 libs/x-content/licenses/jackson-core-2.17.1.jar.sha1
 create mode 100644 libs/x-content/licenses/jackson-core-2.17.2.jar.sha1
 delete mode 100644 libs/x-content/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1
 create mode 100644 libs/x-content/licenses/jackson-dataformat-cbor-2.17.2.jar.sha1
 delete mode 100644 libs/x-content/licenses/jackson-dataformat-smile-2.17.1.jar.sha1
 create mode 100644 libs/x-content/licenses/jackson-dataformat-smile-2.17.2.jar.sha1
 delete mode 100644 libs/x-content/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1
 create mode 100644 libs/x-content/licenses/jackson-dataformat-yaml-2.17.2.jar.sha1
 create mode 100644 modules/analysis-common/src/main/java/org/opensearch/analysis/common/PersianStemTokenFilterFactory.java
 create mode 100644 modules/analysis-common/src/test/resources/org/opensearch/analysis/common/da_UTF8.xml
 delete mode 100644 modules/ingest-geoip/licenses/jackson-annotations-2.17.1.jar.sha1
 create mode 100644 modules/ingest-geoip/licenses/jackson-annotations-2.17.2.jar.sha1
 delete mode 100644 modules/ingest-geoip/licenses/jackson-databind-2.17.1.jar.sha1
 create mode 100644 modules/ingest-geoip/licenses/jackson-databind-2.17.2.jar.sha1
 create mode 100644 modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/IngestUserAgentModulePluginTests.java
 create mode 100644 modules/lang-expression/licenses/lucene-expressions-9.12.0-snapshot-847316d.jar.sha1
 delete mode 100644 modules/lang-expression/licenses/lucene-expressions-9.12.0-snapshot-c896995.jar.sha1
 delete mode 100644 modules/lang-mustache/licenses/compiler-0.9.13.jar.sha1
 create mode 100644 modules/lang-mustache/licenses/compiler-0.9.14.jar.sha1
 create mode 100644 modules/lang-painless/src/internalClusterTest/java/org/opensearch/painless/SimplePainlessIT.java
 create mode 100644 modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/60_derived_field_aggs.yml
 create mode 100644 modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SortResponseProcessor.java
 create mode 100644 modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SplitResponseProcessor.java
 create mode 100644 modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java
 create mode 100644 modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SortResponseProcessorTests.java
 create mode 100644 modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SplitResponseProcessorTests.java
 create mode 100644 modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/80_sort_response.yml
 delete mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.112.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.112.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.112.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.112.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-common-4.1.111.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-common-4.1.112.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.112.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.112.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.112.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1
 create mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.0-snapshot-847316d.jar.sha1
 delete mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.0-snapshot-c896995.jar.sha1
 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.0-snapshot-847316d.jar.sha1
 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.0-snapshot-c896995.jar.sha1
 create mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.0-snapshot-847316d.jar.sha1
 delete mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.0-snapshot-c896995.jar.sha1
 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.0-snapshot-847316d.jar.sha1
 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.0-snapshot-c896995.jar.sha1
 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.0-snapshot-847316d.jar.sha1
 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.0-snapshot-c896995.jar.sha1
 create mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.0-snapshot-847316d.jar.sha1
 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.0-snapshot-c896995.jar.sha1
 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.0-snapshot-847316d.jar.sha1
 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.0-snapshot-c896995.jar.sha1
 delete mode 100644 plugins/crypto-kms/licenses/jackson-annotations-2.17.1.jar.sha1
 create mode 100644 plugins/crypto-kms/licenses/jackson-annotations-2.17.2.jar.sha1
 delete mode 100644 plugins/crypto-kms/licenses/jackson-databind-2.17.1.jar.sha1
 create mode 100644 plugins/crypto-kms/licenses/jackson-databind-2.17.2.jar.sha1
 delete mode 100644 plugins/discovery-ec2/licenses/jackson-annotations-2.17.1.jar.sha1
 create mode 100644 plugins/discovery-ec2/licenses/jackson-annotations-2.17.2.jar.sha1
 delete mode 100644 plugins/discovery-ec2/licenses/jackson-databind-2.17.1.jar.sha1
 create mode 100644 plugins/discovery-ec2/licenses/jackson-databind-2.17.2.jar.sha1
 create mode 100644 plugins/ingest-attachment/licenses/xz-1.10.jar.sha1
 delete mode 100644 plugins/ingest-attachment/licenses/xz-1.9.jar.sha1
 delete mode 100644 plugins/query-insights/src/internalClusterTest/java/org/opensearch/plugin/insights/QueryInsightsPluginTransportIT.java
 delete mode 100644 plugins/query-insights/src/javaRestTest/java/org/opensearch/plugin/insights/TopQueriesRestIT.java
 delete mode 100644 plugins/query-insights/src/main/java/org/opensearch/plugin/insights/QueryInsightsPlugin.java
 delete mode 100644 plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/DebugExporter.java
 delete mode 100644 plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/LocalIndexExporter.java
 delete mode 100644 plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporter.java
 delete mode 100644 plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporterFactory.java
 delete mode 100644 plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/SinkType.java
 delete mode 100644 plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java
 delete mode 100644 plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/QueryInsightsService.java
 delete mode 100644 plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java
 delete mode 100644 plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/package-info.java
 delete mode 100644 plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueries.java
 delete mode 100644 plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesAction.java
 delete mode 100644 plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesRequest.java
 delete mode 100644 plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesResponse.java
 delete mode 100644 plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/Attribute.java
 delete mode 100644 plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/MetricType.java
 delete mode 100644 plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecord.java
 delete mode 100644 plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/package-info.java
 delete mode 100644 plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/package-info.java
 delete mode 100644 plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/RestTopQueriesAction.java
 delete mode 100644 plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/package-info.java
 delete mode 100644 plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/package-info.java
 delete mode 100644 plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesAction.java
 delete mode 100644 plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/package-info.java
 delete mode 100644 plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/QueryInsightsSettings.java
 delete mode 100644 plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java
 delete mode 100644 plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsTestUtils.java
 delete mode 100644 plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/DebugExporterTests.java
 delete mode 100644 plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/LocalIndexExporterTests.java
 delete mode 100644 plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporterFactoryTests.java
 delete mode 100644
plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java delete mode 100644 plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/QueryInsightsServiceTests.java delete mode 100644 plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java delete mode 100644 plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesRequestTests.java delete mode 100644 plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesResponseTests.java delete mode 100644 plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesTests.java delete mode 100644 plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecordTests.java delete mode 100644 plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/RestTopQueriesActionTests.java delete mode 100644 plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesActionTests.java delete mode 100644 plugins/repository-azure/licenses/accessors-smart-2.5.0.jar.sha1 create mode 100644 plugins/repository-azure/licenses/accessors-smart-2.5.1.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/azure-core-1.49.1.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-core-1.51.0.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/azure-core-http-netty-1.15.1.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-core-http-netty-1.15.3.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/azure-storage-common-12.21.2.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-storage-common-12.25.1.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/azure-xml-1.0.0.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-xml-1.1.0.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/jackson-annotations-2.17.1.jar.sha1 create mode 100644 plugins/repository-azure/licenses/jackson-annotations-2.17.2.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/jackson-databind-2.17.1.jar.sha1 create mode 100644 plugins/repository-azure/licenses/jackson-databind-2.17.2.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.1.jar.sha1 create mode 100644 plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.2.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.1.jar.sha1 create mode 100644 plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.2.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.1.jar.sha1 create mode 100644 plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.2.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/json-smart-2.5.0.jar.sha1 create mode 100644 plugins/repository-azure/licenses/json-smart-2.5.1.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/msal4j-1.15.1.jar.sha1 create mode 100644 plugins/repository-azure/licenses/msal4j-1.16.2.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.112.Final.jar.sha1 delete mode 100644 
plugins/repository-azure/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/reactor-netty-core-1.1.20.jar.sha1 create mode 100644 plugins/repository-azure/licenses/reactor-netty-core-1.1.22.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/reactor-netty-http-1.1.20.jar.sha1 create mode 100644 plugins/repository-azure/licenses/reactor-netty-http-1.1.22.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/avro-1.11.3.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/avro-1.12.0.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/commons-cli-1.8.0.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/commons-cli-1.9.0.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/commons-lang3-3.14.0.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/commons-lang3-3.16.0.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/jackson-annotations-2.17.1.jar.sha1 create mode 100644 plugins/repository-s3/licenses/jackson-annotations-2.17.2.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/jackson-databind-2.17.1.jar.sha1 create mode 100644 plugins/repository-s3/licenses/jackson-databind-2.17.2.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-common-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-common-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.112.Final.jar.sha1 delete mode 100644 
plugins/repository-s3/licenses/netty-transport-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-1.39.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.39.0-alpha.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-context-1.39.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.39.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.39.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.39.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.39.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.39.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-1.39.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.39.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.39.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.39.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.39.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-semconv-1.25.0-alpha.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.111.Final.jar.sha1 create mode 100644 
plugins/transport-nio/licenses/netty-codec-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-common-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-common-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-common-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-handler-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.20.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.22.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.20.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.22.jar.sha1 
create mode 100644 plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4BadRequestIT.java create mode 100644 plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4HeadBodyIsEmptyIT.java create mode 100644 plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingIT.java create mode 100644 plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingStressIT.java rename plugins/{query-insights => workload-management}/build.gradle (61%) create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPluginModule.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupAction.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequest.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponse.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupAction.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupRequest.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupResponse.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportGetQueryGroupAction.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/package-info.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/package-info.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestCreateQueryGroupAction.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupAction.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestGetQueryGroupAction.java rename plugins/{query-insights/src/main/java/org/opensearch/plugin/insights/core/service => workload-management/src/main/java/org/opensearch/plugin/wlm/rest}/package-info.java (68%) create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/package-info.java create mode 100644 plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/QueryGroupTestUtils.java create mode 100644 plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequestTests.java create mode 100644 
plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java create mode 100644 plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequestTests.java create mode 100644 plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetQueryGroupRequestTests.java create mode 100644 plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetQueryGroupResponseTests.java create mode 100644 plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java create mode 100644 plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportGetQueryGroupActionTests.java create mode 100644 plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java create mode 100644 plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java create mode 100644 plugins/workload-management/src/yamlRestTest/java/org/opensearch/plugin/wlm/WorkloadManagementClientYamlTestSuiteIT.java create mode 100644 plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/create_query_group_context.json create mode 100644 plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_query_group_context.json create mode 100644 plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/get_query_group_context.json create mode 100644 plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_query_group.yml create mode 100644 release-notes/opensearch.release-notes-1.3.18.md create mode 100644 release-notes/opensearch.release-notes-2.16.0.md create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/index/110_strict_allow_templates.yml create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/index/115_constant_keyword.yml create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/index/120_field_name.yml create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_index_template/10_basic.yml create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search/370_bitmap_filtering.yml create mode 100644 server/licenses/RoaringBitmap-1.1.0.jar.sha1 create mode 100644 server/licenses/RoaringBitmap-LICENSE.txt create mode 100644 server/licenses/RoaringBitmap-NOTICE.txt delete mode 100644 server/licenses/jackson-core-2.17.1.jar.sha1 create mode 100644 server/licenses/jackson-core-2.17.2.jar.sha1 delete mode 100644 server/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 create mode 100644 server/licenses/jackson-dataformat-cbor-2.17.2.jar.sha1 delete mode 100644 server/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 create mode 100644 server/licenses/jackson-dataformat-smile-2.17.2.jar.sha1 delete mode 100644 server/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 create mode 100644 server/licenses/jackson-dataformat-yaml-2.17.2.jar.sha1 create mode 100644 server/licenses/lucene-analysis-common-9.12.0-snapshot-847316d.jar.sha1 delete mode 100644 server/licenses/lucene-analysis-common-9.12.0-snapshot-c896995.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-9.12.0-snapshot-847316d.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-9.12.0-snapshot-c896995.jar.sha1 create mode 100644 server/licenses/lucene-core-9.12.0-snapshot-847316d.jar.sha1 delete mode 100644 
server/licenses/lucene-core-9.12.0-snapshot-c896995.jar.sha1 create mode 100644 server/licenses/lucene-grouping-9.12.0-snapshot-847316d.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-9.12.0-snapshot-c896995.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-9.12.0-snapshot-847316d.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-9.12.0-snapshot-c896995.jar.sha1 create mode 100644 server/licenses/lucene-join-9.12.0-snapshot-847316d.jar.sha1 delete mode 100644 server/licenses/lucene-join-9.12.0-snapshot-c896995.jar.sha1 create mode 100644 server/licenses/lucene-memory-9.12.0-snapshot-847316d.jar.sha1 delete mode 100644 server/licenses/lucene-memory-9.12.0-snapshot-c896995.jar.sha1 create mode 100644 server/licenses/lucene-misc-9.12.0-snapshot-847316d.jar.sha1 delete mode 100644 server/licenses/lucene-misc-9.12.0-snapshot-c896995.jar.sha1 create mode 100644 server/licenses/lucene-queries-9.12.0-snapshot-847316d.jar.sha1 delete mode 100644 server/licenses/lucene-queries-9.12.0-snapshot-c896995.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-9.12.0-snapshot-847316d.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-9.12.0-snapshot-c896995.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-9.12.0-snapshot-847316d.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-9.12.0-snapshot-c896995.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-9.12.0-snapshot-847316d.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-9.12.0-snapshot-c896995.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-9.12.0-snapshot-847316d.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-9.12.0-snapshot-c896995.jar.sha1 create mode 100644 server/licenses/lucene-suggest-9.12.0-snapshot-847316d.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-9.12.0-snapshot-c896995.jar.sha1 delete mode 100644 server/licenses/reactor-core-3.5.18.jar.sha1 create mode 100644 server/licenses/reactor-core-3.5.20.jar.sha1 create mode 100644 server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteRoutingTableServiceIT.java create mode 100644 server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java create mode 100644 server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java create mode 100644 server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java create mode 100644 server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesConsumerWrapper.java create mode 100644 server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesProducerWrapper.java create mode 100644 server/src/main/java/org/apache/lucene/index/SortedNumericDocValuesWriterWrapper.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/tiering/HotToWarmTieringAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/tiering/HotToWarmTieringResponse.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/tiering/RestWarmTieringAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringIndexRequest.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringValidationResult.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/tiering/TransportHotToWarmTieringAction.java create mode 100644 
server/src/main/java/org/opensearch/action/admin/indices/tiering/package-info.java delete mode 100644 server/src/main/java/org/opensearch/action/search/SearchQueryAggregationCategorizer.java delete mode 100644 server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java delete mode 100644 server/src/main/java/org/opensearch/action/search/SearchQueryCategorizingVisitor.java delete mode 100644 server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java create mode 100644 server/src/main/java/org/opensearch/action/search/SearchTaskRequestOperationsListener.java create mode 100644 server/src/main/java/org/opensearch/cluster/applicationtemplates/ClusterStateSystemTemplateLoader.java create mode 100644 server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplate.java create mode 100644 server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplateLoader.java create mode 100644 server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplateMetadata.java create mode 100644 server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplateRepository.java create mode 100644 server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplatesPlugin.java create mode 100644 server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplatesService.java create mode 100644 server/src/main/java/org/opensearch/cluster/applicationtemplates/TemplateRepositoryMetadata.java create mode 100644 server/src/main/java/org/opensearch/cluster/applicationtemplates/package-info.java create mode 100644 server/src/main/java/org/opensearch/cluster/metadata/Context.java create mode 100644 server/src/main/java/org/opensearch/cluster/metadata/QueryGroup.java create mode 100644 server/src/main/java/org/opensearch/cluster/metadata/QueryGroupMetadata.java create mode 100644 server/src/main/java/org/opensearch/cluster/routing/RoutingTableIncrementalDiff.java create mode 100644 server/src/main/java/org/opensearch/cluster/routing/StringKeyDiffProvider.java create mode 100644 server/src/main/java/org/opensearch/common/remote/AbstractClusterMetadataWriteableBlobEntity.java create mode 100644 server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableEntityManager.java create mode 100644 server/src/main/java/org/opensearch/common/remote/RemoteWritableEntityManager.java rename server/src/main/java/org/opensearch/common/remote/{AbstractRemoteWritableBlobEntity.java => RemoteWriteableBlobEntity.java} (66%) rename server/src/main/java/org/opensearch/{gateway/remote/model/RemoteClusterStateBlobStore.java => common/remote/RemoteWriteableEntityBlobStore.java} (76%) create mode 100644 server/src/main/java/org/opensearch/common/util/BatchRunnableExecutor.java create mode 100644 server/src/main/java/org/opensearch/common/util/ByteArrayBackedBitset.java create mode 100644 server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextAccess.java rename plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/package-info.java => server/src/main/java/org/opensearch/common/util/concurrent/TimeoutAwareRunnable.java (52%) create mode 100644 server/src/main/java/org/opensearch/gateway/remote/model/RemotePinnedTimestamps.java create mode 100644 server/src/main/java/org/opensearch/gateway/remote/model/RemoteRoutingTableBlobStore.java create mode 100644 server/src/main/java/org/opensearch/gateway/remote/model/RemoteStorePinnedTimestampsBlobStore.java delete mode 100644 
server/src/main/java/org/opensearch/gateway/remote/routingtable/IndexRoutingTableHeader.java create mode 100644 server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteRoutingTableDiff.java create mode 100644 server/src/main/java/org/opensearch/index/codec/composite/CompositeCodecFactory.java create mode 100644 server/src/main/java/org/opensearch/index/codec/composite/CompositeIndexFieldInfo.java create mode 100644 server/src/main/java/org/opensearch/index/codec/composite/CompositeIndexReader.java create mode 100644 server/src/main/java/org/opensearch/index/codec/composite/CompositeIndexValues.java create mode 100644 server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactory.java create mode 100644 server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactory.java create mode 100644 server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99Codec.java create mode 100644 server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesFormat.java create mode 100644 server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesReader.java create mode 100644 server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesWriter.java create mode 100644 server/src/main/java/org/opensearch/index/codec/composite/composite99/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeValues.java create mode 100644 server/src/main/java/org/opensearch/index/codec/composite/datacube/startree/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/codec/composite/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/CompositeIndexSettings.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/CompositeIndexValidator.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/DateDimension.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/Dimension.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionFactory.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/Metric.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/MetricStat.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/NumericDimension.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeDocument.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeField.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeFieldConfiguration.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeIndexSettings.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeValidator.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregator.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/MaxValueAggregator.java create mode 100644 
server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/MetricAggregatorInfo.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/MinValueAggregator.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/StatelessDoubleValueAggregator.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/SumValueAggregator.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregator.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactory.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/numerictype/StarTreeNumericType.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/numerictype/StarTreeNumericTypeConverters.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/numerictype/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractDocumentsFileManager.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OffHeapStarTreeBuilder.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilder.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/SegmentDocsFileManager.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilder.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocsFileManager.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilder.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/node/StarTreeNode.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/node/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIterator.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentBitSetUtil.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentsSorter.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/TreeNode.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/package-info.java create 
mode 100644 server/src/main/java/org/opensearch/index/mapper/CompositeDataCubeFieldType.java create mode 100644 server/src/main/java/org/opensearch/index/mapper/CompositeMappedFieldType.java create mode 100644 server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java delete mode 100644 server/src/main/java/org/opensearch/index/query/QueryShapeVisitor.java create mode 100644 server/src/main/java/org/opensearch/indices/SystemIndexRegistry.java create mode 100644 server/src/main/java/org/opensearch/indices/tiering/TieringRequestValidator.java create mode 100644 server/src/main/java/org/opensearch/indices/tiering/package-info.java create mode 100644 server/src/main/java/org/opensearch/ingest/AbstractBatchingProcessor.java create mode 100644 server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java create mode 100644 server/src/main/java/org/opensearch/plugins/TaskManagerClientPlugin.java create mode 100644 server/src/main/java/org/opensearch/plugins/TaskWorkerPlugin.java delete mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/FastFilterRewriteHelper.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/AggregatorBridge.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/CompositeAggregatorBridge.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/DateHistogramAggregatorBridge.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/FilterRewriteOptimizationContext.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/Helper.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/PointTreeTraversal.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/RangeAggregatorBridge.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/Ranges.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/query/BitmapDocValuesQuery.java create mode 100644 server/src/main/java/org/opensearch/wlm/QueryGroupLevelResourceUsageView.java create mode 100644 server/src/main/java/org/opensearch/wlm/QueryGroupTask.java create mode 100644 server/src/main/java/org/opensearch/wlm/QueryGroupThreadContextStatePropagator.java create mode 100644 server/src/main/java/org/opensearch/wlm/WorkloadManagementSettings.java create mode 100644 server/src/main/java/org/opensearch/wlm/WorkloadManagementTransportInterceptor.java create mode 100644 server/src/main/java/org/opensearch/wlm/package-info.java create mode 100644 server/src/main/java/org/opensearch/wlm/tracker/QueryGroupResourceUsageTrackerService.java create mode 100644 server/src/main/java/org/opensearch/wlm/tracker/package-info.java create mode 100644 server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec create mode 100644 server/src/test/java/org/opensearch/action/RestStatsActionTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsActionTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/tiering/HotToWarmTieringResponseTests.java create mode 100644 
server/src/test/java/org/opensearch/action/admin/indices/tiering/TieringIndexRequestTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/tiering/TransportHotToWarmTieringActionTests.java delete mode 100644 server/src/test/java/org/opensearch/action/search/SearchQueryCategorizerTests.java create mode 100644 server/src/test/java/org/opensearch/action/support/nodes/TransportClusterStatsActionTests.java create mode 100644 server/src/test/java/org/opensearch/action/support/nodes/TransportNodesInfoActionTests.java create mode 100644 server/src/test/java/org/opensearch/action/support/nodes/TransportNodesStatsActionTests.java create mode 100644 server/src/test/java/org/opensearch/cluster/applicationtemplates/ClusterStateSystemTemplateLoaderTests.java create mode 100644 server/src/test/java/org/opensearch/cluster/applicationtemplates/SystemTemplatesServiceTests.java create mode 100644 server/src/test/java/org/opensearch/cluster/coordination/PersistedStateStatsTests.java create mode 100644 server/src/test/java/org/opensearch/cluster/metadata/QueryGroupMetadataTests.java create mode 100644 server/src/test/java/org/opensearch/cluster/metadata/QueryGroupTests.java create mode 100644 server/src/test/java/org/opensearch/cluster/routing/RoutingTableDiffTests.java create mode 100644 server/src/test/java/org/opensearch/cluster/routing/allocation/DecideAllocateUnassignedTests.java create mode 100644 server/src/test/java/org/opensearch/cluster/routing/allocation/ExistingShardsAllocatorTests.java create mode 100644 server/src/test/java/org/opensearch/common/remote/AbstractRemoteWritableEntityManagerTests.java create mode 100644 server/src/test/java/org/opensearch/common/util/BatchRunnableExecutorTests.java create mode 100644 server/src/test/java/org/opensearch/common/util/ByteArrayBackedBitsetTests.java create mode 100644 server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateTestUtils.java create mode 100644 server/src/test/java/org/opensearch/gateway/remote/RemoteIndexMetadataManagerTests.java create mode 100644 server/src/test/java/org/opensearch/gateway/remote/model/RemotePinnedTimestampsTests.java create mode 100644 server/src/test/java/org/opensearch/gateway/remote/model/RemoteRoutingTableBlobStoreTests.java delete mode 100644 server/src/test/java/org/opensearch/gateway/remote/routingtable/IndexRoutingTableHeaderTests.java create mode 100644 server/src/test/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTableDiffTests.java create mode 100644 server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactoryTests.java create mode 100644 server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactoryTests.java create mode 100644 server/src/test/java/org/opensearch/index/codec/composite/SortedNumericDocValuesWriterWrapperTests.java create mode 100644 server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java create mode 100644 server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/AbstractValueAggregatorTests.java create mode 100644 server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregatorTests.java create mode 100644 server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/MaxValueAggregatorTests.java create mode 100644 
server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/MetricAggregatorInfoTests.java create mode 100644 server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/MinValueAggregatorTests.java create mode 100644 server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/StaticValueAggregatorTests.java create mode 100644 server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/SumValueAggregatorTests.java create mode 100644 server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactoryTests.java create mode 100644 server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractStarTreeBuilderTests.java create mode 100644 server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilderTests.java create mode 100644 server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/OffHeapStarTreeBuilderTests.java create mode 100644 server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilderTests.java create mode 100644 server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilderTests.java create mode 100644 server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIteratorTests.java create mode 100644 server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentBitSetUtilTests.java create mode 100644 server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentsSorterTests.java create mode 100644 server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java delete mode 100644 server/src/test/java/org/opensearch/index/query/QueryShapeVisitorTests.java create mode 100644 server/src/test/java/org/opensearch/indices/tiering/TieringRequestValidatorTests.java create mode 100644 server/src/test/java/org/opensearch/ingest/AbstractBatchingProcessorTests.java create mode 100644 server/src/test/java/org/opensearch/search/ResourceTypeTests.java create mode 100644 server/src/test/java/org/opensearch/search/aggregations/bucket/terms/DerivedFieldAggregationTests.java rename server/src/test/java/org/opensearch/search/aggregations/{support => bucket/terms}/IncludeExcludeTests.java (58%) create mode 100644 server/src/test/java/org/opensearch/search/query/BitmapDocValuesQueryTests.java create mode 100644 server/src/test/java/org/opensearch/wlm/QueryGroupLevelResourceUsageViewTests.java create mode 100644 server/src/test/java/org/opensearch/wlm/QueryGroupTaskTests.java create mode 100644 server/src/test/java/org/opensearch/wlm/QueryGroupThreadContextStatePropagatorTests.java create mode 100644 server/src/test/java/org/opensearch/wlm/WorkloadManagementSettingsTests.java create mode 100644 server/src/test/java/org/opensearch/wlm/WorkloadManagementTransportInterceptorTests.java create mode 100644 server/src/test/java/org/opensearch/wlm/WorkloadManagementTransportRequestHandlerTests.java create mode 100644 server/src/test/java/org/opensearch/wlm/tracker/QueryGroupResourceUsageTrackerServiceTests.java create mode 100644 test/framework/src/main/java/org/opensearch/cluster/service/applicationtemplates/TestSystemTemplatesRepositoryPlugin.java create mode 100644 test/framework/src/main/java/org/opensearch/test/TestClusterStateCustom.java diff --git a/.ci/bwcVersions 
index a738eb54e17f6..980e08c696d54 100644
--- a/.ci/bwcVersions
+++ b/.ci/bwcVersions
@@ -36,3 +36,5 @@ BWC_VERSION:
   - "2.15.0"
   - "2.15.1"
   - "2.16.0"
+  - "2.16.1"
+  - "2.17.0"
diff --git a/.ci/java-versions.properties b/.ci/java-versions.properties
index f73122ee21a6b..e290bda773f68 100644
--- a/.ci/java-versions.properties
+++ b/.ci/java-versions.properties
@@ -13,7 +13,8 @@
 # build and test OpenSearch for this branch. Valid Java versions
 # are 'java' or 'openjdk' followed by the major release number.
 
-OPENSEARCH_BUILD_JAVA=openjdk11
+# See please https://docs.gradle.org/8.10/userguide/upgrading_version_8.html#minimum_daemon_jvm_version
+OPENSEARCH_BUILD_JAVA=openjdk17
 OPENSEARCH_RUNTIME_JAVA=java11
 GRADLE_TASK=build
 GRADLE_EXTRA_ARGS=
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 8d69e98220b69..18a310862dfbb 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -11,17 +11,27 @@
 # 3. Use the command palette to run the CODEOWNERS: Show owners of current file command, which will display all code owners for the current file.
 
 # Default ownership for all repo files
-* @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+* @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jainankitk @kotwanikunal @linuxpi @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+/modules/lang-painless/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+/modules/parent-join/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
 
 /modules/transport-netty4/ @peternied
 
 /plugins/identity-shiro/ @peternied
 
+/server/src/internalClusterTest/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+/server/src/internalClusterTest/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+
 /server/src/main/java/org/opensearch/extensions/ @peternied
 /server/src/main/java/org/opensearch/identity/ @peternied
-/server/src/main/java/org/opensearch/threadpool/ @peternied
+/server/src/main/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+/server/src/main/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+/server/src/main/java/org/opensearch/threadpool/ @jed326 @peternied
 /server/src/main/java/org/opensearch/transport/ @peternied
 
-/.github/ @peternied
+/server/src/test/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+/server/src/test/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+
+/.github/ @jed326 @peternied
 
-/MAINTAINERS.md @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+/MAINTAINERS.md @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gaobinlong @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
diff --git a/.github/benchmark-configs.json b/.github/benchmark-configs.json
new file mode 100644
index 0000000000000..8f4bad040fe44
--- /dev/null
+++ b/.github/benchmark-configs.json
@@ -0,0 +1,171 @@
+{
+  "name": "Cluster and opensearch-benchmark configurations",
+  "id_1": {
+    "description": "Indexing only configuration for NYC_TAXIS workload",
+    "supported_major_versions": ["2", "3"],
+    "cluster-benchmark-configs": {
+      "SINGLE_NODE_CLUSTER": "true",
+      "MIN_DISTRIBUTION": "true",
+      "TEST_WORKLOAD": "nyc_taxis",
+      "WORKLOAD_PARAMS": "{\"number_of_replicas\":\"0\",\"number_of_shards\":\"1\"}",
+      "EXCLUDE_TASKS": "type:search",
+      "CAPTURE_NODE_STAT": "true"
+    },
+    "cluster_configuration": {
+      "size": "Single-Node",
+      "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
+    },
+    "baseline_cluster_config": "x64-r5.xlarge-single-node-1-shard-0-replica-baseline"
+  },
+  "id_2": {
+    "description": "Indexing only configuration for HTTP_LOGS workload",
+    "supported_major_versions": ["2", "3"],
+    "cluster-benchmark-configs": {
+      "SINGLE_NODE_CLUSTER": "true",
+      "MIN_DISTRIBUTION": "true",
+      "TEST_WORKLOAD": "http_logs",
+      "WORKLOAD_PARAMS": "{\"number_of_replicas\":\"0\",\"number_of_shards\":\"1\"}",
+      "EXCLUDE_TASKS": "type:search",
+      "CAPTURE_NODE_STAT": "true"
+    },
+    "cluster_configuration": {
+      "size": "Single-Node",
+      "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
+    },
+    "baseline_cluster_config": "x64-r5.xlarge-single-node-1-shard-0-replica-baseline"
+  },
+  "id_3": {
+    "description": "Search only test-procedure for NYC_TAXIS, uses snapshot to restore the data for OS-3.0.0",
+    "supported_major_versions": ["3"],
+    "cluster-benchmark-configs": {
+      "SINGLE_NODE_CLUSTER": "true",
+      "MIN_DISTRIBUTION": "true",
+      "TEST_WORKLOAD": "nyc_taxis",
+      "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"nyc_taxis_1_shard\"}",
+      "CAPTURE_NODE_STAT": "true",
+      "TEST_PROCEDURE": "restore-from-snapshot"
+    },
+    "cluster_configuration": {
+      "size": "Single-Node",
+      "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
+    },
+    "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline"
+  },
+  "id_4": {
+    "description": "Search only test-procedure for HTTP_LOGS, uses snapshot to restore the data for OS-3.0.0",
+    "supported_major_versions": ["3"],
+    "cluster-benchmark-configs": {
+      "SINGLE_NODE_CLUSTER": "true",
+      "MIN_DISTRIBUTION": "true",
+      "TEST_WORKLOAD": "http_logs",
+      "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"http_logs_1_shard\"}",
+      "CAPTURE_NODE_STAT": "true",
+      "TEST_PROCEDURE": "restore-from-snapshot"
+    },
+    "cluster_configuration": {
+      "size": "Single-Node",
+      "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
+    },
+    "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline"
+  },
+  "id_5": {
+    "description": "Search only test-procedure for big5, uses snapshot to restore the data for OS-3.0.0",
+    "supported_major_versions": ["3"],
+    "cluster-benchmark-configs": {
+      "SINGLE_NODE_CLUSTER": "true",
+      "MIN_DISTRIBUTION": "true",
+      "TEST_WORKLOAD": "big5",
+      "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"big5_1_shard\"}",
+      "CAPTURE_NODE_STAT": "true",
+      "TEST_PROCEDURE": "restore-from-snapshot"
+    },
+    "cluster_configuration": {
+      "size": "Single-Node",
+      "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
+    },
+    "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline"
+  },
+  "id_6": {
+    "description": "Search only test-procedure for NYC_TAXIS, uses snapshot to restore the data for OS-2.x",
+    "supported_major_versions": ["2"],
+    "cluster-benchmark-configs": {
+      "SINGLE_NODE_CLUSTER": "true",
+      "MIN_DISTRIBUTION": "true",
+      "TEST_WORKLOAD": "nyc_taxis",
+      "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots\",\"snapshot_name\":\"nyc_taxis_1_shard\"}",
+      "CAPTURE_NODE_STAT": "true",
+      "TEST_PROCEDURE": "restore-from-snapshot"
+    },
+    "cluster_configuration": {
+      "size": "Single-Node",
+      "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
+    },
+    "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline"
+  },
+  "id_7": {
+    "description": "Search only test-procedure for HTTP_LOGS, uses snapshot to restore the data for OS-2.x",
+    "supported_major_versions": ["2"],
+    "cluster-benchmark-configs": {
+      "SINGLE_NODE_CLUSTER": "true",
+      "MIN_DISTRIBUTION": "true",
+      "TEST_WORKLOAD": "http_logs",
+      "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots\",\"snapshot_name\":\"http_logs_1_shard\"}",
+      "CAPTURE_NODE_STAT": "true",
+      "TEST_PROCEDURE": "restore-from-snapshot"
+    },
+    "cluster_configuration": {
+      "size": "Single-Node",
+      "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
+    },
+    "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline"
+  },
+  "id_8": {
+    "description": "Search only test-procedure for big5, uses snapshot to restore the data for OS-2.x",
+    "supported_major_versions": ["2"],
+    "cluster-benchmark-configs": {
+      "SINGLE_NODE_CLUSTER": "true",
+      "MIN_DISTRIBUTION": "true",
+      "TEST_WORKLOAD": "big5",
"{\"snapshot_repo_name\":\"benchmark-workloads-repo\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots\",\"snapshot_name\":\"big5_1_shard\"}", + "CAPTURE_NODE_STAT": "true", + "TEST_PROCEDURE": "restore-from-snapshot" + }, + "cluster_configuration": { + "size": "Single-Node", + "data_instance_config": "4vCPU, 32G Mem, 16G Heap" + }, + "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" + }, + "id_9": { + "description": "Indexing and search configuration for pmc workload", + "supported_major_versions": ["2", "3"], + "cluster-benchmark-configs": { + "SINGLE_NODE_CLUSTER": "true", + "MIN_DISTRIBUTION": "true", + "TEST_WORKLOAD": "pmc", + "WORKLOAD_PARAMS": "{\"number_of_replicas\":\"0\",\"number_of_shards\":\"1\"}", + "CAPTURE_NODE_STAT": "true" + }, + "cluster_configuration": { + "size": "Single-Node", + "data_instance_config": "4vCPU, 32G Mem, 16G Heap" + }, + "baseline_cluster_config": "x64-r5.xlarge-single-node-1-shard-0-replica-baseline" + }, + "id_10": { + "description": "Indexing only configuration for stack-overflow workload", + "supported_major_versions": ["2", "3"], + "cluster-benchmark-configs": { + "SINGLE_NODE_CLUSTER": "true", + "MIN_DISTRIBUTION": "true", + "TEST_WORKLOAD": "so", + "WORKLOAD_PARAMS": "{\"number_of_replicas\":\"0\",\"number_of_shards\":\"1\"}", + "CAPTURE_NODE_STAT": "true" + }, + "cluster_configuration": { + "size": "Single-Node", + "data_instance_config": "4vCPU, 32G Mem, 16G Heap" + }, + "baseline_cluster_config": "x64-r5.xlarge-single-node-1-shard-0-replica-baseline" + } +} diff --git a/.github/workflows/add-performance-comment.yml b/.github/workflows/add-performance-comment.yml new file mode 100644 index 0000000000000..6a310bff4c0a1 --- /dev/null +++ b/.github/workflows/add-performance-comment.yml @@ -0,0 +1,28 @@ +name: Performance Label Action + +on: + pull_request_target: + types: [labeled] + +jobs: + add-comment: + if: | + github.event.label.name == 'Performance' || + github.event.label.name == 'Search:Performance' || + github.event.label.name == 'Indexing:Performance' + runs-on: ubuntu-latest + permissions: + pull-requests: write + + steps: + - name: Add comment to PR + uses: actions/github-script@v7 + with: + github-token: ${{secrets.GITHUB_TOKEN}} + script: | + github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: "Hello!\nWe have added a performance benchmark workflow that runs by adding a comment on the PR.\n Please refer https://github.com/opensearch-project/OpenSearch/blob/main/PERFORMANCE_BENCHMARKS.md on how to run benchmarks on pull requests." 
+ }) diff --git a/.github/workflows/assemble.yml b/.github/workflows/assemble.yml index 51ae075ffa2c9..200d9cf2d788b 100644 --- a/.github/workflows/assemble.yml +++ b/.github/workflows/assemble.yml @@ -16,6 +16,17 @@ jobs: with: java-version: ${{ matrix.java }} distribution: temurin + - name: Set up JDK 17 + # See please https://docs.gradle.org/8.10/userguide/upgrading_version_8.html#minimum_daemon_jvm_version + if: matrix.java == 11 + uses: actions/setup-java@v4 + with: + java-version: 17 + distribution: temurin + - name: Set JAVA${{ matrix.java }}_HOME + shell: bash + run: | + echo "JAVA${{ matrix.java }}_HOME=$JAVA_HOME_${{ matrix.java }}_${{ runner.arch }}" >> $GITHUB_ENV - name: Setup docker (missing on MacOS) id: setup_docker if: runner.os == 'macos' @@ -30,10 +41,11 @@ jobs: # Report success even if previous step failed (Docker on MacOS runner is very unstable) exit 0; - name: Run Gradle (assemble) + shell: bash if: runner.os != 'macos' run: | - ./gradlew assemble --parallel --no-build-cache -PDISABLE_BUILD_CACHE + ./gradlew assemble --parallel --no-build-cache -PDISABLE_BUILD_CACHE -Druntime.java=${{ matrix.java }} - name: Run Gradle (assemble) if: runner.os == 'macos' && steps.setup_docker.outcome == 'success' run: | - ./gradlew assemble --parallel --no-build-cache -PDISABLE_BUILD_CACHE + ./gradlew assemble --parallel --no-build-cache -PDISABLE_BUILD_CACHE -Druntime.java=${{ matrix.java }} diff --git a/.github/workflows/benchmark-pull-request.yml b/.github/workflows/benchmark-pull-request.yml new file mode 100644 index 0000000000000..c494df6e27ce3 --- /dev/null +++ b/.github/workflows/benchmark-pull-request.yml @@ -0,0 +1,179 @@ +name: Run performance benchmark on pull request +on: + issue_comment: + types: [created] +jobs: + run-performance-benchmark-on-pull-request: + if: ${{ (github.event.issue.pull_request) && (contains(github.event.comment.body, '"run-benchmark-test"')) }} + runs-on: ubuntu-latest + permissions: + id-token: write + contents: read + issues: write + pull-requests: write + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + - name: Set up required env vars + run: | + echo "PR_NUMBER=${{ github.event.issue.number }}" >> $GITHUB_ENV + echo "REPOSITORY=${{ github.event.repository.full_name }}" >> $GITHUB_ENV + OPENSEARCH_VERSION=$(awk -F '=' '/^opensearch[[:space:]]*=/ {gsub(/[[:space:]]/, "", $2); print $2}' buildSrc/version.properties) + echo "OPENSEARCH_VERSION=$OPENSEARCH_VERSION" >> $GITHUB_ENV + major_version=$(echo $OPENSEARCH_VERSION | cut -d'.' 
-f1) + echo "OPENSEARCH_MAJOR_VERSION=$major_version" >> $GITHUB_ENV + echo "USER_TAGS=pull_request_number:${{ github.event.issue.number }},repository:OpenSearch" >> $GITHUB_ENV + - name: Check comment format + id: check_comment + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + const comment = context.payload.comment.body; + let commentJson; + try { + commentJson = JSON.parse(comment); + } catch (error) { + core.setOutput('invalid', 'true'); + return; + } + if (!commentJson.hasOwnProperty('run-benchmark-test')) { + core.setOutput('invalid', 'true'); + return; + } + const configId = commentJson['run-benchmark-test']; + let benchmarkConfigs; + try { + benchmarkConfigs = JSON.parse(fs.readFileSync('.github/benchmark-configs.json', 'utf8')); + } catch (error) { + core.setFailed('Failed to read benchmark-configs.json'); + return; + } + const openSearchMajorVersion = process.env.OPENSEARCH_MAJOR_VERSION; + console.log('MAJOR_VERSION', openSearchMajorVersion) + if (!benchmarkConfigs.hasOwnProperty(configId) || + !benchmarkConfigs[configId].supported_major_versions.includes(openSearchMajorVersion)) { + core.setOutput('invalid', 'true'); + return; + } + const clusterBenchmarkConfigs = benchmarkConfigs[configId]['cluster-benchmark-configs']; + for (const [key, value] of Object.entries(clusterBenchmarkConfigs)) { + core.exportVariable(key, value); + } + if (benchmarkConfigs[configId].hasOwnProperty('baseline_cluster_config')) { + core.exportVariable('BASELINE_CLUSTER_CONFIG', benchmarkConfigs[configId]['baseline_cluster_config']); + } + - name: Post invalid format comment + if: steps.check_comment.outputs.invalid == 'true' + uses: actions/github-script@v7 + with: + github-token: ${{secrets.GITHUB_TOKEN}} + script: | + github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: 'Invalid comment format or config id. Please refer to https://github.com/opensearch-project/OpenSearch/blob/main/PERFORMANCE_BENCHMARKS.md on how to run benchmarks on pull requests.' + }) + - name: Fail workflow for invalid comment + if: steps.check_comment.outputs.invalid == 'true' + run: | + echo "Invalid comment format detected. Failing the workflow." 
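+          # Exit non-zero so this job fails and the approval, build, and benchmark steps below are skipped.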
+          exit 1
+      - name: Get PR Details
+        id: get_pr
+        uses: actions/github-script@v7
+        with:
+          script: |
+            const issue = context.payload.issue;
+            const prNumber = issue.number;
+            console.log(`Pull Request Number: ${prNumber}`);
+
+            const { data: pull_request } = await github.rest.pulls.get({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              pull_number: prNumber,
+            });
+
+            return {
+              "headRepoFullName": pull_request.head.repo.full_name,
+              "headRefSha": pull_request.head.sha
+            };
+      - name: Set pr details env vars
+        run: |
+          echo '${{ steps.get_pr.outputs.result }}' | jq -r '.headRepoFullName'
+          echo '${{ steps.get_pr.outputs.result }}' | jq -r '.headRefSha'
+          headRepo=$(echo '${{ steps.get_pr.outputs.result }}' | jq -r '.headRepoFullName')
+          headRefSha=$(echo '${{ steps.get_pr.outputs.result }}' | jq -r '.headRefSha')
+          echo "prHeadRepo=$headRepo" >> $GITHUB_ENV
+          echo "prHeadRefSha=$headRefSha" >> $GITHUB_ENV
+      - id: get_approvers
+        uses: actions/github-script@v7
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          result-encoding: string
+          script: |
+            // Get the collaborators - filtered to maintainer permissions
+            const maintainersResponse = await github.request('GET /repos/{owner}/{repo}/collaborators', {
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              permission: 'maintain',
+              affiliation: 'all',
+              per_page: 100
+            });
+            return maintainersResponse.data.map(item => item.login).join(', ');
+      - uses: trstringer/manual-approval@v1
+        if: (!contains(steps.get_approvers.outputs.result, github.event.comment.user.login))
+        with:
+          secret: ${{ github.TOKEN }}
+          approvers: ${{ steps.get_approvers.outputs.result }}
+          minimum-approvals: 1
+          issue-title: 'Request to approve/deny benchmark run for PR #${{ env.PR_NUMBER }}'
+          issue-body: "Please approve or deny the benchmark run for PR #${{ env.PR_NUMBER }}"
+          exclude-workflow-initiator-as-approver: false
+      - name: Checkout PR Repo
+        uses: actions/checkout@v4
+        with:
+          repository: ${{ env.prHeadRepo }}
+          ref: ${{ env.prHeadRefSha }}
+          token: ${{ secrets.GITHUB_TOKEN }}
+      - name: Setup Java
+        uses: actions/setup-java@v4
+        with:
+          java-version: 21
+          distribution: 'temurin'
+      - name: Build and Assemble OpenSearch from PR
+        run: |
+          ./gradlew :distribution:archives:linux-tar:assemble -Dbuild.snapshot=false
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v4
+        with:
+          role-to-assume: ${{ secrets.UPLOAD_ARCHIVE_ARTIFACT_ROLE }}
+          role-session-name: publish-to-s3
+          aws-region: us-west-2
+      - name: Push to S3
+        run: |
+          aws s3 cp distribution/archives/linux-tar/build/distributions/opensearch-min-$OPENSEARCH_VERSION-linux-x64.tar.gz s3://${{ secrets.ARCHIVE_ARTIFACT_BUCKET_NAME }}/PR-$PR_NUMBER/
+          echo "DISTRIBUTION_URL=${{ secrets.ARTIFACT_BUCKET_CLOUDFRONT_URL }}/PR-$PR_NUMBER/opensearch-min-$OPENSEARCH_VERSION-linux-x64.tar.gz" >> $GITHUB_ENV
+      - name: Checkout opensearch-build repo
+        uses: actions/checkout@v4
+        with:
+          repository: opensearch-project/opensearch-build
+          ref: main
+          path: opensearch-build
+      - name: Trigger jenkins workflow to run benchmark
+        run: |
+          cat $GITHUB_ENV
+          bash opensearch-build/scripts/benchmark/benchmark-pull-request.sh ${{ secrets.JENKINS_PR_BENCHMARK_GENERIC_WEBHOOK_TOKEN }}
+      - name: Update PR with Job URL
+        uses: actions/github-script@v7
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          script: |
+            // WORKFLOW_URL is expected to be exported into the environment by the preceding Jenkins trigger step.
+            const workflowUrl = process.env.WORKFLOW_URL;
+            github.rest.issues.createComment({
+              issue_number: context.issue.number,
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+
body: `The Jenkins job url is ${workflowUrl} . Final results will be published once the job is completed.` + }) diff --git a/.github/workflows/changelog_verifier.yml b/.github/workflows/changelog_verifier.yml index cf9343c2c3aac..cd0415119282c 100644 --- a/.github/workflows/changelog_verifier.yml +++ b/.github/workflows/changelog_verifier.yml @@ -27,18 +27,29 @@ jobs: continue-on-error: true - run: | # The check was possibly skipped leading to success for both the jobs + has_backport_label=${{ contains(join(github.event.pull_request.labels.*.name, ', '), 'backport')}} + has_breaking_label=${{ contains(join(github.event.pull_request.labels.*.name, ', '), '>breaking')}} + if [[ $has_breaking_label == true && $has_backport_label == true ]]; then + echo "error: Please make sure that the PR does not have a backport label associated with it when making breaking changes" + exit 1 + fi + if [[ ${{ steps.verify-changelog-3x.outcome }} == 'success' && ${{ steps.verify-changelog.outcome }} == 'success' ]]; then exit 0 fi - + if [[ ${{ steps.verify-changelog-3x.outcome }} == 'failure' && ${{ steps.verify-changelog.outcome }} == 'failure' ]]; then echo "error: Please ensure a changelog entry exists in CHANGELOG.md or CHANGELOG-3.0.md" exit 1 fi - + # Concatenates the labels and checks if the string contains "backport" - has_backport_label=${{ contains(join(github.event.pull_request.labels.*.name, ', '), 'backport')}} if [[ ${{ steps.verify-changelog.outcome }} == 'success' && $has_backport_label == false ]]; then echo "error: Please make sure that the PR has a backport label associated with it when making an entry to the CHANGELOG.md file" exit 1 fi + + if [[ ${{ steps.verify-changelog-3x.outcome }} == 'success' && $has_backport_label == true ]]; then + echo "error: Please make sure that the PR does not have a backport label associated with it when making an entry to the CHANGELOG-3.0.md file" + exit 1 + fi diff --git a/.github/workflows/dependabot_pr.yml b/.github/workflows/dependabot_pr.yml index cfd3ac81c5bd3..25abd99cadb96 100644 --- a/.github/workflows/dependabot_pr.yml +++ b/.github/workflows/dependabot_pr.yml @@ -22,6 +22,13 @@ jobs: with: token: ${{ steps.github_app_token.outputs.token }} + # See please https://docs.gradle.org/8.10/userguide/upgrading_version_8.html#minimum_daemon_jvm_version + - name: Set up JDK 17 + uses: actions/setup-java@v4 + with: + java-version: 17 + distribution: temurin + - name: Update Gradle SHAs run: | ./gradlew updateSHAs diff --git a/.github/workflows/maintainer-approval.yml b/.github/workflows/maintainer-approval.yml index fdc2bf16937b4..34e8f57cc1878 100644 --- a/.github/workflows/maintainer-approval.yml +++ b/.github/workflows/maintainer-approval.yml @@ -9,7 +9,7 @@ jobs: runs-on: ubuntu-latest steps: - id: find-maintainers - uses: actions/github-script@v7.0.1 + uses: actions/github-script@v7 with: github-token: ${{ secrets.GITHUB_TOKEN }} result-encoding: string diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml index 95ca49ac9cb43..7c65df1f677a5 100644 --- a/.github/workflows/precommit.yml +++ b/.github/workflows/precommit.yml @@ -8,7 +8,7 @@ jobs: strategy: matrix: java: [ 11, 17, 21 ] - os: [ubuntu-latest, windows-latest, macos-13] + os: [ubuntu-latest, windows-latest, macos-latest, macos-13] steps: - uses: actions/checkout@v4 - name: Set up JDK ${{ matrix.java }} @@ -17,6 +17,18 @@ jobs: java-version: ${{ matrix.java }} distribution: temurin cache: gradle + - name: Set up JDK 17 + # See please 
https://docs.gradle.org/8.10/userguide/upgrading_version_8.html#minimum_daemon_jvm_version + if: matrix.java == 11 + uses: actions/setup-java@v4 + with: + java-version: 17 + distribution: temurin + - name: Set JAVA${{ matrix.java }}_HOME + shell: bash + run: | + echo "JAVA${{ matrix.java }}_HOME=$JAVA_HOME_${{ matrix.java }}_${{ runner.arch }}" >> $GITHUB_ENV - name: Run Gradle (precommit) + shell: bash run: | - ./gradlew javadoc precommit --parallel + ./gradlew javadoc precommit --parallel -Druntime.java=${{ matrix.java }} diff --git a/.github/workflows/triage.yml b/.github/workflows/triage.yml index 83bf4926a8c2d..c305818bdb0a9 100644 --- a/.github/workflows/triage.yml +++ b/.github/workflows/triage.yml @@ -9,7 +9,7 @@ jobs: if: github.repository == 'opensearch-project/OpenSearch' runs-on: ubuntu-latest steps: - - uses: actions/github-script@v7.0.1 + - uses: actions/github-script@v7 with: script: | const { issue, repository } = context.payload; diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml index 7f120b65d7c2e..2de54716256ff 100644 --- a/.github/workflows/version.yml +++ b/.github/workflows/version.yml @@ -129,7 +129,7 @@ jobs: - name: Create tracking issue id: create-issue - uses: actions/github-script@v7.0.1 + uses: actions/github-script@v7 with: script: | const body = ` diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 06b761b1df8bd..48d978bede420 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -43,6 +43,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Remove LegacyESVersion.V_7_10_ Constants ([#5018](https://github.com/opensearch-project/OpenSearch/pull/5018)) - Remove Version.V_1_ Constants ([#5021](https://github.com/opensearch-project/OpenSearch/pull/5021)) - Remove custom Map, List and Set collection classes ([#6871](https://github.com/opensearch-project/OpenSearch/pull/6871)) +- Remove `index.store.hybrid.mmap.extensions` setting in favor of `index.store.hybrid.nio.extensions` setting ([#9392](https://github.com/opensearch-project/OpenSearch/pull/9392)) ### Fixed - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3f8fff1db214a..deea2778dedd2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,49 +5,70 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added -- Add fingerprint ingest processor ([#13724](https://github.com/opensearch-project/OpenSearch/pull/13724)) -- [Remote Store] Rate limiter for remote store low priority uploads ([#14374](https://github.com/opensearch-project/OpenSearch/pull/14374/)) -- Apply the date histogram rewrite optimization to range aggregation ([#13865](https://github.com/opensearch-project/OpenSearch/pull/13865)) -- [Writable Warm] Add composite directory implementation and integrate it with FileCache ([12782](https://github.com/opensearch-project/OpenSearch/pull/12782)) -- Fix race condition while parsing derived fields from search definition ([14445](https://github.com/opensearch-project/OpenSearch/pull/14445)) -- Add allowlist setting for ingest-common processors ([#14439](https://github.com/opensearch-project/OpenSearch/issues/14439)) +- [Offline Nodes] Adds offline-tasks library containing various interfaces to be used for Offline Background Tasks. 
([#13574](https://github.com/opensearch-project/OpenSearch/pull/13574))
+- Fix for hasInitiatedFetching to fix allocation explain and manual reroute APIs ([#14972](https://github.com/opensearch-project/OpenSearch/pull/14972))
+- [Workload Management] Add queryGroupId to Task ([#14708](https://github.com/opensearch-project/OpenSearch/pull/14708))
+- Add setting to ignore throttling nodes for allocation of unassigned primaries in remote restore ([#14991](https://github.com/opensearch-project/OpenSearch/pull/14991))
+- [Workload Management] Add Delete QueryGroup API Logic ([#14735](https://github.com/opensearch-project/OpenSearch/pull/14735))
+- [Streaming Indexing] Enhance RestClient with a new streaming API support ([#14437](https://github.com/opensearch-project/OpenSearch/pull/14437))
+- Add basic aggregation support for derived fields ([#14618](https://github.com/opensearch-project/OpenSearch/pull/14618))
+- [Workload Management] Add Create QueryGroup API Logic ([#14680](https://github.com/opensearch-project/OpenSearch/pull/14680))
+- Add ThreadContextPermission for markAsSystemContext and allow core to perform the method ([#15016](https://github.com/opensearch-project/OpenSearch/pull/15016))
+- Add ThreadContextPermission for stashAndMergeHeaders and stashWithOrigin ([#15039](https://github.com/opensearch-project/OpenSearch/pull/15039))
+- [Concurrent Segment Search] Support composite aggregations with scripting ([#15072](https://github.com/opensearch-project/OpenSearch/pull/15072))
+- Add `rangeQuery` and `regexpQuery` for `constant_keyword` field type ([#14711](https://github.com/opensearch-project/OpenSearch/pull/14711))
+- Add took time to request nodes stats ([#15054](https://github.com/opensearch-project/OpenSearch/pull/15054))
+- [Workload Management] Add Get QueryGroup API Logic ([#14709](https://github.com/opensearch-project/OpenSearch/pull/14709))
+- [Workload Management] Add Settings for Workload Management feature ([#15028](https://github.com/opensearch-project/OpenSearch/pull/15028))
+- [Workload Management] QueryGroup resource tracking framework changes ([#13897](https://github.com/opensearch-project/OpenSearch/pull/13897))
+- Support filtering on a large list encoded by bitmap ([#14774](https://github.com/opensearch-project/OpenSearch/pull/14774))
+- Add slice execution listeners to SearchOperationListener interface ([#15153](https://github.com/opensearch-project/OpenSearch/pull/15153))
+- Add allowlist setting for ingest-geoip and ingest-useragent ([#15325](https://github.com/opensearch-project/OpenSearch/pull/15325))
+- Adding access to noSubMatches and noOverlappingMatches in Hyphenation ([#13895](https://github.com/opensearch-project/OpenSearch/pull/13895))
+- Add support for index level max slice count setting for concurrent segment search ([#15336](https://github.com/opensearch-project/OpenSearch/pull/15336))
 
 ### Dependencies
-- Bump `org.gradle.test-retry` from 1.5.8 to 1.5.9 ([#13442](https://github.com/opensearch-project/OpenSearch/pull/13442))
-- Update to Apache Lucene 9.11.0 ([#14042](https://github.com/opensearch-project/OpenSearch/pull/14042))
-- Bump `netty` from 4.1.110.Final to 4.1.111.Final ([#14356](https://github.com/opensearch-project/OpenSearch/pull/14356))
-- Bump `org.wiremock:wiremock-standalone` from 3.3.1 to 3.6.0 ([#14361](https://github.com/opensearch-project/OpenSearch/pull/14361))
-- Bump `reactor` from 3.5.17 to
3.5.18 ([#14395](https://github.com/opensearch-project/OpenSearch/pull/14395)) -- Bump `reactor-netty` from 1.1.19 to 1.1.20 ([#14395](https://github.com/opensearch-project/OpenSearch/pull/14395)) -- Bump `commons-net:commons-net` from 3.10.0 to 3.11.1 ([#14396](https://github.com/opensearch-project/OpenSearch/pull/14396)) -- Bump `com.nimbusds:nimbus-jose-jwt` from 9.37.3 to 9.40 ([#14398](https://github.com/opensearch-project/OpenSearch/pull/14398)) -- Bump `org.apache.commons:commons-configuration2` from 2.10.1 to 2.11.0 ([#14399](https://github.com/opensearch-project/OpenSearch/pull/14399)) -- Bump `com.gradle.develocity` from 3.17.4 to 3.17.5 ([#14397](https://github.com/opensearch-project/OpenSearch/pull/14397)) -- Bump `opentelemetry` from 1.36.0 to 1.39.0 ([#14457](https://github.com/opensearch-project/OpenSearch/pull/14457)) -- Bump `azure-identity` from 1.11.4 to 1.13.0, Bump `msal4j` from 1.14.3 to 1.15.1, Bump `msal4j-persistence-extension` from 1.2.0 to 1.3.0 ([#14506](https://github.com/opensearch-project/OpenSearch/pull/14506)) +- Bump `netty` from 4.1.111.Final to 4.1.112.Final ([#15081](https://github.com/opensearch-project/OpenSearch/pull/15081)) +- Bump `org.apache.commons:commons-lang3` from 3.14.0 to 3.16.0 ([#14861](https://github.com/opensearch-project/OpenSearch/pull/14861), [#15205](https://github.com/opensearch-project/OpenSearch/pull/15205)) +- OpenJDK Update (July 2024 Patch releases) ([#14998](https://github.com/opensearch-project/OpenSearch/pull/14998)) +- Bump `com.microsoft.azure:msal4j` from 1.16.1 to 1.16.2 ([#14995](https://github.com/opensearch-project/OpenSearch/pull/14995)) +- Bump `actions/github-script` from 6 to 7 ([#14997](https://github.com/opensearch-project/OpenSearch/pull/14997)) +- Bump `org.tukaani:xz` from 1.9 to 1.10 ([#15110](https://github.com/opensearch-project/OpenSearch/pull/15110)) +- Bump `actions/setup-java` from 1 to 4 ([#15104](https://github.com/opensearch-project/OpenSearch/pull/15104)) +- Bump `org.apache.avro:avro` from 1.11.3 to 1.12.0 in /plugins/repository-hdfs ([#15119](https://github.com/opensearch-project/OpenSearch/pull/15119)) +- Bump `org.bouncycastle:bcpg-fips` from 1.0.7.1 to 2.0.9 ([#15103](https://github.com/opensearch-project/OpenSearch/pull/15103), [#15299](https://github.com/opensearch-project/OpenSearch/pull/15299)) +- Bump `com.azure:azure-core` from 1.49.1 to 1.51.0 ([#15111](https://github.com/opensearch-project/OpenSearch/pull/15111)) +- Bump `org.xerial.snappy:snappy-java` from 1.1.10.5 to 1.1.10.6 ([#15207](https://github.com/opensearch-project/OpenSearch/pull/15207)) +- Bump `com.azure:azure-xml` from 1.0.0 to 1.1.0 ([#15206](https://github.com/opensearch-project/OpenSearch/pull/15206)) +- Bump `reactor` from 3.5.19 to 3.5.20 ([#15262](https://github.com/opensearch-project/OpenSearch/pull/15262)) +- Bump `reactor-netty` from 1.1.21 to 1.1.22 ([#15262](https://github.com/opensearch-project/OpenSearch/pull/15262)) +- Bump `org.apache.kerby:kerb-admin` from 2.0.3 to 2.1.0 ([#15301](https://github.com/opensearch-project/OpenSearch/pull/15301)) +- Bump `com.azure:azure-core-http-netty` from 1.15.1 to 1.15.3 ([#15300](https://github.com/opensearch-project/OpenSearch/pull/15300)) +- Bump `com.gradle.develocity` from 3.17.6 to 3.18 ([#15297](https://github.com/opensearch-project/OpenSearch/pull/15297)) +- Bump `commons-cli:commons-cli` from 1.8.0 to 1.9.0 ([#15298](https://github.com/opensearch-project/OpenSearch/pull/15298)) +- Bump `opentelemetry` from 1.40.0 to 1.41.0 
([#15361](https://github.com/opensearch-project/OpenSearch/pull/15361)) +- Bump `opentelemetry-semconv` from 1.26.0-alpha to 1.27.0-alpha ([#15361](https://github.com/opensearch-project/OpenSearch/pull/15361)) ### Changed -- [Tiered Caching] Move query recomputation logic outside write lock ([#14187](https://github.com/opensearch-project/OpenSearch/pull/14187)) -- unsignedLongRangeQuery now returns MatchNoDocsQuery if the lower bounds are greater than the upper bounds ([#14416](https://github.com/opensearch-project/OpenSearch/pull/14416)) -- Updated the `indices.query.bool.max_clause_count` setting from being static to dynamically updateable ([#13568](https://github.com/opensearch-project/OpenSearch/pull/13568)) -- Make the class CommunityIdProcessor final ([#14448](https://github.com/opensearch-project/OpenSearch/pull/14448)) +- Add lower limit for primary and replica batch allocators timeout ([#14979](https://github.com/opensearch-project/OpenSearch/pull/14979)) +- Optimize regexp-based include/exclude on aggregations when pattern matches prefixes ([#14371](https://github.com/opensearch-project/OpenSearch/pull/14371)) +- Replace and block usages of org.apache.logging.log4j.util.Strings ([#15238](https://github.com/opensearch-project/OpenSearch/pull/15238)) ### Deprecated ### Removed ### Fixed -- Fix bug in SBP cancellation logic ([#13259](https://github.com/opensearch-project/OpenSearch/pull/13474)) -- Fix handling of Short and Byte data types in ScriptProcessor ingest pipeline ([#14379](https://github.com/opensearch-project/OpenSearch/issues/14379)) -- Switch to iterative version of WKT format parser ([#14086](https://github.com/opensearch-project/OpenSearch/pull/14086)) -- Fix the computed max shards of cluster to avoid int overflow ([#14155](https://github.com/opensearch-project/OpenSearch/pull/14155)) -- Fixed rest-high-level client searchTemplate & mtermVectors endpoints to have a leading slash ([#14465](https://github.com/opensearch-project/OpenSearch/pull/14465)) -- Write shard level metadata blob when snapshotting searchable snapshot indexes ([#13190](https://github.com/opensearch-project/OpenSearch/pull/13190)) -- Fix aggs result of NestedAggregator with sub NestedAggregator ([#13324](https://github.com/opensearch-project/OpenSearch/pull/13324)) -- Fix fs info reporting negative available size ([#11573](https://github.com/opensearch-project/OpenSearch/pull/11573)) -- Add ListPitInfo::getKeepAlive() getter ([#14495](https://github.com/opensearch-project/OpenSearch/pull/14495)) -- Fix FuzzyQuery in keyword field will use IndexOrDocValuesQuery when both of index and doc_value are true ([#14378](https://github.com/opensearch-project/OpenSearch/pull/14378)) -- Fix file cache initialization ([#14004](https://github.com/opensearch-project/OpenSearch/pull/14004)) +- Fix constraint bug which allows more primary shards than average primary shards per index ([#14908](https://github.com/opensearch-project/OpenSearch/pull/14908)) +- Fix NPE when bulk ingest with empty pipeline ([#15033](https://github.com/opensearch-project/OpenSearch/pull/15033)) +- Fix missing value of FieldSort for unsigned_long ([#14963](https://github.com/opensearch-project/OpenSearch/pull/14963)) +- Fix delete index template failed when the index template matches a data stream but is unused ([#15080](https://github.com/opensearch-project/OpenSearch/pull/15080)) +- Fix array_index_out_of_bounds_exception when indexing documents with field name containing only dot 
([#15126](https://github.com/opensearch-project/OpenSearch/pull/15126))
+- Fixed array field name omission in flat_object function for nested JSON ([#13620](https://github.com/opensearch-project/OpenSearch/pull/13620))
+- Fix range aggregation optimization ignoring top level queries ([#15194](https://github.com/opensearch-project/OpenSearch/pull/15194))
+- Fix incorrect parameter names in MinHash token filter configuration handling ([#15233](https://github.com/opensearch-project/OpenSearch/pull/15233))
+- Fix split response processor not included in allowlist ([#15393](https://github.com/opensearch-project/OpenSearch/pull/15393))
+- Fix unchecked cast in dynamic action map getter ([#15394](https://github.com/opensearch-project/OpenSearch/pull/15394))
 
 ### Security
 
diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index 3298ceb15463c..4a8aa9305df74 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -5,21 +5,24 @@ This document contains a list of maintainers in this repo. See [opensearch-proje
 ## Current Maintainers
 
 | Maintainer               | GitHub ID                                               | Affiliation |
-| ------------------------ | ------------------------------------------------------- | ----------- |
+|--------------------------|---------------------------------------------------------|-------------|
 | Anas Alkouz              | [anasalkouz](https://github.com/anasalkouz)             | Amazon      |
 | Andrew Ross              | [andrross](https://github.com/andrross)                 | Amazon      |
 | Andriy Redko             | [reta](https://github.com/reta)                         | Aiven       |
+| Ankit Jain               | [jainankitk](https://github.com/jainankitk)             | Amazon      |
 | Ashish Singh             | [ashking94](https://github.com/ashking94)               | Amazon      |
 | Bukhtawar Khan           | [Bukhtawar](https://github.com/Bukhtawar)               | Amazon      |
 | Charlotte Henkle         | [CEHENKLE](https://github.com/CEHENKLE)                 | Amazon      |
 | Dan Widdis               | [dbwiddis](https://github.com/dbwiddis)                 | Amazon      |
 | Daniel "dB." Doubrovkine | [dblock](https://github.com/dblock)                     | Amazon      |
+| Gao Binlong              | [gaobinlong](https://github.com/gaobinlong)             | Amazon      |
 | Gaurav Bafna             | [gbbafna](https://github.com/gbbafna)                   | Amazon      |
 | Jay Deng                 | [jed326](https://github.com/jed326)                     | Amazon      |
 | Kunal Kotwani            | [kotwanikunal](https://github.com/kotwanikunal)         | Amazon      |
+| Varun Bansal             | [linuxpi](https://github.com/linuxpi)                   | Amazon      |
 | Marc Handalian           | [mch2](https://github.com/mch2)                         | Amazon      |
 | Michael Froh             | [msfroh](https://github.com/msfroh)                     | Amazon      |
-| Nick Knize               | [nknize](https://github.com/nknize)                     | Amazon      |
+| Nick Knize               | [nknize](https://github.com/nknize)                     | Lucenia     |
 | Owais Kazi               | [owaiskazi19](https://github.com/owaiskazi19)           | Amazon      |
 | Peter Nied               | [peternied](https://github.com/peternied)               | Amazon      |
 | Rishikesh Pasham         | [Rishikesh1159](https://github.com/Rishikesh1159)       | Amazon      |
diff --git a/PERFORMANCE_BENCHMARKS.md b/PERFORMANCE_BENCHMARKS.md
new file mode 100644
index 0000000000000..252c4ae312136
--- /dev/null
+++ b/PERFORMANCE_BENCHMARKS.md
@@ -0,0 +1,112 @@
+# README: Running Performance Benchmarks on Pull Requests
+
+## Overview
+
+The `benchmark-pull-request` GitHub Actions workflow is designed to automatically run performance benchmarks on a pull request when a specific comment is made on the pull request. This ensures that performance benchmarks are consistently and accurately applied to code changes, helping maintain the performance standards of the repository.
+
+## Workflow Trigger
+
+The workflow is triggered when a new comment is created on a pull request. Specifically, it checks for the presence of the `"run-benchmark-test"` keyword in the comment body. If this keyword is detected, the workflow proceeds to run the performance benchmarks.
+
+## Key Steps in the Workflow
+
+1. **Check Comment Format and Configuration:**
+   - Validates the format of the comment to ensure it contains the required `"run-benchmark-test"` keyword and is valid JSON.
+   - Extracts the benchmark configuration ID from the comment and verifies that it exists in the `.github/benchmark-configs.json` file.
+   - Checks if the extracted configuration ID is supported for the current OpenSearch major version.
+
+2. **Post Invalid Format Comment:**
+   - If the comment format is invalid or the configuration ID is not supported, a comment is posted on the pull request indicating the problem, and the workflow fails.
+
+3. **Manual Approval (if necessary):**
+   - Fetches the list of approvers — the repository collaborators with maintainer permissions.
+   - If the commenter is not one of the maintainers, a manual approval request is created. The workflow pauses until an approver approves or denies the benchmark run by commenting the appropriate word on the issue.
+   - The approval request issue is closed automatically once the approver has added the appropriate comment.
+
+4. **Build and Assemble OpenSearch:**
+   - Builds and assembles (x64-linux tar) the OpenSearch distribution from the pull request code changes.
+
+5. **Upload to S3:**
+   - Configures AWS credentials and uploads the assembled OpenSearch distribution to an S3 bucket for further use in benchmarking.
+   - The S3 bucket is fronted by CloudFront so that it only allows downloads.
+   - The lifecycle policy on the S3 bucket deletes the uploaded artifacts after 30 days.
+
+6. **Trigger Jenkins Workflow:**
+   - Triggers a Jenkins workflow to run the benchmark tests using a webhook token.
+
+7. **Update Pull Request with Job URL:**
+   - Posts a comment on the pull request with the URL of the Jenkins job. The final benchmark results will be posted once the job completes.
+   - To learn how the benchmark job works, see https://github.com/opensearch-project/opensearch-build/tree/main/src/test_workflow#benchmarking-tests
+
+## How to Use This Workflow
+
+1. **Ensure `benchmark-configs.json` is Up-to-Date:**
+   - The `.github/benchmark-configs.json` file should contain valid benchmark configurations with supported major versions and cluster-benchmark configurations.
+
+2. **Add the Workflow to Your Repository:**
+   - Save the workflow YAML file (named `benchmark-pull-request.yml` in this repository) in the `.github/workflows` directory of your repository.
+
+3. **Make a Comment to Trigger the Workflow:**
+   - On any pull request issue, make a comment containing the keyword `"run-benchmark-test"` along with the configuration ID. For example:
+   ```json
+   {"run-benchmark-test": "id_1"}
+   ```
+
+4. **Monitor Workflow Progress:**
+   - The workflow will validate the comment, check for approval (if necessary), build the OpenSearch distribution, and trigger the Jenkins job.
+   - A comment will be posted on the pull request with the URL of the Jenkins job. You can monitor the progress and final results there as well.
+
+## Example Comment Format
+
+To run the benchmark with configuration ID `id_1`, post the following comment on the pull request issue:
+```json
+{"run-benchmark-test": "id_1"}
+```
+
+## How to add a new benchmark configuration
+
+The `benchmark-configs.json` file accepts the following schema:
+```json
+{
+  "id_": {
+    "description": "Short description of the configuration",
+    "supported_major_versions": ["2", "3"],
+    "cluster-benchmark-configs": {
+      "SINGLE_NODE_CLUSTER": "Use single node cluster for benchmarking, accepted values are \"true\" or \"false\"",
+      "MIN_DISTRIBUTION": "Use OpenSearch min distribution, should always be \"true\"",
+      "MANAGER_NODE_COUNT": "For multi-node cluster tests, number of cluster manager nodes, empty value defaults to 3.",
+      "DATA_NODE_COUNT": "For multi-node cluster tests, number of data nodes, empty value defaults to 2.",
+      "DATA_INSTANCE_TYPE": "EC2 instance type for data node, empty defaults to r5.xlarge.",
+      "DATA_NODE_STORAGE": "Data node ebs block storage size, empty value defaults to 100Gb",
+      "JVM_SYS_PROPS": "A comma-separated list of key=value pairs that will be added to jvm.options as JVM system properties",
+      "ADDITIONAL_CONFIG": "Additional space delimited opensearch.yml config parameters. e.g., `search.concurrent_segment_search.enabled:true`",
+      "TEST_WORKLOAD": "The workload name from OpenSearch Benchmark Workloads. https://github.com/opensearch-project/opensearch-benchmark-workloads. Default is nyc_taxis",
+      "WORKLOAD_PARAMS": "With this parameter you can inject variables into workloads, e.g. {\"number_of_replicas\":\"0\",\"number_of_shards\":\"3\"}. See https://opensearch.org/docs/latest/benchmark/reference/commands/command-flags/#workload-params",
+      "EXCLUDE_TASKS": "Defines a comma-separated list of test procedure tasks not to run. e.g. type:search, see https://opensearch.org/docs/latest/benchmark/reference/commands/command-flags/#exclude-tasks",
+      "INCLUDE_TASKS": "Defines a comma-separated list of test procedure tasks to run. By default, all tasks listed in a test procedure array are run. See https://opensearch.org/docs/latest/benchmark/reference/commands/command-flags/#include-tasks",
+      "TEST_PROCEDURE": "Defines a test procedure to use. e.g., `append-no-conflicts,significant-text`. Uses default if none provided. See https://opensearch.org/docs/latest/benchmark/reference/commands/command-flags/#test-procedure",
+      "CAPTURE_NODE_STAT": "Enable opensearch-benchmark node-stats telemetry to capture system level metrics like cpu, jvm etc., see https://opensearch.org/docs/latest/benchmark/reference/telemetry/#node-stats"
+    },
+    "cluster_configuration": {
+      "size": "Single-Node/Multi-Node",
+      "data_instance_config": "data-instance-config, e.g., 4vCPU, 32G Mem, 16G Heap"
+    }
+  }
+}
+```
+To add a new test configuration that is suitable to your changes, please create a new PR to add the desired cluster and benchmark configurations.
+
+## How to compare my results against the baseline?
+
+Apart from just running benchmarks, users will also be interested in how their change performs against the current OpenSearch distribution with exactly the same cluster and benchmark configurations.
+Users can refer to https://s12d.com/basline-dashboards (WIP) to access baseline data for their workload; this data is generated by our nightly benchmark runs on the latest build distribution artifacts for 3.0 and 2.x.
+In the future, we will add the [compare](https://opensearch.org/docs/latest/benchmark/reference/commands/compare/) feature of opensearch-benchmark to run the comparison and publish the data on the PR as well.
+
+## Notes
+
+- Ensure all required secrets (e.g., `GITHUB_TOKEN`, `UPLOAD_ARCHIVE_ARTIFACT_ROLE`, `ARCHIVE_ARTIFACT_BUCKET_NAME`, `JENKINS_PR_BENCHMARK_GENERIC_WEBHOOK_TOKEN`) are properly set in the repository secrets.
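+
+  As an illustrative sketch (the repository slug and token value are placeholders, not part of this patch), a secret can be set with the GitHub CLI:
+
+  ```bash
+  # Hypothetical example: store the Jenkins webhook token consumed by the benchmark workflow
+  gh secret set JENKINS_PR_BENCHMARK_GENERIC_WEBHOOK_TOKEN --repo <org>/<repo> --body "<token-value>"
+  ```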
+- The `CODEOWNERS` file should list the GitHub usernames of approvers for the benchmark process. + +By following these instructions, repository maintainers can ensure consistent and automated performance benchmarking for all code changes introduced via pull requests. + + diff --git a/README.md b/README.md index 17af2911b9221..5d4a9a671c013 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ [![Security Vulnerabilities](https://img.shields.io/github/issues/opensearch-project/OpenSearch/security%20vulnerability?labelColor=red)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"security%20vulnerability") [![Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch)](https://github.com/opensearch-project/OpenSearch/issues) [![Open Pull Requests](https://img.shields.io/github/issues-pr/opensearch-project/OpenSearch)](https://github.com/opensearch-project/OpenSearch/pulls) -[![2.14.1 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v2.14.1)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v2.14.1") +[![2.17.0 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v2.17.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v2.17.0") [![3.0.0 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v3.0.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v3.0.0") [![GHA gradle check](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml) [![GHA validate pull request](https://github.com/opensearch-project/OpenSearch/actions/workflows/wrapper.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/wrapper.yml) diff --git a/TRIAGING.md b/TRIAGING.md index c7c07a8ce30bd..dddcbc15394ab 100644 --- a/TRIAGING.md +++ b/TRIAGING.md @@ -1,6 +1,6 @@ -The maintainers of the OpenSearch Repo seek to promote an inclusive and engaged community of contributors. In order to facilitate this, weekly triage meetings are open-to-all and attendance is encouraged for anyone who hopes to contribute, discuss an issue, or learn more about the project. There are several weekly triage meetings scoped to the following component areas: Search, Storage, Cluster Manager, and finally "Core" as a catch-all for all other issues. To learn more about contributing to the OpenSearch Repo visit the [Contributing](./CONTRIBUTING.md) documentation. +The maintainers of the OpenSearch Repo seek to promote an inclusive and engaged community of contributors. In order to facilitate this, weekly triage meetings are open-to-all and attendance is encouraged for anyone who hopes to contribute, discuss an issue, or learn more about the project. There are several weekly triage meetings scoped to the following component areas: Search, Storage, and Cluster Manager. To learn more about contributing to the OpenSearch Repo visit the [Contributing](./CONTRIBUTING.md) documentation. ### Do I need to attend for my issue to be addressed/triaged? @@ -14,7 +14,7 @@ Each meeting we seek to address all new issues. However, should we run out of ti ### How do I join a Triage meeting? - Check the [OpenSearch Meetup Group](https://www.meetup.com/opensearch/) for the latest schedule and details for joining each meeting. 
Each component area has its own meetup series: [Search](https://www.meetup.com/opensearch/events/300929493/), [Storage](https://www.meetup.com/opensearch/events/299907409/), [Cluster Manager](https://www.meetup.com/opensearch/events/301082218/), [Indexing](https://www.meetup.com/opensearch/events/301734024/), and [Core](https://www.meetup.com/opensearch/events/301061009/). + Check the [OpenSearch Meetup Group](https://www.meetup.com/opensearch/) for the latest schedule and details for joining each meeting. Each component area has its own meetup series: [Search](https://www.meetup.com/opensearch/events/300929493/), [Storage](https://www.meetup.com/opensearch/events/299907409/), [Cluster Manager](https://www.meetup.com/opensearch/events/301082218/), and [Indexing](https://www.meetup.com/opensearch/events/301734024/). After joining the virtual meeting, you can enable your video / voice to join the discussion. If you do not have a webcam or microphone available, you can still join in via the text chat. @@ -31,11 +31,11 @@ Meeting structure may vary slightly, but the general structure is as follows: - [Search](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3A%22Search%22%2C%22Search%3ARemote+Search%22%2C%22Search%3AResiliency%22%2C%22Search%3APerformance%22%2C%22Search%3ARelevance%22%2C%22Search%3AAggregations%22%2C%22Search%3AQuery+Capabilities%22%2C%22Search%3AQuery+Insights%22%2C%22Search%3ASearchable+Snapshots%22%2C%22Search%3AUser+Behavior+Insights%22) - [Indexing](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3A%22Indexing%3AReplication%22%2C%22Indexing%22%2C%22Indexing%3APerformance%22%2C%22Indexing+%26+Search%22%2C) - [Storage](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3AStorage%2C%22Storage%3AResiliency%22%2C%22Storage%3APerformance%22%2C%22Storage%3ASnapshots%22%2C%22Storage%3ARemote%22%2C%22Storage%3ADurability%22) - - [Cluster Manager](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3A%22Cluster+Manager%22%2C%22ClusterManager%3ARemoteState%22) - - [Core](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+-label%3A%22Search%22%2C%22Search%3ARemote+Search%22%2C%22Search%3AResiliency%22%2C%22Search%3APerformance%22%2C%22Search%3ARelevance%22%2C%22Search%3AAggregations%22%2C%22Search%3AQuery+Capabilities%22%2C%22Search%3AQuery+Insights%22%2C%22Search%3ASearchable+Snapshots%22%2C%22Search%3AUser+Behavior+Insights%22%2C%22Storage%22%2C%22Storage%3AResiliency%22%2C%22Storage%3APerformance%22%2C%22Storage%3ASnapshots%22%2C%22Storage%3ARemote%22%2C%22Storage%3ADurability%22%2C%22Cluster+Manager%22%2C%22ClusterManager%3ARemoteState%22%2C%22Indexing%3AReplication%22%2C%22Indexing%22%2C%22Indexing%3APerformance%22%2C%22Indexing+%26+Search%22) + - [Cluster Manager](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3A%22Cluster+Manager%22%2C%22ClusterManager%3ARemoteState%22%2C%22ShardManagement%3AResiliency%22%2C%22ShardManagement%3AInsights%22%2C%22ShardManagement%3ASizing%22%2C%22ShardManagement%3APerformance%22%2C%22ShardManagement%3APlacement%22%2C%22ShardManagement%3ARouting%22) + - 
[Core](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+-label%3A%22Search%22%2C%22Search%3ARemote+Search%22%2C%22Search%3AResiliency%22%2C%22Search%3APerformance%22%2C%22Search%3ARelevance%22%2C%22Search%3AAggregations%22%2C%22Search%3AQuery+Capabilities%22%2C%22Search%3AQuery+Insights%22%2C%22Search%3ASearchable+Snapshots%22%2C%22Search%3AUser+Behavior+Insights%22%2C%22Storage%22%2C%22Storage%3AResiliency%22%2C%22Storage%3APerformance%22%2C%22Storage%3ASnapshots%22%2C%22Storage%3ARemote%22%2C%22Storage%3ADurability%22%2C%22Cluster+Manager%22%2C%22ClusterManager%3ARemoteState%22%2C%22ShardManagement%3AResiliency%22%2C%22ShardManagement%3AInsights%22%2C%22ShardManagement%3ASizing%22%2C%22ShardManagement%3APerformance%22%2C%22ShardManagement%3APlacement%22%2C%22ShardManagement%3ARouting%22%2C%22Indexing%3AReplication%22%2C%22Indexing%22%2C%22Indexing%3APerformance%22%2C%22Indexing+%26+Search%22) 5. **Attendee Requests:** An opportunity for any meeting member to request consideration of an issue or pull request. 6. **Open Discussion:** Attendees can bring up any topics not already covered by filed issues or pull requests. -7. **Review of Old Untriaged Issues:** Time permitting, each meeting will look at all [untriaged issues older than 14 days](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+created%3A%3C2024-05-20) to prevent issues from falling through the cracks (note the GitHub API does not allow for relative times, so the date in this search must be updated every meeting). +7. **Review of Old Untriaged Issues:** Look at all [untriaged issues older than 14 days](https://peternied.github.io/redirect/issue_search.html?owner=opensearch-project&repo=OpenSearch&tag=untriaged&created-since-days=14) to prevent issues from falling through the cracks. ### What is the role of the facilitator? diff --git a/benchmarks/src/main/java/org/opensearch/benchmark/routing/allocation/RerouteBenchmark.java b/benchmarks/src/main/java/org/opensearch/benchmark/routing/allocation/RerouteBenchmark.java new file mode 100644 index 0000000000000..e54bca579423b --- /dev/null +++ b/benchmarks/src/main/java/org/opensearch/benchmark/routing/allocation/RerouteBenchmark.java @@ -0,0 +1,135 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.benchmark.routing.allocation; + +import org.opensearch.Version; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.allocation.AllocationService; +import org.opensearch.common.logging.LogConfigurator; +import org.opensearch.common.settings.Settings; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.cluster.routing.ShardRoutingState.INITIALIZING; + +@Fork(1) +@Warmup(iterations = 3) +@Measurement(iterations = 3) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +@State(Scope.Benchmark) +@SuppressWarnings("unused") // invoked by benchmarking framework +public class RerouteBenchmark { + @Param({ + // indices| nodes + " 10000| 500|", }) + public String indicesNodes = "1|1"; + public int numIndices; + public int numNodes; + public int numShards = 10; + public int numReplicas = 1; + + private AllocationService allocationService; + private ClusterState initialClusterState; + + @Setup + public void setUp() throws Exception { + LogConfigurator.setNodeName("test"); + final String[] params = indicesNodes.split("\\|"); + numIndices = toInt(params[0]); + numNodes = toInt(params[1]); + + int totalShardCount = (numReplicas + 1) * numShards * numIndices; + Metadata.Builder mb = Metadata.builder(); + for (int i = 1; i <= numIndices; i++) { + mb.put( + IndexMetadata.builder("test_" + i) + .settings(Settings.builder().put("index.version.created", Version.CURRENT)) + .numberOfShards(numShards) + .numberOfReplicas(numReplicas) + ); + } + + Metadata metadata = mb.build(); + RoutingTable.Builder rb = RoutingTable.builder(); + for (int i = 1; i <= numIndices; i++) { + rb.addAsNew(metadata.index("test_" + i)); + } + RoutingTable routingTable = rb.build(); + initialClusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .nodes(setUpClusterNodes(numNodes)) + .build(); + } + + @Benchmark + public ClusterState measureShardAllocationEmptyCluster() throws Exception { + ClusterState clusterState = initialClusterState; + allocationService = Allocators.createAllocationService( + Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.load_awareness.provisioned_capacity", numNodes) + .put("cluster.routing.allocation.load_awareness.skew_factor", "50") + .put("cluster.routing.allocation.node_concurrent_recoveries", "2") + .build() + ); + clusterState = allocationService.reroute(clusterState, "reroute"); + while (clusterState.getRoutingNodes().hasUnassignedShards()) { + clusterState = 
startInitializingShardsAndReroute(allocationService, clusterState); + } + return clusterState; + } + + private int toInt(String v) { + return Integer.valueOf(v.trim()); + } + + private DiscoveryNodes.Builder setUpClusterNodes(int nodes) { + DiscoveryNodes.Builder nb = DiscoveryNodes.builder(); + for (int i = 1; i <= nodes; i++) { + Map attributes = new HashMap<>(); + attributes.put("zone", "zone_" + (i % 3)); + nb.add(Allocators.newNode("node_0_" + i, attributes)); + } + return nb; + } + + private static ClusterState startInitializingShardsAndReroute(AllocationService allocationService, ClusterState clusterState) { + return startShardsAndReroute(allocationService, clusterState, clusterState.routingTable().shardsWithState(INITIALIZING)); + } + + private static ClusterState startShardsAndReroute( + AllocationService allocationService, + ClusterState clusterState, + List initializingShards + ) { + return allocationService.reroute(allocationService.applyStartedShards(clusterState, initializingShards), "reroute after starting"); + } +} diff --git a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java index 6892af1b17f97..0502280cb69ad 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java @@ -158,7 +158,17 @@ private static List resolveArchiveProjects(File checkoutDir projects.addAll(asList("deb", "rpm")); if (bwcVersion.onOrAfter("7.0.0")) { // starting with 7.0 we bundle a jdk which means we have platform-specific archives - projects.addAll(asList("darwin-tar", "linux-tar", "windows-zip")); + projects.addAll( + asList( + "darwin-tar", + "darwin-arm64-tar", + "linux-tar", + "linux-arm64-tar", + "linux-ppc64le-tar", + "linux-s390x-tar", + "windows-zip" + ) + ); } else { // prior to 7.0 we published only a single zip and tar archives projects.addAll(asList("zip", "tar")); } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java index b2b3e3003e572..8d5ce9143cbac 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java @@ -77,9 +77,9 @@ import java.util.stream.Stream; public class DistroTestPlugin implements Plugin { - private static final String SYSTEM_JDK_VERSION = "21.0.3+9"; + private static final String SYSTEM_JDK_VERSION = "21.0.4+7"; private static final String SYSTEM_JDK_VENDOR = "adoptium"; - private static final String GRADLE_JDK_VERSION = "21.0.3+9"; + private static final String GRADLE_JDK_VERSION = "21.0.4+7"; private static final String GRADLE_JDK_VENDOR = "adoptium"; // all distributions used by distro tests. 
diff --git a/buildSrc/src/main/resources/forbidden/opensearch-all-signatures.txt b/buildSrc/src/main/resources/forbidden/opensearch-all-signatures.txt index
f9f24fd1e2367..199e206450178 100644 --- a/buildSrc/src/main/resources/forbidden/opensearch-all-signatures.txt +++ b/buildSrc/src/main/resources/forbidden/opensearch-all-signatures.txt @@ -17,6 +17,9 @@ java.nio.file.Paths @ Use org.opensearch.common.io.PathUtils.get() instead. java.nio.file.FileSystems#getDefault() @ use org.opensearch.common.io.PathUtils.getDefaultFileSystem() instead. +joptsimple.internal.Strings @ use org.opensearch.core.common.Strings instead. +org.apache.logging.log4j.util.Strings @ use org.opensearch.core.common.Strings instead. + java.nio.file.Files#getFileStore(java.nio.file.Path) @ Use org.opensearch.env.Environment.getFileStore() instead, impacted by JDK-8034057 java.nio.file.Files#isWritable(java.nio.file.Path) @ Use org.opensearch.env.Environment.isWritable() instead, impacted by JDK-8034057 diff --git a/buildSrc/src/main/resources/minimumCompilerVersion b/buildSrc/src/main/resources/minimumCompilerVersion index 8351c19397f4f..98d9bcb75a685 100644 --- a/buildSrc/src/main/resources/minimumCompilerVersion +++ b/buildSrc/src/main/resources/minimumCompilerVersion @@ -1 +1 @@ -14 +17 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index e9aa32ea9a4f5..ccec8e2891a65 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,14 +1,14 @@ opensearch = 3.0.0 -lucene = 9.12.0-snapshot-c896995 +lucene = 9.12.0-snapshot-847316d bundled_jdk_vendor = adoptium -bundled_jdk = 21.0.3+9 +bundled_jdk = 21.0.4+7 # optional dependencies spatial4j = 0.7 jts = 1.15.0 -jackson = 2.17.1 -jackson_databind = 2.17.1 +jackson = 2.17.2 +jackson_databind = 2.17.2 snakeyaml = 2.1 icu4j = 70.1 supercsv = 2.4.0 @@ -29,16 +29,16 @@ hdrhistogram = 2.2.2 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.13.0 -netty = 4.1.111.Final +netty = 4.1.112.Final joda = 2.12.7 # project reactor -reactor_netty = 1.1.20 -reactor = 3.5.18 +reactor_netty = 1.1.22 +reactor = 3.5.20 # client dependencies -httpclient5 = 5.2.1 -httpcore5 = 5.2.2 +httpclient5 = 5.2.3 +httpcore5 = 5.2.5 httpclient = 4.5.14 httpcore = 4.4.16 httpasyncclient = 4.1.5 @@ -74,5 +74,5 @@ jzlib = 1.1.3 resteasy = 6.2.4.Final # opentelemetry dependencies -opentelemetry = 1.39.0 -opentelemetrysemconv = 1.25.0-alpha +opentelemetry = 1.41.0 +opentelemetrysemconv = 1.27.0-alpha diff --git a/client/rest/build.gradle b/client/rest/build.gradle index f18df65dfddfa..93faf0024b51e 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -47,10 +47,15 @@ dependencies { api "org.apache.httpcomponents.client5:httpclient5:${versions.httpclient5}" api "org.apache.httpcomponents.core5:httpcore5:${versions.httpcore5}" api "org.apache.httpcomponents.core5:httpcore5-h2:${versions.httpcore5}" + api "org.apache.httpcomponents.core5:httpcore5-reactive:${versions.httpcore5}" api "commons-codec:commons-codec:${versions.commonscodec}" api "commons-logging:commons-logging:${versions.commonslogging}" api "org.slf4j:slf4j-api:${versions.slf4j}" + // reactor + api "io.projectreactor:reactor-core:${versions.reactor}" + api "org.reactivestreams:reactive-streams:${versions.reactivestreams}" + testImplementation project(":client:test") testImplementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testImplementation "junit:junit:${versions.junit}" @@ -93,22 +98,52 @@ testingConventions { } } -thirdPartyAudit.ignoreMissingClasses( - 'org.conscrypt.Conscrypt', - 'org.slf4j.impl.StaticLoggerBinder', - 
'org.slf4j.impl.StaticMDCBinder', - 'org.slf4j.impl.StaticMarkerBinder', - //commons-logging optional dependencies - 'org.apache.avalon.framework.logger.Logger', - 'org.apache.log.Hierarchy', - 'org.apache.log.Logger', - 'org.apache.log4j.Level', - 'org.apache.log4j.Logger', - 'org.apache.log4j.Priority', - //commons-logging provided dependencies - 'javax.servlet.ServletContextEvent', - 'javax.servlet.ServletContextListener' -) +thirdPartyAudit { + ignoreMissingClasses( + 'org.conscrypt.Conscrypt', + 'org.slf4j.impl.StaticLoggerBinder', + 'org.slf4j.impl.StaticMDCBinder', + 'org.slf4j.impl.StaticMarkerBinder', + //commons-logging optional dependencies + 'org.apache.avalon.framework.logger.Logger', + 'org.apache.log.Hierarchy', + 'org.apache.log.Logger', + 'org.apache.log4j.Level', + 'org.apache.log4j.Logger', + 'org.apache.log4j.Priority', + //commons-logging provided dependencies + 'javax.servlet.ServletContextEvent', + 'javax.servlet.ServletContextListener', + 'io.micrometer.context.ContextAccessor', + 'io.micrometer.context.ContextRegistry', + 'io.micrometer.context.ContextSnapshot', + 'io.micrometer.context.ContextSnapshot$Scope', + 'io.micrometer.context.ContextSnapshotFactory', + 'io.micrometer.context.ContextSnapshotFactory$Builder', + 'io.micrometer.context.ThreadLocalAccessor', + 'io.micrometer.core.instrument.Clock', + 'io.micrometer.core.instrument.Counter', + 'io.micrometer.core.instrument.Counter$Builder', + 'io.micrometer.core.instrument.DistributionSummary', + 'io.micrometer.core.instrument.DistributionSummary$Builder', + 'io.micrometer.core.instrument.Meter', + 'io.micrometer.core.instrument.MeterRegistry', + 'io.micrometer.core.instrument.Metrics', + 'io.micrometer.core.instrument.Tag', + 'io.micrometer.core.instrument.Tags', + 'io.micrometer.core.instrument.Timer', + 'io.micrometer.core.instrument.Timer$Builder', + 'io.micrometer.core.instrument.Timer$Sample', + 'io.micrometer.core.instrument.binder.jvm.ExecutorServiceMetrics', + 'io.micrometer.core.instrument.composite.CompositeMeterRegistry', + 'io.micrometer.core.instrument.search.Search', + 'reactor.blockhound.BlockHound$Builder', + 'reactor.blockhound.integration.BlockHoundIntegration' + ) + ignoreViolations( + 'reactor.core.publisher.Traces$SharedSecretsCallSiteSupplierFactory$TracingException' + ) +} tasks.withType(JavaCompile) { // Suppressing '[options] target value 8 is obsolete and will be removed in a future release' diff --git a/client/rest/licenses/httpclient5-5.2.1.jar.sha1 b/client/rest/licenses/httpclient5-5.2.1.jar.sha1 deleted file mode 100644 index 3555fe22f8e12..0000000000000 --- a/client/rest/licenses/httpclient5-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0c900514d3446d9ce5d9dbd90c21192048125440 \ No newline at end of file diff --git a/client/rest/licenses/httpclient5-5.2.3.jar.sha1 b/client/rest/licenses/httpclient5-5.2.3.jar.sha1 new file mode 100644 index 0000000000000..43e233e72001a --- /dev/null +++ b/client/rest/licenses/httpclient5-5.2.3.jar.sha1 @@ -0,0 +1 @@ +5d753a99d299756998a08c488f2efdf9cf26198e \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-5.2.2.jar.sha1 b/client/rest/licenses/httpcore5-5.2.2.jar.sha1 deleted file mode 100644 index b641256c7d4a4..0000000000000 --- a/client/rest/licenses/httpcore5-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6da28f5aa6c2b129ef49632e041a5203ce7507b2 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-5.2.5.jar.sha1 b/client/rest/licenses/httpcore5-5.2.5.jar.sha1 new file mode 100644 index 
0000000000000..ca97e8612ea39 --- /dev/null +++ b/client/rest/licenses/httpcore5-5.2.5.jar.sha1 @@ -0,0 +1 @@ +dab1e18842971a45ca8942491ce005ab86a028d7 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-h2-5.2.2.jar.sha1 b/client/rest/licenses/httpcore5-h2-5.2.2.jar.sha1 deleted file mode 100644 index 94bc0fa49bdb0..0000000000000 --- a/client/rest/licenses/httpcore5-h2-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -54ee1ed58fe8ac40be1083ea9873a6c734939ab9 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-h2-5.2.5.jar.sha1 b/client/rest/licenses/httpcore5-h2-5.2.5.jar.sha1 new file mode 100644 index 0000000000000..bb40fe65854f6 --- /dev/null +++ b/client/rest/licenses/httpcore5-h2-5.2.5.jar.sha1 @@ -0,0 +1 @@ +09425df4d1365cee86a8e031a036bdca4343da4b \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-reactive-5.2.5.jar.sha1 b/client/rest/licenses/httpcore5-reactive-5.2.5.jar.sha1 new file mode 100644 index 0000000000000..ab9241fc93d45 --- /dev/null +++ b/client/rest/licenses/httpcore5-reactive-5.2.5.jar.sha1 @@ -0,0 +1 @@ +f68949965075b957c12b4c1ef89fd4bab2a0fdb1 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-reactive-LICENSE.txt b/client/rest/licenses/httpcore5-reactive-LICENSE.txt new file mode 100644 index 0000000000000..32f01eda18fe9 --- /dev/null +++ b/client/rest/licenses/httpcore5-reactive-LICENSE.txt @@ -0,0 +1,558 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project includes Public Suffix List copied from + +licensed under the terms of the Mozilla Public License, v. 2.0 + +Full license text: + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. 
"Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. 
+ +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. 
Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/client/rest/licenses/httpcore5-reactive-NOTICE.txt b/client/rest/licenses/httpcore5-reactive-NOTICE.txt new file mode 100644 index 0000000000000..fcf14beb5c1ec --- /dev/null +++ b/client/rest/licenses/httpcore5-reactive-NOTICE.txt @@ -0,0 +1,8 @@ + +Apache HttpComponents Core Reactive Extensions +Copyright 2005-2021 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + diff --git a/client/rest/licenses/reactive-streams-1.0.4.jar.sha1 b/client/rest/licenses/reactive-streams-1.0.4.jar.sha1 new file mode 100644 index 0000000000000..45a80e3f7e361 --- /dev/null +++ b/client/rest/licenses/reactive-streams-1.0.4.jar.sha1 @@ -0,0 +1 @@ +3864a1320d97d7b045f729a326e1e077661f31b7 \ No newline at end of file diff --git a/client/rest/licenses/reactive-streams-LICENSE.txt b/client/rest/licenses/reactive-streams-LICENSE.txt new file mode 100644 index 0000000000000..1e3c7e7c77495 --- /dev/null +++ b/client/rest/licenses/reactive-streams-LICENSE.txt @@ -0,0 +1,21 @@ +MIT No Attribution + +Copyright 2014 Reactive Streams + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/client/rest/licenses/reactive-streams-NOTICE.txt b/client/rest/licenses/reactive-streams-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/client/rest/licenses/reactor-core-3.5.20.jar.sha1 b/client/rest/licenses/reactor-core-3.5.20.jar.sha1 new file mode 100644 index 0000000000000..0c80be89f66c8 --- /dev/null +++ b/client/rest/licenses/reactor-core-3.5.20.jar.sha1 @@ -0,0 +1 @@ +1fc0f91e2b93778a974339d2c24363d7f34f90b4 \ No newline at end of file diff --git a/client/rest/licenses/reactor-core-LICENSE.txt b/client/rest/licenses/reactor-core-LICENSE.txt new file mode 100644 index 0000000000000..e5583c184e67a --- /dev/null +++ b/client/rest/licenses/reactor-core-LICENSE.txt @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/client/rest/licenses/reactor-core-NOTICE.txt b/client/rest/licenses/reactor-core-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/client/rest/src/main/java/org/opensearch/client/Cancellable.java b/client/rest/src/main/java/org/opensearch/client/Cancellable.java index 56e31a3742f35..d087c60927e3e 100644 --- a/client/rest/src/main/java/org/opensearch/client/Cancellable.java +++ b/client/rest/src/main/java/org/opensearch/client/Cancellable.java @@ -34,6 +34,8 @@ import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; import org.apache.hc.core5.concurrent.CancellableDependency; +import java.io.IOException; +import java.util.concurrent.Callable; import java.util.concurrent.CancellationException; /** @@ -77,7 +79,7 @@ public synchronized boolean cancel() { } /** - * Executes some arbitrary code iff the on-going request has not been cancelled, otherwise throws {@link CancellationException}. + * Executes some arbitrary code if the on-going request has not been cancelled, otherwise throws {@link CancellationException}. * This is needed to guarantee that cancelling a request works correctly even in case {@link #cancel()} is called between different * attempts of the same request. The low-level client reuses the same instance of the {@link CancellableDependency} by calling * {@link HttpUriRequestBase#reset()} between subsequent retries. The {@link #cancel()} method can be called at anytime, @@ -95,6 +97,31 @@ synchronized void runIfNotCancelled(Runnable runnable) { runnable.run(); } + /** + * Executes some arbitrary code if the on-going request has not been cancelled, otherwise throws {@link CancellationException}. + * This is needed to guarantee that cancelling a request works correctly even in case {@link #cancel()} is called between different + * attempts of the same request. The low-level client reuses the same instance of the {@link CancellableDependency} by calling + * {@link HttpUriRequestBase#reset()} between subsequent retries. The {@link #cancel()} method can be called at anytime, + * and we need to handle the case where it gets called while there is no request being executed as one attempt may have failed and + * the subsequent attempt has not been started yet. 
+ * If the request has already been cancelled we don't go ahead with the next attempt, and artificially raise the + * {@link CancellationException}, otherwise we run the provided {@link Callable} which will reset the request and send the next attempt. + * Note that this method must be synchronized as well as the {@link #cancel()} method, to prevent a request from being cancelled + * when there is no future to cancel, which would make cancelling the request a no-op. + */ + synchronized <T> T callIfNotCancelled(Callable<T> callable) throws IOException { + if (this.httpRequest.isCancelled()) { + throw newCancellationException(); + } + try { + return callable.call(); + } catch (final IOException ex) { + throw ex; + } catch (final Exception ex) { + throw new IOException(ex); + } + } + static CancellationException newCancellationException() { return new CancellationException("request was cancelled"); }
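The new callIfNotCancelled variant above mirrors runIfNotCancelled but returns the attempt's result and tunnels non-IO failures through IOException. A hypothetical call-site sketch (performSingleAttempt is made up for illustration; the real caller lives in RestClient):

// Wrap a single attempt so that a concurrent cancel() surfaces as a
// CancellationException instead of racing with the attempt about to start.
Response response = cancellable.callIfNotCancelled(() -> performSingleAttempt(request));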
If the date exists, it is removed for - * extractWarningValueFromWarningHeader(String) to work properly (as it does not handle dates). - */ - if (s.length() > WARNING_HEADER_DATE_LENGTH) { - final String possibleDateString = s.substring(s.length() - WARNING_HEADER_DATE_LENGTH); - final Matcher matcher = WARNING_HEADER_DATE_PATTERN.matcher(possibleDateString); - - if (matcher.matches()) { - warningHeader = warningHeader.substring(0, s.length() - WARNING_HEADER_DATE_LENGTH); - } - } - - final int firstQuote = warningHeader.indexOf('\"'); - final int lastQuote = warningHeader.length() - 1; - final String warningValue = warningHeader.substring(firstQuote + 1, lastQuote); - return warningValue; - } - /** * Returns a list of all warning headers returned in the response. */ public List getWarnings() { - List warnings = new ArrayList<>(); - for (Header header : response.getHeaders("Warning")) { - String warning = header.getValue(); - if (matchWarningHeaderPatternByPrefix(warning)) { - warnings.add(extractWarningValueFromWarningHeader(warning)); - } else { - warnings.add(warning); - } - } - return warnings; + return ResponseWarningsExtractor.getWarnings(response); } /** diff --git a/client/rest/src/main/java/org/opensearch/client/ResponseWarningsExtractor.java b/client/rest/src/main/java/org/opensearch/client/ResponseWarningsExtractor.java new file mode 100644 index 0000000000000..441daff4f3af4 --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/ResponseWarningsExtractor.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.client; + +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpResponse; + +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +final class ResponseWarningsExtractor { + + /** + * Optimized regular expression to test if a string matches the RFC 1123 date + * format (with quotes and leading space). Start/end of line characters and + * atomic groups are used to prevent backtracking. + */ + private static final Pattern WARNING_HEADER_DATE_PATTERN = Pattern.compile("^ " + // start of line, leading space + // quoted RFC 1123 date format + "\"" + // opening quote + "(?>Mon|Tue|Wed|Thu|Fri|Sat|Sun), " + // day of week, atomic group to prevent backtracking + "\\d{2} " + // 2-digit day + "(?>Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) " + // month, atomic group to prevent backtracking + "\\d{4} " + // 4-digit year + "\\d{2}:\\d{2}:\\d{2} " + // (two-digit hour):(two-digit minute):(two-digit second) + "GMT" + // GMT + "\"$"); // closing quote (optional, since an older version can still send a warn-date), end of line + + /** + * Length of RFC 1123 format (with quotes and leading space), used in + * matchWarningHeaderPatternByPrefix(String). + */ + private static final int WARNING_HEADER_DATE_LENGTH = 0 + 1 + 1 + 3 + 1 + 1 + 2 + 1 + 3 + 1 + 4 + 1 + 2 + 1 + 2 + 1 + 2 + 1 + 3 + 1; + + private ResponseWarningsExtractor() {} + + /** + * Returns a list of all warning headers returned in the response. 
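+     * For illustration, a hypothetical header value such as
+     * <pre>{@code
+     * 299 OpenSearch-2.16.0 "[deprecated_field] is deprecated" "Mon, 26 Aug 2024 10:00:00 GMT"
+     * }</pre>
+     * (version and warn-date invented for the example) is reduced to
+     * {@code [deprecated_field] is deprecated}, while values that do not carry the
+     * {@code 299 OpenSearch-} prefix are returned verbatim.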
+ * @param response HTTP response + */ + static List getWarnings(final HttpResponse response) { + List warnings = new ArrayList<>(); + for (Header header : response.getHeaders("Warning")) { + String warning = header.getValue(); + if (matchWarningHeaderPatternByPrefix(warning)) { + warnings.add(extractWarningValueFromWarningHeader(warning)); + } else { + warnings.add(warning); + } + } + return warnings; + } + + /** + * Tests if a string matches the RFC 7234 specification for warning headers. + * This assumes that the warn code is always 299 and the warn agent is always + * OpenSearch. + * + * @param s the value of a warning header formatted according to RFC 7234 + * @return {@code true} if the input string matches the specification + */ + private static boolean matchWarningHeaderPatternByPrefix(final String s) { + return s.startsWith("299 OpenSearch-"); + } + + /** + * Refer to org.opensearch.common.logging.DeprecationLogger + */ + private static String extractWarningValueFromWarningHeader(final String s) { + String warningHeader = s; + + /* + * The following block tests for the existence of a RFC 1123 date in the warning header. If the date exists, it is removed for + * extractWarningValueFromWarningHeader(String) to work properly (as it does not handle dates). + */ + if (s.length() > WARNING_HEADER_DATE_LENGTH) { + final String possibleDateString = s.substring(s.length() - WARNING_HEADER_DATE_LENGTH); + final Matcher matcher = WARNING_HEADER_DATE_PATTERN.matcher(possibleDateString); + + if (matcher.matches()) { + warningHeader = warningHeader.substring(0, s.length() - WARNING_HEADER_DATE_LENGTH); + } + } + + final int firstQuote = warningHeader.indexOf('\"'); + final int lastQuote = warningHeader.length() - 1; + final String warningValue = warningHeader.substring(firstQuote + 1, lastQuote); + return warningValue; + } + +} diff --git a/client/rest/src/main/java/org/opensearch/client/RestClient.java b/client/rest/src/main/java/org/opensearch/client/RestClient.java index 15905add76c4f..5c87e3fda5701 100644 --- a/client/rest/src/main/java/org/opensearch/client/RestClient.java +++ b/client/rest/src/main/java/org/opensearch/client/RestClient.java @@ -62,14 +62,19 @@ import org.apache.hc.core5.http.HttpEntity; import org.apache.hc.core5.http.HttpHost; import org.apache.hc.core5.http.HttpRequest; +import org.apache.hc.core5.http.HttpResponse; +import org.apache.hc.core5.http.Message; import org.apache.hc.core5.http.io.entity.HttpEntityWrapper; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; import org.apache.hc.core5.http.message.RequestLine; import org.apache.hc.core5.http.nio.AsyncRequestProducer; import org.apache.hc.core5.http.nio.AsyncResponseConsumer; import org.apache.hc.core5.net.URIBuilder; +import org.apache.hc.core5.reactive.ReactiveResponseConsumer; import org.apache.hc.core5.reactor.IOReactorStatus; import org.apache.hc.core5.util.Args; import org.opensearch.client.http.HttpUriRequestProducer; +import org.opensearch.client.http.ReactiveHttpUriRequestProducer; import javax.net.ssl.SSLHandshakeException; @@ -83,6 +88,7 @@ import java.net.SocketTimeoutException; import java.net.URI; import java.net.URISyntaxException; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; import java.util.Base64; @@ -98,6 +104,7 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.concurrent.CancellationException; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import 
java.util.concurrent.ExecutionException; @@ -106,6 +113,10 @@ import java.util.stream.Collectors; import java.util.zip.GZIPOutputStream; +import org.reactivestreams.Publisher; +import reactor.core.publisher.Mono; +import reactor.core.publisher.MonoSink; + import static java.nio.charset.StandardCharsets.UTF_8; import static java.util.Collections.singletonList; @@ -300,6 +311,23 @@ public boolean isRunning() { return client.getStatus() == IOReactorStatus.ACTIVE; } + /** + * Sends a streaming request to the OpenSearch cluster that the client points to and returns streaming response. This is an experimental API. + * @param request streaming request + * @return streaming response + * @throws IOException IOException + */ + public StreamingResponse streamRequest(StreamingRequest request) throws IOException { + final InternalStreamingRequest internalRequest = new InternalStreamingRequest(request); + + final StreamingResponse response = new StreamingResponse<>( + new RequestLine(internalRequest.httpRequest), + streamRequest(nextNodes(), internalRequest) + ); + + return response; + } + /** * Sends a request to the OpenSearch cluster that the client points to. * Blocks until the request is completed and returns its response or fails @@ -332,13 +360,13 @@ public Response performRequest(Request request) throws IOException { private Response performRequest(final NodeTuple> nodeTuple, final InternalRequest request, Exception previousException) throws IOException { - RequestContext context = request.createContextForNextAttempt(nodeTuple.nodes.next(), nodeTuple.authCache); + RequestContext context = request.createContextForNextAttempt(nodeTuple.nodes.next(), nodeTuple.authCache); ClassicHttpResponse httpResponse; try { - httpResponse = client.execute(context.requestProducer, context.asyncResponseConsumer, context.context, null).get(); + httpResponse = client.execute(context.requestProducer(), context.asyncResponseConsumer(), context.context(), null).get(); } catch (Exception e) { - RequestLogger.logFailedRequest(logger, request.httpRequest, context.node, e); - onFailure(context.node); + RequestLogger.logFailedRequest(logger, request.httpRequest, context.node(), e); + onFailure(context.node()); Exception cause = extractAndWrapCause(e); addSuppressedException(previousException, cause); if (nodeTuple.nodes.hasNext()) { @@ -352,7 +380,7 @@ private Response performRequest(final NodeTuple> nodeTuple, final } throw new IllegalStateException("unexpected exception type: must be either RuntimeException or IOException", cause); } - ResponseOrResponseException responseOrResponseException = convertResponse(request, context.node, httpResponse); + ResponseOrResponseException responseOrResponseException = convertResponse(request, context.node(), httpResponse); if (responseOrResponseException.responseException == null) { return responseOrResponseException.response; } @@ -363,6 +391,46 @@ private Response performRequest(final NodeTuple> nodeTuple, final throw responseOrResponseException.responseException; } + private Publisher>> streamRequest( + final NodeTuple> nodeTuple, + final InternalStreamingRequest request + ) throws IOException { + return request.cancellable.callIfNotCancelled(() -> { + final Node node = nodeTuple.nodes.next(); + + final Mono>> publisher = Mono.create(emitter -> { + final RequestContext context = request.createContextForNextAttempt(node, nodeTuple.authCache, emitter); + final Future future = client.execute( + context.requestProducer(), + context.asyncResponseConsumer(), + context.context(), + null + 
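// no FutureCallback is passed to execute() here: success and failure
+                        // are delivered to the emitter via the ReactiveResponseConsumer
+                        // created in ReactiveRequestContext
+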
); + + if (future instanceof org.apache.hc.core5.concurrent.Cancellable) { + request.httpRequest.setDependency((org.apache.hc.core5.concurrent.Cancellable) future); + } + }); + + return publisher.flatMap(message -> { + try { + final ResponseOrResponseException responseOrResponseException = convertResponse(request, node, message); + if (responseOrResponseException.responseException == null) { + return Mono.just(message); + } else { + if (nodeTuple.nodes.hasNext()) { + return Mono.from(streamRequest(nodeTuple, request)); + } else { + return Mono.error(responseOrResponseException.responseException); + } + } + } catch (final Exception ex) { + return Mono.error(ex); + } + }); + }); + } + private ResponseOrResponseException convertResponse(InternalRequest request, Node node, ClassicHttpResponse httpResponse) throws IOException { RequestLogger.logResponse(logger, request.httpRequest, node.getHost(), httpResponse); @@ -393,6 +461,40 @@ private ResponseOrResponseException convertResponse(InternalRequest request, Nod throw responseException; } + private ResponseOrResponseException convertResponse( + InternalStreamingRequest request, + Node node, + Message> message + ) throws IOException { + + // Streaming Response could accumulate a lot of data so we may not be able to fully consume it. + final ClassicHttpResponse httpResponse = new BasicClassicHttpResponse( + message.getHead().getCode(), + message.getHead().getReasonPhrase() + ); + final Response response = new Response(new RequestLine(request.httpRequest), node.getHost(), httpResponse); + + RequestLogger.logResponse(logger, request.httpRequest, node.getHost(), httpResponse); + int statusCode = httpResponse.getCode(); + + if (isSuccessfulResponse(statusCode) || request.ignoreErrorCodes.contains(response.getStatusLine().getStatusCode())) { + onResponse(node); + if (request.warningsHandler.warningsShouldFailRequest(response.getWarnings())) { + throw new WarningFailureException(response); + } + return new ResponseOrResponseException(response); + } + ResponseException responseException = new ResponseException(response); + if (isRetryStatus(statusCode)) { + // mark host dead and retry against next one + onFailure(node); + return new ResponseOrResponseException(responseException); + } + // mark host alive and don't retry, as the error should be a request problem + onResponse(node); + throw responseException; + } + /** * Sends a request to the OpenSearch cluster that the client points to. 
* The request is executed asynchronously and the provided @@ -427,16 +529,23 @@ private void performRequestAsync( final FailureTrackingResponseListener listener ) { request.cancellable.runIfNotCancelled(() -> { - final RequestContext context = request.createContextForNextAttempt(nodeTuple.nodes.next(), nodeTuple.authCache); + final RequestContext context = request.createContextForNextAttempt( + nodeTuple.nodes.next(), + nodeTuple.authCache + ); Future future = client.execute( - context.requestProducer, - context.asyncResponseConsumer, - context.context, + context.requestProducer(), + context.asyncResponseConsumer(), + context.context(), new FutureCallback() { @Override public void completed(ClassicHttpResponse httpResponse) { try { - ResponseOrResponseException responseOrResponseException = convertResponse(request, context.node, httpResponse); + ResponseOrResponseException responseOrResponseException = convertResponse( + request, + context.node(), + httpResponse + ); if (responseOrResponseException.responseException == null) { listener.onSuccess(responseOrResponseException.response); } else { @@ -455,8 +564,8 @@ public void completed(ClassicHttpResponse httpResponse) { @Override public void failed(Exception failure) { try { - RequestLogger.logFailedRequest(logger, request.httpRequest, context.node, failure); - onFailure(context.node); + RequestLogger.logFailedRequest(logger, request.httpRequest, context.node(), failure); + onFailure(context.node()); if (nodeTuple.nodes.hasNext()) { listener.trackFailure(failure); performRequestAsync(nodeTuple, request, listener); @@ -822,6 +931,66 @@ public void remove() { } } + private class InternalStreamingRequest { + private final StreamingRequest request; + private final Set ignoreErrorCodes; + private final HttpUriRequestBase httpRequest; + private final Cancellable cancellable; + private final WarningsHandler warningsHandler; + + InternalStreamingRequest(StreamingRequest request) { + this.request = request; + Map params = new HashMap<>(request.getParameters()); + // ignore is a special parameter supported by the clients, shouldn't be sent to es + String ignoreString = params.remove("ignore"); + this.ignoreErrorCodes = getIgnoreErrorCodes(ignoreString, request.getMethod()); + URI uri = buildUri(pathPrefix, request.getEndpoint(), params); + this.httpRequest = createHttpRequest(request.getMethod(), uri, null); + this.cancellable = Cancellable.fromRequest(httpRequest); + setHeaders(httpRequest, request.getOptions().getHeaders()); + setRequestConfig(httpRequest, request.getOptions().getRequestConfig()); + this.warningsHandler = request.getOptions().getWarningsHandler() == null + ? RestClient.this.warningsHandler + : request.getOptions().getWarningsHandler(); + } + + private void setHeaders(HttpRequest httpRequest, Collection
requestHeaders) { + // request headers override default headers, so we don't add default headers if they exist as request headers + final Set requestNames = new HashSet<>(requestHeaders.size()); + for (Header requestHeader : requestHeaders) { + httpRequest.addHeader(requestHeader); + requestNames.add(requestHeader.getName()); + } + for (Header defaultHeader : defaultHeaders) { + if (requestNames.contains(defaultHeader.getName()) == false) { + httpRequest.addHeader(defaultHeader); + } + } + if (compressionEnabled) { + httpRequest.addHeader("Accept-Encoding", "gzip"); + } + } + + private void setRequestConfig(HttpUriRequestBase httpRequest, RequestConfig requestConfig) { + if (requestConfig != null) { + httpRequest.setConfig(requestConfig); + } + } + + public Publisher getPublisher() { + return request.getBody(); + } + + RequestContext createContextForNextAttempt( + Node node, + AuthCache authCache, + MonoSink>> emitter + ) { + this.httpRequest.reset(); + return new ReactiveRequestContext(this, node, authCache, emitter); + } + } + private class InternalRequest { private final Request request; private final Set ignoreErrorCodes; @@ -868,12 +1037,22 @@ private void setRequestConfig(HttpUriRequestBase httpRequest, RequestConfig requ } } - RequestContext createContextForNextAttempt(Node node, AuthCache authCache) { + RequestContext createContextForNextAttempt(Node node, AuthCache authCache) { this.httpRequest.reset(); - return new RequestContext(this, node, authCache); + return new AsyncRequestContext(this, node, authCache); } } + private interface RequestContext { + Node node(); + + AsyncRequestProducer requestProducer(); + + AsyncResponseConsumer asyncResponseConsumer(); + + HttpClientContext context(); + } + /** * The Apache HttpClient 5 adds "Authorization" header even if the credentials for basic authentication are not provided * (effectively, username and password are 'null'). 
To workaround that, wrapping the AuthCache around current HttpClientContext @@ -934,13 +1113,73 @@ public void clear() { } - private static class RequestContext { + private static class ReactiveRequestContext implements RequestContext { + private final Node node; + private final AsyncRequestProducer requestProducer; + private final AsyncResponseConsumer asyncResponseConsumer; + private final HttpClientContext context; + + ReactiveRequestContext( + InternalStreamingRequest request, + Node node, + AuthCache authCache, + MonoSink>> emitter + ) { + this.node = node; + // we stream the request body if the entity allows for it + this.requestProducer = ReactiveHttpUriRequestProducer.create(request.httpRequest, node.getHost(), request.getPublisher()); + this.asyncResponseConsumer = new ReactiveResponseConsumer(new FutureCallback>>() { + @Override + public void failed(Exception ex) { + emitter.error(ex); + } + + @Override + public void completed(Message> result) { + if (result == null) { + emitter.success(); + } else { + emitter.success(result); + } + } + + @Override + public void cancelled() { + failed(new CancellationException("Future cancelled")); + } + }); + this.context = HttpClientContext.create(); + context.setAuthCache(new WrappingAuthCache(context, authCache)); + } + + @Override + public AsyncResponseConsumer asyncResponseConsumer() { + return asyncResponseConsumer; + } + + @Override + public HttpClientContext context() { + return context; + } + + @Override + public Node node() { + return node; + } + + @Override + public AsyncRequestProducer requestProducer() { + return requestProducer; + } + } + + private static class AsyncRequestContext implements RequestContext { private final Node node; private final AsyncRequestProducer requestProducer; private final AsyncResponseConsumer asyncResponseConsumer; private final HttpClientContext context; - RequestContext(InternalRequest request, Node node, AuthCache authCache) { + AsyncRequestContext(InternalRequest request, Node node, AuthCache authCache) { this.node = node; // we stream the request body if the entity allows for it this.requestProducer = HttpUriRequestProducer.create(request.httpRequest, node.getHost()); @@ -950,6 +1189,26 @@ private static class RequestContext { this.context = HttpClientContext.create(); context.setAuthCache(new WrappingAuthCache(context, authCache)); } + + @Override + public AsyncResponseConsumer asyncResponseConsumer() { + return asyncResponseConsumer; + } + + @Override + public HttpClientContext context() { + return context; + } + + @Override + public Node node() { + return node; + } + + @Override + public AsyncRequestProducer requestProducer() { + return requestProducer; + } } private static Set getIgnoreErrorCodes(String ignoreString, String requestMethod) { diff --git a/client/rest/src/main/java/org/opensearch/client/StreamingRequest.java b/client/rest/src/main/java/org/opensearch/client/StreamingRequest.java new file mode 100644 index 0000000000000..e1767407b1353 --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/StreamingRequest.java @@ -0,0 +1,114 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.client;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+import org.reactivestreams.Publisher;
+
+import static java.util.Collections.unmodifiableMap;
+
+/**
+ * HTTP Streaming Request to OpenSearch. This is an experimental API.
+ */
+public class StreamingRequest<T> {
+    private final String method;
+    private final String endpoint;
+    private final Map<String, String> parameters = new HashMap<>();
+
+    private RequestOptions options = RequestOptions.DEFAULT;
+    private final Publisher<T> publisher;
+
+    /**
+     * Constructor
+     * @param method method
+     * @param endpoint endpoint
+     * @param publisher publisher
+     */
+    public StreamingRequest(String method, String endpoint, Publisher<T> publisher) {
+        this.method = method;
+        this.endpoint = endpoint;
+        this.publisher = publisher;
+    }
+
+    /**
+     * Get endpoint
+     * @return endpoint
+     */
+    public String getEndpoint() {
+        return endpoint;
+    }
+
+    /**
+     * Get method
+     * @return method
+     */
+    public String getMethod() {
+        return method;
+    }
+
+    /**
+     * Get options
+     * @return options
+     */
+    public RequestOptions getOptions() {
+        return options;
+    }
+
+    /**
+     * Get parameters
+     * @return parameters
+     */
+    public Map<String, String> getParameters() {
+        if (options.getParameters().isEmpty()) {
+            return unmodifiableMap(parameters);
+        } else {
+            Map<String, String> combinedParameters = new HashMap<>(parameters);
+            combinedParameters.putAll(options.getParameters());
+            return unmodifiableMap(combinedParameters);
+        }
+    }
+
+    /**
+     * Add a query string parameter.
+     * @param name the name of the url parameter. Must not be null.
+     * @param value the value of the url parameter. If {@code null} then
+     *      the parameter is sent as {@code name} rather than {@code name=value}
+     * @throws IllegalArgumentException if a parameter with that name has
+     *      already been set
+     */
+    public void addParameter(String name, String value) {
+        Objects.requireNonNull(name, "url parameter name cannot be null");
+        if (parameters.containsKey(name)) {
+            throw new IllegalArgumentException("url parameter [" + name + "] has already been set to [" + parameters.get(name) + "]");
+        } else {
+            parameters.put(name, value);
+        }
+    }
+
+    /**
+     * Add query parameters using the provided map of key value pairs.
+     *
+     * @param paramSource a map of key value pairs where the key is the url parameter.
+     * @throws IllegalArgumentException if a parameter with that name has already been set.
+     */
+    public void addParameters(Map<String, String> paramSource) {
+        paramSource.forEach(this::addParameter);
+    }
+
+    /**
+     * Body publisher
+     * @return body publisher
+     */
+    public Publisher<T> getBody() {
+        return publisher;
+    }
+}
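A minimal usage sketch of the experimental streaming API added above. This is hypothetical caller
code, not part of this patch: the host, endpoint, and payload are assumptions, and the enclosing
method is assumed to declare IOException.

    try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
        StreamingRequest<ByteBuffer> request = new StreamingRequest<>(
            "POST",
            "/test-index/_bulk",
            Mono.just(ByteBuffer.wrap("{\"index\":{}}\n{\"field\":1}\n".getBytes(StandardCharsets.UTF_8)))
        );
        StreamingResponse<ByteBuffer> response = client.streamRequest(request);
        // the response body is consumed reactively, chunk by chunk, as it arrives
        Flux.from(response.getBody())
            .map(bytes -> StandardCharsets.UTF_8.decode(bytes).toString())
            .subscribe(System.out::print);
    }
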
diff --git a/client/rest/src/main/java/org/opensearch/client/StreamingResponse.java b/client/rest/src/main/java/org/opensearch/client/StreamingResponse.java
new file mode 100644
index 0000000000000..87d404c115723
--- /dev/null
+++ b/client/rest/src/main/java/org/opensearch/client/StreamingResponse.java
@@ -0,0 +1,96 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.client;
+
+import org.apache.hc.core5.http.HttpHost;
+import org.apache.hc.core5.http.HttpResponse;
+import org.apache.hc.core5.http.Message;
+import org.apache.hc.core5.http.message.RequestLine;
+import org.apache.hc.core5.http.message.StatusLine;
+
+import java.util.List;
+
+import org.reactivestreams.Publisher;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+/**
+ * HTTP Streaming Response from OpenSearch. This is an experimental API.
+ */
+public class StreamingResponse<T> {
+    private final RequestLine requestLine;
+    private final Mono<Message<HttpResponse, Publisher<T>>> publisher;
+    private volatile HttpHost host;
+
+    /**
+     * Constructor
+     * @param requestLine request line
+     * @param publisher message publisher (response with a body)
+     */
+    public StreamingResponse(RequestLine requestLine, Publisher<Message<HttpResponse, Publisher<T>>> publisher) {
+        this.requestLine = requestLine;
+        // We cache the publisher here so the body and/or the HttpResponse can
+        // be consumed independently and more than once.
+        this.publisher = Mono.from(publisher).cache();
+    }
+
+    /**
+     * Set host
+     * @param host host
+     */
+    public void setHost(HttpHost host) {
+        this.host = host;
+    }
+
+    /**
+     * Get request line
+     * @return request line
+     */
+    public RequestLine getRequestLine() {
+        return requestLine;
+    }
+
+    /**
+     * Get host
+     * @return host
+     */
+    public HttpHost getHost() {
+        return host;
+    }
+
+    /**
+     * Get response body {@link Publisher}
+     * @return response body {@link Publisher}
+     */
+    public Publisher<T> getBody() {
+        return publisher.flatMapMany(m -> Flux.from(m.getBody()));
+    }
+
+    /**
+     * Returns the status line of the current response
+     */
+    public StatusLine getStatusLine() {
+        return new StatusLine(
+            publisher.map(Message::getHead)
+                .onErrorResume(ResponseException.class, e -> Mono.just(e.getResponse().getHttpResponse()))
+                .block()
+        );
+    }
+
+    /**
+     * Returns a list of all warning headers returned in the response.
+     */
+    public List<String> getWarnings() {
+        return ResponseWarningsExtractor.getWarnings(
+            publisher.map(Message::getHead)
+                .onErrorResume(ResponseException.class, e -> Mono.just(e.getResponse().getHttpResponse()))
+                .block()
+        );
+    }
+}
diff --git a/client/rest/src/main/java/org/opensearch/client/http/ReactiveHttpUriRequestProducer.java b/client/rest/src/main/java/org/opensearch/client/http/ReactiveHttpUriRequestProducer.java
new file mode 100644
index 0000000000000..63a71e29b8b31
--- /dev/null
+++ b/client/rest/src/main/java/org/opensearch/client/http/ReactiveHttpUriRequestProducer.java
@@ -0,0 +1,75 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.client.http; + +import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.nio.AsyncEntityProducer; +import org.apache.hc.core5.http.nio.support.BasicRequestProducer; +import org.apache.hc.core5.net.URIAuthority; +import org.apache.hc.core5.reactive.ReactiveEntityProducer; +import org.apache.hc.core5.util.Args; + +import java.nio.ByteBuffer; + +import org.reactivestreams.Publisher; + +/** + * The reactive producer of the {@link HttpUriRequestBase} instances associated with a particular {@link HttpHost} + */ +public class ReactiveHttpUriRequestProducer extends BasicRequestProducer { + private final HttpUriRequestBase request; + + ReactiveHttpUriRequestProducer(final HttpUriRequestBase request, final AsyncEntityProducer entityProducer) { + super(request, entityProducer); + this.request = request; + } + + /** + * Get the produced {@link HttpUriRequestBase} instance + * @return produced {@link HttpUriRequestBase} instance + */ + public HttpUriRequestBase getRequest() { + return request; + } + + /** + * Create new request producer for {@link HttpUriRequestBase} instance and {@link HttpHost} + * @param request {@link HttpUriRequestBase} instance + * @param host {@link HttpHost} instance + * @param publisher publisher + * @return new request producer + */ + public static ReactiveHttpUriRequestProducer create( + final HttpUriRequestBase request, + final HttpHost host, + Publisher publisher + ) { + Args.notNull(request, "Request"); + Args.notNull(host, "HttpHost"); + + // TODO: Should we copy request here instead of modifying in place? + request.setAuthority(new URIAuthority(host)); + request.setScheme(host.getSchemeName()); + + final Header contentTypeHeader = request.getFirstHeader("Content-Type"); + final ContentType contentType = (contentTypeHeader == null) + ? ContentType.APPLICATION_JSON + : ContentType.parse(contentTypeHeader.getValue()); + + final Header contentEncodingHeader = request.getFirstHeader("Content-Encoding"); + final String contentEncoding = (contentEncodingHeader == null) ? 
null : contentEncodingHeader.getValue(); + + final AsyncEntityProducer entityProducer = new ReactiveEntityProducer(publisher, -1, contentType, contentEncoding); + return new ReactiveHttpUriRequestProducer(request, entityProducer); + } + +} diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientTests.java index dd51da3a30d8c..f4f1c57cdd588 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientTests.java @@ -56,12 +56,15 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.function.Supplier; +import reactor.core.publisher.Mono; + import static java.util.Collections.singletonList; import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertSame; import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Mockito.mock; @@ -418,6 +421,16 @@ public void testIsRunning() { assertFalse(restClient.isRunning()); } + public void testStreamWithUnsupportedMethod() throws Exception { + try (RestClient restClient = createRestClient()) { + final UnsupportedOperationException ex = assertThrows( + UnsupportedOperationException.class, + () -> restClient.streamRequest(new StreamingRequest<>("unsupported", randomAsciiLettersOfLength(5), Mono.empty())) + ); + assertEquals("http method not supported: unsupported", ex.getMessage()); + } + } + private static void assertNodes(NodeTuple> nodeTuple, AtomicInteger lastNodeIndex, int runs) throws IOException { int distance = lastNodeIndex.get() % nodeTuple.nodes.size(); /* diff --git a/client/sniffer/licenses/httpclient5-5.2.1.jar.sha1 b/client/sniffer/licenses/httpclient5-5.2.1.jar.sha1 deleted file mode 100644 index 3555fe22f8e12..0000000000000 --- a/client/sniffer/licenses/httpclient5-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0c900514d3446d9ce5d9dbd90c21192048125440 \ No newline at end of file diff --git a/client/sniffer/licenses/httpclient5-5.2.3.jar.sha1 b/client/sniffer/licenses/httpclient5-5.2.3.jar.sha1 new file mode 100644 index 0000000000000..43e233e72001a --- /dev/null +++ b/client/sniffer/licenses/httpclient5-5.2.3.jar.sha1 @@ -0,0 +1 @@ +5d753a99d299756998a08c488f2efdf9cf26198e \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore5-5.2.2.jar.sha1 b/client/sniffer/licenses/httpcore5-5.2.2.jar.sha1 deleted file mode 100644 index b641256c7d4a4..0000000000000 --- a/client/sniffer/licenses/httpcore5-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6da28f5aa6c2b129ef49632e041a5203ce7507b2 \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore5-5.2.5.jar.sha1 b/client/sniffer/licenses/httpcore5-5.2.5.jar.sha1 new file mode 100644 index 0000000000000..ca97e8612ea39 --- /dev/null +++ b/client/sniffer/licenses/httpcore5-5.2.5.jar.sha1 @@ -0,0 +1 @@ +dab1e18842971a45ca8942491ce005ab86a028d7 \ No newline at end of file diff --git a/client/sniffer/licenses/jackson-core-2.17.1.jar.sha1 b/client/sniffer/licenses/jackson-core-2.17.1.jar.sha1 deleted file mode 100644 index 82dab5981e652..0000000000000 --- a/client/sniffer/licenses/jackson-core-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5e52a11644cd59a28ef79f02bddc2cc3bab45edb \ No newline at end of file diff --git 
a/client/sniffer/licenses/jackson-core-2.17.2.jar.sha1 b/client/sniffer/licenses/jackson-core-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..e15f2340980bc --- /dev/null +++ b/client/sniffer/licenses/jackson-core-2.17.2.jar.sha1 @@ -0,0 +1 @@ +969a35cb35c86512acbadcdbbbfb044c877db814 \ No newline at end of file diff --git a/distribution/src/config/opensearch.yml b/distribution/src/config/opensearch.yml index 10bab9b3fce92..4115601f62ada 100644 --- a/distribution/src/config/opensearch.yml +++ b/distribution/src/config/opensearch.yml @@ -125,3 +125,7 @@ ${path.logs} # Gates the functionality of enabling Opensearch to use pluggable caches with respective store names via setting. # #opensearch.experimental.feature.pluggable.caching.enabled: false +# +# Gates the functionality of star tree index, which improves the performance of search aggregations. +# +#opensearch.experimental.feature.composite_index.star_tree.enabled: true diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index 3083ad4375460..784cdc457a1a9 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -37,8 +37,8 @@ base { dependencies { compileOnly project(":server") compileOnly project(":libs:opensearch-cli") - api "org.bouncycastle:bcpg-fips:1.0.7.1" - api "org.bouncycastle:bc-fips:1.0.2.5" + api "org.bouncycastle:bcpg-fips:2.0.9" + api "org.bouncycastle:bc-fips:2.0.0" testImplementation project(":test:framework") testImplementation 'com.google.jimfs:jimfs:1.3.0' testRuntimeOnly("com.google.guava:guava:${versions.guava}") { @@ -58,33 +58,6 @@ test { jvmArgs += [ "-Djava.security.egd=file:/dev/urandom" ] } -/* - * these two classes intentionally use the following JDK internal APIs in order to offer the necessary - * functionality - * - * sun.security.internal.spec.TlsKeyMaterialParameterSpec - * sun.security.internal.spec.TlsKeyMaterialSpec - * sun.security.internal.spec.TlsMasterSecretParameterSpec - * sun.security.internal.spec.TlsPrfParameterSpec - * sun.security.internal.spec.TlsRsaPremasterSecretParameterSpec - * sun.security.provider.SecureRandom - * - */ -thirdPartyAudit.ignoreViolations( - 'org.bouncycastle.jcajce.provider.BouncyCastleFipsProvider$CoreSecureRandom', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$BaseTLSKeyGeneratorSpi', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSKeyMaterialGenerator', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSKeyMaterialGenerator$2', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSMasterSecretGenerator', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSMasterSecretGenerator$2', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSPRFKeyGenerator', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSRsaPreMasterSecretGenerator', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSRsaPreMasterSecretGenerator$2', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSExtendedMasterSecretGenerator', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSExtendedMasterSecretGenerator$2' -) - thirdPartyAudit.ignoreMissingClasses( 'org.brotli.dec.BrotliInputStream', 'org.objectweb.asm.AnnotationVisitor', diff --git a/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.5.jar.sha1 b/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.5.jar.sha1 deleted file mode 100644 index 1b44c77dd4ee1..0000000000000 --- a/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-704e65f7e4fe679e5ab2aa8a840f27f8ced4c522
\ No newline at end of file
diff --git a/distribution/tools/plugin-cli/licenses/bc-fips-2.0.0.jar.sha1 b/distribution/tools/plugin-cli/licenses/bc-fips-2.0.0.jar.sha1
new file mode 100644
index 0000000000000..79f0e3e9930bb
--- /dev/null
+++ b/distribution/tools/plugin-cli/licenses/bc-fips-2.0.0.jar.sha1
@@ -0,0 +1 @@
+ee9ac432cf08f9a9ebee35d7cf8a45f94959a7ab
\ No newline at end of file
diff --git a/distribution/tools/plugin-cli/licenses/bcpg-fips-1.0.7.1.jar.sha1 b/distribution/tools/plugin-cli/licenses/bcpg-fips-1.0.7.1.jar.sha1
deleted file mode 100644
index 44cebc7c92d87..0000000000000
--- a/distribution/tools/plugin-cli/licenses/bcpg-fips-1.0.7.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5e1952428655ea822066f86df2e3ecda8fa0ba2b
\ No newline at end of file
diff --git a/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.9.jar.sha1 b/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.9.jar.sha1
new file mode 100644
index 0000000000000..20cdbf6dc8aa8
--- /dev/null
+++ b/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.9.jar.sha1
@@ -0,0 +1 @@
+f69719ef8dbf34d5f906ce480496446b2fd2ae27
\ No newline at end of file
diff --git a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.1.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.1.jar.sha1
deleted file mode 100644
index 4ceead1b7ae4f..0000000000000
--- a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-fca7ef6192c9ad05d07bc50da991bf937a84af3a
\ No newline at end of file
diff --git a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.2.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.2.jar.sha1
new file mode 100644
index 0000000000000..411e1d62459fd
--- /dev/null
+++ b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.2.jar.sha1
@@ -0,0 +1 @@
+147b7b9412ffff24339f8aba080b292448e08698
\ No newline at end of file
diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.1.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.1.jar.sha1
deleted file mode 100644
index 7cf1ac1b60301..0000000000000
--- a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-0524dcbcccdde7d45a679dfc333e4763feb09079
\ No newline at end of file
diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.2.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.2.jar.sha1
new file mode 100644
index 0000000000000..f2b4dbdc5decb
--- /dev/null
+++ b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.2.jar.sha1
@@ -0,0 +1 @@
+e6deb029e5901e027c129341fac39e515066b68c
\ No newline at end of file
diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar
index d64cd4917707c1f8861d8cb53dd15194d4248596..a4b76b9530d66f5e68d973ea569d8e19de379189 100644
GIT binary patch
delta 34592
[base85-encoded binary delta for gradle-wrapper.jar omitted]
zmxhx%#+9e>eorO0)eg#m6uhb7G^KSg`Cbxlf9XizZH9>B@hZcqJ*7VTp6)w1tHLB1 z1}(?)MI0$rLIUS0;Z^atECLmzzb6FE#PKdBl;L{}$M%UdWEi4$AS4ew$#8O?ZRr(G z4syuHkcGi8a#*gRz@QP|7R93=j*A$L;eA}9id+JyWjkK`Mod00;{&DlA!QJFR3&lj zf1vI*O1ec{(V=0QA?ELLVls-W``ELsu7M`3`vI4MzhVcpJ!9#^KGjq|#b-J`!F7h$ z{dUEFmBLuMbYu>nV^(S3q+UC;7s@e_qZG#+N=oo0o$G1>6Y0a{9@&9;EU2+8k|7P6 zp?HMh|8#X5UnwpxGbHw;%WXHXn_~8nedvw09V+G$(lhoq7L}=qb+OaPSD&;$TuUtG(4;py( zh)8|Nord(*d1ZH-Dmw1MqU&RKiI)26r-hE(pqnmo4uixe^`qea7(_HA_R2KjdJ4$g!)7ve&Q^b1Tf+{(Vd6vInCd>i725IomG^(Ez(D8L!4qlUAX=)EV9!3JfWLB4n1z)!ums&0UuuVLUH zP)i30*5f6tnvk?lbhL{|8I78X7|_cA3p(L9<~X5y1L3{K8Sf*xL|5gToDT;aYig?m8z^z zQ`XdEMJqC#*O|ho!7x~+MzT<5g$turF~pS;RSY&GR;6TxR)3Q+&%yG`3&ngIwR*qK&t{TERu@0|fDrKKw3=RE&t-)Xh-$i& zl5|>BSn5)z)hg3d?<~8msU=ye>CHWR!9yT;PU|$KP*qADf(V?zj^n^g~nykv^I)Uz3{78Ty81{n~ zZsS&7WH)#Ach3%UyVD1s=Ahvw9*%Wt z<42vTt%|niux3Zww13+oK)-d~G>VKHM0ov>KXKaUH(Cc)#9GFVSc4EoUbnRudxi}T z8J!VNY=4g*Y7C*Ho7#^wUVt&67&ea4^1oBw%@h^ z+YZ+eK^VI5573*KZosq?pMj(u5257?^lBu&LF9`ao`sYf9&zx;uK2iv&$;8{ z4nFUSFF5$3JHFuHORo5YgFkV{CmcNEicdQDvO7NM;484|f=_+6!)x%g1CL;L9DE%% zT=1xaKZ8v-+-@x1OZ;|0_a9J82MFd71j+6K002-1li@}jlN6Rde_awnSQ^R>8l%uQ zO&WF!6qOdxN;eu7Q-nHAUeckHnK(0P3kdECiu+2%6$MdLP?%OK@`LB_gMXCA`(~0R zX;Tm9uJ&d7>n z%9A~GP*{Z zrpyh7B^|a-)|8b<&(!>OhWQ08$LV}WQ`RD4Od8d3O-;%vhK7#W<7u;XvbxQo0JX@f zY(C0RS6^zcd>jo287k@<4tg;k3q5e5hLHE@&4ooC)S|`w7N|jm>3tns$G}U4o!(2g=!}xLHp?+qF zvj$ztd<%96=4tCKGG@ADSX{=mNZ@ho6rr?EOQ1(G2i@2;GXb&S#U3YtCuVwc*4rJc zPm$kZf2+|!X~X6%(QMj{4u)mZOi!(P(dF3hX4ra9l=RKQ$v(kJFS#;ib+z9K^#Gle z6LKa>&4oMFJ4C&NBJ7hhPSIjcOno$M6iq+l;ExpH9rF68@D3-EgCCf}JJSgVPbI1$ z?JjPPX!_88InA}KX&=#cFH#s3Ix<6LeY==wf5DK*jP`hqF%u+|sI)3HfyywfAj=0O zMNUX2pLR;T(8c+$g&}Z#q9L>(D~t~l&X^VFXp@&w92f8tq+KXMZ&o!an%$#uo^hJh z^9-RjEvqE_s%H8{qw(juo4?SC{YhO*`|H*ibxm%ZF6r=2QC)bE`d3oZ(~?;a-(mX)b!|i%p!VVP>DN6tg*Ry97gUPUJj<}OxaYL1nXE}h zxs-O{twImUw z43Eo6nJ4_RTDIQALB8H!3nq37cE6>oNG;jZZhXh!vORPsMKfzJ8_*?O7DfGmcrL8A z(_NAhSH+JE?u?`xR1|ZThDb;2Dt`9hC;UQ%94^20-MA*;<$KO0{3b&9y(ENIe@&xj z6>X23)Ftc?ax=4pL5FZ06CPOjgG%2*lbx;+sVm6EHifaku2RZ6dm2zO1s^4+O| zX?^Rl!e{47y>uJGVh+yEaNe$4U2tTYyJ3nqt9nkQP8+X`9>;yxHT1=;SB4=QU*?nq zndTZfT|OzWa_zE$8FPQtuK2+Z>H-NyCcc=wWX>wq$q7{vij#xqCQBclE;KU_SpRHh zW?)cb0G=uW2QHH@&UKOjUxp5p-v+$&z!*iIUwCrEeC5gh!qSr;%oC7--UiJO%g(@H zgQD=VC|Kd1c_uQ*S7+LyC@PW!E7G5DDhEzd%(QbXn4J;PQoYKo1+C zI4^v%{X#z$(3LimCoU9YO4kMJJG0PS25}<7q9LXMM{Esm6)13%7{fk7Wdx5wm$C1R5emYB+b4!_g{ zCYC2a7ogf;<2t!#hh+G05lGD55CT^#LlBoxIEo9C9q6 zV^AjZEfZsU6$%s=ojiXT+hlLxY4o6EhgiZ7JP-%P5cLSCVgnh(`W^-bB@{)=b3uwG zE!U6%u3dpFT>%EaE{d8bl@K+c6+w`+ju^dTU{F9&yQvzYmVNS(GoZm{D-R;bE=#wApMmV(yJpr(t7y*s2{B8_zE)_ yL|YQw3&NAZiu6_*%Ye#&V4x{Sc^DWpP)tgl235p9dFD!GE+Jk92JyL|;s5}0b2K*q delta 34555 zcmX7vV`H6d(}mmEwr$(CZQE$vU^m*aZQE(=WXEZ2+l}qF_w)XN>&rEBu9;)4>0JOD zo(HR^Mh47P)@z^^pH!4#b(O8!;$>N+S+v5K5f8RrQ+Qv0_oH#e!pI2>yt4ij>fI9l zW&-hsVAQg%dpn3NRy$kb_vbM2sr`>bZ48b35m{D=OqX;p8A${^Dp|W&J5mXvUl#_I zN!~GCBUzj~C%K?<7+UZ_q|L)EGG#_*2Zzko-&Kck)Qd2%CpS3{P1co1?$|Sj1?E;PO z7alI9$X(MDly9AIEZ-vDLhpAKd1x4U#w$OvBtaA{fW9)iD#|AkMrsSaNz(69;h1iM1#_ z?u?O_aKa>vk=j;AR&*V-p3SY`CI}Uo%eRO(Dr-Te<99WQhi>y&l%UiS%W2m(d#woD zW?alFl75!1NiUzVqgqY98fSQNjhX3uZ&orB08Y*DFD;sjIddWoJF;S_@{Lx#SQk+9 zvSQ-620z0D7cy8-u_7u?PqYt?R0m2k%PWj%V(L|MCO(@3%l&pzEy7ijNv(VXU9byn z@6=4zL|qk*7!@QWd9imT9i%y}1#6+%w=s%WmsHbw@{UVc^?nL*GsnACaLnTbr9A>B zK)H-$tB`>jt9LSwaY+4!F1q(YO!E7@?SX3X-Ug4r($QrmJnM8m#;#LN`kE>?<{vbCZbhKOrMpux zTU=02hy${;n&ikcP8PqufhT9nJU>s;dyl;&~|Cs+o{9pCu{cRF+0{iyuH~6=tIZXVd zR~pJBC3Hf-g%Y|bhTuGyd~3-sm}kaX5=T?p$V?48h4{h2;_u{b}8s~Jar{39PnL7DsXpxcX#3zx@f9K zkkrw9s2*>)&=fLY{=xeIYVICff2Id5cc*~l7ztSsU@xuXYdV1(lLGZ5)?mXyIDf1- 
zA7j3P{C5s?$Y-kg60&XML*y93zrir8CNq*EMx)Kw)XA(N({9t-XAdX;rjxk`OF%4-0x?ne@LlBQMJe5+$Ir{Oj`@#qe+_-z!g5qQ2SxKQy1ex_x^Huj%u+S@EfEPP-70KeL@7@PBfadCUBt%`huTknOCj{ z;v?wZ2&wsL@-iBa(iFd)7duJTY8z-q5^HR-R9d*ex2m^A-~uCvz9B-1C$2xXL#>ow z!O<5&jhbM&@m=l_aW3F>vjJyy27gY}!9PSU3kITbrbs#Gm0gD?~Tub8ZFFK$X?pdv-%EeopaGB#$rDQHELW!8bVt`%?&>0 zrZUQ0!yP(uzVK?jWJ8^n915hO$v1SLV_&$-2y(iDIg}GDFRo!JzQF#gJoWu^UW0#? z*OC-SPMEY!LYYLJM*(Qov{#-t!3Z!CfomqgzFJld>~CTFKGcr^sUai5s-y^vI5K={ z)cmQthQuKS07e8nLfaIYQ5f}PJQqcmokx?%yzFH*`%k}RyXCt1Chfv5KAeMWbq^2MNft;@`hMyhWg50(!jdAn;Jyx4Yt)^^DVCSu?xRu^$*&&=O6#JVShU_N3?D)|$5pyP8A!f)`| z>t0k&S66T*es5(_cs>0F=twYJUrQMqYa2HQvy)d+XW&rai?m;8nW9tL9Ivp9qi2-` zOQM<}D*g`28wJ54H~1U!+)vQh)(cpuf^&8uteU$G{9BUhOL| zBX{5E1**;hlc0ZAi(r@)IK{Y*ro_UL8Ztf8n{Xnwn=s=qH;fxkK+uL zY)0pvf6-iHfX+{F8&6LzG;&d%^5g`_&GEEx0GU=cJM*}RecV-AqHSK@{TMir1jaFf&R{@?|ieOUnmb?lQxCN!GnAqcii9$ z{a!Y{Vfz)xD!m2VfPH=`bk5m6dG{LfgtA4ITT?Sckn<92rt@pG+sk>3UhTQx9ywF3 z=%B0LZN<=6-B4+UbYWxfQUOe8cmEDY3QL$;mOw&X2;q9x9qNz3J97)3^jb zdlzkDYLKm^5?3IV>t3fdWwNpq3qY;hsj=pk9;P!wVmjP|6Dw^ez7_&DH9X33$T=Q{>Nl zv*a*QMM1-2XQ)O=3n@X+RO~S`N13QM81^ZzljPJIFBh%x<~No?@z_&LAl)ap!AflS zb{yFXU(Uw(dw%NR_l7%eN2VVX;^Ln{I1G+yPQr1AY+0MapBnJ3k1>Zdrw^3aUig*! z?xQe8C0LW;EDY(qe_P!Z#Q^jP3u$Z3hQpy^w7?jI;~XTz0ju$DQNc4LUyX}+S5zh> zGkB%~XU+L?3pw&j!i|x6C+RyP+_XYNm9`rtHpqxvoCdV_MXg847oHhYJqO+{t!xxdbsw4Ugn($Cwkm^+36&goy$vkaFs zrH6F29eMPXyoBha7X^b+N*a!>VZ<&Gf3eeE+Bgz7PB-6X7 z_%2M~{sTwC^iQVjH9#fVa3IO6E4b*S%M;#WhHa^L+=DP%arD_`eW5G0<9Tk=Ci?P@ z6tJXhej{ZWF=idj32x7dp{zmQY;;D2*11&-(~wifGXLmD6C-XR=K3c>S^_+x!3OuB z%D&!EOk;V4Sq6eQcE{UEDsPMtED*;qgcJU^UwLwjE-Ww54d73fQ`9Sv%^H>juEKmxN+*aD=0Q+ZFH1_J(*$~9&JyUJ6!>(Nj zi3Z6zWC%Yz0ZjX>thi~rH+lqv<9nkI3?Ghn7@!u3Ef){G(0Pvwnxc&(YeC=Kg2-7z zr>a^@b_QClXs?Obplq@Lq-l5>W);Y^JbCYk^n8G`8PzCH^rnY5Zk-AN6|7Pn=oF(H zxE#8LkI;;}K7I^UK55Z)c=zn7OX_XVgFlEGSO}~H^y|wd7piw*b1$kA!0*X*DQ~O` z*vFvc5Jy7(fFMRq>XA8Tq`E>EF35{?(_;yAdbO8rrmrlb&LceV%;U3haVV}Koh9C| zTZnR0a(*yN^Hp9u*h+eAdn)d}vPCo3k?GCz1w>OOeme(Mbo*A7)*nEmmUt?eN_vA; z=~2}K_}BtDXJM-y5fn^v>QQo+%*FdZQFNz^j&rYhmZHgDA-TH47#Wjn_@iH4?6R{J z%+C8LYIy>{3~A@|y4kN8YZZp72F8F@dOZWp>N0-DyVb4UQd_t^`P)zsCoygL_>>x| z2Hyu7;n(4G&?wCB4YVUIVg0K!CALjRsb}&4aLS|}0t`C}orYqhFe7N~h9XQ_bIW*f zGlDCIE`&wwyFX1U>}g#P0xRRn2q9%FPRfm{-M7;}6cS(V6;kn@6!$y06lO>8AE_!O z{|W{HEAbI0eD$z9tQvWth7y>qpTKQ0$EDsJkQxAaV2+gE28Al8W%t`Pbh zPl#%_S@a^6Y;lH6BfUfZNRKwS#x_keQ`;Rjg@qj zZRwQXZd-rWngbYC}r6X)VCJ-=D54A+81%(L*8?+&r7(wOxDSNn!t(U}!;5|sjq zc5yF5$V!;%C#T+T3*AD+A({T)#p$H_<$nDd#M)KOLbd*KoW~9E19BBd-UwBX1<0h9 z8lNI&7Z_r4bx;`%5&;ky+y7PD9F^;Qk{`J@z!jJKyJ|s@lY^y!r9p^75D)_TJ6S*T zLA7AA*m}Y|5~)-`cyB+lUE9CS_`iB;MM&0fX**f;$n($fQ1_Zo=u>|n~r$HvkOUK(gv_L&@DE0b4#ya{HN)8bNQMl9hCva zi~j0v&plRsp?_zR zA}uI4n;^_Ko5`N-HCw_1BMLd#OAmmIY#ol4M^UjLL-UAat+xA+zxrFqKc@V5Zqan_ z+LoVX-Ub2mT7Dk_ z<+_3?XWBEM84@J_F}FDe-hl@}x@v-s1AR{_YD!_fMgagH6s9uyi6pW3gdhauG>+H? 
zi<5^{dp*5-9v`|m*ceT&`Hqv77oBQ+Da!=?dDO&9jo;=JkzrQKx^o$RqAgzL{ zjK@n)JW~lzxB>(o(21ibI}i|r3e;17zTjdEl5c`Cn-KAlR7EPp84M@!8~CywES-`mxKJ@Dsf6B18_!XMIq$Q3rTDeIgJ3X zB1)voa#V{iY^ju>*Cdg&UCbx?d3UMArPRHZauE}c@Fdk;z85OcA&Th>ZN%}=VU%3b9={Q(@M4QaeuGE(BbZ{U z?WPDG+sjJSz1OYFpdImKYHUa@ELn%n&PR9&I7B$<-c3e|{tPH*u@hs)Ci>Z@5$M?lP(#d#QIz}~()P7mt`<2PT4oHH}R&#dIx4uq943D8gVbaa2&FygrSk3*whGr~Jn zR4QnS@83UZ_BUGw;?@T zo5jA#potERcBv+dd8V$xTh)COur`TQ^^Yb&cdBcesjHlA3O8SBeKrVj!-D3+_p6%P zP@e{|^-G-C(}g+=bAuAy8)wcS{$XB?I=|r=&=TvbqeyXiuG43RR>R72Ry7d6RS;n^ zO5J-QIc@)sz_l6%Lg5zA8cgNK^GK_b-Z+M{RLYk5=O|6c%!1u6YMm3jJg{TfS*L%2 zA<*7$@wgJ(M*gyTzz8+7{iRP_e~(CCbGB}FN-#`&1ntct@`5gB-u6oUp3#QDxyF8v zOjxr}pS{5RpK1l7+l(bC)0>M;%7L?@6t}S&a zx0gP8^sXi(g2_g8+8-1~hKO;9Nn%_S%9djd*;nCLadHpVx(S0tixw2{Q}vOPCWvZg zjYc6LQ~nIZ*b0m_uN~l{&2df2*ZmBU8dv`#o+^5p>D5l%9@(Y-g%`|$%nQ|SSRm0c zLZV)45DS8d#v(z6gj&6|ay@MP23leodS8-GWIMH8_YCScX#Xr)mbuvXqSHo*)cY9g z#Ea+NvHIA)@`L+)T|f$Etx;-vrE3;Gk^O@IN@1{lpg&XzU5Eh3!w;6l=Q$k|%7nj^ z|HGu}c59-Ilzu^w<93il$cRf@C(4Cr2S!!E&7#)GgUH@py?O;Vl&joXrep=2A|3Vn zH+e$Ctmdy3B^fh%12D$nQk^j|v=>_3JAdKPt2YVusbNW&CL?M*?`K1mK*!&-9Ecp~>V1w{EK(429OT>DJAV21fG z=XP=%m+0vV4LdIi#(~XpaUY$~fQ=xA#5?V%xGRr_|5WWV=uoG_Z&{fae)`2~u{6-p zG>E>8j({w7njU-5Lai|2HhDPntQ(X@yB z9l?NGoKB5N98fWrkdN3g8ox7Vic|gfTF~jIfXkm|9Yuu-p>v3d{5&hC+ZD%mh|_=* zD5v*u(SuLxzX~owH!mJQi%Z=ALvdjyt9U6baVY<88B>{HApAJ~>`buHVGQd%KUu(d z5#{NEKk6Vy08_8*E(?hqZe2L?P2$>!0~26N(rVzB9KbF&JQOIaU{SumX!TsYzR%wB z<5EgJXDJ=1L_SNCNZcBWBNeN+Y`)B%R(wEA?}Wi@mp(jcw9&^1EMSM58?68gwnXF` zzT0_7>)ep%6hid-*DZ42eU)tFcFz7@bo=<~CrLXpNDM}tv*-B(ZF`(9^RiM9W4xC%@ZHv=>w(&~$Wta%)Z;d!{J;e@z zX1Gkw^XrHOfYHR#hAU=G`v43E$Iq}*gwqm@-mPac0HOZ0 zVtfu7>CQYS_F@n6n#CGcC5R%4{+P4m7uVlg3axX}B(_kf((>W?EhIO&rQ{iUO$16X zv{Abj3ZApUrcar7Ck}B1%RvnR%uocMlKsRxV9Qqe^Y_5C$xQW@9QdCcF%W#!zj;!xWc+0#VQ*}u&rJ7)zc+{vpw+nV?{tdd&Xs`NV zKUp|dV98WbWl*_MoyzM0xv8tTNJChwifP!9WM^GD|Mkc75$F;j$K%Y8K@7?uJjq-w zz*|>EH5jH&oTKlIzueAN2926Uo1OryC|CmkyoQZABt#FtHz)QmQvSX35o`f z<^*5XXxexj+Q-a#2h4(?_*|!5Pjph@?Na8Z>K%AAjNr3T!7RN;7c)1SqAJfHY|xAV z1f;p%lSdE8I}E4~tRH(l*rK?OZ>mB4C{3e%E-bUng2ymerg8?M$rXC!D?3O}_mka? 
zm*Y~JMu+_F7O4T;#nFv)?Ru6 z92r|old*4ZB$*6M40B;V&2w->#>4DEu0;#vHSgXdEzm{+VS48 z7U1tVn#AnQ3z#gP26$!dmS5&JsXsrR>~rWA}%qd{92+j zu+wYAqrJYOA%WC9nZ>BKH&;9vMSW_59z5LtzS4Q@o5vcrWjg+28#&$*8SMYP z!l5=|p@x6YnmNq>23sQ(^du5K)TB&K8t{P`@T4J5cEFL@qwtsCmn~p>>*b=37y!kB zn6x{#KjM{S9O_otGQub*K)iIjtE2NfiV~zD2x{4r)IUD(Y8%r`n;#)ujIrl8Sa+L{ z>ixGoZJ1K@;wTUbRRFgnltN_U*^EOJS zRo4Y+S`cP}e-zNtdl^S5#%oN#HLjmq$W^(Y6=5tM#RBK-M14RO7X(8Gliy3+&9fO; zXn{60%0sWh1_g1Z2r0MuGwSGUE;l4TI*M!$5dm&v9pO7@KlW@j_QboeDd1k9!7S)jIwBza-V#1)(7ht|sjY}a19sO!T z2VEW7nB0!zP=Sx17-6S$r=A)MZikCjlQHE)%_Ka|OY4+jgGOw=I3CM`3ui^=o0p7u z?xujpg#dRVZCg|{%!^DvoR*~;QBH8ia6%4pOh<#t+e_u!8gjuk_Aic=|*H24Yq~Wup1dTRQs0nlZOy+30f16;f7EYh*^*i9hTZ`h`015%{i|4 z?$7qC3&kt#(jI#<76Biz=bl=k=&qyaH>foM#zA7}N`Ji~)-f-t&tR4^do)-5t?Hz_Q+X~S2bZx{t+MEjwy3kGfbv(ij^@;=?H_^FIIu*HP_7mpV)NS{MY-Rr7&rvWo@Wd~{Lt!8|66rq`GdGu% z@<(<7bYcZKCt%_RmTpAjx=TNvdh+ZiLkMN+hT;=tC?%vQQGc7WrCPIYZwYTW`;x|N zrlEz1yf95FiloUU^(onr3A3>+96;;6aL?($@!JwiQ2hO|^i)b4pCJ7-y&a~B#J`#FO!3uBp{5GG*Cni@K85&o0q~6#LtppE&cVY z3Bv{xQ-;i}LN-60B2*1suMd=Fi%Y|7@52axZ|b=Wiwk^5eg{9X4}(q%4D5N5_Gm)` zg~VyFCwfkIKW(@@ZGAlTra6CO$RA_b*yz#){B82N7AYpQ9)sLQfhOAOMUV7$0|d$=_y&jl>va$3u-H z_+H*|UXBPLe%N2Ukwu1*)kt!$Y>(IH3`YbEt; znb1uB*{UgwG{pQnh>h@vyCE!6B~!k}NxEai#iY{$!_w54s5!6jG9%pr=S~3Km^EEA z)sCnnau+ZY)(}IK#(3jGGADw8V7#v~<&y5cF=5_Ypkrs3&7{}%(4KM7) zuSHVqo~g#1kzNwXc39%hL8atpa1Wd#V^uL=W^&E)fvGivt)B!M)?)Y#Ze&zU6O_I?1wj)*M;b*dE zqlcwgX#eVuZj2GKgBu@QB(#LHMd`qk<08i$hG1@g1;zD*#(9PHjVWl*5!;ER{Q#A9 zyQ%fu<$U?dOW=&_#~{nrq{RRyD8upRi}c-m!n)DZw9P>WGs>o1vefI}ujt_`O@l#Z z%xnOt4&e}LlM1-0*dd?|EvrAO-$fX8i{aTP^2wsmSDd!Xc9DxJB=x1}6|yM~QQPbl z0xrJcQNtWHgt*MdGmtj%x6SWYd?uGnrx4{m{6A9bYx`m z$*UAs@9?3s;@Jl19%$!3TxPlCkawEk12FADYJClt0N@O@Pxxhj+Kk(1jK~laR0*KGAc7%C4nI^v2NShTc4#?!p{0@p0T#HSIRndH;#Ts0YECtlSR}~{Uck+keoJq6iH)(Zc~C!fBe2~4(Wd> zR<4I1zMeW$<0xww(@09!l?;oDiq zk8qjS9Lxv$<5m#j(?4VLDgLz;8b$B%XO|9i7^1M;V{aGC#JT)c+L=BgCfO5k>CTlI zOlf~DzcopV29Dajzt*OcYvaUH{UJPaD$;spv%>{y8goE+bDD$~HQbON>W*~JD`;`- zZEcCPSdlCvANe z=?|+e{6AW$f(H;BND>uy1MvQ`pri>SafK5bK!YAE>0URAW9RS8#LWUHBOc&BNQ9T+ zJpg~Eky!u!9WBk)!$Z?!^3M~o_VPERYnk1NmzVYaGH;1h+;st==-;jzF~2LTn+x*k zvywHZg7~=aiJe=OhS@U>1fYGvT1+jsAaiaM;) zay2xsMKhO+FIeK?|K{G4SJOEt*eX?!>K8jpsZWW8c!X|JR#v(1+Ey5NM^TB1n|_40 z@Db2gH}PNT+3YEyqXP8U@)`E|Xat<{K5K;eK7O0yV72m|b!o43!e-!P>iW>7-9HN7 zmmc7)JX0^lPzF#>$#D~nU^3f!~Q zQWly&oZEb1847&czU;dg?=dS>z3lJkADL1innNtE(f?~OxM`%A_PBp?Lj;zDDomdg zn+lVJBnzA5DamDVIk!-AoSMv~QchAOt&5fk#G=s!$FD}9rL0yDjwDkw<9>|UUuyVm z&o7y|6Ut5WI0!G$M?NiMUy%;s3ugPKJU_+B!Z$eMFm}A**6Z8jHg)_qVmzG-uG7bj zfb6twRQ2wVgd)WY00}ux=jqy@YH4ldI*;T^2iAk+@0u`r_Fu(hmc3}!u-Pb>BDIf{ zCNDDv_Ko`U@})TZvuE=#74~E4SUh)<>8kxZ=7`E?#|c zdDKEoHxbEq;VVpkk^b&~>-y`uO~mX=X0bmP!=F1G1YiluyeEg!D*8Fq-h=NyE-2S;^F6j=QMtUzN4oPedvc*q(BCpbg~*As!D@U z3(sz|;Pe1hn08P_cDQ(klZ6 z;P`q(5_V?*kJYBBrA1^yDgJD|)X1FV_*~sO>?8Sy~I9WdK5K8bc7aeNC zDb{Fe>y3N^{mrD1+GyH{F?@9}YQ2Om3t`nt zQ(}MS8M?6Vk>B=*j*yibz6QCdR=ALgTUcKx61){O@1WkPp-v$$4}e#KgK`HG~2@#A?`BF8em`ah6+8hH-DNA2>@02WWk9(fzhL_iz|~H~qEViQ(*{ zV;3tjb<%&r!whm6B`XtWmmrMWi=#ZO&`{h9`->HVxQ)^_oOS{W z!BzVRjdx5@pCXl#87ovlp<^QU;s<*d$)+|vI;Ai(!8Tjll^mi6!o~CpnlgZAK>6=V zm38^kT`D$_$v@UYeFyVhnsMZI1m`E&8<{V07>bBEI1=fg3cji*N?7pBzuamD`X|^^ zm!)2v?s|6T&H-_^y`KM&$!0!9tai9x&)5<(&sY6B`3D{$$KMAX3@&`SW;X0 zB-}obt^I;|#o_bR>eOv?P>=UC6CGTXIM+lSu?Uy+R9~O;q|c2+FafBP;E)B5M9HJgRIpF|GvRi*E+JTBI~T?T*X}r) zefUd*(+3n_YHZZS(g8)+7=pNV9QR^>Qs8t+iEpbJS!9;wio&9rn=19C0G#Ax zM-tWHp_YlJvXWsUqJUr^`OYFA4wkgL`cSOV;w4?tp>GT1jq}-qPoN zp&G}*;+#+Zh&vqDOp>gRL#^O7;s2yWqs+U4_+R4`{l9rEt-ud(kZ*JZm#0M{4K(OH zb<7kgkgbakPE=G&!#cNkvSgpU{KLkc6)dNU$}BQelv+t+gemD5;)F-0(%cjYUFcm{ 
zxaUt??ycI({X5Gkk@KIR$WCqy4!wkeO_j)?O7=lFL@zJDfz zrJJRDePaPzCAB)hPOL%05T5D*hq|L5-GG&s5sB97pCT23toUrTxRB{!lejfX_xg(y z;VQ+X91I;EUOB;=mTkswkW0~F$ zS%M}ATlKkIg??F?I|%gdYBhU(h$LqkhE!Xx$7kPS{2U4wLujF_4O+d8^ej{ zgSo(;vA)|(KT8R_n_aQ$YqDQaI9Stqi7u=+l~~*u^3-WsfA$=w=VX6H%gf!6X|O#X z*U6Wg#naq%yrf&|`*$O!?cS94GD zk}Gx%{UU!kx|HFb+{f(RA2h+t#A!32`fxL}QlXUM{QF3m&{=7+hz@aXMq*FirZk?W zoQ~ZCOx>S?o>3`+tC&N0x4R`%m)%O$b@BkW;6zE+aBzeYi47~78w$d~uypaV*p$kQ zJf34Q+pp~vg6)yeTT&qWbnR2|SifwK2gA7fzy#W(DyM^bdCjnee42Ws>5mM9W6_`j zC(|n5Fa&=MT$$@?p~)!IlLezYa}=Uw21^Fz-I#?_AOk(7Ttxm;#>RDD_9EloqhvrS z&7fpbd$q_e21Al+bcz|o{(^p}AG>jX0B}ZZRfzk$WLbNLC{y|lZ|&a(=bOE6Mxum{ zM=Nd+-I2A-N&2giWM2oAH`O&QecJn6%uYl0GWlpx&2*)BIfl3h&2E(>#ODt4oG}Dq z__73?sw2-TOWq@d&gmYKdh`a}-_6YQ5```}bEBEmWLj))O z?*eUM4tw0Cwrr+4Ml^9JkKW9e4|_^oal0*sS-u_Xovjo8RJ18x_m7v!j$eR@-{2(Y z?&K4ZR8^T{MGHL#C(+ZAs6&k}r07Xqo1WzaMLo9V;I<9a6jx2wH2qeU?kv25MJxoj zJKzX`Un|;_e&KY%R2jU~<5lm-`$EjIJLDP~11_5?&W#t3I{~+0Ze++pOh2B4c1Mde zSgj$ODQQm7gk&w{wwfE1_@V(g!C=2Hd%Gwj{{-_K4S|nZu+vk}@k(?&13iccsLkQo z_t8#Ah$HVB-MRyzpab*OHOp zl`$tEcUcF9_=3*qh8KTaW$znGztA7Obzb`QW5IQN+8XC=l%+$FVgZ|*XCU?G4w)}! zmEY+2!(!%R5;h`>W(ACqB|7`GTSp4{d)eEC8O)Mhsr$dQG}WVBk$aN1->sTSV7E)K zBqr;^#^bZJJX4E_{9gdPo8e?Ry>ZrE&qM)zF5z20DP0`)IIm_!vm&s2mzl z2;EPI{HgFH-Mp&fIL^6f74>19^>o^AOj`uyL0+Nb##Slvi9K4LQSs>f+$j?cn9Z__C zAkyZ9C;#uRi3cDYoTA>AT<|*pt{K70oZKG*S1F$r?KE=$4~W3!u53yUvh~(kMrClS zXC?Dmgv4iS`>~wBPJJFL_C8x2tEg*PCDX2=rHQ@z+Zs)Kkr;FYG`GnbUXqdipzvHE z1aZ>G6|e`}Q#)Kru0)(SZnUCN#dN2H zd1}r&xGsaAeEed9#?|0HzMGA7pl2=aehy_zsRV8RKV6+^I8woDd%4J8v9hs$x{ zl*V61wSumovRVWtetd1eJ%i^#z`_~~^B;aeuD`6LgHL66F0b^G5@om^&_3REtGmhz z%j^9{U`BH7-~P_>c_yu9sE+kk)|2`C)-ygYhR?g~gH`OK@JFAGg0O)ng-JzSZMjw< z2f&vA7@qAhrVyoz64A!JaTVa>jb5=I0cbRuTv;gMF@4bX3DVV#!VWZEo>PWHeMQtU!!7ptMzb{H ze`E4ZG!rr4A8>j2AK(A0Vh6mNY0|*1BbLhs4?>jmi6fRaQwed-Z?0d=eT@Hg zLS(%af5#q%h@txY2KaYmJBu>}ZESUv-G02~cJ-(ADz6u8rLVECbAR7+KV~a!DI83H zd!Z(Ekz%vjA-|%4-YpgfymMzxm_RjZg%ruo zT4^x)f*%Ufvg_n`&55cK;~QChP6~Fy_Z67HA`UtdW)@$Xk-2+|opk6A@y0~3Qb;V% z%+B@ArKl|Q^DJW&xuBZD#~SurH7XXf*uE0@|ccNd&MA%Ts*1 zg7TU!xY}~*AOY+tAnFR(Fu)e@^9V!Rm65$;G$-?6e%7w7p9WT098%-R?u#J+zLot@ z4H7R>G8;q~_^uxC_Z=-548YRA`r`CsPDL!^$v0Yy<^M=Jryxz5ZVR_<+qP}nwrxzi z-)Y;nZQHhO+db{>IrD$#DkHP%swyKhV(qn`H9~3h0Bd33H*DAP0S!ypZqPF^1^tZJ z{z;HN?$WJ5{0jQNzYOc|KbJ(Pr42~YhW5ohNdY*rEk=({8q+F}hy)&ziN(@q1;>jL zBN<9(k1N!p2D%uHF0NxFut`XwEMc@ZH-|95>U)PY@}C=bmV_*dakL}J5DUpNZi-y& z+{i0>H@c-g|DBO)HJ>7$VVtn)z3X}H`FuN-t>gcqLas?Lk@MJb5?u@BTn0Q}E(}S~ zXrNX`ysRv*iOn1v@fBDeSDvvR>+;o>kj ztRqEZOWN!fqp(`XQ3ppvC)c{AeyS6b_8pN1M*~0=$U;P31!~Px`Obrz;GNs(8RrJvONy<{Dk1x0z zJJzhQBt{J@&DP6cHugB!q?xi~O`yJYHUsTI zmgulx%I<*?vPSl(!tj;LL$K*k zH(*d31iyB9aYAzw49W&qDi0>f;b5kA31nz(%2W`QFJqaX0&hM`KP1gfdRw?7@}$XB z!^cUI%C!?X!QVQxbqEFSbuP0>_3MTCof6!e4LMAfGRd0;Lt+w0WK@b4EkGHRqX!h{ zrYxwwH&-fM67X7zP&Qpup&vAOaKH|S*pcbI{ksFg@tfw)paaK)5khkys0GSTnAtfC z{mVJkCXt|G-SYwt0O4dM8Hf{L*&^nOeQ271ECyc5Y&z5R0%hCq6~} z$XW$kcz!nnCTAl}NyB0#ikwyg_M};inG%*x38`EYJ%FXdj&A`g)-wJ(R=C`O^r{W` z8$1r{G0X4g`uD+}vw4`H5!*B8TTsmeaYGk3x0{&aar7ocO6?dlGbyV480<#{%^93y zF(ei<%{OYi?n?L9#HL_R-00#zRzbbwVnJ0zt}4f|KNBkT6&=Kb=$E(@aC03vU~p)7$XA@ zq5*`*4Y&u*=Ju>+x}q&Xxsjn;Dd)6Otudner9zi z<*LpeG}*vJ58#P4|qXF-ul1|u*;=-@oGPtmBnQW6VY9(s`5GMsO@!;s_PKo_? 
z3HbGokZ|vaAA-guf5W0JDwpV}1u8;7XJ=wD;NgcLIJW8S5w!c%O*zU0%~)0M)`!Al-+OFsmPW1zniB%fqF;klqxz`Y z2@srWa3e?B3ot|nhE|Q7VIjr+$D7F^n?wm5g8w?Ro0i72K3u^g)&&F^9~@eHd33YY z9LR!!orc0vq$sd~eR~hW{4?R3Di;~mz{^G1X?#-!|Cli(#0-sm|GHYpcab`ZA=zi3 z5*m>sJyOij{!PgIJa?A0%wL*Ur1fLJdJW$a>&Xj5p_IO=SwyTp@nn&@6L4vIfT79aPyo{LQ4DhIz1 z5g*+hII!(cLGHc5ROH&^^o=02r*x>MxMPx{JFMmNvzJ?AI8p!u_H8L1a`{6~bF@L* zxszth=`>%Vi`=E{jJKd-+6pf^vo93EzqFfTcr)A&V{rERu__UAQVyE1imol78AFmB z7T;pNFxW^M+O3#;Tz^e*`AqsD?M*wPT6pnBFPA^kOTnZYHr@O(JUQ^#6bD&CC*?HG zRAKSXYv9DU)L{V(wM=te@V@Db3}97Sn9r2nroOz06!qV=)+%EKB^MR_K}p$zM5OD1 zzhYv+?%A`7dBrU(#&1hXF;7lzH`nENZKP2I{qp^NxBA8~N>?1H@uZ~Do{d+|KYx9I z_z)J7O(;xu0%0n3o4y7LnJKRPK?RV@_v_YLogYPH;}`>cZmDVyO#%-IMQVq6z9r>@ z?*AQC$=?|aqrY8xGx%vfk0ZeByTz18IrP0XTVlJyRx5!NALYPyjcn|)U5jl^<)_KZ z2C?1|dkBZ;h8e#)3gUPfdf80xu^8evspE%Xf~x zs%phX&YuB{y}>%PuOG>s&EW}5Y0`dyseV)!C|`1(U{Nd4c4>07ZFmdTJS2T3+dEw8 zK%f_x!O?H8+_Qd>$DsYNY!?tC^H;N+!fQS{!4-9c^;uXx)D3|joo_FlBTTdDM4nx{ zPve})D_u{PG>&^G=>$2N-dZ!eMx?9X7FmPNo)7|>Z|A-mNZ0{+884L6=f-{Q4bN3y zAWL{oJIh(js2$bDTaV&bh4Fn=4^M?@N~+$IXxytdnI4{RkYA$8j(}sb2TO$~49JHz z0$K$WB@axSqKsyG>m7&3IVR+?xXLfs7ytuJHH8{`ewhkH;?H7#an)*hPiBLi22jAI z{|tZ;dU=nDUVyfIurEm0VoB6kiaK#ju6RV?{3qaV`NQ4&$)fc4AAVKiXu_1$86nxh zX)Mif*|y>N;S~7UCXQhs3-%nqNuTu>=8wqtp$-#tC?bwc-{&k&0>0nRBku-b5X931zqll&%fn$1$->@El+EIA;L zfEYJY)kaTI%H z{A%hpZ?Xt=;#(++B0e)B>4_a3E7h#8upWz!G;VQBX0rjzKvy9N2LECS2@wrBoS;4G z1PgI50DD!wtwsZ&JoAGuum9s&+0NI&_n}!kUTvpD{tyG9jlSXyQ)m9H8VXoDY$j!w zo;imjJKl;E5u|n4Q?HQsy`*&=VY`SG+YFUqG*+;A9(wKfm_|6^SWh_6>1u63)H3zEGm5Uk)#z>J0XC1L+&pzieqnAo+7zlr$M4kl;-h zjo^h7U5Y3tbY@(_{#h1et^{nbOP9Nw*tJOD;WejSG-4d{(2X$tDM@-rK8SbUqMe}%IPqxOV}m#%mq0)auvNwT2R9)$1-o(2o zpIS;qwy8m^tEBC99O}bYKd7ALbB~$d<=eGd>WML+U0aAl>{Uc8CB|oVWMt zbPe9+6&V{l2Th1)Jx`K64?gUC_<>x#Wk*SOSA<&A=j2q zo_M`Lznpsg1h-W546hm(q@Rf=xL@w5QJ;HxIp?O`;sOMovgc4n%D5`kiDO6%Rhe2^ zzPa=8pd(2&HN-=5JzsiJ^(ZlLVpZD^5!$(rt0PVLQCzh7s#6_N1dRKtQv_vTgSQT5 z63+e@K`67zjbb@QdwMNF8G29tcxAl36SZAGxolCj9aS%>(Tl*6a0eW@3j4!&d!12v z%+~Xc=>VJqBcW!D#JX3#yk4O^;#|O3!ol;J%t8>wc!*6`+`~%?-QE_M{wa&vg14R~ z(M1VT-&l-M(N1>3pNjVfvCIk}d|H4&*7{*8!W-;^tFgD31O%~NtUaK_*-m7CSEt}T zm^Z02X#cQ$Mcw}TG{>1I`vmvNoxujnPra4aSwP55x37=0VvyV<)68QB-b$o-h7p*V z#QQ8?A7`=m`*+dTfYdm=;i1ptR|In}rUF^r&{bKbI@5DT$JEo;?-N}Z13}n16v?G2 z{?@ny^7|!rg(on8b97#GupiPA<(g=o;@P`4 zEx06)SiGKkIKFHzK1M`ctf?vQV#b-{ws=+0U^*LYoTK*pu;A#NB$$I=Tv{LLVQin~ z@aGTp?J<(c_1M!Jr8MK;XA8fcB+*DkFF@oAhQ=B1o*$<@;ZdGs_5O!BKi8XjF2L4n zA&(?SaRDWm+p0UTFXj1prs!*v$(q+s=8S1h(*H8pd5*8%HGN0mgw3yvfsxr4QYT)o zzdjal^6zA56|Z@csYH^3Qr2~ZR#p|Huuh0Yt|$~>oQZJDF75aeH%UlQv)fQ=3P{i1 zRt99gL`$b61Q`pdos?W6yd&%2IWK#}$wWOa9wJW&($J4h0M|9sFtQu9k)ZtYEQ#vu zS+uD(3`7T~t?I;f%z8N~nG&FVwxGXrTL!k9s#LB}FSo;a+V-j}H^myGwQq@jTIycD zP5A{w+a;^kOQW^C%9W{j^&o@)3!v~U(?wx42E5G*bd82&a1p6ax|pk)#8nG9risCw zOERH8;tq?Q4ymxf*9_aF-sTpLvETwD#sB#ID1D+WohEt0s557Ij5)ldexY+diQJ*l ziBo;1v*vx(F|lI8udAo450QIQTmPqf(7oULr5*0dE9i>i#D&k%WyfM*4{*?_%9k>g zg1_1%x?#`Xm7M@YZ?!zJs$AxS&8sBLI@c|-vSiG<*OZyw>CL*p6#N~p z#VywqpWdZ;{ylc5d7W8E7Jx_H+5e#N$h#{ni@#TlGqz`yah-qCC_;P8?N*>CPJ03b ze(YVDvbIR$#lJEkuf}L7F8q$fKCWz&>{uFg9JgTOmA*Rux-{|#+pO`!s!!4;PlE%9ys+;|)oK%&V$*FH!G2%|y(zz>X zUwdXer0HIIJkelANg_W!ofsyiN{zi2=}G1UL{`V81}1D1Sz zviLV^w-$RE9fE4@H+ys>u;OY!sgqe&V-oFE9Fn$P9HbpOI{}esLIvc zV5S-9(XjFzn1qzo2owwg_d%7_)cR*!d&%@S&D($cFFMXXd!GdUxw5tZ_W@zRbjVfU zzx13(Hc!$teqA2WOYo^+SHpRz16DOcYqaXHSMZl2Ax$)f^WC??al8lfX9)O_p9#Ml}LB(N8yJ! 
zj&_UD9K54Rt#yqvhklEMZ3bRC&)(^h`#kzq-#_QN?J6eLT$ zMWG-mP;HkB@5;2*lAP&1*4C)HWEs{gtp15Y%y|*%(3UOMu*v4kTi0@pWvg2Y%7yI* z%XNlZa$@AZ(Z#Elv`5MUei~VFCjF8El)@g&>(v;E; z;laavf&ANfk9*0LA@oP4QmbCBF-lB^Mj~wo)eGG57gqAKC>Hd80Eb+7b;iJzV5RsL z8>ddQH8PnC;l{M(t4c$M=q78GW6=*d#c`-jK$q#-{9c)UNO4eLm9c!DWcCth4O-FU zboSKPhL-lq3q<)m8Xw7+l=Z)H=rGgMI0H?KrPjc;iDzY5g|Ve$8?SE`8*sb1u*>dm zD~f9~j2H~6Oo2`_1 zq@_mmUbFQV25E7XJ)zBRQktT12@qHHy-@aCdAFWv4iZVN0B3}E;k(jg>X|eqOrqgM z4yBUuA*BHdnN9v;5>3#L$NFREyHW&Q*rWYa_q zhC~>M&bMFgXC6AeQ`P-s<}Ot_x^cb51r7ArPbRRs&Dd_TEeugnjR(O#V5i6OYjzRF zw1@Rvo;_wEfQA@P%I^9ljrhxxuqf9g^cWSKq~+kiVxa`&EBDqmB=C1G+XB7`TQeiV zR_k?`$&W&+ntIPeEtM9hqcj|yfW>x7&1Ht1@;!d#Wo%1hO+^Q{E?VD|`-OvV9G?tp;6{sI%L-u)Hw z;|`uN6~VqZ!g~K#B@W7?wDcbO?XS4hnW9kS1Hbi=U_m*~7`N~3oK;qFTX$$LQ#CkL z6I?a(HkF8SKJU8mT{K35ekfP3`05!M{gmrV0E-=IyqP=N;K<&jOnPcjdXrbk$%)z9cUe|#I0unK5^+qGx8#2 zz_!bmzVG*Uat*&f4P>&sV2RswlITV}wPz?_;(S;19}e}54fP|K5l_c2kU5(-Zh!7t zz=B2HktD~ap{s%*CDEl?x6o+91T-xH895-S1}M=*KhFM7Nm&1$OB++Robv0T`OBcJ zXNX%Xio0_ryjr)!Osc7au35UM`B}Ru4zN_o+C!+s&e7|}Zc;5?whP$@J@DE`>w-XH zlVmbrI4|-Z^2^I^EzuYKD+JA@8lx%>aLFZq7KT1~lAu}8cj$<-JJ4ljkcSA;{PNr)d-6P5Z!6Q=t!t*8%X)a|;_92=XXN=WMV))*gWR-wHzU(G6FPTfSjd9) zm8e1mfj4qFmlXO*a3};$&jgc$nfG>NR&iao(jYk`%E75h=K~dJ{Jqs%UH|aGHL8)-1MOyS2B?OJsyeA_YbGMDpE+>=NFcyoI;N z>1>3G4QR2~EP{L{x2e@E1U0jGGV5H$aeigDq&Dr zQ3FwJ+& zndX7VK+XD)t06uUY=)Cfo!ke%uDpOmq^bpEB`iv6(CKTGgEZUi4ddfNXJi_z4;)ob z?R+qj2SYX*zi8z=DXChEEDW+Cy>w-0agE|A7MoRJ4}-(|go-rP#sr%a(5k%wV z&Jllj+6XuSoIfZX9|mK!bbd)7TuaHBvoa(`9C$*XUh}hH1;Q7cTJQR)c>h}Hfr$aS z64c7#D^f{mN3s#2=SEf1$(*Vj{vZjF6Qc{a=VbTske7L^EY&A1I1sgXaYSH7(lF1V zZ<7`Rq33WZuu`!HK$wRr1=uE}#&JMftnZ&(P17gWF;>$TA&$ZQnIz>blTrW@49Z&H9yhgLBpFw(57K1dbIQW4fn1X(IiFWEKmPzV8gAa|ak)HAsmcQ7stP|q0hEzBNL=4YdXEkyfS zF+K+CVB#~(qd7eeZqR-VKIYJVmK2ePk``4I^PfQ*C7NUR z`w9lb?iHv2$4_p-+a+O}Fq6SnPiz>aV!~d=l3VdgDuwAPMR9eR`)b_`lg~{oX0lf1(zbBrnj4+-q zOl^#`)XKn=`()B-jExviKVTYrAKa27KAg3cboG+}D6*R;<`GC-b?i=e;aV7n(}XDS zK5xAEV=T^r#eThV+3C<^H>SuvAP&fw;Yn67eY%4=Y(p$~!`~h12 zQHM|f0#pQP_s$Q+TtMMvBdjQbLWw9cW?gl_+P z)2T94UJaYG2!yXITYjYl-@#5_47g{N|5=P~m|e}-F)*^L+{7O$#wv2e##5Y=A{>jN z6NhQSor9ulwP3gfxTF?V`P7AJ#E)ij$I`gc2fnmp&9w6qS2-Ct}6 z$#O%mKtP>I2VUBMt^Xm3LjP*D=xEyV?|8Psb91ZEj=gM(C3^Kcfvbx*$NK+MhP>W;OneZ{Q>eFEmxv}%ZCJ32=zr_OZd>6~v@ z6+3JzX%9qOvKS393r&R9O+te&#?{Q9nLkOV-eLg9!{WK}WyUWLZ7bQ5u26*u9c*T1 z_s1)j1k5&b8&5@YnmtS{tsmQaLW2%8D*8G-9w#PcVQh6sQY`!tBpU=8EZR!zfB{f{ za<+Err#ZNM4JEx5n9!zuC#KmeI*%tRXP}jpswzymT7J{YpXdzA{J7K)j1tBF8B3DL zZXkec{`rT_{__t_`!E7veO1rg1tFzVeUTBjut*3ZOq}A$r%sWXn4v4|rA+7uMvy9n zL~2WHKLg$BeD2Wq%?frTUM^c}?K?3#L+Q2-?PR+e1Fn-XUThl8^}8JOyDZz-wcFh5 zYJCJ%J_Pf~bX(0A?Z4hGw(mY?J$j#Vo&@9O>in*f)*`H6&(Z-5xx5}$V@dR)-lxgN z=DMA_EJO4+^w_+D7N>4=%{6AbvpDG<(b)xE5Ezo~oEg~cEM?mwyY?3ZtFE;RyDS`u z(^sa_s%B<)vktqh=1|?Uv6DXsA`D^B9%_mXqx1C=a#KurOE?49)P_ixiHAA)D)oqEjQ6_v0UC9mTtMu&kf8&7uRiiigPD{$Cf(&DuOj0 zr*5{zPyO@Kq(|Ttu@wxKanV=^OPOjh-_$MbNz})ou6*9nq_XQo86WJ@JN~-b=Ln_8>Nz_ZS#QpRGt+bzH*-;{#x7PFqie+ z7p5e})fcDq)J2z=z~%nrFGFjbVu~0ICDHW3=HgtCW)?Z(%Cx$z!QuszcOCe&3!Al2 z`793RnB{Jj4QpQ2N#oKT>aY~aNxz_6B2&vPdJadbC4qp#H^<@o50}m>7WR?NO0$ZI z9OKTM+jxMFWX9mi7(@j)1Ji6~?HLU!KT0Y5a^-?|XH^B?R@T zn&a_U_XFAsGrNX@S~g1<=uz@~dCcZO=1??VC@PML{g}lbuN?j|_1S=dJgbT~o}}hs zP_uYZ&0+mWY1fupe(+6nn6<9-)Xluk97yX-!!lqSXq~!kL-=+4$Dy>O$sKO7M^1QY zhZGZfiNQu+?sef?E>5sqj$kHmf;kMv<>Gu)!^4!#7T009vBzq(m2aoHu#+93HBq7T z;Fs8IHvUlmxCB2hkDbm&xwFQcXUD_&sdeu|EYhFpf7v5_LCcVua9aunVe)qoGmyg# zIGlj&IrLKg=id@t7s916d&Gf(%X7^FFR9^bz-;*o1~Sa=`cKfJ0i}X+pBKN=?}!dP zg`ZMtP6xSuvHb=5HYH%ELaGxwqH{ zpY>Ic^}J!OwM!VmNM!$nUg$qN9DLtKuBvn1(x-P+tA*UHoOc727>5?^J;JFo_ac@) zU57%w^U2ME 
z@z^ZsB!AhyOscE8;~Ft$)NL)GcLteq4d32fw??L0QuWt_M9IJMgZ71Jm%2khx|QN+ zkm4zQ@OjyM+l=Rv(!k?%cYwnf7HWs^M+P^zo5o?7;E)V0v*zf}(;?ms0oUK)wKmZY)mSTGN4X@2=ZU!Gy73M(ftmHJHLFKQDcu`d% zeqiW{G`?}AtEP zKCnHuWzXZ_Hc>{cP@h~M$#q}kG{52%zmhATR3AbNGR~*6(%^Gs@UZ3i%7%PJ1mB^S zcdcrFDbD6lEJGZ4k6JT;eB_JbgIkkOqkz0I{q`d^kWl6a!%w4V?Y!;8%uU(-UA4Ti z{pv2+5CN^ba{ALpu1&qm`sMP@_L=-a)@-zC1*`f)uV5MU$xJj51%?S^ zoo@;kqY@4Zw0B!+hIvTT8KK*~9H@u54r>s{MX_|#z`Z$55bDJo#=hz~k)7CTbf>Gn z=!u;@JViT~(>P7UDdIOL;6kPDzOZNl16jLo5tHS4a%~T&AlicnCwZ5pZ;+WIB3tJE zv|J^!X0Kb|8njISx#zoB(Pv#!6=D}Uq(6Dg*ll##3kfDxdHdBXN*8dZOM0I{eLTO4 z=L}zF35GJX4Wee`#h=aCB+ZV0xcaZiLCH3bOFYTmEn0qf?uC#lOPC7>+nVeO1KQ@S zcZ5Z0gfk8hH03QrC@NnEKNi15bWP;FEKsGi0iUHN4L&2_auv%tIM}UFfgRyp5HWt()pn#0P9+xF2H!8zMqf`WJ*9YB zq~m+%xLtVjza4>CO4*%thB2k;Gv1Ani%8)IP6Pm^BAigXgOUHWcQDEgB??AtdsOx5 z+pXKfU4>+8ViRUJ;h()e88jRLEzSN7%O|=MovCW3@VxK@Z*xS$WLG=u_Nenb0wP@Y z6zs##uQ7oFvcSdh5?6kZ!%8l$Xuz^Rc!lv4q?e$mv(=#@x)s_VFF50vGuE_Nr{4zXB>y?7FOMC5^sBZr`mS*t_@%LYN9wl z+lsqD#V5JR63GEr9^&9*f)kFs zJ-A(>>!h~d0%9*wd+AY+&oryzurfV{QP{&-AtDs}#iq;dal?A9jE;huq2gExb3z+- zVQB@UHlVfsy1$)dF`dcZuc(GLnim09jrI9nJ6<#=03FVrkuINg2`RTPloS^^@KYD6 z1-C-Oj2OI0y9Tdx>=dNHhOYVvx!J#4EMhold-PGClLuLA~k2VDl6cPuV4lI5c(w9@7sllth~H@)0+v~XYqqC6&*fSX~S4Bii^0& z=M)D(5FoZsKxB&M$J_7lbS>$kF=@B|Z$#D|LHJQIr$aO51ta6s96Ug*Jk;|>9Yd$! zoF2W+)lFzY)J<>U$PHwbe9>BKLAeo~e%=Qy#qhvK&`)b2 z(U9#8bba`eGr9tr$SvM4`y`lLavOzPm`l<%-(R<1urb(AX0RE=R=#&QI)klkwrJ5%D5YHZ!~s zGwK?zKZeX|uO*Y|xLjO#6uzO%iXWsSE8#zLOWc! z&2L8sdT;bhUW495)_fGCcOLM-@DfGcb1xjf(ezYJxYOv<7YE$lBCrkbfBA{`I(GH- z(yHy1h=bg~fE$aIbB_3l`|p$R_p0b(+aL(~b<-Am9H@?s!T2*7{+*Vj?pCpV5&WJO z*GbW%PLj|(hbd!fQK5Y-kgDHV!-I$y6G>Y|&uo9+79v}}$s=l$>#F-_F{TjUn~-!M zBN>n)@(LkzI0Sg?f1s}uBZi`wRB}ywU7wqq-PwaS%3nitaXb{&Q=x!xvOPfiQmmkd zWpe2@y7?wbI;hF|hlqf@x+3@a4$wLdJ1PZBoRc9oRGgdM+vm*;5XBZcMZ+@4_{aPUS|`NsD4YP2JUM zZEvA&!QLB$K*%gHy~y-RVs-C zkN^usP)S1pZXjj)nugy#?&vpiE^DS|QlhiBOc?nC$9CK}Ze)ihI{p-m$pgYV^5L~B zQTU>)x*fvKCNK*9j$@Gyt@@I2LF8c7YvDJDCf%1h0zVyNg7E~R$`6JE1EQk~-c1xG zE@xT)TesWHs}ny!5_7F_AyGL9K?Q~mP?>Vs!(oWZR42kf?*iTV*h5>tnzpljZL8IR zb7}l8q%Ckfh{^e3k^3pQMk=gLu60`Ja8HdkzVbeAU*exs*ajmRVp}O}l)TqX!?G7e z{4-~g?Gq%~)IJJ7p1k*WSnL3jqECe1OU}5nirS66_-$3FzMT5t3X zg{jgP^5?%zb(vMa!S|1cOYk4W!vG2KKd{YFIbPCk3_74HL`fWJASs{fxpzY@$(}Q- zK5I4TKS~`mfiDoDOm;XycF6mi|K|+d=lh=@U?9_V)BDDaZAnEw43`Ls1677I-+uFi zG?^$Fbc*pPun65{D!fH=3Oyp$WZAY!{JhzaUtIgYCWXf@)AkTa@x4xGjp0c zs7@JB012~&;z=SMbCp8d=Ga{l0(iwx<@o(f!OwmyH-gBN6wewq7A_h)oKg)koFPft zNfdie%F63S?rGDQR(N=bPuK>G0t^ax$0P8`N_cvR8rOf(O9T7$9#5!B;#!XUpLZXu z5C(OESAmE*2+hV}!bg$4K%`cQHBk!>##tW>1RbC%am`*|5IbvoLh!BqpAi2OmdXqf zHp%|!N;d!LN_26809n^14YVJJBe7aL87U~>HZ)VK%d|rZp(~zwNH#VGuX!vfal&Vv z-c)h33DOB@xl*~m5ZZ22sVRK>8I9+)QMVtsAB>r~SMkGMZaQ;Xi|?~Xxnmx;cYwYx z^nNxRxGcq7I!sO#b%$!0vQ(OqXm6T4mTilvMlYj|*i|=MK%kT2df;bZGW@NrgeX>( zf7eBsjJv}pNuEuHPEs42>}a`ut-O9lZDNh)_CsBpeHKvPKnpcWh^bC2QtnB5a4qy) zSrZhafuAkk5{yiM|zdiecKh zuc2R;6^;@i07fmepeofAJdX*knDzBA{3tyVYu6z#z;Lsi&x_bzzLEpfXtH*NrY_G`= z^X!;eI#hV*mmjjEOlo{TxQwSdUv0P$!Qvijpv9plBI@FUU#RJ)8Vn1ZGA$ATqF&s= zvcTS>Z8pepd>k=sjPY^3fpCB@aW8$Oq%fW;R?GpYoT@ki@N#2LxgTk1dYZHNrk@lx z7=yYr0FT$I>z~I0nXpPp$t3)}D?2^<@KWH#E{irFy2`)5r{AyvWHYzn`5@h;GVj0@ zJ@1fbD9gX=vQNR7PG5i}jFE}9#!;ote)FHdW?VVe6v4dWEz(R?!HC4KeVde*DGr=F zRotamm=!I~=_{|m;mCI4#5{C3_gBXan1<>!K!8O|)&K?O_L`}=uKCJ-s&+!XTk?wi z%Bwa_&k>4}`a` zFCG!c^Cdj#Bc2z2PXBCW$G)<%9X6;oZiigwvMLXQ$0f+2bKDCKCGR*cG>+;UTQ2bj z(2r#Od&Ulv*{?U~hq`j8W&8aggxHo<6*$&cDG#k;GS?mLx0^7mda35tz zHTnFA6vB^rczV1Ai8I&XyJX?jiEcQ}n;PYCl~EUPIxF@V%#c7LW`44<>ezAiG>1ff zeOSeCd#PW2z5z+<4Y?Qc#tb&+uH++5^G@!BaaDeVN8x=3ZB{R=Z5e+zf&13+nz{l% z{{#>B^OaIK}1Xh 
z;}?)W)sfwuf~?Ov1!oiQ-@WVG>D#(JL4Ob-h*l`y&hBY*!EkULKFdt9+VGJ?E=r85 zl*~dE)e4&l8Fdq`I@T2BAme(u7_)}y$TNu^lWWK-M8UQ(ZuBcA(qHG3; z&7bO_w9Cp!REZ3VB`&kfYOCmrNQxu7pbLoFkf)9Jkas&36ZnTBL?~cDug+T3bw?o! z$U-GUnOTkujjaB8vxcenWsZ4UrH*vMmACDj!95aG?gE5-g<6v8X9%kXThF|rP(0eu za*9aK6%^Qu4oyr(1t4hqmPX~~L7tB(;C{DH&MWDzUG+6I(;TGeM)jR#hK~O13LRwk zRc2;#m|qsRADyxC<6XC8u+lvVXoH+-HNTQXImy0_oM&D=ngI3OP?c>&k8&P2iV%hg zq{#n%P=0$dYJ2o$clJWqpVH&Q;S5Hv`T0-)mU2aa$XL#RH`0~|_g zmmfHkP7#d=iuiU1lL&5T+egS~-01WrWiiA=({_yWBnY@x5eX}`?y?3Xdic;`1dn5T zxTwLw{;Qt1MSWowZ}r+U?8Q+R46Avz>o>^}4zhvZaa_*Jd(2A!dP8ah=_*lh!W#a~ zNUm{^sD#HbDq!m*EK}(GzVn4N2GeNpEp8Z<_tctC_id9X=Irqhb_{b^H;~}qwZI&F z3t^MPXp4BuDv9@1Kr3*u zZ|&i`IKW!_Rv5(CaTJBndmX9B{YL8HJ2}u)`_>#J_-m{T-xpj%|2|{xmnVF#+X3=* zY*5{hDkk6M{+!Ved>d}mD@q^#{3qo9ZYb-+75cj*gH%I+d=}E+qSCK>vj4p z81UxB7>Gz}5QU^Pv-AJ*EHMW3g`EwB^^}ps>1E2$#r*H_{O{u)J@@1m$?Pu=va`3n z?so1N_WbU8U+4Nb|AN$Gv|%%33+!xpvv3iSLv&=qIUrD|3^*|rn7cNTWHgpaH0mTS zbXS-J>ZVOG~>BOwxVSa1sk6ivguYJD`$YgKkB!awl#vZ1NenaIidf zIo;H>3%L>R^l(kGI`c9&1a9H-s~68yw>3t6~N-Bv<9hyv4@0XlT|13}n_wh4#^(`bgWSiUFD z?SO{pz~eEqAvU|UZ-MPN$ZoAzAm@B5l}5B&MB(X&#FQ{BiwixOTe9@pn>F;%(9zOZ zly7ELHP0wS+Ikfr4P>I383O6E%8Ps6HYh5VLs3+bL1$J`TkTm6$wnI&{gh;r(^g9_ zB1RO-zhYoFDSl^oIQ*3Sm`H4%TTjHtuLbN&=j+P%iuVlxfEi zjsZUV9XdHY8m9muB8q5Vz z(`L%J6y+JTwbc>-nW(k@1!b!V8X7{S8M4^jErN(9CY}WtZ%l(hygPSA0+WuRy2zYP z{I1rh;dEB2eq9TUxCz{Gyr5B`eQAc=V{W%c+@W5W-mHRf!`2j21`y@SR^7Oz6_2Pt zkOomwUO=FaWS0^zE_8fOUJ%bwuxpLG@_{*8@bC&b7t2Op`l< z@kNX+GMUc*Zm2{Mv|>~c3<+pti9iF4V#K8sFm1soxJDi@ z0hJgP6;T1hrbc}rAns8Ko;#S9v5&XknRCva_O>&b{J*(Da_#Ad?20`5$%Xl&Puge2 zx?l9eH%e}NIwyYKT%Sue)L;7I7JYB)tpVNP7pm4j0n6@>Y|3y<8rov)IM#WzE@P_p zpPF3p<9y7UBK}GHof5CwW07klGghQ%{IeT#5013G-@n^&IFHZTJJ6g~ zCL1d0jcUJO-+8y)#+Wl0=`qCJo^!~ia8$-;rOBE~#*_zRZ*s~5n>IEYEtin@n6TMCEC;3v*irJ77~dTlkH+Ea~ni&gW~z zEBWCpC22aJfc1md!}q~j@)~H{%|IZpVtGYMh}wWjmPAVGFG{e*)g0Ukf*24y3)BXV zL{F7d(CXNXPzVFQlu~e}UL~fsmSnqLDoUS5FIMR1VZnVc3TinGDcHznFA6zTs<73? z4WUqG_@f*^v&jR_Q>a63^$bI30RuiF&nnl+1=px4kSzi_XB+AxOARqt@H;ZXlCce# zxlDYVFRiA{;DaYx(}XclB2S^eT1Q#1;p=9y6{`}J_sm<1Th)5PG zzzBlA<6+TFhl2c=Jl_@yJ}518aXJd2YFCAVu-7TMwT$KZefT7 zs5NxjtWvoM1u)bqHBp$PBs0RBf))u;m?bp>hDT6vTw&Lr!dBTtgj5XtcKJWphk_H; zeH09+T|vQZQ8Efz6lS0!cG`T`QE*MzYzhh@C0zhrg|>NSMAtY9%Huc+TF>Ppkl@@zX1imQDFMlS23i7E;Qs+kyyrF{7O&UZxN+ z-QgiSOj1$l30gw2$s1etFkp1{tI8Eq=&i{Q(-jkZqNBkxHjo*)Mn|Eg=J}ZZ*M!@$ m8X&e#V;O~v<{(@8u;?|riGH1;*CyBcIM_}B>Hc%VBjPV`^lBFX diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index f5bdef81deb70..39a291b258efb 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -11,7 +11,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.8-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.10-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=f8b4f4772d302c8ff580bc40d0f56e715de69b163546944f787c87abf209c961 +distributionSha256Sum=682b4df7fe5accdca84a4d1ef6a3a6ab096b3efd5edf7de2bd8c758d95a93703 diff --git a/gradlew b/gradlew index 1aa94a4269074..f5feea6d6b116 100755 --- a/gradlew +++ b/gradlew @@ -15,6 +15,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +# SPDX-License-Identifier: Apache-2.0 +# ############################################################################## # @@ -55,7 +57,7 @@ # Darwin, MinGW, and NonStop. 
# # (3) This script is generated from the Groovy template -# https://github.com/gradle/gradle/blob/HEAD/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# https://github.com/gradle/gradle/blob/HEAD/platforms/jvm/plugins-application/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt # within the Gradle project. # # You can find Gradle at https://github.com/gradle/gradle/. @@ -84,7 +86,8 @@ done # shellcheck disable=SC2034 APP_BASE_NAME=${0##*/} # Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) -APP_HOME=$( cd "${APP_HOME:-./}" > /dev/null && pwd -P ) || exit +APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s +' "$PWD" ) || exit # Use the maximum available, or set MAX_FD != -1 to use that value. MAX_FD=maximum diff --git a/gradlew.bat b/gradlew.bat index 7101f8e4676fc..9b42019c7915b 100644 --- a/gradlew.bat +++ b/gradlew.bat @@ -13,6 +13,8 @@ @rem See the License for the specific language governing permissions and @rem limitations under the License. @rem +@rem SPDX-License-Identifier: Apache-2.0 +@rem @if "%DEBUG%"=="" @echo off @rem ########################################################################## diff --git a/libs/common/src/test/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessorTests.java b/libs/common/src/test/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessorTests.java index 8d8a4c7895339..52162e3df0c1c 100644 --- a/libs/common/src/test/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessorTests.java +++ b/libs/common/src/test/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessorTests.java @@ -473,4 +473,17 @@ public void testPublicApiWithProtectedInterface() { assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR)))); } + + /** + * The constructor arguments have relaxed semantics at the moment: those could be not annotated or be annotated as {@link InternalApi} + */ + public void testPublicApiConstructorAnnotatedInternalApi() { + final CompilerResult result = compile("PublicApiConstructorAnnotatedInternalApi.java", "NotAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(2)); + + assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR)))); + } } diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/InternalApiAnnotated.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/InternalApiAnnotated.java index 9996ba8b736aa..b0b542e127285 100644 --- a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/InternalApiAnnotated.java +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/InternalApiAnnotated.java @@ -8,9 +8,9 @@ package org.opensearch.common.annotation.processor; -import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.annotation.InternalApi; -@PublicApi(since = "1.0.0") +@InternalApi public class InternalApiAnnotated { } diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiConstructorAnnotatedInternalApi.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiConstructorAnnotatedInternalApi.java new file mode 100644 index 0000000000000..d355a6b770391 --- /dev/null +++ 
b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiConstructorAnnotatedInternalApi.java
@@ -0,0 +1,21 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.annotation.processor;
+
+import org.opensearch.common.annotation.InternalApi;
+import org.opensearch.common.annotation.PublicApi;
+
+@PublicApi(since = "1.0.0")
+public class PublicApiConstructorAnnotatedInternalApi {
+    /**
+     * The constructors have relaxed semantics at the moment: those could be not annotated or be annotated as {@link InternalApi}
+     */
+    @InternalApi
+    public PublicApiConstructorAnnotatedInternalApi(NotAnnotated arg) {}
+}
diff --git a/libs/core/licenses/jackson-core-2.17.1.jar.sha1 b/libs/core/licenses/jackson-core-2.17.1.jar.sha1
deleted file mode 100644
index 82dab5981e652..0000000000000
--- a/libs/core/licenses/jackson-core-2.17.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5e52a11644cd59a28ef79f02bddc2cc3bab45edb
\ No newline at end of file
diff --git a/libs/core/licenses/jackson-core-2.17.2.jar.sha1 b/libs/core/licenses/jackson-core-2.17.2.jar.sha1
new file mode 100644
index 0000000000000..e15f2340980bc
--- /dev/null
+++ b/libs/core/licenses/jackson-core-2.17.2.jar.sha1
@@ -0,0 +1 @@
+969a35cb35c86512acbadcdbbbfb044c877db814
\ No newline at end of file
diff --git a/libs/core/licenses/lucene-core-9.12.0-snapshot-847316d.jar.sha1 b/libs/core/licenses/lucene-core-9.12.0-snapshot-847316d.jar.sha1
new file mode 100644
index 0000000000000..e3fd1708ea428
--- /dev/null
+++ b/libs/core/licenses/lucene-core-9.12.0-snapshot-847316d.jar.sha1
@@ -0,0 +1 @@
+51ff4940eb1024184bbaa5dae39695d2392c5bab
\ No newline at end of file
diff --git a/libs/core/licenses/lucene-core-9.12.0-snapshot-c896995.jar.sha1 b/libs/core/licenses/lucene-core-9.12.0-snapshot-c896995.jar.sha1
deleted file mode 100644
index 299283562fddc..0000000000000
--- a/libs/core/licenses/lucene-core-9.12.0-snapshot-c896995.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-826b328c37ea7f27c05d685db03bf8d2b00457ff
\ No newline at end of file
diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java
index da43894863432..28cb989185ff7 100644
--- a/libs/core/src/main/java/org/opensearch/Version.java
+++ b/libs/core/src/main/java/org/opensearch/Version.java
@@ -106,7 +106,9 @@ public class Version implements Comparable<Version>, ToXContentFragment {
     public static final Version V_2_14_1 = new Version(2140199, org.apache.lucene.util.Version.LUCENE_9_10_0);
     public static final Version V_2_15_0 = new Version(2150099, org.apache.lucene.util.Version.LUCENE_9_10_0);
     public static final Version V_2_15_1 = new Version(2150199, org.apache.lucene.util.Version.LUCENE_9_10_0);
-    public static final Version V_2_16_0 = new Version(2160099, org.apache.lucene.util.Version.LUCENE_9_11_0);
+    public static final Version V_2_16_0 = new Version(2160099, org.apache.lucene.util.Version.LUCENE_9_11_1);
+    public static final Version V_2_16_1 = new Version(2160199, org.apache.lucene.util.Version.LUCENE_9_11_1);
+    public static final Version V_2_17_0 = new Version(2170099, org.apache.lucene.util.Version.LUCENE_9_11_1);
     public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_12_0);
     public static final Version CURRENT = V_3_0_0;
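
For orientation, the integer ids in these constants appear to pack the release coordinates
positionally. A minimal sketch of that scheme, inferred from the values above
(2160199 -> 2.16.1, build 99) rather than taken from Version.java itself:

    // Assumed encoding: major * 1_000_000 + minor * 10_000 + revision * 100 + build.
    // 2160199 -> 2.16.1 (build 99); 3000099 -> 3.0.0 (build 99).
    static int versionId(int major, int minor, int revision, int build) {
        return major * 1_000_000 + minor * 10_000 + revision * 100 + build;
    }
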
diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractXContentParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractXContentParser.java
index 4efaacecd0e67..4605aa684db1c 100644
--- a/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractXContentParser.java
+++ b/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractXContentParser.java
@@ -375,7 +375,7 @@ private static void skipToListStart(XContentParser parser) throws IOException {
         }
     }
 
-    // read a list without bounds checks, assuming the the current parser is always on an array start
+    // read a list without bounds checks, assuming the current parser is always on an array start
     private static List<Object> readListUnsafe(XContentParser parser, Supplier<Map<String, Object>> mapFactory) throws IOException {
         assert parser.currentToken() == Token.START_ARRAY;
         ArrayList<Object> list = new ArrayList<>();
diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/filtering/FilterPath.java b/libs/core/src/main/java/org/opensearch/core/xcontent/filtering/FilterPath.java
index 5389538a8c7dd..b8da9787165f8 100644
--- a/libs/core/src/main/java/org/opensearch/core/xcontent/filtering/FilterPath.java
+++ b/libs/core/src/main/java/org/opensearch/core/xcontent/filtering/FilterPath.java
@@ -46,7 +46,6 @@ public class FilterPath {
 
     static final FilterPath EMPTY = new FilterPath();
 
-    private final String filter;
     private final String segment;
     private final FilterPath next;
 
@@ -99,32 +98,29 @@ public static FilterPath[] compile(Set<String> filters) {
         List<FilterPath> paths = new ArrayList<>();
 
         for (String filter : filters) {
-            if (filter != null) {
+            if (filter != null && !filter.isEmpty()) {
                 filter = filter.trim();
                 if (filter.length() > 0) {
-                    paths.add(parse(filter, filter));
+                    paths.add(parse(filter));
                 }
             }
         }
         return paths.toArray(new FilterPath[0]);
     }
 
-    private static FilterPath parse(final String filter, final String segment) {
-        int end = segment.length();
-
-        for (int i = 0; i < end;) {
-            char c = segment.charAt(i);
+    private static FilterPath parse(final String filter) {
+        // Split the filter into segments using a regex
+        // that avoids splitting escaped dots.
+        String[] segments = filter.split("(?<!\\\\)\\.");
+        FilterPath next = EMPTY;
+
+        for (int i = segments.length - 1; i >= 0; i--) {
+            // Replace escaped dots with actual dots in the current segment.
+            String segment = segments[i].replaceAll("\\\\.", ".");
+            next = new FilterPath(filter, segment, next);
         }
-        return new FilterPath(filter, segment.replaceAll("\\\\.", "."), EMPTY);
+
+        return next;
     }
 
     @Override
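
The new parse() relies on a negative lookbehind so that escaped dots survive the split. A
standalone sketch of the same two-step idea (split, then unescape), independent of
FilterPath itself:

    // The literal string a\.b.c keeps its escaped dot in the first segment,
    // then the unescape pass turns it back into a plain dot.
    String filter = "a\\.b.c";
    String[] segments = filter.split("(?<!\\\\)\\.");        // ["a\\.b", "c"]
    String unescaped = segments[0].replaceAll("\\\\.", "."); // "a.b"
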
diff --git a/libs/core/src/test/java/org/opensearch/core/xcontent/filtering/FilterPathTests.java b/libs/core/src/test/java/org/opensearch/core/xcontent/filtering/FilterPathTests.java
index 0c5a17b70a956..d3191609f6119 100644
--- a/libs/core/src/test/java/org/opensearch/core/xcontent/filtering/FilterPathTests.java
+++ b/libs/core/src/test/java/org/opensearch/core/xcontent/filtering/FilterPathTests.java
@@ -35,6 +35,7 @@
 import org.opensearch.common.util.set.Sets;
 import org.opensearch.test.OpenSearchTestCase;
 
+import java.util.HashSet;
 import java.util.Set;
 
 import static java.util.Collections.singleton;
@@ -369,4 +370,20 @@ public void testMultipleFilterPaths() {
         assertThat(filterPath.getSegment(), is(emptyString()));
         assertSame(filterPath, FilterPath.EMPTY);
     }
+
+    public void testCompileWithEmptyString() {
+        Set<String> filters = new HashSet<>();
+        filters.add("");
+        FilterPath[] filterPaths = FilterPath.compile(filters);
+        assertNotNull(filterPaths);
+        assertEquals(0, filterPaths.length);
+    }
+
+    public void testCompileWithNull() {
+        Set<String> filters = new HashSet<>();
+        filters.add(null);
+        FilterPath[] filterPaths = FilterPath.compile(filters);
+        assertNotNull(filterPaths);
+        assertEquals(0, filterPaths.length);
+    }
 }
diff --git a/libs/grok/src/main/java/org/opensearch/grok/Grok.java b/libs/grok/src/main/java/org/opensearch/grok/Grok.java
index 7aa3347ba4f4b..aa5b1a936b99d 100644
--- a/libs/grok/src/main/java/org/opensearch/grok/Grok.java
+++ b/libs/grok/src/main/java/org/opensearch/grok/Grok.java
@@ -37,14 +37,18 @@
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.nio.charset.StandardCharsets;
+import java.util.ArrayDeque;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.Deque;
+import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
-import java.util.Stack;
+import java.util.Set;
 import java.util.function.Consumer;
 
 import org.jcodings.specific.UTF8Encoding;
@@ -86,6 +90,7 @@ public final class Grok {
         UTF8Encoding.INSTANCE,
         Syntax.DEFAULT
     );
+    private static final int MAX_PATTERN_DEPTH_SIZE = 500;
 
     private static final int MAX_TO_REGEX_ITERATIONS = 100_000; // sanity limit
 
@@ -128,7 +133,7 @@ private Grok(
             expressionBytes.length,
             Option.DEFAULT,
             UTF8Encoding.INSTANCE,
-            message -> logCallBack.accept(message)
+            logCallBack::accept
         );
 
         List<GrokCaptureConfig> captureConfig = new ArrayList<>();
@@ -144,7 +149,7 @@ private Grok(
      */
     private void validatePatternBank() {
         for (String patternName : patternBank.keySet()) {
-            validatePatternBank(patternName, new Stack<>());
+            validatePatternBank(patternName);
         }
     }
 
@@ -156,33 +161,84 @@ private void validatePatternBank() {
      * a reference to another named pattern. This method will navigate to all these named patterns and
      * check for a circular reference.
      */
-    private void validatePatternBank(String patternName, Stack<String> path) {
-        String pattern = patternBank.get(patternName);
-        boolean isSelfReference = pattern.contains("%{" + patternName + "}") || pattern.contains("%{" + patternName + ":");
-        if (isSelfReference) {
-            throwExceptionForCircularReference(patternName, pattern);
-        } else if (path.contains(patternName)) {
-            // current pattern name is already in the path, fetch its predecessor
-            String prevPatternName = path.pop();
-            String prevPattern = patternBank.get(prevPatternName);
-            throwExceptionForCircularReference(prevPatternName, prevPattern, patternName, path);
-        }
-        path.push(patternName);
-        for (int i = pattern.indexOf("%{"); i != -1; i = pattern.indexOf("%{", i + 1)) {
-            int begin = i + 2;
-            int syntaxEndIndex = pattern.indexOf('}', begin);
-            if (syntaxEndIndex == -1) {
-                throw new IllegalArgumentException("Malformed pattern [" + patternName + "][" + pattern + "]");
+    private void validatePatternBank(String initialPatternName) {
+        Deque<Frame> stack = new ArrayDeque<>();
+        Set<String> visitedPatterns = new HashSet<>();
+        Map<String, List<String>> pathMap = new HashMap<>();
+
+        List<String> initialPath = new ArrayList<>();
+        initialPath.add(initialPatternName);
+        pathMap.put(initialPatternName, initialPath);
+        stack.push(new Frame(initialPatternName, initialPath, 0));
+
+        while (!stack.isEmpty()) {
+            Frame frame = stack.peek();
+            String patternName = frame.patternName;
+            List<String> path = frame.path;
+            int startIndex = frame.startIndex;
+            String pattern = patternBank.get(patternName);
+
+            if (visitedPatterns.contains(patternName)) {
+                stack.pop();
+                continue;
+            }
+
+            visitedPatterns.add(patternName);
+            boolean foundDependency = false;
+
+            for (int i = startIndex; i < pattern.length(); i++) {
+                if (pattern.startsWith("%{", i)) {
+                    int begin = i + 2;
+                    int syntaxEndIndex = pattern.indexOf('}', begin);
+                    if (syntaxEndIndex == -1) {
+                        throw new IllegalArgumentException("Malformed pattern [" + patternName + "][" + pattern + "]");
+                    }
+
+                    int semanticNameIndex = pattern.indexOf(':', begin);
+                    int end = semanticNameIndex == -1 ? syntaxEndIndex : Math.min(syntaxEndIndex, semanticNameIndex);
+
+                    String dependsOnPattern = pattern.substring(begin, end);
+
+                    if (dependsOnPattern.equals(patternName)) {
+                        throwExceptionForCircularReference(patternName, pattern);
+                    }
+
+                    if (pathMap.containsKey(dependsOnPattern)) {
+                        throwExceptionForCircularReference(patternName, pattern, dependsOnPattern, path.subList(0, path.size() - 1));
+                    }
+
+                    List<String> newPath = new ArrayList<>(path);
+                    newPath.add(dependsOnPattern);
+                    pathMap.put(dependsOnPattern, newPath);
+
+                    stack.push(new Frame(dependsOnPattern, newPath, 0));
+                    frame.startIndex = i + 1;
+                    foundDependency = true;
+                    break;
+                }
             }
-            int semanticNameIndex = pattern.indexOf(':', begin);
-            int end = syntaxEndIndex;
-            if (semanticNameIndex != -1) {
-                end = Math.min(syntaxEndIndex, semanticNameIndex);
+
+            if (!foundDependency) {
+                pathMap.remove(patternName);
+                stack.pop();
+            }
+
+            if (stack.size() > MAX_PATTERN_DEPTH_SIZE) {
+                throw new IllegalArgumentException("Pattern references exceeded maximum depth of " + MAX_PATTERN_DEPTH_SIZE);
             }
-            String dependsOnPattern = pattern.substring(begin, end);
-            validatePatternBank(dependsOnPattern, path);
         }
-        path.pop();
+    }
+
+    private static class Frame {
+        String patternName;
+        List<String> path;
+        int startIndex;
+
+        Frame(String patternName, List<String> path, int startIndex) {
+            this.patternName = patternName;
+            this.path = path;
+            this.startIndex = startIndex;
+        }
     }
 
     private static void throwExceptionForCircularReference(String patternName, String pattern) {
@@ -192,13 +248,13 @@ private static void throwExceptionForCircularReference(String patternName, Strin
     private static void throwExceptionForCircularReference(
         String patternName,
         String pattern,
-        String originPatterName,
-        Stack<String> path
+        String originPatternName,
+        List<String> path
     ) {
         StringBuilder message = new StringBuilder("circular reference in pattern [");
         message.append(patternName).append("][").append(pattern).append("]");
-        if (originPatterName != null) {
-            message.append(" back to pattern [").append(originPatterName).append("]");
+        if (originPatternName != null) {
+            message.append(" back to pattern [").append(originPatternName).append("]");
         }
         if (path != null && path.size() > 1) {
             message.append(" via patterns [").append(String.join("=>", path)).append("]");
@@ -217,9 +273,7 @@ private String groupMatch(String name, Region region, String pattern) {
             int begin = region.getBeg(number);
             int end = region.getEnd(number);
             return new String(pattern.getBytes(StandardCharsets.UTF_8), begin, end - begin, StandardCharsets.UTF_8);
-        } catch (StringIndexOutOfBoundsException e) {
-            return null;
-        } catch (ValueException e) {
+        } catch (StringIndexOutOfBoundsException | ValueException e) {
             return null;
         }
     }
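
A side note on the Stack-to-ArrayDeque swap above: java.util.Stack inherits per-operation
synchronization from Vector, whereas ArrayDeque provides the same LIFO push/pop/peek
behavior without that overhead and is what the JDK documentation itself recommends for
stack use. A trivial sketch:

    // Deque used as a LIFO stack, mirroring the push/pop/peek calls above.
    Deque<String> stack = new ArrayDeque<>();
    stack.push("first");
    stack.push("second");
    String top = stack.pop(); // "second"
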
diff --git a/libs/grok/src/test/java/org/opensearch/grok/GrokTests.java b/libs/grok/src/test/java/org/opensearch/grok/GrokTests.java
index a37689e051c67..8476d541aa46e 100644
--- a/libs/grok/src/test/java/org/opensearch/grok/GrokTests.java
+++ b/libs/grok/src/test/java/org/opensearch/grok/GrokTests.java
@@ -377,6 +377,16 @@ public void testCircularReference() {
             "circular reference in pattern [NAME5][!!!%{NAME1}!!!] back to pattern [NAME1] "
                 + "via patterns [NAME1=>NAME2=>NAME3=>NAME4]",
             e.getMessage()
         );
+
+        e = expectThrows(IllegalArgumentException.class, () -> {
+            Map<String, String> bank = new TreeMap<>();
+            for (int i = 1; i <= 501; i++) {
+                bank.put("NAME" + i, "!!!%{NAME" + (i + 1) + "}!!!");
+            }
+            String pattern = "%{NAME1}";
+            new Grok(bank, pattern, false, logger::warn);
+        });
+        assertEquals("Pattern references exceeded maximum depth of 500", e.getMessage());
     }
 
     public void testMalformedPattern() {
diff --git a/libs/secure-sm/src/main/java/org/opensearch/secure_sm/ThreadContextPermission.java b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/ThreadContextPermission.java
new file mode 100644
index 0000000000000..2f33eb513c165
--- /dev/null
+++ b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/ThreadContextPermission.java
@@ -0,0 +1,40 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.secure_sm;
+
+import java.security.BasicPermission;
+
+/**
+ * Permission to utilize methods in the ThreadContext class that are normally not accessible
+ *
+ * @see ThreadGroup
+ * @see SecureSM
+ */
+public final class ThreadContextPermission extends BasicPermission {
+
+    /**
+     * Creates a new ThreadContextPermission object.
+     *
+     * @param name target name
+     */
+    public ThreadContextPermission(String name) {
+        super(name);
+    }
+
+    /**
+     * Creates a new ThreadContextPermission object.
+     * This constructor exists for use by the {@code Policy} object to instantiate new Permission objects.
+     *
+     * @param name target name
+     * @param actions ignored
+     */
+    public ThreadContextPermission(String name, String actions) {
+        super(name, actions);
+    }
+}
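
A BasicPermission subclass like this is typically consulted through the installed
SecurityManager before a guarded method proceeds. A hedged sketch of that caller-side
check; the target name "markAsSystemContext" is an assumed example, not taken from this
patch:

    // Hypothetical guard inside a protected ThreadContext method.
    SecurityManager sm = System.getSecurityManager();
    if (sm != null) {
        sm.checkPermission(new ThreadContextPermission("markAsSystemContext"));
    }
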
diff --git a/libs/task-commons/build.gradle b/libs/task-commons/build.gradle
new file mode 100644
index 0000000000000..dde3acd8effcf
--- /dev/null
+++ b/libs/task-commons/build.gradle
@@ -0,0 +1,25 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+dependencies {
+    api project(':libs:opensearch-common')
+
+    testImplementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
+    testImplementation "junit:junit:${versions.junit}"
+    testImplementation "org.hamcrest:hamcrest:${versions.hamcrest}"
+    testImplementation(project(":test:framework")) {
+        exclude group: 'org.opensearch', module: 'opensearch-task-commons'
+    }
+}
+
+tasks.named('forbiddenApisMain').configure {
+    replaceSignatureFiles 'jdk-signatures'
+}
diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskListRequest.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskListRequest.java
new file mode 100644
index 0000000000000..625e15dfb3b6d
--- /dev/null
+++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskListRequest.java
@@ -0,0 +1,103 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.task.commons.clients;
+
+import org.opensearch.task.commons.task.TaskStatus;
+import org.opensearch.task.commons.task.TaskType;
+import org.opensearch.task.commons.worker.WorkerNode;
+
+/**
+ * Request object for listing tasks
+ */
+public class TaskListRequest {
+
+    /**
+     * Filters listTasks response by specific task statuses
+     */
+    private TaskStatus[] taskStatus;
+
+    /**
+     * Filter listTasks response by specific task types
+     */
+    private TaskType[] taskTypes;
+
+    /**
+     * Filter listTasks response by specific worker node
+     */
+    private WorkerNode workerNodes;
+
+    /**
+     * Depicts the start page number for the list call.
+     *
+     * @see TaskManagerClient#listTasks(TaskListRequest)
+     */
+    private int startPageNumber;
+
+    /**
+     * Depicts the page size for the list call.
+     *
+     * @see TaskManagerClient#listTasks(TaskListRequest)
+     */
+    private int pageSize;
+
+    /**
+     * Default constructor
+     */
+    public TaskListRequest() {}
+
+    /**
+     * Update task types to filter on in the request
+     * @param taskTypes TaskType[]
+     * @return TaskListRequest
+     */
+    public TaskListRequest taskType(TaskType... taskTypes) {
+        this.taskTypes = taskTypes;
+        return this;
+    }
+
+    /**
+     * Update task statuses to filter on in the request
+     * @param taskStatus TaskStatus[]
+     * @return TaskListRequest
+     */
+    public TaskListRequest taskStatus(TaskStatus... taskStatus) {
+        this.taskStatus = taskStatus;
+        return this;
+    }
+
+    /**
+     * Update worker node to filter on in the request
+     * @param workerNode WorkerNode
+     * @return TaskListRequest
+     */
+    public TaskListRequest workerNode(WorkerNode workerNode) {
+        this.workerNodes = workerNode;
+        return this;
+    }
+
+    /**
+     * Update page number to start with when fetching the list of tasks
+     * @param startPageNumber startPageNumber
+     * @return TaskListRequest
+     */
+    public TaskListRequest startPageNumber(int startPageNumber) {
+        this.startPageNumber = startPageNumber;
+        return this;
+    }
+
+    /**
+     * Update page size for the list tasks response
+     * @param pageSize int
+     * @return TaskListRequest
+     */
+    public TaskListRequest pageSize(int pageSize) {
+        this.pageSize = pageSize;
+        return this;
+    }
+}
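
Since every setter returns this, the request is meant to be chained. A hypothetical usage
sketch; the TaskType and TaskStatus constants shown are assumed for illustration, as their
values are not part of this patch:

    TaskListRequest request = new TaskListRequest()
        .taskType(TaskType.MERGE)          // assumed enum constant
        .taskStatus(TaskStatus.UNASSIGNED) // assumed enum constant
        .startPageNumber(0)
        .pageSize(50);
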
+ * + * @opensearch.experimental + */ +@ExperimentalApi +public interface TaskManagerClient { + + /** + * Get task from TaskStore/Queue + * + * @param taskId TaskId of the task to be retrieved + * @return Task corresponding to TaskId + */ + Task getTask(TaskId taskId); + + /** + * Update task in TaskStore/Queue + * + * @param task Task to be updated + */ + void updateTask(Task task); + + /** + * Mark task as cancelled. + * Ongoing Tasks can be cancelled as well if the corresponding worker supports cancellation. + * + * @param taskId TaskId of the task to be cancelled + */ + void cancelTask(TaskId taskId); + + /** + * List all tasks applying all the filters present in taskListRequest + * + * @param taskListRequest TaskListRequest + * @return list of all the tasks matching the filters in taskListRequest + */ + List<Task> listTasks(TaskListRequest taskListRequest); + + /** + * Assign Task to a particular WorkerNode. This ensures no two worker nodes work on the same task. + * This API can be used in both pull and push models of task assignment. + * + * @param taskId TaskId of the task to be assigned + * @param node WorkerNode task is being assigned to + * @return true if task is assigned successfully, false otherwise + */ + boolean assignTask(TaskId taskId, WorkerNode node); +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskProducerClient.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskProducerClient.java new file mode 100644 index 0000000000000..18f3421f9aa97 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskProducerClient.java @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.clients; + +import org.opensearch.task.commons.task.Task; + +/** + * Producer interface used to submit new tasks for execution on worker nodes. + */ +public interface TaskProducerClient { + + /** + * Submit a new task to TaskStore/Queue + * + * @param task Task to be submitted for execution on offline nodes + */ + void submitTask(Task task); +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskWorkerClient.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskWorkerClient.java new file mode 100644 index 0000000000000..59abe189434dd --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskWorkerClient.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.clients; + +import org.opensearch.task.commons.task.Task; +import org.opensearch.task.commons.task.TaskId; + +import java.util.List; + +/** + * Consumer interface used to find new tasks assigned to a {@code WorkerNode} for execution.
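+ * <p> + * A minimal polling sketch, assuming an external loop drives it and that {@code taskWorkerClient} and {@code request} are supplied by the caller (illustrative only): + * <pre> + * for (Task task : taskWorkerClient.getAssignedTasks(request)) { + * taskWorkerClient.sendTaskHeartbeat(task.getTaskId(), System.currentTimeMillis()); + * } + * </pre>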
+ */ +public interface TaskWorkerClient { + + /** + * List all tasks assigned to a WorkerNode. + * Useful when the implementation uses a separate store for Task assignments to Worker nodes + * + * @param taskListRequest TaskListRequest + * @return list of all tasks assigned to a WorkerNode + */ + List<Task> getAssignedTasks(TaskListRequest taskListRequest); + + /** + * Sends a task heartbeat to the Task Store/Queue + * + * @param taskId TaskId of Task to send heartbeat for + * @param timestamp timestamp of heartbeat to be recorded in TaskStore/Queue + */ + void sendTaskHeartbeat(TaskId taskId, long timestamp); + +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/package-info.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/package-info.java similarity index 71% rename from plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/package-info.java rename to libs/task-commons/src/main/java/org/opensearch/task/commons/clients/package-info.java index 3cb9cacf7fd1c..1329f3888248c 100644 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/package-info.java +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/package-info.java @@ -7,6 +7,6 @@ */ /** - * Listeners for Query Insights + * Contains task client related classes */ -package org.opensearch.plugin.insights.core.listener; +package org.opensearch.task.commons.clients; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/package-info.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/package-info.java similarity index 72% rename from plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/package-info.java rename to libs/task-commons/src/main/java/org/opensearch/task/commons/package-info.java index 7164411194f85..4cb773ace62ce 100644 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/package-info.java +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/package-info.java @@ -7,6 +7,6 @@ */ /** - * Query Insights exporter + * Contains offline tasks related classes */ -package org.opensearch.plugin.insights.core.exporter; +package org.opensearch.task.commons; diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/task/Task.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/Task.java new file mode 100644 index 0000000000000..7ad567b57bd42 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/Task.java @@ -0,0 +1,361 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.task; + +import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.task.commons.worker.WorkerNode; + +/** + * A background Task to be run on an offline node.
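+ * <p> + * Instances are immutable once constructed; updated copies are meant to be derived through the nested {@link Builder}.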
+ */ +@ExperimentalApi +public class Task { + + /** + * Task identifier used to uniquely identify a Task + */ + private final TaskId taskId; + + /** + * Depicts latest state of the Task + */ + private final TaskStatus taskStatus; + + /** + * Various params to be used for Task execution + */ + private final TaskParams params; + + /** + * Type/Category of the Task + */ + private final TaskType taskType; + + /** + * Worker Node on which the Task is to be executed + */ + private final WorkerNode assignedNode; + + /** + * Timestamp at which the Task was created + */ + private final long createdAt; + + /** + * Timestamp at which the Task was assigned to a worker + */ + private final long assignedAt; + + /** + * Timestamp at which the Task was started execution on worker + */ + private final long startedAt; + + /** + * Timestamp at which the Task was either completed/failed/cancelled + */ + private final long completedAt; + + /** + * Timestamp at which last heartbeat was sent by the worker + */ + private final long lastHeartbeatAt; + + /** + * Constructor for Task + * + * @param taskId Task identifier + * @param taskStatus Task status + * @param params Task Params + * @param taskType Task Type + * @param createdAt Timestamp at which the Task was created + * @param assignedAt Timestamp at which the Task was assigned to a worker + * @param startedAt Timestamp at which the Task was started execution on worker + * @param completedAt Timestamp at which the Task was either completed/failed/cancelled + * @param lastHeartbeatAt Timestamp at which last heartbeat was sent by the worker + * @param assignedNode Worker Node on which the Task is to be executed + */ + public Task( + TaskId taskId, + TaskStatus taskStatus, + TaskParams params, + TaskType taskType, + long createdAt, + long assignedAt, + long startedAt, + long completedAt, + long lastHeartbeatAt, + @Nullable WorkerNode assignedNode + ) { + this.taskId = taskId; + this.taskStatus = taskStatus; + this.params = params; + this.taskType = taskType; + this.createdAt = createdAt; + this.assignedAt = assignedAt; + this.startedAt = startedAt; + this.completedAt = completedAt; + this.lastHeartbeatAt = lastHeartbeatAt; + this.assignedNode = assignedNode; + } + + /** + * Get TaskId + * @return TaskId + */ + public TaskId getTaskId() { + return taskId; + } + + /** + * Get TaskStatus + * @return TaskStatus + */ + public TaskStatus getTaskStatus() { + return taskStatus; + } + + /** + * Get TaskParams + * @return TaskParams + */ + public TaskParams getParams() { + return params; + } + + /** + * Get TaskType + * @return TaskType + */ + public TaskType getTaskType() { + return taskType; + } + + /** + * Get Task Creation Time + * @return createdAt + */ + public long getCreatedAt() { + return createdAt; + } + + /** + * Get Task Assignment Time + * @return assignedAt + */ + public long getAssignedAt() { + return assignedAt; + } + + /** + * Get Task Start Time + * @return startedAt + */ + public long getStartedAt() { + return startedAt; + } + + /** + * Get Task Completion Time + * @return completedAt + */ + public long getCompletedAt() { + return completedAt; + } + + /** + * Get Last Heartbeat Time + * @return lastHeartbeatAt + */ + public long getLastHeartbeatAt() { + return lastHeartbeatAt; + } + + /** + * Get Task Assigned Node + * @return assignedNode + */ + public WorkerNode getAssignedNode() { + return assignedNode; + } + + /** + * Builder class for Task.
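+ * <p> + * A small usage sketch (illustrative only; the inputs are assumed to come from the caller): + * <pre> + * Task.Builder builder = Task.Builder.builder(taskId, TaskStatus.UNASSIGNED, params, TaskType.MERGE, System.currentTimeMillis()); + * builder.taskStatus(TaskStatus.ACTIVE); + * Task task = builder.build(); + * </pre>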
+ */ + public static class Builder { + /** + * Task identifier used to uniquely identify a Task + */ + private final TaskId taskId; + + /** + * Depicts latest state of the Task + */ + private TaskStatus taskStatus; + + /** + * Various params to be used for Task execution + */ + private final TaskParams params; + + /** + * Type/Category of the Task + */ + private final TaskType taskType; + + /** + * Worker Node on which the Task is to be executed + */ + private WorkerNode assignedNode; + + /** + * Timestamp at which the Task was created + */ + private final long createdAt; + + /** + * Timestamp at which the Task was assigned to a worker + */ + private long assignedAt; + + /** + * Timestamp at which the Task was started execution on worker + */ + private long startedAt; + + /** + * Timestamp at which the Task was either completed/failed/cancelled + */ + private long completedAt; + + /** + * Timestamp at which last heartbeat was sent by the worker + */ + private long lastHeartbeatAt; + + /** + * Constructor for Task Builder + * + * @param taskId Task identifier + * @param taskStatus Task status + * @param params Task Params + * @param taskType Task Type + * @param createdAt Task Creation Time + */ + private Builder(TaskId taskId, TaskStatus taskStatus, TaskParams params, TaskType taskType, long createdAt) { + this.taskId = taskId; + this.taskStatus = taskStatus; + this.params = params; + this.taskType = taskType; + this.createdAt = createdAt; + } + + /** + * Build Builder from Task + * @param task Task to build from + * @return Task.Builder + */ + public static Builder builder(Task task) { + Builder builder = new Builder( + task.getTaskId(), + task.getTaskStatus(), + task.getParams(), + task.getTaskType(), + task.getCreatedAt() + ); + builder.assignedAt(task.getAssignedAt()); + builder.startedAt(task.getStartedAt()); + builder.completedAt(task.getCompletedAt()); + builder.lastHeartbeatAt(task.getLastHeartbeatAt()); + builder.assignedNode(task.getAssignedNode()); + return builder; + } + + /** + * Build Builder from various Task attributes + * @param taskId Task identifier + * @param taskStatus TaskStatus + * @param params TaskParams + * @param taskType TaskType + * @param createdAt Task Creation Time + * @return Task.Builder + */ + public static Builder builder(TaskId taskId, TaskStatus taskStatus, TaskParams params, TaskType taskType, long createdAt) { + return new Builder(taskId, taskStatus, params, taskType, createdAt); + } + + /** + * Set Task Assignment Time + * @param assignedAt Timestamp at which the Task was assigned to a worker + */ + public void assignedAt(long assignedAt) { + this.assignedAt = assignedAt; + } + + /** + * Set Task Start Time + * @param startedAt Timestamp at which the Task was started execution on worker + */ + public void startedAt(long startedAt) { + this.startedAt = startedAt; + } + + /** + * Set Task Completion Time + * @param completedAt Timestamp at which the Task was either completed/failed/cancelled + */ + public void completedAt(long completedAt) { + this.completedAt = completedAt; + } + + /** + * Set Task Last Heartbeat Time for the task + * @param lastHeartbeatAt Timestamp at which last heartbeat was sent by the worker + */ + public void lastHeartbeatAt(long lastHeartbeatAt) { + this.lastHeartbeatAt = lastHeartbeatAt; + } + + /** + * Update the Task Status + * @param taskStatus {@link TaskStatus} - current status of the Task + */ + public void taskStatus(TaskStatus taskStatus) { + this.taskStatus = taskStatus; + } + + /** + * Set Task Assigned Node + * @param node Worker Node on which the Task is to be executed
+ */ + public void assignedNode(WorkerNode node) { + this.assignedNode = node; + } + + /** + * Build Task from Builder + * @return Task + */ + public Task build() { + return new Task( + taskId, + taskStatus, + params, + taskType, + createdAt, + assignedAt, + startedAt, + completedAt, + lastHeartbeatAt, + assignedNode + ); + } + } +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskId.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskId.java new file mode 100644 index 0000000000000..7fb7e1536dc17 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskId.java @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.task; + +import org.opensearch.common.annotation.ExperimentalApi; + +import java.util.Objects; + +/** + * Class encapsulating Task identifier + */ +@ExperimentalApi +public class TaskId { + + /** + * Identifier of the Task + */ + private final String id; + + /** + * Constructor to initialize TaskId + * @param id String value of Task id + */ + public TaskId(String id) { + this.id = id; + } + + /** + * Get id value + * @return id + */ + public String getValue() { + return id; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || getClass() != obj.getClass()) { + return false; + } + TaskId other = (TaskId) obj; + return this.id.equals(other.id); + } + + @Override + public int hashCode() { + return Objects.hash(id); + } +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskParams.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskParams.java new file mode 100644 index 0000000000000..ac6126a02cf24 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskParams.java @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.task; + +import org.opensearch.common.annotation.ExperimentalApi; + +/** + * Base class for all TaskParams implementations of the various TaskTypes + */ +@ExperimentalApi +public abstract class TaskParams { + + /** + * Default constructor + */ + public TaskParams() {} +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskStatus.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskStatus.java new file mode 100644 index 0000000000000..e44e5c7fee620 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskStatus.java @@ -0,0 +1,49 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.task; + +import org.opensearch.common.annotation.ExperimentalApi; + +/** + * Task status enum + */ +@ExperimentalApi +public enum TaskStatus { + + /** + * TaskStatus of a Task which is not yet assigned to or picked up by a worker + */ + UNASSIGNED, + + /** + * TaskStatus of a Task which is assigned to or picked up by a worker but hasn't started execution yet.
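+ * (Within the overall lifecycle this status sits between UNASSIGNED and ACTIVE.)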
+ * This status confirms that a worker will execute this task and no other worker should pick it up. + */ + ASSIGNED, + + /** + * TaskStatus of an in progress Task + */ + ACTIVE, + + /** + * TaskStatus of a finished Task + */ + SUCCESS, + + /** + * TaskStatus of a Task which failed in 1 or more attempts + */ + FAILED, + + /** + * TaskStatus of a cancelled Task + */ + CANCELLED +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskType.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskType.java new file mode 100644 index 0000000000000..ef267a86e3da4 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskType.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.task; + +import org.opensearch.common.annotation.ExperimentalApi; + +/** + * Enum for task type + */ +@ExperimentalApi +public enum TaskType { + /** + * For all segment merge related tasks + */ + MERGE, + + /** + * For all snapshot related tasks + */ + SNAPSHOT +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/package-info.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/package-info.java similarity index 73% rename from plugins/query-insights/src/main/java/org/opensearch/plugin/insights/package-info.java rename to libs/task-commons/src/main/java/org/opensearch/task/commons/task/package-info.java index 04d1f9bfff7e1..e99e4079cc7af 100644 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/package-info.java +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/package-info.java @@ -7,6 +7,6 @@ */ /** - * Base Package of Query Insights + * Contains tasks related classes */ -package org.opensearch.plugin.insights; +package org.opensearch.task.commons.task; diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/worker/TaskWorker.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/worker/TaskWorker.java new file mode 100644 index 0000000000000..6d9fd1fed23e4 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/worker/TaskWorker.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.task.commons.worker; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.task.commons.task.Task; + +/** + * Task Worker that executes the Task + * + * @opensearch.experimental + */ +@ExperimentalApi +public interface TaskWorker { + + /** + * Execute the Task + * + * @param task Task to be executed + */ + void executeTask(Task task); + +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/worker/WorkerNode.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/worker/WorkerNode.java new file mode 100644 index 0000000000000..1b9426d472184 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/worker/WorkerNode.java @@ -0,0 +1,96 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.worker; + +import java.util.Objects; + +/** + * Represents a worker node in the fleet + */ +public class WorkerNode { + /** + * The unique identifier of the worker node. + */ + private final String id; + + /** + * The name of the worker node. + */ + private final String name; + + /** + * The IP address of the worker node. + */ + private final String ip; + + /** + * Creates a new worker node with the given ID, name, and IP address. + * + * @param id The unique identifier of the worker node. + * @param name The name of the worker node. + * @param ip The IP address of the worker node. + */ + private WorkerNode(String id, String name, String ip) { + this.id = id; + this.name = name; + this.ip = ip; + } + + /** + * Creates a new worker node with the given ID, name, and IP address. + * + * @param id The unique identifier of the worker node. + * @param name The name of the worker node. + * @param ip The IP address of the worker node. + * @return The created worker node. + */ + public static WorkerNode createWorkerNode(String id, String name, String ip) { + return new WorkerNode(id, name, ip); + } + + /** + * Returns the unique identifier of the worker node. + * + * @return The ID of the worker node. + */ + public String getId() { + return id; + } + + /** + * Returns the name of the worker node. + * + * @return The name of the worker node. + */ + public String getName() { + return name; + } + + /** + * Returns the IP address of the worker node. + * + * @return The IP address of the worker node. 
+ */ + public String getIp() { + return ip; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + WorkerNode that = (WorkerNode) o; + return Objects.equals(id, that.id) && Objects.equals(name, that.name) && Objects.equals(ip, that.ip); + } + + @Override + public int hashCode() { + return Objects.hash(id, name, ip); + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/package-info.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/worker/package-info.java similarity index 71% rename from plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/package-info.java rename to libs/task-commons/src/main/java/org/opensearch/task/commons/worker/package-info.java index f3152bbf966cb..d74fa30e8b661 100644 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/package-info.java +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/worker/package-info.java @@ -7,6 +7,6 @@ */ /** - * Settings for Query Insights Plugin + * Contains task worker related classes */ -package org.opensearch.plugin.insights.settings; +package org.opensearch.task.commons.worker; diff --git a/libs/task-commons/src/test/java/org/opensearch/task/commons/mocks/MockTaskParams.java b/libs/task-commons/src/test/java/org/opensearch/task/commons/mocks/MockTaskParams.java new file mode 100644 index 0000000000000..21d2a3dd725b6 --- /dev/null +++ b/libs/task-commons/src/test/java/org/opensearch/task/commons/mocks/MockTaskParams.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.mocks; + +import org.opensearch.task.commons.task.TaskParams; + +public class MockTaskParams extends TaskParams { + + private final String value; + + public MockTaskParams(String mockValue) { + super(); + value = mockValue; + } + + public String getValue() { + return value; + } +} diff --git a/libs/task-commons/src/test/java/org/opensearch/task/commons/task/TaskIdTests.java b/libs/task-commons/src/test/java/org/opensearch/task/commons/task/TaskIdTests.java new file mode 100644 index 0000000000000..e58382e9fc056 --- /dev/null +++ b/libs/task-commons/src/test/java/org/opensearch/task/commons/task/TaskIdTests.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.task.commons.task; + +import org.opensearch.test.OpenSearchTestCase; + +/** + * Tests for {@link TaskId} + */ +public class TaskIdTests extends OpenSearchTestCase { + + public void testConstructorAndGetValue() { + TaskId taskId = new TaskId("123"); + assertEquals("123", taskId.getValue()); + } + + public void testEqualsWithSameId() { + TaskId taskId1 = new TaskId("456"); + TaskId taskId2 = new TaskId("456"); + assertEquals(taskId1, taskId2); + } + + public void testEqualsWithDifferentId() { + TaskId taskId1 = new TaskId("789"); + TaskId taskId2 = new TaskId("987"); + assertNotEquals(taskId1, taskId2); + } + + public void testEqualsWithNull() { + TaskId taskId = new TaskId("abc"); + assertNotEquals(null, taskId); + } + + public void testEqualsWithDifferentClass() { + TaskId taskId = new TaskId("def"); + assertNotEquals(taskId, new Object()); + } + + public void testHashCode() { + TaskId taskId1 = new TaskId("456"); + TaskId taskId2 = new TaskId("456"); + assertEquals(taskId1.hashCode(), taskId2.hashCode()); + + TaskId taskId3 = new TaskId("4567"); + assertNotEquals(taskId1.hashCode(), taskId3.hashCode()); + } +} diff --git a/libs/task-commons/src/test/java/org/opensearch/task/commons/task/TaskTests.java b/libs/task-commons/src/test/java/org/opensearch/task/commons/task/TaskTests.java new file mode 100644 index 0000000000000..37c5f543daa02 --- /dev/null +++ b/libs/task-commons/src/test/java/org/opensearch/task/commons/task/TaskTests.java @@ -0,0 +1,147 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.task; + +import org.opensearch.task.commons.mocks.MockTaskParams; +import org.opensearch.task.commons.worker.WorkerNode; +import org.opensearch.test.OpenSearchTestCase; + +/** + * Tests for {@link Task} + */ +public class TaskTests extends OpenSearchTestCase { + + public void testTaskConstructorAndGetters() { + TaskId taskId = new TaskId("123"); + TaskStatus taskStatus = TaskStatus.UNASSIGNED; + TaskParams params = new MockTaskParams("mock"); + TaskType taskType = TaskType.MERGE; + long createdAt = System.currentTimeMillis(); + long assignedAt = createdAt + 1000; + long startedAt = createdAt + 2000; + long completedAt = createdAt + 3000; + long lastHeartbeatAt = createdAt + 2500; + WorkerNode assignedNode = WorkerNode.createWorkerNode("node1", "nodename", "nodeip"); + + Task task = new Task( + taskId, + taskStatus, + params, + taskType, + createdAt, + assignedAt, + startedAt, + completedAt, + lastHeartbeatAt, + assignedNode + ); + + assertEquals(taskId, task.getTaskId()); + assertEquals(taskStatus, task.getTaskStatus()); + assertEquals(params, task.getParams()); + assertEquals(taskType, task.getTaskType()); + assertEquals(createdAt, task.getCreatedAt()); + assertEquals(assignedAt, task.getAssignedAt()); + assertEquals(startedAt, task.getStartedAt()); + assertEquals(completedAt, task.getCompletedAt()); + assertEquals(lastHeartbeatAt, task.getLastHeartbeatAt()); + assertEquals(assignedNode, task.getAssignedNode()); + } + + public void testBuilderFromTask() { + TaskId taskId = new TaskId("123"); + TaskStatus taskStatus = TaskStatus.UNASSIGNED; + TaskParams params = new MockTaskParams("mock"); + TaskType taskType = TaskType.MERGE; + long createdAt = System.currentTimeMillis(); + long assignedAt = createdAt + 1000; + long startedAt = createdAt + 2000; + long completedAt = createdAt + 3000; + long lastHeartbeatAt = createdAt + 2500;
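+ // WorkerNode.createWorkerNode takes (id, name, ip); the values below follow that order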
+ WorkerNode assignedNode = WorkerNode.createWorkerNode("node1", "nodename", "nodeip"); + + Task originalTask = new Task( + taskId, + taskStatus, + params, + taskType, + createdAt, + assignedAt, + startedAt, + completedAt, + lastHeartbeatAt, + assignedNode + ); + + Task.Builder builder = Task.Builder.builder(originalTask); + Task newTask = builder.build(); + + assertEquals(originalTask.getTaskId(), newTask.getTaskId()); + assertEquals(originalTask.getTaskStatus(), newTask.getTaskStatus()); + assertEquals(originalTask.getParams(), newTask.getParams()); + assertEquals(originalTask.getTaskType(), newTask.getTaskType()); + assertEquals(originalTask.getCreatedAt(), newTask.getCreatedAt()); + assertEquals(originalTask.getAssignedAt(), newTask.getAssignedAt()); + assertEquals(originalTask.getStartedAt(), newTask.getStartedAt()); + assertEquals(originalTask.getCompletedAt(), newTask.getCompletedAt()); + assertEquals(originalTask.getLastHeartbeatAt(), newTask.getLastHeartbeatAt()); + assertEquals(originalTask.getAssignedNode(), newTask.getAssignedNode()); + } + + public void testBuilderFromAttributes() { + TaskId taskId = new TaskId("123"); + TaskStatus taskStatus = TaskStatus.UNASSIGNED; + TaskParams params = new MockTaskParams("mock"); + TaskType taskType = TaskType.MERGE; + long createdAt = System.currentTimeMillis(); + + Task.Builder builder = Task.Builder.builder(taskId, taskStatus, params, taskType, createdAt); + builder.assignedAt(createdAt + 1000); + builder.startedAt(createdAt + 2000); + builder.completedAt(createdAt + 3000); + builder.lastHeartbeatAt(createdAt + 2500); + builder.assignedNode(WorkerNode.createWorkerNode("node1", "nodename", "nodeip")); + builder.taskStatus(TaskStatus.ACTIVE); + + Task task = builder.build(); + + assertEquals(taskId, task.getTaskId()); + assertEquals(TaskStatus.ACTIVE, task.getTaskStatus()); + assertEquals(params, task.getParams()); + assertEquals(taskType, task.getTaskType()); + assertEquals(createdAt, task.getCreatedAt()); + assertEquals(createdAt + 1000, task.getAssignedAt()); + assertEquals(createdAt + 2000, task.getStartedAt()); + assertEquals(createdAt + 3000, task.getCompletedAt()); + assertEquals(createdAt + 2500, task.getLastHeartbeatAt()); + assertEquals(WorkerNode.createWorkerNode("node1", "nodename", "nodeip"), task.getAssignedNode()); + } + + public void testBuilderWithNullOptionalFields() { + TaskId taskId = new TaskId("123"); + TaskStatus taskStatus = TaskStatus.UNASSIGNED; + TaskParams params = new MockTaskParams("mock"); + TaskType taskType = TaskType.MERGE; + long createdAt = System.currentTimeMillis(); + + Task.Builder builder = Task.Builder.builder(taskId, taskStatus, params, taskType, createdAt); + Task task = builder.build(); + + assertEquals(taskId, task.getTaskId()); + assertEquals(taskStatus, task.getTaskStatus()); + assertEquals(params, task.getParams()); + assertEquals(taskType, task.getTaskType()); + assertEquals(createdAt, task.getCreatedAt()); + assertEquals(0, task.getAssignedAt()); + assertEquals(0, task.getStartedAt()); + assertEquals(0, task.getCompletedAt()); + assertEquals(0, task.getLastHeartbeatAt()); + assertNull(task.getAssignedNode()); + } +} diff --git a/libs/task-commons/src/test/java/org/opensearch/task/commons/worker/WorkerNodeTests.java b/libs/task-commons/src/test/java/org/opensearch/task/commons/worker/WorkerNodeTests.java new file mode 100644 index 0000000000000..07570bf947a27 --- /dev/null +++
b/libs/task-commons/src/test/java/org/opensearch/task/commons/worker/WorkerNodeTests.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.worker; + +import org.opensearch.test.OpenSearchTestCase; + +/** + * Tests for {@link WorkerNode} + */ +public class WorkerNodeTests extends OpenSearchTestCase { + + public void testCreateWorkerNode() { + WorkerNode worker = WorkerNode.createWorkerNode("1", "Worker1", "192.168.1.1"); + assertNotNull(worker); + assertEquals("1", worker.getId()); + assertEquals("Worker1", worker.getName()); + assertEquals("192.168.1.1", worker.getIp()); + } + + public void testEquality() { + WorkerNode worker1 = WorkerNode.createWorkerNode("5", "Worker5", "192.168.1.5"); + WorkerNode worker2 = WorkerNode.createWorkerNode("5", "Worker5", "192.168.1.5"); + WorkerNode worker3 = WorkerNode.createWorkerNode("6", "Worker6", "192.168.1.6"); + + assertEquals(worker1, worker2); + assertNotEquals(worker1, worker3); + assertNotEquals(worker2, worker3); + } + + public void testHashCode() { + WorkerNode worker1 = WorkerNode.createWorkerNode("7", "Worker7", "192.168.1.7"); + WorkerNode worker2 = WorkerNode.createWorkerNode("7", "Worker7", "192.168.1.7"); + + assertEquals(worker1.hashCode(), worker2.hashCode()); + } + + public void testNotEqualToNull() { + WorkerNode worker = WorkerNode.createWorkerNode("8", "Worker8", "192.168.1.8"); + assertNotEquals(null, worker); + } + + public void testNotEqualToDifferentClass() { + WorkerNode worker = WorkerNode.createWorkerNode("9", "Worker9", "192.168.1.9"); + assertNotEquals(worker, new Object()); + } +} diff --git a/libs/x-content/licenses/jackson-core-2.17.1.jar.sha1 b/libs/x-content/licenses/jackson-core-2.17.1.jar.sha1 deleted file mode 100644 index 82dab5981e652..0000000000000 --- a/libs/x-content/licenses/jackson-core-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5e52a11644cd59a28ef79f02bddc2cc3bab45edb \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-core-2.17.2.jar.sha1 b/libs/x-content/licenses/jackson-core-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..e15f2340980bc --- /dev/null +++ b/libs/x-content/licenses/jackson-core-2.17.2.jar.sha1 @@ -0,0 +1 @@ +969a35cb35c86512acbadcdbbbfb044c877db814 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 deleted file mode 100644 index ff42ed1f92cfe..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ba5d8e6ecc62aa0e49c0ce935b8696352dbebc71 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-cbor-2.17.2.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..069e088413ef1 --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-cbor-2.17.2.jar.sha1 @@ -0,0 +1 @@ +57fa7c1b5104bbc4599278d13933a937ee058e68 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 deleted file mode 100644 index 47d19067cf2a6..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -89683ac4f0a0c2c4f69ea56b90480ed40266dac8 \ No 
newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-smile-2.17.2.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-smile-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..28d8c8382aed3 --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-smile-2.17.2.jar.sha1 @@ -0,0 +1 @@ +20e956b9b6f67138edd39fab7a506ded19638bcb \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 deleted file mode 100644 index 7946e994c7104..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b4c7b8a9ea3f398116a75c146b982b22afebc4ee \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-yaml-2.17.2.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..f3e25b7eb253c --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-yaml-2.17.2.jar.sha1 @@ -0,0 +1 @@ +78d2c73dbec62044d7cf3b544b2e0d24a1a093b0 \ No newline at end of file diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java index cf2736a8583d2..f14e499081ce9 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java @@ -75,6 +75,7 @@ import org.apache.lucene.analysis.eu.BasqueAnalyzer; import org.apache.lucene.analysis.fa.PersianAnalyzer; import org.apache.lucene.analysis.fa.PersianNormalizationFilter; +import org.apache.lucene.analysis.fa.PersianStemFilter; import org.apache.lucene.analysis.fi.FinnishAnalyzer; import org.apache.lucene.analysis.fr.FrenchAnalyzer; import org.apache.lucene.analysis.ga.IrishAnalyzer; @@ -315,6 +316,7 @@ public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() { filters.put("pattern_capture", requiresAnalysisSettings(PatternCaptureGroupTokenFilterFactory::new)); filters.put("pattern_replace", requiresAnalysisSettings(PatternReplaceTokenFilterFactory::new)); filters.put("persian_normalization", PersianNormalizationFilterFactory::new); + filters.put("persian_stem", PersianStemTokenFilterFactory::new); filters.put("porter_stem", PorterStemTokenFilterFactory::new); filters.put( "predicate_token_filter", @@ -558,6 +560,7 @@ public List<PreConfiguredTokenFilter> getPreConfiguredTokenFilters() { ); })); filters.add(PreConfiguredTokenFilter.singleton("persian_normalization", true, PersianNormalizationFilter::new)); + filters.add(PreConfiguredTokenFilter.singleton("persian_stem", true, PersianStemFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("porter_stem", false, PorterStemFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("reverse", false, ReverseStringFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("russian_stem", false, input -> new SnowballFilter(input, "Russian"))); diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java index 8d29a347caeb8..181ebe5500ee5 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java +++
b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java @@ -54,11 +54,16 @@ */ public class HyphenationCompoundWordTokenFilterFactory extends AbstractCompoundWordTokenFilterFactory { + private final boolean noSubMatches; + private final boolean noOverlappingMatches; private final HyphenationTree hyphenationTree; HyphenationCompoundWordTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, env, name, settings); + noSubMatches = settings.getAsBoolean("no_sub_matches", false); + noOverlappingMatches = settings.getAsBoolean("no_overlapping_matches", false); + String hyphenationPatternsPath = settings.get("hyphenation_patterns_path", null); if (hyphenationPatternsPath == null) { throw new IllegalArgumentException("hyphenation_patterns_path is a required setting."); @@ -85,7 +90,9 @@ public TokenStream create(TokenStream tokenStream) { minWordSize, minSubwordSize, maxSubwordSize, - onlyLongestMatch + onlyLongestMatch, + noSubMatches, + noOverlappingMatches ); } } diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/MinHashTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/MinHashTokenFilterFactory.java index e76354ae3a765..40655b84794d5 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/MinHashTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/MinHashTokenFilterFactory.java @@ -65,10 +65,10 @@ private Map<String, String> convertSettings(Settings settings) { if (settings.hasValue("hash_count")) { settingMap.put("hashCount", settings.get("hash_count")); } - if (settings.hasValue("bucketCount")) { + if (settings.hasValue("bucket_count")) { settingMap.put("bucketCount", settings.get("bucket_count")); } - if (settings.hasValue("hashSetSize")) { + if (settings.hasValue("hash_set_size")) { settingMap.put("hashSetSize", settings.get("hash_set_size")); } if (settings.hasValue("with_rotation")) { diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/PersianStemTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/PersianStemTokenFilterFactory.java new file mode 100644 index 0000000000000..afe8058343e17 --- /dev/null +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/PersianStemTokenFilterFactory.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.analysis.common; + +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.fa.PersianStemFilter; +import org.opensearch.common.settings.Settings; +import org.opensearch.env.Environment; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.analysis.AbstractTokenFilterFactory; + +public class PersianStemTokenFilterFactory extends AbstractTokenFilterFactory { + + PersianStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + super(indexSettings, name, settings); + } + + @Override + public TokenStream create(TokenStream tokenStream) { + return new PersianStemFilter(tokenStream); + } +} diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/StemmerTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/StemmerTokenFilterFactory.java index 5506626e40da0..e81f3c6cc09cc 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/StemmerTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/StemmerTokenFilterFactory.java @@ -47,6 +47,7 @@ import org.apache.lucene.analysis.en.KStemFilter; import org.apache.lucene.analysis.en.PorterStemFilter; import org.apache.lucene.analysis.es.SpanishLightStemFilter; +import org.apache.lucene.analysis.fa.PersianStemFilter; import org.apache.lucene.analysis.fi.FinnishLightStemFilter; import org.apache.lucene.analysis.fr.FrenchLightStemFilter; import org.apache.lucene.analysis.fr.FrenchMinimalStemFilter; @@ -239,6 +240,8 @@ public TokenStream create(TokenStream tokenStream) { return new NorwegianLightStemFilter(tokenStream, NorwegianLightStemmer.NYNORSK); } else if ("minimal_nynorsk".equalsIgnoreCase(language) || "minimalNynorsk".equalsIgnoreCase(language)) { return new NorwegianMinimalStemFilter(tokenStream, NorwegianLightStemmer.NYNORSK); + } else if ("persian".equalsIgnoreCase(language)) { + return new PersianStemFilter(tokenStream); // Portuguese stemmers } else if ("portuguese".equalsIgnoreCase(language)) { diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonAnalysisFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonAnalysisFactoryTests.java index 11713f52f5b18..7e3140f8bcba3 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonAnalysisFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonAnalysisFactoryTests.java @@ -158,6 +158,7 @@ protected Map<String, Class<?>> getTokenFilters() { filters.put("brazilianstem", BrazilianStemTokenFilterFactory.class); filters.put("czechstem", CzechStemTokenFilterFactory.class); filters.put("germanstem", GermanStemTokenFilterFactory.class); + filters.put("persianstem", PersianStemTokenFilterFactory.class); filters.put("telugunormalization", TeluguNormalizationFilterFactory.class); filters.put("telugustem", TeluguStemFilterFactory.class); // this filter is not exposed and should only be used internally @@ -220,6 +221,7 @@ protected Map<String, Class<?>> getPreConfiguredTokenFilters() { filters.put("ngram", null); filters.put("nGram", null); filters.put("persian_normalization", null); + filters.put("persian_stem", null); filters.put("porter_stem", null); filters.put("reverse", ReverseStringFilterFactory.class); filters.put("russian_stem",
SnowballPorterFilterFactory.class); diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CompoundAnalysisTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CompoundAnalysisTests.java index a681d9a104ecf..bfe30da50c055 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CompoundAnalysisTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CompoundAnalysisTests.java @@ -50,8 +50,12 @@ import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.MatcherAssert; +import org.junit.Before; import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -63,17 +67,27 @@ import static org.hamcrest.Matchers.instanceOf; public class CompoundAnalysisTests extends OpenSearchTestCase { + + Settings[] settingsArr; + + @Before + public void initialize() throws IOException { + final Path home = createTempDir(); + copyHyphenationPatternsFile(home); + this.settingsArr = new Settings[] { getJsonSettings(home), getYamlSettings(home) }; + } + public void testDefaultsCompoundAnalysis() throws Exception { - Settings settings = getJsonSettings(); - IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings); - AnalysisModule analysisModule = createAnalysisModule(settings); - TokenFilterFactory filterFactory = analysisModule.getAnalysisRegistry().buildTokenFilterFactories(idxSettings).get("dict_dec"); - MatcherAssert.assertThat(filterFactory, instanceOf(DictionaryCompoundWordTokenFilterFactory.class)); + for (Settings settings : this.settingsArr) { + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings); + AnalysisModule analysisModule = createAnalysisModule(settings); + TokenFilterFactory filterFactory = analysisModule.getAnalysisRegistry().buildTokenFilterFactories(idxSettings).get("dict_dec"); + MatcherAssert.assertThat(filterFactory, instanceOf(DictionaryCompoundWordTokenFilterFactory.class)); + } } public void testDictionaryDecompounder() throws Exception { - Settings[] settingsArr = new Settings[] { getJsonSettings(), getYamlSettings() }; - for (Settings settings : settingsArr) { + for (Settings settings : this.settingsArr) { List<String> terms = analyze(settings, "decompoundingAnalyzer", "donaudampfschiff spargelcremesuppe"); MatcherAssert.assertThat(terms.size(), equalTo(8)); MatcherAssert.assertThat( @@ -83,6 +97,26 @@ public void testDictionaryDecompounder() throws Exception { } } + // Hyphenation Decompounder tests mimic the behavior of lucene tests + // lucene/analysis/common/src/test/org/apache/lucene/analysis/compound/TestHyphenationCompoundWordTokenFilterFactory.java + public void testHyphenationDecompounder() throws Exception { + for (Settings settings : this.settingsArr) { + List<String> terms = analyze(settings, "hyphenationAnalyzer", "min veninde som er lidt af en læsehest"); + MatcherAssert.assertThat(terms.size(), equalTo(10)); + MatcherAssert.assertThat(terms, hasItems("min", "veninde", "som", "er", "lidt", "af", "en", "læsehest", "læse", "hest")); + } + } + + // Hyphenation Decompounder tests mimic the behavior of lucene tests + // lucene/analysis/common/src/test/org/apache/lucene/analysis/compound/TestHyphenationCompoundWordTokenFilterFactory.java + public void testHyphenationDecompounderNoSubMatches() throws Exception { + for (Settings settings :
this.settingsArr) { + List<String> terms = analyze(settings, "hyphenationAnalyzerNoSubMatches", "basketballkurv"); + MatcherAssert.assertThat(terms.size(), equalTo(3)); + MatcherAssert.assertThat(terms, hasItems("basketballkurv", "basketball", "kurv")); + } + } + private List<String> analyze(Settings settings, String analyzerName, String text) throws IOException { IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings); AnalysisModule analysisModule = createAnalysisModule(settings); @@ -111,21 +145,28 @@ public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() { })); } - private Settings getJsonSettings() throws IOException { + private void copyHyphenationPatternsFile(Path home) throws IOException { + InputStream hyphenation_patterns_path = getClass().getResourceAsStream("da_UTF8.xml"); + Path config = home.resolve("config"); + Files.createDirectory(config); + Files.copy(hyphenation_patterns_path, config.resolve("da_UTF8.xml")); + } + + private Settings getJsonSettings(Path home) throws IOException { String json = "/org/opensearch/analysis/common/test1.json"; return Settings.builder() .loadFromStream(json, getClass().getResourceAsStream(json), false) .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put(Environment.PATH_HOME_SETTING.getKey(), home.toString()) .build(); } - private Settings getYamlSettings() throws IOException { + private Settings getYamlSettings(Path home) throws IOException { String yaml = "/org/opensearch/analysis/common/test1.yml"; return Settings.builder() .loadFromStream(yaml, getClass().getResourceAsStream(yaml), false) .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put(Environment.PATH_HOME_SETTING.getKey(), home.toString()) .build(); } } diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MinHashFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MinHashFilterFactoryTests.java index e86a939dc857b..b9f09033f49f3 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MinHashFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MinHashFilterFactoryTests.java @@ -50,14 +50,10 @@ public void testDefault() throws IOException { int default_bucket_size = 512; int default_hash_set_size = 1; Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(); - OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings( - settings, - new CommonAnalysisModulePlugin() - ); + OpenSearchTestCase.TestAnalysis analysis = getTestAnalysisFromSettings(settings); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("min_hash"); String source = "the quick brown fox"; - Tokenizer tokenizer = new WhitespaceTokenizer(); - tokenizer.setReader(new StringReader(source)); + Tokenizer tokenizer = getTokenizer(source); // with_rotation is true by default, and hash_set_size is 1, so even though the source doesn't // have enough tokens to fill all the buckets, we still expect 512 tokens.
@@ -73,17 +69,63 @@ public void testSettings() throws IOException { .put("index.analysis.filter.test_min_hash.with_rotation", false) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings( - settings, - new CommonAnalysisModulePlugin() - ); + OpenSearchTestCase.TestAnalysis analysis = getTestAnalysisFromSettings(settings); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("test_min_hash"); String source = "sushi"; - Tokenizer tokenizer = new WhitespaceTokenizer(); - tokenizer.setReader(new StringReader(source)); + Tokenizer tokenizer = getTokenizer(source); // despite the fact that bucket_count is 2 and hash_set_size is 1, // because with_rotation is false, we only expect 1 token here. assertStreamHasNumberOfTokens(tokenFilter.create(tokenizer), 1); } + + public void testBucketCountSetting() throws IOException { + // Correct case with "bucket_count" + Settings settingsWithBucketCount = Settings.builder() + .put("index.analysis.filter.test_min_hash.type", "min_hash") + .put("index.analysis.filter.test_min_hash.hash_count", "1") + .put("index.analysis.filter.test_min_hash.bucket_count", "3") + .put("index.analysis.filter.test_min_hash.hash_set_size", "1") + .put("index.analysis.filter.test_min_hash.with_rotation", false) + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + + OpenSearchTestCase.TestAnalysis analysisWithBucketCount = getTestAnalysisFromSettings(settingsWithBucketCount); + + TokenFilterFactory tokenFilterWithBucketCount = analysisWithBucketCount.tokenFilter.get("test_min_hash"); + String sourceWithBucketCount = "salmon avocado roll uramaki"; + Tokenizer tokenizerWithBucketCount = getTokenizer(sourceWithBucketCount); + // Expect 3 tokens due to bucket_count being set to 3 + assertStreamHasNumberOfTokens(tokenFilterWithBucketCount.create(tokenizerWithBucketCount), 3); + } + + public void testHashSetSizeSetting() throws IOException { + // Correct case with "hash_set_size" + Settings settingsWithHashSetSize = Settings.builder() + .put("index.analysis.filter.test_min_hash.type", "min_hash") + .put("index.analysis.filter.test_min_hash.hash_count", "1") + .put("index.analysis.filter.test_min_hash.bucket_count", "1") + .put("index.analysis.filter.test_min_hash.hash_set_size", "2") + .put("index.analysis.filter.test_min_hash.with_rotation", false) + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + + OpenSearchTestCase.TestAnalysis analysisWithHashSetSize = getTestAnalysisFromSettings(settingsWithHashSetSize); + + TokenFilterFactory tokenFilterWithHashSetSize = analysisWithHashSetSize.tokenFilter.get("test_min_hash"); + String sourceWithHashSetSize = "salmon avocado roll uramaki"; + Tokenizer tokenizerWithHashSetSize = getTokenizer(sourceWithHashSetSize); + // Expect 2 tokens due to hash_set_size being set to 2 and bucket_count being 1 + assertStreamHasNumberOfTokens(tokenFilterWithHashSetSize.create(tokenizerWithHashSetSize), 2); + } + + private static OpenSearchTestCase.TestAnalysis getTestAnalysisFromSettings(Settings settingsWithBucketCount) throws IOException { + return AnalysisTestsHelper.createTestAnalysisFromSettings(settingsWithBucketCount, new CommonAnalysisModulePlugin()); + } + + private static Tokenizer getTokenizer(String sourceWithBucketCount) { + Tokenizer tokenizerWithBucketCount = new WhitespaceTokenizer(); + tokenizerWithBucketCount.setReader(new 
StringReader(sourceWithBucketCount)); + return tokenizerWithBucketCount; + } } diff --git a/modules/analysis-common/src/test/resources/org/opensearch/analysis/common/da_UTF8.xml b/modules/analysis-common/src/test/resources/org/opensearch/analysis/common/da_UTF8.xml new file mode 100644 index 0000000000000..2c8d203be6881 --- /dev/null +++ b/modules/analysis-common/src/test/resources/org/opensearch/analysis/common/da_UTF8.xml @@ -0,0 +1,1208 @@ + + + + + + + + + + +aA +bB +cC +dD +eE +fF +gG +hH +iI +jJ +kK +lL +mM +nN +oO +pP +qQ +rR +sS +tT +uU +vV +wW +xX +yY +zZ +æÆ +øØ +åÅ + + + +.ae3 +.an3k +.an1s +.be5la +.be1t +.bi4tr +.der3i +.diagno5 +.her3 +.hoved3 +.ne4t5 +.om1 +.ove4 +.po1 +.til3 +.yd5r +ab5le +3abst +a3c +ade5la +5adg +a1e +5afg +5a4f1l +af3r +af4ri +5afs +a4gef +a4gi +ag5in +ag5si +3agti +a4gy +a3h +ais5t +a3j +a5ka +a3ke +a5kr +aku5 +a3la +a1le +a1li +al3k +4alkv +a1lo +al5si +a3lu +a1ly +am4pa +3analy +an4k5r +a3nu +3anv +a5o +a5pe +a3pi +a5po +a1ra +ar5af +1arb +a1re +5arg +a1ri +a3ro +a3sa +a3sc +a1si +a3sk +a3so +3a3sp +a3ste +a3sti +a1ta1 +a1te +a1ti +a4t5in +a1to +ato5v +a5tr +a1tu +a5va +a1ve +a5z +1ba +ba4ti +4bd +1be +be1k +be3ro +be5ru +be1s4 +be1tr +1bi +bi5sk +b1j +4b1n +1bo +bo4gr +bo3ra +bo5re +1br4 +4bs +bs5k +b3so +b1st +b5t +3bu +bu4s5tr +b5w +1by +by5s +4c1c +1ce +ce5ro +3ch +4ch. +ci4o +ck3 +5cy +3da +4d3af +d5anta +da4s +d1b +d1d4 +1de +de5d +4de4lem +der5eri +de4rig +de5sk +d1f +d1g +d3h +1di +di1e +di5l +d3j +d1k +d1l +d1m +4d1n +3do +4dop +d5ov +d1p +4drett +5d4reve +3drif +3driv +d5ros +d5ru +ds5an +ds5in +d1ski +d4sm +d4su +dsu5l +ds5vi +d3ta +d1te +dt5o +d5tr +dt5u +1du +dub5 +d1v +3dy +e5ad +e3af +e5ag +e3ak +e1al +ea4la +e3an +e5ap +e3at +e3bl +ebs3 +e1ci +ed5ar +edde4 +eddel5 +e4do +ed5ra +ed3re +ed3rin +ed4str +e3e +3eff +e3fr +3eft +e3gu +e1h +e3in +ei5s +e3je +e4j5el +e1ka +e3ke +e3kl +4e1ko +e5kr +ek5sa +3eksem +3eksp +e3ku +e1kv +e5ky +e3lad +el3ak +el3ar +e1las +e3le +e4lek +3elem +e1li +5elim +e3lo +el5sa +e5lu +e3ly +e4mad +em4p5le +em1s +en5ak +e4nan +4enn +e4no +en3so +e5nu +e5ol +e3op +e1or +e3ov +epi3 +e1pr +e3ra +er3af +e4rag +e4rak +e1re +e4ref +er5ege +5erhv +e1ri +e4rib +er1k +ero5d +er5ov +er3s +er5tr +e3rum +er5un +e5ry +e1ta +e1te +etek4s +e1ti +e3tj +e1to +e3tr +e3tu +e1ty +e3um +e3un +3eur +e1va +e3ve +e4v3erf +e1vi +e5x +1fa +fa4ce +fags3 +f1b +f1d +1fe +fej4 +fejl1 +f1f +f1g +f1h +1fi +f1k +3fl +1fo +for1en +fo4ri +f1p +f1s4 +4ft +f3ta +f1te +f1ti +f5to +f5tvi +1fu +f1v +3fy +1ga +g3art +g1b +g1d +1ge +4g5enden +ger3in +ge3s +g3f +g1g +g1h +1gi +gi4b +gi3st +5gj +g3k +g1l +g1m +3go +4g5om +g5ov +g3p +1gr +gs1a +gsde4len +g4se +gsha4 +g5sla +gs3or +gs1p +g5s4tide +g4str +gs1v +g3ta +g1te +g1ti +g5to +g3tr +gt4s +g3ud +gun5 +g3v +1gy +g5yd +4ha. +heds3 +he5s +4het +hi4e +hi4n5 +hi3s +ho5ko +ho5ve +4h3t +hun4 +hund3 +hvo4 +i1a +i3b +i4ble +i1c +i3dr +ids5k +i1el +i1en +i3er +i3et. +if3r +i3gu +i3h +i5i +i5j +i1ka +i1ke +ik1l +i5ko +ik3re +ik5ri +iks5t +ik4tu +i3ku +ik3v +i3lag +il3eg +il5ej +il5el +i3li +i4l5id +il3k +i1lo +il5u +i3mu +ind3t +5inf +ings1 +in3s +in4sv +inter1 +i3nu +i3od +i3og +i5ok +i3ol +ion4 +ions1 +i5o5r +i3ot +i5pi +i3pli +i5pr +i3re +i3ri +ir5t +i3sc +i3si +i4sm +is3p +i1ster +i3sti +i5sua +i1ta +i1te +i1ti +i3to +i3tr +it5re. +i1tu +i3ty +i1u +i1va +i1ve +i1vi +j3ag +jde4rer +jds1 +jek4to +4j5en. 
+j5k +j3le +j3li +jlmeld5 +jlmel4di +j3r +jre5 +ju3s +5kap +k5au +5kav +k5b +kel5s +ke3sk +ke5st +ke4t5a +k3h +ki3e +ki3st +k1k +k5lak +k1le +3klu +k4ny +5kod +1kon +ko3ra +3kort +ko3v +1kra +5kry +ks3an +k1si +ks3k +ks1p +k3ste +k5stu +ks5v +k1t +k4tar +k4terh +kti4e +kt5re +kt5s +3kur +1kus +3kut +k4vo +k4vu +5lab +lad3r +5lagd +la4g3r +5lam +1lat +l1b +ldiagnos5 +l3dr +ld3st +1le. +5led +4lele +le4mo +3len +1ler +1les +4leu +l1f +lfin4 +lfind5 +l1go1 +l3h +li4ga +4l5ins +4l3int +li5o +l3j +l1ke +l1ko +l3ky +l1l +l5mu +lo4du +l3op +4l5or +3lov +4l3p +l4ps +l3r +4ls +lses1 +ls5in +l5sj +l1ta +l4taf +l1te +l4t5erf +l3ti +lt3o +l3tr +l3tu +lu5l +l3ve +l3vi +1ma +m1b +m3d +1me +4m5ej +m3f +m1g +m3h +1mi +mi3k +m5ing +mi4o +mi5sty +m3k +m1l +m1m +mmen5 +m1n +3mo +mo4da +4mop +4m5ov +m1pe +m3pi +m3pl +m1po +m3pr +m1r +mse5s +ms5in +m5sk +ms3p +m3ste +ms5v +m3ta +m3te +m3ti +m3tr +m1ud +1mul +mu1li +3my +3na +4nak +1nal +n1b +n1c +4nd +n3dr +nd5si +nd5sk +nd5sp +1ne +ne5a +ne4da +nemen4 +nement5e +neo4 +n3erk +n5erl +ne5sl +ne5st +n1f +n4go +4n1h +1ni +4nim +ni5o +ni3st +n1ke +n1ko +n3kr +n3ku +n5kv +4n1l +n1m +n1n +1no +n3ord +n5p +n3r +4ns +n3si +n1sku +ns3po +n1sta +n5sti +n1ta +nta4le +n1te +n1ti +ntiali4 +n3to +n1tr +nt4s5t +nt4su +n3tu +n3ty +4n1v +3ny +n3z +o3a +o4as +ob3li +o1c +o4din +od5ri +od5s +od5un +o1e +of5r +o4gek +o4gel +o4g5o +og5re +og5sk +o5h +o5in +oi6s5e +o1j +o3ka +o1ke +o3ku +o3la +o3le +o1li +o1lo +o3lu +o5ly +1omr +on3k +ook5 +o3or +o5ov +o3pi +op3l +op3r +op3s +3opta +4or. +or1an +3ordn +ord5s +o3re. +o3reg +o3rek +o3rer +o3re3s +o3ret +o3ri +3orient +or5im +o4r5in +or3k +or5o +or3sl +or3st +o3si +o3so +o3t +o1te +o5un +ov4s +3pa +pa5gh +p5anl +p3d +4pec +3pen +1per +pe1ra +pe5s +pe3u +p3f +4p5h +1pla +p4lan +4ple. +4pler +4ples +p3m +p3n +5pok +4po3re +3pot +4p5p4 +p4ro +1proc +p3sk +p5so +ps4p +p3st +p1t +1pu +pu5b +p5ule +p5v +5py3 +qu4 +4raf +ra5is +4rarb +r1b +r4d5ar +r3dr +rd4s3 +4reks +1rel +re5la +r5enss +5rese +re5spo +4ress +re3st +re5s4u +5rett +r1f +r1gu +r1h +ri1e +ri5la +4rimo +r4ing +ringse4 +ringso4r +4rinp +4rint +r3ka +r1ke +r1ki +rk3so +r3ku +r1l +rmo4 +r5mu +r1n +ro1b +ro3p +r3or +r3p +r1r +rre5s +rro4n5 +r1sa +r1si +r5skr +r4sk5v +rs4n +r3sp +r5stu +r5su +r3sv +r5tal +r1te +r4teli +r1ti +r3to +r4t5or +rt5rat +rt3re +r5tri +r5tro +rt3s +r5ty +r3ud +run4da +5rut +r3va +r1ve +r3vi +ry4s +s3af +1sam +sa4ma +s3ap +s1ar +1sat +4s1b +s1d +sdy4 +1se +s4ed +5s4er +se4se +s1f +4s1g4 +4s3h +si4bl +1sig +s5int +5sis +5sit +5siu +s5ju +4sk. +1skab +1ske +s3kl +sk5s4 +5sky +s1le +s1li +slo3 +5slu +s5ly +s1m +s4my +4snin +s4nit +so5k +5sol +5som. +3somm +s5oms +5somt +3son +4s1op +sp4 +3spec +4sper +3s4pi +s1pl +3sprog. +s5r4 +s1s4 +4st. +5s4tam +1stan +st5as +3stat +1stav +1ste. +1sted +3stel +5stemo +1sten +5step +3ster. +3stes +5stet +5stj +3sto +st5om +1str +s1ud +3sul +s3un +3sur +s3ve +3s4y +1sy1s +5ta. 
+1tag +tands3 +4tanv +4tb +tede4l +teds5 +3teg +5tekn +teo1 +5term +te5ro +4t1f +6t3g +t1h +tialis5t +3tid +ti4en +ti3st +4t3k +4t1l +tli4s5 +t1m +t1n +to5ra +to1re +to1ri +tor4m +4t3p +t4ra +4tres +tro5v +1try +4ts +t3si +ts4pa +ts5pr +t3st +ts5ul +4t1t +t5uds +5tur +t5ve +1typ +u1a +5udl +ud5r +ud3s +3udv +u1e +ue4t5 +uge4ri +ugs3 +u5gu +u3i +u5kl +uk4ta +uk4tr +u1la +u1le +u5ly +u5pe +up5l +u5q +u3ra +u3re +u4r3eg +u1rer +u3ro +us5a +u3si +u5ska +u5so +us5v +u1te +u1ti +u1to +ut5r +ut5s4 +5u5v +va5d +3varm +1ved +ve4l5e +ve4reg +ve3s +5vet +v5h +vi4l3in +1vis +v5j +v5k +vl4 +v3le +v5li +vls1 +1vo +4v5om +v5p +v5re +v3st +v5su +v5t +3vu +y3a +y5dr +y3e +y3ke +y5ki +yk3li +y3ko +yk4s5 +y3kv +y5li +y5lo +y5mu +yns5 +y5o +y1pe +y3pi +y3re +yr3ek +y3ri +y3si +y3ti +y5t3r +y5ve +zi5o + +.så3 +.ær5i +.øv3r +a3tø +a5væ +brød3 +5bæ +5drøv +dstå4 +3dæ +3dø +e3læ +e3lø +e3rø +er5øn +e5tæ +e5tø +e1væ +e3æ +e5å +3fæ +3fø +fø4r5en +giø4 +g4sø +g5så +3gæ +3gø1 +3gå +i5tæ +i3ø +3kø +3kå +lingeniø4 +l3væ +5løs +m5tå +1mæ +3mø +3må +n3kæ +n5tæ +3næ +4n5æb +5nø +o5læ +or3ø +o5å +5præ +5pæd +på3 +r5kæ +r5tæ +r5tø +r3væ +r5æl +4røn +5rør +3råd +r5år +s4kå +3slå +s4næ +5stø +1stå +1sæ +4s5æn +1sø +s5øk +så4r5 +ti4ø +3træk. +t4sø +t5så +t3væ +u3læ +3værd +1værk +5vå +y5væ +æb3l +æ3c +æ3e +æg5a +æ4gek +æ4g5r +ægs5 +æ5i +æ5kv +ælle4 +æn1dr +æ5o +æ1re +ær4g5r +æ3ri +ær4ma +ær4mo +ær5s +æ5si +æ3so +æ3ste +æ3ve +øde5 +ø3e +ø1je +ø3ke +ø3le +øms5 +øn3st +øn4t3 +ø1re +ø3ri +ørne3 +ør5o +ø1ve +å1d +å1e +å5h +å3l +å3re +års5t +å5sk +å3t + + diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml index 802c79c780689..c6b075571f221 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml @@ -1781,6 +1781,37 @@ - length: { tokens: 1 } - match: { tokens.0.token: abschliess } +--- +"persian_stem": + - do: + indices.create: + index: test + body: + settings: + analysis: + filter: + my_persian_stem: + type: persian_stem + - do: + indices.analyze: + index: test + body: + text: جامدات + tokenizer: keyword + filter: [my_persian_stem] + - length: { tokens: 1 } + - match: { tokens.0.token: جامد } + + # Test pre-configured token filter too: + - do: + indices.analyze: + body: + text: جامدات + tokenizer: keyword + filter: [persian_stem] + - length: { tokens: 1 } + - match: { tokens.0.token: جامد } + --- "russian_stem": - do: diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java index bf9e9b71b8491..5b2db9ff940e7 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java @@ -165,7 +165,7 @@ private Map filterForAllowlistSetting(Settings settin // Assert that no unknown processors are defined in the allowlist final Set unknownAllowlistProcessors = allowlist.stream() .filter(p -> map.containsKey(p) == false) - .collect(Collectors.toSet()); + .collect(Collectors.toUnmodifiableSet()); if (unknownAllowlistProcessors.isEmpty() == false) { throw new IllegalArgumentException( "Processor(s) " diff --git 
a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml index edb7b77eb8d28..ecd56ea7f277e 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml @@ -41,6 +41,10 @@ teardown: ingest.delete_pipeline: id: "pipeline2" ignore: 404 + - do: + indices.delete_index_template: + name: test_index_template_for_bulk + ignore: 404 --- "Test bulk request without default pipeline": @@ -168,47 +172,45 @@ teardown: id: test_id3 - match: { _source: {"f1": "v2", "f2": 47, "field1": "value1"}} +# related issue: https://github.com/opensearch-project/OpenSearch/issues/12888 --- -"Test bulk API with batch enabled happy case": +"Test bulk upsert honors default_pipeline and final_pipeline when the auto-created index matches with the index template": - skip: - version: " - 2.13.99" - reason: "Added in 2.14.0" + version: " - 2.15.99" + reason: "fixed in 2.16.0" + features: allowed_warnings + - do: + allowed_warnings: + - "index template [test_for_bulk_upsert_index_template] has index patterns [test_bulk_upsert_*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test_for_bulk_upsert_index_template] will take precedence during new index creation" + indices.put_index_template: + name: test_for_bulk_upsert_index_template + body: + index_patterns: test_bulk_upsert_* + template: + settings: + number_of_shards: 1 + number_of_replicas: 0 + default_pipeline: pipeline1 + final_pipeline: pipeline2 - do: bulk: refresh: true - batch_size: 2 - pipeline: "pipeline1" body: - - '{"index": {"_index": "test_index", "_id": "test_id1"}}' - - '{"text": "text1"}' - - '{"index": {"_index": "test_index", "_id": "test_id2"}}' - - '{"text": "text2"}' - - '{"index": {"_index": "test_index", "_id": "test_id3"}}' - - '{"text": "text3"}' - - '{"index": {"_index": "test_index", "_id": "test_id4"}}' - - '{"text": "text4"}' - - '{"index": {"_index": "test_index", "_id": "test_id5", "pipeline": "pipeline2"}}' - - '{"text": "text5"}' - - '{"index": {"_index": "test_index", "_id": "test_id6", "pipeline": "pipeline2"}}' - - '{"text": "text6"}' + - '{"update": {"_index": "test_bulk_upsert_index", "_id": "test_id3"}}' + - '{"upsert": {"f1": "v2", "f2": 47}, "doc": {"x": 1}}' - match: { errors: false } + - match: { items.0.update.result: created } - do: get: - index: test_index - id: test_id5 - - match: { _source: {"text": "text5", "field2": "value2"}} - - - do: - get: - index: test_index + index: test_bulk_upsert_index id: test_id3 - - match: { _source: { "text": "text3", "field1": "value1" } } + - match: { _source: {"f1": "v2", "f2": 47, "field1": "value1", "field2": "value2"}} --- -"Test bulk API with batch_size missing": +"Test bulk API with default batch size": - skip: version: " - 2.13.99" reason: "Added in 2.14.0" @@ -222,20 +224,28 @@ teardown: - '{"text": "text1"}' - '{"index": {"_index": "test_index", "_id": "test_id2"}}' - '{"text": "text2"}' + - '{"index": {"_index": "test_index", "_id": "test_id3"}}' + - '{"text": "text3"}' + - '{"index": {"_index": "test_index", "_id": "test_id4"}}' + - '{"text": "text4"}' + - '{"index": {"_index": "test_index", "_id": "test_id5", "pipeline": "pipeline2"}}' + - '{"text": "text5"}' + - '{"index": {"_index": "test_index", "_id": "test_id6", "pipeline": "pipeline2"}}' + - '{"text": "text6"}' - match: { 
errors: false } - do: get: index: test_index - id: test_id1 - - match: { _source: { "text": "text1", "field1": "value1" } } + id: test_id5 + - match: { _source: {"text": "text5", "field2": "value2"}} - do: get: index: test_index - id: test_id2 - - match: { _source: { "text": "text2", "field1": "value1" } } + id: test_id3 + - match: { _source: { "text": "text3", "field1": "value1" } } --- "Test bulk API with invalid batch_size": diff --git a/modules/ingest-geoip/licenses/jackson-annotations-2.17.1.jar.sha1 b/modules/ingest-geoip/licenses/jackson-annotations-2.17.1.jar.sha1 deleted file mode 100644 index 4ceead1b7ae4f..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-annotations-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fca7ef6192c9ad05d07bc50da991bf937a84af3a \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-annotations-2.17.2.jar.sha1 b/modules/ingest-geoip/licenses/jackson-annotations-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..411e1d62459fd --- /dev/null +++ b/modules/ingest-geoip/licenses/jackson-annotations-2.17.2.jar.sha1 @@ -0,0 +1 @@ +147b7b9412ffff24339f8aba080b292448e08698 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.17.1.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.17.1.jar.sha1 deleted file mode 100644 index 7cf1ac1b60301..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-databind-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0524dcbcccdde7d45a679dfc333e4763feb09079 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.17.2.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..f2b4dbdc5decb --- /dev/null +++ b/modules/ingest-geoip/licenses/jackson-databind-2.17.2.jar.sha1 @@ -0,0 +1 @@ +e6deb029e5901e027c129341fac39e515066b68c \ No newline at end of file diff --git a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java index 524d1719c37c0..742b923f05199 100644 --- a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java +++ b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java @@ -44,6 +44,7 @@ import org.opensearch.common.cache.CacheBuilder; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; import org.opensearch.ingest.Processor; import org.opensearch.plugins.IngestPlugin; @@ -62,10 +63,18 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.function.Function; +import java.util.stream.Collectors; import java.util.stream.Stream; public class IngestGeoIpModulePlugin extends Plugin implements IngestPlugin, Closeable { + static final Setting> PROCESSORS_ALLOWLIST_SETTING = Setting.listSetting( + "ingest.geoip.processors.allowed", + List.of(), + Function.identity(), + Setting.Property.NodeScope + ); public static final Setting CACHE_SIZE = Setting.longSetting("ingest.geoip.cache_size", 1000, 0, Setting.Property.NodeScope); static String[] DEFAULT_DATABASE_FILENAMES = new String[] { "GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb" }; @@ -74,7 +83,7 @@ public class IngestGeoIpModulePlugin extends Plugin implements IngestPlugin, Clo @Override public 
List> getSettings() { - return Arrays.asList(CACHE_SIZE); + return Arrays.asList(CACHE_SIZE, PROCESSORS_ALLOWLIST_SETTING); } @Override @@ -90,7 +99,10 @@ public Map getProcessors(Processor.Parameters paramet } catch (IOException e) { throw new RuntimeException(e); } - return Collections.singletonMap(GeoIpProcessor.TYPE, new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(cacheSize))); + return filterForAllowlistSetting( + parameters.env.settings(), + Map.of(GeoIpProcessor.TYPE, new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(cacheSize))) + ); } /* @@ -175,6 +187,30 @@ public void close() throws IOException { } } + private Map filterForAllowlistSetting(Settings settings, Map map) { + if (PROCESSORS_ALLOWLIST_SETTING.exists(settings) == false) { + return Map.copyOf(map); + } + final Set allowlist = Set.copyOf(PROCESSORS_ALLOWLIST_SETTING.get(settings)); + // Assert that no unknown processors are defined in the allowlist + final Set unknownAllowlistProcessors = allowlist.stream() + .filter(p -> map.containsKey(p) == false) + .collect(Collectors.toUnmodifiableSet()); + if (unknownAllowlistProcessors.isEmpty() == false) { + throw new IllegalArgumentException( + "Processor(s) " + + unknownAllowlistProcessors + + " were defined in [" + + PROCESSORS_ALLOWLIST_SETTING.getKey() + + "] but do not exist" + ); + } + return map.entrySet() + .stream() + .filter(e -> allowlist.contains(e.getKey())) + .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue)); + } + /** * The in-memory cache for the geoip data. There should only be 1 instance of this class.. * This cache differs from the maxmind's {@link NodeCache} such that this cache stores the deserialized Json objects to avoid the diff --git a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java index f6d42d1d65670..9446ec1228532 100644 --- a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java +++ b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java @@ -35,8 +35,20 @@ import com.maxmind.geoip2.model.AbstractResponse; import org.opensearch.common.network.InetAddresses; +import org.opensearch.common.settings.Settings; +import org.opensearch.env.TestEnvironment; +import org.opensearch.ingest.Processor; import org.opensearch.ingest.geoip.IngestGeoIpModulePlugin.GeoIpCache; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.StreamsUtils; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.Set; import static org.mockito.Mockito.mock; @@ -77,4 +89,87 @@ public void testInvalidInit() { IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new GeoIpCache(-1)); assertEquals("geoip max cache size must be 0 or greater", ex.getMessage()); } + + public void testAllowList() throws IOException { + runAllowListTest(List.of()); + runAllowListTest(List.of("geoip")); + } + + public void testInvalidAllowList() throws IOException { + List invalidAllowList = List.of("set"); + Settings.Builder settingsBuilder = Settings.builder() + .putList(IngestGeoIpModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey(), invalidAllowList); + createDb(settingsBuilder); + try (IngestGeoIpModulePlugin plugin = new 
IngestGeoIpModulePlugin()) { + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> plugin.getProcessors(createParameters(settingsBuilder.build())) + ); + assertEquals( + "Processor(s) " + + invalidAllowList + + " were defined in [" + + IngestGeoIpModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey() + + "] but do not exist", + e.getMessage() + ); + } + } + + public void testAllowListNotSpecified() throws IOException { + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.remove(IngestGeoIpModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey()); + createDb(settingsBuilder); + try (IngestGeoIpModulePlugin plugin = new IngestGeoIpModulePlugin()) { + final Set expected = Set.of("geoip"); + assertEquals(expected, plugin.getProcessors(createParameters(settingsBuilder.build())).keySet()); + } + } + + private void runAllowListTest(List allowList) throws IOException { + Settings.Builder settingsBuilder = Settings.builder(); + createDb(settingsBuilder); + final Settings settings = settingsBuilder.putList(IngestGeoIpModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey(), allowList).build(); + try (IngestGeoIpModulePlugin plugin = new IngestGeoIpModulePlugin()) { + assertEquals(Set.copyOf(allowList), plugin.getProcessors(createParameters(settings)).keySet()); + } + } + + private void createDb(Settings.Builder settingsBuilder) throws IOException { + Path configDir = createTempDir(); + Path userAgentConfigDir = configDir.resolve("ingest-geoip"); + Files.createDirectories(userAgentConfigDir); + settingsBuilder.put("ingest.geoip.database_path", configDir).put("path.home", configDir); + try { + Files.copy( + new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/GeoLite2-City.mmdb")), + configDir.resolve("GeoLite2-City.mmdb") + ); + Files.copy( + new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/GeoLite2-Country.mmdb")), + configDir.resolve("GeoLite2-Country.mmdb") + ); + Files.copy( + new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/GeoLite2-ASN.mmdb")), + configDir.resolve("GeoLite2-ASN.mmdb") + ); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private static Processor.Parameters createParameters(Settings settings) { + return new Processor.Parameters( + TestEnvironment.newEnvironment(settings), + null, + null, + null, + () -> 0L, + (a, b) -> null, + null, + null, + $ -> {}, + null + ); + } } diff --git a/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java b/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java index cd96fcf2347ef..bac90d20b44e1 100644 --- a/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java +++ b/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java @@ -33,6 +33,7 @@ package org.opensearch.ingest.useragent; import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; import org.opensearch.ingest.Processor; import org.opensearch.plugins.IngestPlugin; import org.opensearch.plugins.Plugin; @@ -47,10 +48,19 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; import java.util.stream.Stream; public class IngestUserAgentModulePlugin extends Plugin implements IngestPlugin { + static final Setting> PROCESSORS_ALLOWLIST_SETTING = 
Setting.listSetting( + "ingest.useragent.processors.allowed", + List.of(), + Function.identity(), + Setting.Property.NodeScope + ); private final Setting CACHE_SIZE_SETTING = Setting.longSetting( "ingest.user_agent.cache_size", 1000, @@ -77,7 +87,34 @@ public Map getProcessors(Processor.Parameters paramet } catch (IOException e) { throw new RuntimeException(e); } - return Collections.singletonMap(UserAgentProcessor.TYPE, new UserAgentProcessor.Factory(userAgentParsers)); + return filterForAllowlistSetting( + parameters.env.settings(), + Collections.singletonMap(UserAgentProcessor.TYPE, new UserAgentProcessor.Factory(userAgentParsers)) + ); + } + + private Map filterForAllowlistSetting(Settings settings, Map map) { + if (PROCESSORS_ALLOWLIST_SETTING.exists(settings) == false) { + return Map.copyOf(map); + } + final Set allowlist = Set.copyOf(PROCESSORS_ALLOWLIST_SETTING.get(settings)); + // Assert that no unknown processors are defined in the allowlist + final Set unknownAllowlistProcessors = allowlist.stream() + .filter(p -> map.containsKey(p) == false) + .collect(Collectors.toUnmodifiableSet()); + if (unknownAllowlistProcessors.isEmpty() == false) { + throw new IllegalArgumentException( + "Processor(s) " + + unknownAllowlistProcessors + + " were defined in [" + + PROCESSORS_ALLOWLIST_SETTING.getKey() + + "] but do not exist" + ); + } + return map.entrySet() + .stream() + .filter(e -> allowlist.contains(e.getKey())) + .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue)); } static Map createUserAgentParsers(Path userAgentConfigDirectory, UserAgentCache cache) throws IOException { diff --git a/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/IngestUserAgentModulePluginTests.java b/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/IngestUserAgentModulePluginTests.java new file mode 100644 index 0000000000000..31fdafff1188a --- /dev/null +++ b/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/IngestUserAgentModulePluginTests.java @@ -0,0 +1,114 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
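+ *
+ * Editor's sketch (assumption, not part of the original change): the
+ * "ingest.useragent.processors.allowed" node setting exercised below is
+ * read from opensearch.yml, for example:
+ *
+ *   ingest.useragent.processors.allowed:
+ *     - user_agent
+ *
+ * A name that does not match a registered processor makes getProcessors()
+ * throw the IllegalArgumentException asserted in testInvalidAllowList, and
+ * an empty list registers no processors from this module at all.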
+ */ + +package org.opensearch.ingest.useragent; + +import org.opensearch.common.settings.Settings; +import org.opensearch.env.TestEnvironment; +import org.opensearch.ingest.Processor; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.IOException; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.Set; + +public class IngestUserAgentModulePluginTests extends OpenSearchTestCase { + private Settings.Builder settingsBuilder; + + @Before + public void setup() throws IOException { + Path configDir = createTempDir(); + Path userAgentConfigDir = configDir.resolve("ingest-user-agent"); + Files.createDirectories(userAgentConfigDir); + settingsBuilder = Settings.builder().put("ingest-user-agent", configDir).put("path.home", configDir); + + // Copy file, leaving out the device parsers at the end + String regexWithoutDevicesFilename = "regexes_without_devices.yml"; + try ( + BufferedReader reader = new BufferedReader( + new InputStreamReader(UserAgentProcessor.class.getResourceAsStream("/regexes.yml"), StandardCharsets.UTF_8) + ); + BufferedWriter writer = Files.newBufferedWriter(userAgentConfigDir.resolve(regexWithoutDevicesFilename)); + ) { + String line; + while ((line = reader.readLine()) != null) { + if (line.startsWith("device_parsers:")) { + break; + } + + writer.write(line); + writer.newLine(); + } + } + } + + public void testAllowList() throws IOException { + runAllowListTest(List.of()); + runAllowListTest(List.of("user_agent")); + } + + public void testInvalidAllowList() throws IOException { + List invalidAllowList = List.of("set"); + final Settings settings = settingsBuilder.putList( + IngestUserAgentModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey(), + invalidAllowList + ).build(); + try (IngestUserAgentModulePlugin plugin = new IngestUserAgentModulePlugin()) { + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> plugin.getProcessors(createParameters(settings)) + ); + assertEquals( + "Processor(s) " + + invalidAllowList + + " were defined in [" + + IngestUserAgentModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey() + + "] but do not exist", + e.getMessage() + ); + } + } + + public void testAllowListNotSpecified() throws IOException { + settingsBuilder.remove(IngestUserAgentModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey()); + try (IngestUserAgentModulePlugin plugin = new IngestUserAgentModulePlugin()) { + final Set expected = Set.of("user_agent"); + assertEquals(expected, plugin.getProcessors(createParameters(settingsBuilder.build())).keySet()); + } + } + + private void runAllowListTest(List allowList) throws IOException { + final Settings settings = settingsBuilder.putList(IngestUserAgentModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey(), allowList) + .build(); + try (IngestUserAgentModulePlugin plugin = new IngestUserAgentModulePlugin()) { + assertEquals(Set.copyOf(allowList), plugin.getProcessors(createParameters(settings)).keySet()); + } + } + + private static Processor.Parameters createParameters(Settings settings) { + return new Processor.Parameters( + TestEnvironment.newEnvironment(settings), + null, + null, + null, + () -> 0L, + (a, b) -> null, + null, + null, + $ -> {}, + null + ); + } +} diff --git a/modules/lang-expression/licenses/lucene-expressions-9.12.0-snapshot-847316d.jar.sha1 
b/modules/lang-expression/licenses/lucene-expressions-9.12.0-snapshot-847316d.jar.sha1 new file mode 100644 index 0000000000000..83dd8e657bdd5 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.12.0-snapshot-847316d.jar.sha1 @@ -0,0 +1 @@ +b866103bbaca4141c152deca9252bd137026dafc \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.12.0-snapshot-c896995.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.12.0-snapshot-c896995.jar.sha1 deleted file mode 100644 index 6d8d3be59f945..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.12.0-snapshot-c896995.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9f0321cf2d34fca3f1f9334fdfee2b79d9d27444 \ No newline at end of file diff --git a/modules/lang-mustache/build.gradle b/modules/lang-mustache/build.gradle index bcf5c07ea8c64..a836124f94b41 100644 --- a/modules/lang-mustache/build.gradle +++ b/modules/lang-mustache/build.gradle @@ -38,7 +38,7 @@ opensearchplugin { } dependencies { - api "com.github.spullara.mustache.java:compiler:0.9.13" + api "com.github.spullara.mustache.java:compiler:0.9.14" } restResources { diff --git a/modules/lang-mustache/licenses/compiler-0.9.13.jar.sha1 b/modules/lang-mustache/licenses/compiler-0.9.13.jar.sha1 deleted file mode 100644 index 70d53aac260eb..0000000000000 --- a/modules/lang-mustache/licenses/compiler-0.9.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -60666500a7dce7a5d3e17c09b46ea6f037192bd5 \ No newline at end of file diff --git a/modules/lang-mustache/licenses/compiler-0.9.14.jar.sha1 b/modules/lang-mustache/licenses/compiler-0.9.14.jar.sha1 new file mode 100644 index 0000000000000..29069ac90817a --- /dev/null +++ b/modules/lang-mustache/licenses/compiler-0.9.14.jar.sha1 @@ -0,0 +1 @@ +e6df8b5aabb80d6eb6d8fef312a56d66b7659ba6 \ No newline at end of file diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index fb51a0bb7f157..7075901979e3b 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -33,6 +33,7 @@ import com.github.jengelman.gradle.plugins.shadow.ShadowBasePlugin apply plugin: 'opensearch.validate-rest-spec' apply plugin: 'opensearch.yaml-rest-test' +apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { description 'An easy, safe and fast scripting language for OpenSearch' @@ -46,6 +47,7 @@ ext { testClusters.all { module ':modules:mapper-extras' + module ':modules:aggs-matrix-stats' systemProperty 'opensearch.scripting.update.ctx_in_params', 'false' // TODO: remove this once cname is prepended to transport.publish_address by default in 8.0 systemProperty 'opensearch.transport.cname_in_publish_address', 'true' diff --git a/modules/lang-painless/src/internalClusterTest/java/org/opensearch/painless/SimplePainlessIT.java b/modules/lang-painless/src/internalClusterTest/java/org/opensearch/painless/SimplePainlessIT.java new file mode 100644 index 0000000000000..df327bf4871c6 --- /dev/null +++ b/modules/lang-painless/src/internalClusterTest/java/org/opensearch/painless/SimplePainlessIT.java @@ -0,0 +1,231 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
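+ *
+ * Editor's sketch (assumption, not part of the original change): the
+ * derived-field tests below correspond roughly to this REST request shape,
+ * mirroring the "derived" search-body section used by the YAML tests later
+ * in this patch:
+ *
+ *   POST /test-df/_search
+ *   {
+ *     "derived": {
+ *       "result": { "type": "keyword", "script": "emit(params._source[\"field\"])" }
+ *     },
+ *     "query": { "terms": { "result": ["value1"] } }
+ *   }
+ *
+ * Both derived-field tests are skipped when concurrent segment search is
+ * enabled (see https://github.com/opensearch-project/OpenSearch/issues/15007).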
+ */ + +package org.opensearch.painless; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.WriteRequest; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.query.TermsQueryBuilder; +import org.opensearch.plugins.Plugin; +import org.opensearch.script.Script; +import org.opensearch.script.ScriptType; +import org.opensearch.search.aggregations.AggregationBuilder; +import org.opensearch.search.aggregations.AggregationBuilders; +import org.opensearch.search.aggregations.bucket.composite.InternalComposite; +import org.opensearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder; +import org.opensearch.search.aggregations.bucket.terms.Terms; +import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; + +@OpenSearchIntegTestCase.SuiteScopeTestCase +public class SimplePainlessIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public SimplePainlessIT(Settings nodeSettings) { + super(nodeSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() } + ); + } + + @Override + protected Collection> nodePlugins() { + return List.of(PainlessModulePlugin.class); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey(), "4") + .build(); + } + + @Override + public void setupSuiteScopeCluster() throws Exception { + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() + .startObject() + .field("dynamic", "false") + .startObject("_meta") + .field("schema_version", 5) + .endObject() + .startObject("properties") + .startObject("entity") + .field("type", "nested") + .endObject() + .endObject() + .endObject(); + + assertAcked( + prepareCreate("test").setMapping(xContentBuilder) + .setSettings( + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ) + ); + + assertAcked( + prepareCreate("test-df").setSettings( + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 
1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ) + ); + + client().prepareIndex("test") + .setId("a") + .setSource( + "{\"entity\":[{\"name\":\"ip-field\",\"value\":\"1.2.3.4\"},{\"name\":\"keyword-field\",\"value\":\"field-1\"}]}", + MediaTypeRegistry.JSON + ) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + client().prepareIndex("test") + .setId("b") + .setSource( + "{\"entity\":[{\"name\":\"ip-field\",\"value\":\"5.6.7.8\"},{\"name\":\"keyword-field\",\"value\":\"field-2\"}]}", + MediaTypeRegistry.JSON + ) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + client().prepareIndex("test") + .setId("c") + .setSource( + "{\"entity\":[{\"name\":\"ip-field\",\"value\":\"1.6.3.8\"},{\"name\":\"keyword-field\",\"value\":\"field-2\"}]}", + MediaTypeRegistry.JSON + ) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + client().prepareIndex("test") + .setId("d") + .setSource( + "{\"entity\":[{\"name\":\"ip-field\",\"value\":\"2.6.4.8\"},{\"name\":\"keyword-field\",\"value\":\"field-2\"}]}", + MediaTypeRegistry.JSON + ) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + ensureSearchable("test"); + + client().prepareIndex("test-df") + .setId("a") + .setSource("{\"field\":\"value1\"}", MediaTypeRegistry.JSON) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + client().prepareIndex("test-df") + .setId("b") + .setSource("{\"field\":\"value2\"}", MediaTypeRegistry.JSON) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + client().prepareIndex("test-df") + .setId("c") + .setSource("{\"field\":\"value3\"}", MediaTypeRegistry.JSON) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + client().prepareIndex("test-df") + .setId("d") + .setSource("{\"field\":\"value1\"}", MediaTypeRegistry.JSON) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + ensureSearchable("test"); + } + + public void testTermsValuesSource() throws Exception { + AggregationBuilder agg = AggregationBuilders.composite( + "multi_buckets", + Collections.singletonList( + new TermsValuesSourceBuilder("keyword-field").script( + new Script( + ScriptType.INLINE, + "painless", + "String value = null; if (params == null || params._source == null || params._source.entity == null) { return \"\"; } for (item in params._source.entity) { if (item[\"name\"] == \"keyword-field\") { value = item['value']; break; } } return value;", + Collections.emptyMap() + ) + ) + ) + ); + SearchResponse response = client().prepareSearch("test").setQuery(matchAllQuery()).addAggregation(agg).get(); + + assertSearchResponse(response); + assertEquals(2, ((InternalComposite) response.getAggregations().get("multi_buckets")).getBuckets().size()); + assertEquals( + "field-1", + ((InternalComposite) response.getAggregations().get("multi_buckets")).getBuckets().get(0).getKey().get("keyword-field") + ); + assertEquals(1, ((InternalComposite) response.getAggregations().get("multi_buckets")).getBuckets().get(0).getDocCount()); + assertEquals( + "field-2", + ((InternalComposite) response.getAggregations().get("multi_buckets")).getBuckets().get(1).getKey().get("keyword-field") + ); + assertEquals(3, ((InternalComposite) response.getAggregations().get("multi_buckets")).getBuckets().get(1).getDocCount()); + } + + public void testSimpleDerivedFieldsQuery() { + assumeFalse( + "Derived fields do not support concurrent search https://github.com/opensearch-project/OpenSearch/issues/15007", + 
internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); + SearchRequest searchRequest = new SearchRequest("test-df").source( + SearchSourceBuilder.searchSource() + .derivedField("result", "keyword", new Script("emit(params._source[\"field\"])")) + .fetchField("result") + .query(new TermsQueryBuilder("result", "value1")) + ); + SearchResponse response = client().search(searchRequest).actionGet(); + assertSearchResponse(response); + assertEquals(2, Objects.requireNonNull(response.getHits().getTotalHits()).value); + } + + public void testSimpleDerivedFieldsAgg() { + assumeFalse( + "Derived fields do not support concurrent search https://github.com/opensearch-project/OpenSearch/issues/15007", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); + SearchRequest searchRequest = new SearchRequest("test-df").source( + SearchSourceBuilder.searchSource() + .derivedField("result", "keyword", new Script("emit(params._source[\"field\"])")) + .fetchField("result") + .aggregation(new TermsAggregationBuilder("derived-agg").field("result")) + ); + SearchResponse response = client().search(searchRequest).actionGet(); + assertSearchResponse(response); + Terms aggResponse = response.getAggregations().get("derived-agg"); + assertEquals(3, aggResponse.getBuckets().size()); + Terms.Bucket bucket = aggResponse.getBuckets().get(0); + assertEquals("value1", bucket.getKey()); + assertEquals(2, bucket.getDocCount()); + bucket = aggResponse.getBuckets().get(1); + assertEquals("value2", bucket.getKey()); + assertEquals(1, bucket.getDocCount()); + bucket = aggResponse.getBuckets().get(2); + assertEquals("value3", bucket.getKey()); + assertEquals(1, bucket.getDocCount()); + } +} diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/60_derived_field_aggs.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/60_derived_field_aggs.yml new file mode 100644 index 0000000000000..87c260ce5f308 --- /dev/null +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/60_derived_field_aggs.yml @@ -0,0 +1,1515 @@ +--- +setup: +- skip: + version: " - 2.14.99" + reason: "derived_field feature was added in 2.15" + +# -- NOT SUPPORTED: -- +# geobounds +# scripted metric +# -- NOT SUPPORTED: -- +# Any geo agg +# sig terms/text + +- do: + indices.create: + index: test + body: + mappings: + properties: + text: + type: text + keyword: + type: keyword + os: + type: keyword + long: + type: long + float: + type: float + double: + type: double + date: + type: date + geo: + type: geo_point + ip: + type: ip + boolean: + type: boolean + array_of_long: + type: long + json_field: + type: text + derived: + derived_text: + type: text + script: "emit(params._source[\"text\"])" + derived_text_prefilter_field: + type: text + script: "emit(params._source[\"text\"])" + prefilter_field: "text" + derived_keyword: + type: keyword + script: "emit(params._source[\"keyword\"])" + derived_os: + type: keyword + script: "emit(params._source[\"os\"])" + derived_long: + type: long + script: "emit(params._source[\"long\"])" + derived_float: + type: float + script: "emit(params._source[\"float\"])" + derived_double: + type: double + script: "emit(params._source[\"double\"])" + derived_date: + type: date + script: "emit(ZonedDateTime.parse(params._source[\"date\"]).toInstant().toEpochMilli())" + derived_geo: + type: geo_point + 
script: "emit(params._source[\"geo\"][0], params._source[\"geo\"][1])" + derived_ip: + type: ip + script: "emit(params._source[\"ip\"])" + derived_boolean: + type: boolean + script: "emit(params._source[\"boolean\"])" + derived_array_of_long: + type: long + script: "emit(params._source[\"array_of_long\"][0]);emit(params._source[\"array_of_long\"][1]);" + derived_object: + type: object + properties: + keyword: keyword + ip: ip + os: keyword + script: "emit(params._source[\"json_field\"])" + prefilter_field: "json_field" + +- do: + bulk: + refresh: true + body: + - index: + _index: test + _id: 1 + - text: "peter piper" + keyword: "foo" + os: "mac" + long: 1 + float: 1.0 + double: 1.0 + date: "2017-01-01T00:00:00Z" + geo: [ -74.0060, 40.7128 ] + ip: "192.168.0.1" + boolean: true + array_of_long: [ 1, 2 ] + json_field: "{\"text\":\"peter piper\",\"keyword\":\"foo\",\"os\":\"mac\",\"long\":1,\"float\":1.0,\"double\":1.0,\"date\":\"2017-01-01T00:00:00Z\",\"ip\":\"192.168.0.1\",\"boolean\":true, \"array_of_long\": [1, 2]}" + - index: + _index: test + _id: 2 + - text: "piper picked a peck" + keyword: "bar" + os: "windows" + long: 2 + float: 2.0 + double: 2.0 + date: "2017-01-02T00:00:00Z" + geo: [ -118.2437, 34.0522 ] + ip: "10.0.0.1" + boolean: false + array_of_long: [ 2, 3 ] + json_field: "{\"keyword\":\"bar\",\"long\":2,\"float\":2.0,\"os\":\"windows\",\"double\":2.0,\"date\":\"2017-01-02T00:00:00Z\",\"ip\":\"10.0.0.1\",\"boolean\":false, \"array_of_long\": [2, 3]}" + - index: + _index: test + _id: 3 + - text: "peck of pickled peppers" + keyword: "baz" + os: "mac" + long: -3 + float: -3.0 + double: -3.0 + date: "2017-01-03T00:00:00Z" + geo: [ -87.6298, 41.87 ] + ip: "172.16.0.1" + boolean: true + array_of_long: [ 3, 4 ] + json_field: "{\"keyword\":\"baz\",\"long\":-3,\"float\":-3.0,\"os\":\"mac\",\"double\":-3.0,\"date\":\"2017-01-03T00:00:00Z\",\"ip\":\"172.16.0.1\",\"boolean\":true, \"array_of_long\": [3, 4]}" + - index: + _index: test + _id: 4 + - text: "pickled peppers" + keyword: "qux" + os: "windows" + long: 4 + float: 4.0 + double: 4.0 + date: "2017-01-04T00:00:00Z" + geo: [ -74.0060, 40.7128 ] + ip: "192.168.0.2" + boolean: false + array_of_long: [ 4, 5 ] + json_field: "{\"keyword\":\"qux\",\"long\":4,\"float\":4.0,\"os\":\"windows\",\"double\":4.0,\"date\":\"2017-01-04T00:00:00Z\",\"ip\":\"192.168.0.2\",\"boolean\":false, \"array_of_long\": [4, 5]}" + - index: + _index: test + _id: 5 + - text: "peppers" + keyword: "quux" + os: "mac" + long: 5 + float: 5.0 + double: 5.0 + date: "2017-01-05T00:00:00Z" + geo: [ -87.6298, 41.87 ] + ip: "10.0.0.2" + boolean: true + array_of_long: [ 5, 6 ] + json_field: "{\"keyword\":\"quux\",\"long\":5,\"float\":5.0,\"os\":\"mac\",\"double\":5.0,\"date\":\"2017-01-05T00:00:00Z\",\"ip\":\"10.0.0.2\",\"boolean\":true, \"array_of_long\": [5, 6]}" + +- do: + indices.refresh: + index: [test] + +### BUCKET AGGS +--- +"Test terms aggregation on derived_keyword from search definition": +- do: + search: + index: test + body: + derived: + derived_keyword_search_definition: + type: keyword + script: "emit(params._source[\"keyword\"])" + size: 0 + aggs: + keywords: + terms: + field: derived_keyword_search_definition + +- match: { hits.total.value: 5 } +- length: { aggregations.keywords.buckets: 5 } +- match: { aggregations.keywords.buckets.0.key: "bar" } +- match: { aggregations.keywords.buckets.0.doc_count: 1 } +- match: { aggregations.keywords.buckets.1.key: "baz" } +- match: { aggregations.keywords.buckets.1.doc_count: 1 } +- match: { 
aggregations.keywords.buckets.2.key: "foo" } +- match: { aggregations.keywords.buckets.2.doc_count: 1 } +- match: { aggregations.keywords.buckets.3.key: "quux" } +- match: { aggregations.keywords.buckets.3.doc_count: 1 } +- match: { aggregations.keywords.buckets.4.key: "qux" } +- match: { aggregations.keywords.buckets.4.doc_count: 1 } + +--- +"Test terms aggregation on derived_keyword": +- do: + search: + index: test + body: + size: 0 + aggs: + keywords: + terms: + field: derived_keyword + +- match: { hits.total.value: 5 } +- length: { aggregations.keywords.buckets: 5 } +- match: { aggregations.keywords.buckets.0.key: "bar" } +- match: { aggregations.keywords.buckets.0.doc_count: 1 } +- match: { aggregations.keywords.buckets.1.key: "baz" } +- match: { aggregations.keywords.buckets.1.doc_count: 1 } +- match: { aggregations.keywords.buckets.2.key: "foo" } +- match: { aggregations.keywords.buckets.2.doc_count: 1 } +- match: { aggregations.keywords.buckets.3.key: "quux" } +- match: { aggregations.keywords.buckets.3.doc_count: 1 } +- match: { aggregations.keywords.buckets.4.key: "qux" } +- match: { aggregations.keywords.buckets.4.doc_count: 1 } + +--- +"Test range aggregation on derived_long": +- do: + search: + index: test + body: + size: 0 + aggs: + long_ranges: + range: + field: derived_long + ranges: + - to: 0 + - from: 0 + to: 3 + - from: 3 + +- match: { hits.total.value: 5 } +- length: { aggregations.long_ranges.buckets: 3 } +- match: { aggregations.long_ranges.buckets.0.doc_count: 1 } +- match: { aggregations.long_ranges.buckets.1.doc_count: 2 } +- match: { aggregations.long_ranges.buckets.2.doc_count: 2 } + +--- +"Test histogram aggregation on derived_float": +- do: + search: + index: test + body: + size: 0 + aggs: + float_histogram: + histogram: + field: derived_float + interval: 2 + +- match: { hits.total.value: 5 } +- length: { aggregations.float_histogram.buckets: 5 } +- match: { aggregations.float_histogram.buckets.0.key: -4.0 } +- match: { aggregations.float_histogram.buckets.0.doc_count: 1 } + +--- +"Test date_histogram aggregation on derived_date": +- do: + search: + index: test + body: + size: 0 + aggs: + date_histogram: + date_histogram: + field: derived_date + calendar_interval: day + +- match: { hits.total.value: 5 } +- length: { aggregations.date_histogram.buckets: 5 } +- match: { aggregations.date_histogram.buckets.0.key_as_string: "2017-01-01T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.0.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.1.key_as_string: "2017-01-02T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.1.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.2.key_as_string: "2017-01-03T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.2.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.3.key_as_string: "2017-01-04T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.3.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.4.key_as_string: "2017-01-05T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.4.doc_count: 1 } + +--- +"Test date_range aggregation on derived_date": +- do: + search: + index: test + body: + size: 0 + aggs: + date_range: + date_range: + field: derived_date + ranges: + - to: "2017-01-03T00:00:00Z" + - from: "2017-01-03T00:00:00Z" + +- match: { hits.total.value: 5 } +- match: { aggregations.date_range.buckets.0.key: "*-2017-01-03T00:00:00.000Z" } +- match: { aggregations.date_range.buckets.0.doc_count: 2 } +- match: { 
aggregations.date_range.buckets.1.key: "2017-01-03T00:00:00.000Z-*" } +- match: { aggregations.date_range.buckets.1.doc_count: 3 } + +--- +"Test filters aggregation on derived_boolean": +- do: + search: + index: test + body: + size: 0 + aggs: + boolean_filters: + filters: + filters: + true_values: + term: + derived_boolean: true + false_values: + term: + derived_boolean: false + +- match: { hits.total.value: 5 } +- match: { aggregations.boolean_filters.buckets.true_values.doc_count: 3 } +- match: { aggregations.boolean_filters.buckets.false_values.doc_count: 2 } + +--- +"Test adjacency matrix aggregation on derived_long": +- do: + search: + index: test + body: + size: 0 + aggs: + adj_matrix: + adjacency_matrix: + filters: + high_num: + range: + derived_long: + gte: 3 + low_num: + range: + derived_long: + lt: 3 +- match: { hits.total.value: 5 } +- length: { aggregations.adj_matrix.buckets: 2 } +- match: { aggregations.adj_matrix.buckets.0.key: "high_num" } +- match: { aggregations.adj_matrix.buckets.0.doc_count: 2 } +- match: { aggregations.adj_matrix.buckets.1.key: "low_num" } +- match: { aggregations.adj_matrix.buckets.1.doc_count: 3 } + +### METRIC AGGS + +--- +"Test stats aggregation on derived_array_of_long": +- do: + search: + index: test + body: + size: 0 + aggs: + long_array_stats: + stats: + field: derived_array_of_long + +- match: { hits.total.value: 5 } +- match: { aggregations.long_array_stats.count: 10 } +- match: { aggregations.long_array_stats.min: 1 } +- match: { aggregations.long_array_stats.max: 6 } +- match: { aggregations.long_array_stats.avg: 3.5 } +- match: { aggregations.long_array_stats.sum: 35 } + +--- +"Test cardinality aggregation on derived_keyword": +- do: + search: + index: test + body: + size: 0 + aggs: + unique_keywords: + cardinality: + field: derived_keyword + +- match: { hits.total.value: 5 } +- match: { aggregations.unique_keywords.value: 5 } + +--- +"Test percentiles aggregation on derived_double": +- do: + search: + index: test + body: + size: 0 + aggs: + double_percentiles: + percentiles: + field: derived_double + percents: [ 25, 50, 75 ] + +- match: { hits.total.value: 5 } +- length: { aggregations.double_percentiles.values: 3} + +--- +"Test percentile ranks aggregation on derived_long": +- do: + search: + index: test + body: + size: 0 + aggs: + long_percentile_ranks: + percentile_ranks: + field: derived_long + values: [ 2, 4 ] + +- match: { hits.total.value: 5 } +- length: { aggregations.long_percentile_ranks.values: 2} + +--- +"Test top hits aggregation on derived_keyword": +- do: + search: + index: test + body: + size: 0 + aggs: + top_keywords: + terms: + field: derived_keyword + aggs: + top_hits: + top_hits: + size: 1 +- match: { hits.total.value: 5 } +- length: { aggregations.top_keywords.buckets: 5 } +- match: { aggregations.top_keywords.buckets.0.key: "bar" } +- match: { aggregations.top_keywords.buckets.0.doc_count: 1 } +- length: { aggregations.top_keywords.buckets.0.top_hits.hits.hits: 1 } + +--- +"Test matrix stats aggregation on derived_long and float": +- do: + search: + index: test + body: + size: 0 + aggs: + matrix_stats: + matrix_stats: + fields: [ derived_long, derived_float ] +- match: { hits.total.value: 5 } +- length: { aggregations.matrix_stats.fields: 2 } +- match: { aggregations.matrix_stats.fields.0.name: "derived_float" } +- match: { aggregations.matrix_stats.fields.0.count: 5 } +- match: { aggregations.matrix_stats.fields.1.name: "derived_long" } +- match: { aggregations.matrix_stats.fields.1.count: 5 } + +--- +"Test median 
absolute deviation aggregation on derived_long": +- do: + search: + index: test + body: + size: 0 + aggs: + mad_long: + median_absolute_deviation: + field: derived_long +- match: { hits.total.value: 5 } +- match: { aggregations.mad_long.value: 2.0 } + +## Pipeline agg +--- +"Test simple pipeline agg with derived_keyword and long": +- do: + search: + index: test + body: + size: 0 + aggs: + keywords: + terms: + field: derived_keyword + aggs: + sum_derived_longs: + sum: + field: derived_long + sum_total: + sum_bucket: + buckets_path: "keywords>sum_derived_longs" +- match: { hits.total.value: 5 } +- match: { aggregations.keywords.buckets.0.key: "bar" } +- match: { aggregations.keywords.buckets.0.sum_derived_longs.value: 2 } +- match: { aggregations.keywords.buckets.1.key: "baz" } +- match: { aggregations.keywords.buckets.1.sum_derived_longs.value: -3 } +- match: { aggregations.keywords.buckets.2.key: "foo" } +- match: { aggregations.keywords.buckets.2.sum_derived_longs.value: 1 } +- match: { aggregations.keywords.buckets.3.key: "quux" } +- match: { aggregations.keywords.buckets.3.sum_derived_longs.value: 5 } +- match: { aggregations.keywords.buckets.4.key: "qux" } +- match: { aggregations.keywords.buckets.4.sum_derived_longs.value: 4 } +- match: { aggregations.sum_total.value: 9 } + + +--- +"Test terms aggregation on derived_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + ip_terms: + terms: + field: derived_ip + +- match: { hits.total.value: 5 } +- length: { aggregations.ip_terms.buckets: 5 } +- match: { aggregations.ip_terms.buckets.0.key: "10.0.0.1" } +- match: { aggregations.ip_terms.buckets.0.doc_count: 1 } + +--- +"Test range aggregation on derived_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + ip_ranges: + ip_range: + field: derived_ip + ranges: + - to: "10.0.0.0" + - from: "10.0.0.0" + to: "172.16.0.0" + - from: "172.16.0.0" + +- match: { hits.total.value: 5 } +- length: { aggregations.ip_ranges.buckets: 3 } +- match: { aggregations.ip_ranges.buckets.0.doc_count: 0 } +- match: { aggregations.ip_ranges.buckets.1.doc_count: 2 } +- match: { aggregations.ip_ranges.buckets.2.doc_count: 3 } + +--- +"Test cardinality aggregation on derived_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + unique_ips: + cardinality: + field: derived_ip + +- match: { hits.total.value: 5 } +- match: { aggregations.unique_ips.value: 5 } + +--- +"Test missing aggregation on derived_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + missing_ips: + missing: + field: derived_ip + +- match: { hits.total.value: 5 } +- match: { aggregations.missing_ips.doc_count: 0 } + +--- +"Test value count aggregation on derived_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + ip_count: + value_count: + field: derived_ip + +- match: { hits.total.value: 5 } +- match: { aggregations.ip_count.value: 5 } + +--- +"Test composite agg": +- do: + search: + index: test + body: + size: 0 + aggs: + test_composite_agg: + composite: + size: 10 + sources: + - os: + terms: + field: derived_os + - keyword: + terms: + field: derived_keyword + - is_true: + terms: + field: derived_boolean + aggs: + avg_long: + avg: + field: derived_long +- match: { aggregations.test_composite_agg.buckets.0.key.os: "mac" } +- match: { aggregations.test_composite_agg.buckets.0.key.keyword: "baz" } +- match: { aggregations.test_composite_agg.buckets.0.key.is_true: true } +- match: { aggregations.test_composite_agg.buckets.0.doc_count: 1 } +- match: { 
aggregations.test_composite_agg.buckets.0.avg_long.value: -3.0 } +- match: { aggregations.test_composite_agg.buckets.1.key.os: "mac" } +- match: { aggregations.test_composite_agg.buckets.1.key.keyword: "foo" } +- match: { aggregations.test_composite_agg.buckets.1.key.is_true: true } +- match: { aggregations.test_composite_agg.buckets.1.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.1.avg_long.value: 1.0 } +- match: { aggregations.test_composite_agg.buckets.2.key.os: "mac" } +- match: { aggregations.test_composite_agg.buckets.2.key.keyword: "quux" } +- match: { aggregations.test_composite_agg.buckets.2.key.is_true: true } +- match: { aggregations.test_composite_agg.buckets.2.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.2.avg_long.value: 5.0 } +- match: { aggregations.test_composite_agg.buckets.3.key.os: "windows" } +- match: { aggregations.test_composite_agg.buckets.3.key.keyword: "bar" } +- match: { aggregations.test_composite_agg.buckets.3.key.is_true: false } +- match: { aggregations.test_composite_agg.buckets.3.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.3.avg_long.value: 2.0 } +- match: { aggregations.test_composite_agg.buckets.4.key.os: "windows" } +- match: { aggregations.test_composite_agg.buckets.4.key.keyword: "qux" } +- match: { aggregations.test_composite_agg.buckets.4.key.is_true: false } +- match: { aggregations.test_composite_agg.buckets.4.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.4.avg_long.value: 4.0 } + +--- +"Test auto date histogram": +- do: + search: + rest_total_hits_as_int: true + index: test + body: + size: 0 + aggs: + test_auto_date_histogram: + auto_date_histogram: + field: "derived_date" + buckets: 10 + format: "yyyy-MM-dd" + aggs: + avg_long: + avg: + field: derived_long +- match: { hits.total: 5 } +- length: { aggregations.test_auto_date_histogram.buckets: 9 } +- match: { aggregations.test_auto_date_histogram.buckets.0.key_as_string: "2017-01-01"} +- match: { aggregations.test_auto_date_histogram.buckets.0.avg_long.value: 1.0} + +--- +"Test variable_width_histogram aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + var_width_hist: + variable_width_histogram: + field: derived_long + buckets: 3 + +- match: { hits.total.value: 5 } +- length: { aggregations.var_width_hist.buckets: 3 } + +--- +"Test extended_stats aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + extended_stats_agg: + extended_stats: + field: derived_long + +- match: { hits.total.value: 5 } +- match: { aggregations.extended_stats_agg.count: 5 } +- match: { aggregations.extended_stats_agg.min: -3 } +- match: { aggregations.extended_stats_agg.max: 5 } +- is_true: aggregations.extended_stats_agg.avg +- is_true: aggregations.extended_stats_agg.sum +- is_true: aggregations.extended_stats_agg.sum_of_squares +- is_true: aggregations.extended_stats_agg.variance +- is_true: aggregations.extended_stats_agg.std_deviation + +--- +"Test rare_terms aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + rare_terms_agg: + rare_terms: + field: derived_keyword + max_doc_count: 1 + +- match: { hits.total.value: 5 } +- length: { aggregations.rare_terms_agg.buckets: 5 } + +--- +"Test global aggregation": +- do: + search: + index: test + body: + query: + term: + derived_keyword: "foo" + aggs: + all_docs: + global: {} + aggs: + avg_long: + avg: + field: derived_long + +- match: { hits.total.value: 1 } +- match: { aggregations.all_docs.doc_count: 5 } +- match: { 
aggregations.all_docs.avg_long.value: 1.8 } + +--- +"Test missing aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + missing_agg: + missing: + field: derived_keyword + +- match: { hits.total.value: 5 } +- match: { aggregations.missing_agg.doc_count: 0 } + +--- +"Test value_count aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + value_count_agg: + value_count: + field: derived_long + +- match: { hits.total.value: 5 } +- match: { aggregations.value_count_agg.value: 5 } + +--- +"Test weighted_avg aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + weighted_avg_agg: + weighted_avg: + value: + field: derived_long + weight: + field: derived_float + +- match: { hits.total.value: 5 } +- is_true: aggregations.weighted_avg_agg.value + +--- +"Test diversified_sampler aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + diversified_sampler_agg: + diversified_sampler: + field: derived_keyword + max_docs_per_value: 1 + aggs: + avg_long: + avg: + field: derived_long + +- match: { hits.total.value: 5 } +- match: { aggregations.diversified_sampler_agg.doc_count: 5 } +- match: { aggregations.diversified_sampler_agg.avg_long.value: 1.8 } + +--- +"Test sampler aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + sampler_agg: + sampler: + shard_size: 2 + aggs: + avg_long: + avg: + field: derived_long + +- match: { hits.total.value: 5 } +- is_true: aggregations.sampler_agg.doc_count +- is_true: aggregations.sampler_agg.avg_long.value + +--- +"Test multi_terms aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + multi_terms_agg: + multi_terms: + terms: + - field: derived_keyword + - field: derived_os + size: 10 + +- match: { hits.total.value: 5 } +- length: { aggregations.multi_terms_agg.buckets: 5 } + +#### SAME TESTS WITH DERIVED_OBJECT +--- +"Test terms aggregation on derived_object.keyword": +- do: + search: + index: test + body: + size: 0 + aggs: + keywords: + terms: + field: derived_object.keyword + +- match: { hits.total.value: 5 } +- length: { aggregations.keywords.buckets: 5 } +- match: { aggregations.keywords.buckets.0.key: "bar" } +- match: { aggregations.keywords.buckets.0.doc_count: 1 } +- match: { aggregations.keywords.buckets.1.key: "baz" } +- match: { aggregations.keywords.buckets.1.doc_count: 1 } +- match: { aggregations.keywords.buckets.2.key: "foo" } +- match: { aggregations.keywords.buckets.2.doc_count: 1 } +- match: { aggregations.keywords.buckets.3.key: "quux" } +- match: { aggregations.keywords.buckets.3.doc_count: 1 } +- match: { aggregations.keywords.buckets.4.key: "qux" } +- match: { aggregations.keywords.buckets.4.doc_count: 1 } + +--- +"Test range aggregation on derived_object.long": +- do: + search: + index: test + body: + size: 0 + aggs: + long_ranges: + range: + field: derived_object.long + ranges: + - to: 0 + - from: 0 + to: 3 + - from: 3 + +- match: { hits.total.value: 5 } +- length: { aggregations.long_ranges.buckets: 3 } +- match: { aggregations.long_ranges.buckets.0.doc_count: 1 } +- match: { aggregations.long_ranges.buckets.1.doc_count: 2 } +- match: { aggregations.long_ranges.buckets.2.doc_count: 2 } + +--- +"Test histogram aggregation on derived_object.float": +- do: + search: + index: test + body: + size: 0 + aggs: + float_histogram: + histogram: + field: derived_object.float + interval: 2 + +- match: { hits.total.value: 5 } +- length: { aggregations.float_histogram.buckets: 5 } +- match: { aggregations.float_histogram.buckets.0.key: -4.0 } +- 
match: { aggregations.float_histogram.buckets.0.doc_count: 1 } + +--- +"Test date_histogram aggregation on derived_object.date": +- do: + search: + index: test + body: + size: 0 + aggs: + date_histogram: + date_histogram: + field: derived_object.date + calendar_interval: day + +- match: { hits.total.value: 5 } +- length: { aggregations.date_histogram.buckets: 5 } +- match: { aggregations.date_histogram.buckets.0.key_as_string: "2017-01-01T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.0.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.1.key_as_string: "2017-01-02T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.1.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.2.key_as_string: "2017-01-03T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.2.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.3.key_as_string: "2017-01-04T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.3.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.4.key_as_string: "2017-01-05T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.4.doc_count: 1 } + +--- +"Test date_range aggregation on derived_object.date": +- do: + search: + index: test + body: + size: 0 + aggs: + date_range: + date_range: + field: derived_object.date + ranges: + - to: "2017-01-03T00:00:00Z" + - from: "2017-01-03T00:00:00Z" + +- match: { hits.total.value: 5 } +- match: { aggregations.date_range.buckets.0.key: "*-2017-01-03T00:00:00.000Z" } +- match: { aggregations.date_range.buckets.0.doc_count: 2 } +- match: { aggregations.date_range.buckets.1.key: "2017-01-03T00:00:00.000Z-*" } +- match: { aggregations.date_range.buckets.1.doc_count: 3 } + +--- +"Test filters aggregation on derived_object.boolean": +- do: + search: + index: test + body: + size: 0 + aggs: + boolean_filters: + filters: + filters: + true_values: + term: + derived_object.boolean: true + false_values: + term: + derived_object.boolean: false + +- match: { hits.total.value: 5 } +- match: { aggregations.boolean_filters.buckets.true_values.doc_count: 3 } +- match: { aggregations.boolean_filters.buckets.false_values.doc_count: 2 } + +--- +"Test adjacency matrix aggregation on derived_object.long": +- do: + search: + index: test + body: + size: 0 + aggs: + adj_matrix: + adjacency_matrix: + filters: + high_num: + range: + derived_object.long: + gte: 3 + low_num: + range: + derived_object.long: + lt: 3 +- match: { hits.total.value: 5 } +- length: { aggregations.adj_matrix.buckets: 2 } +- match: { aggregations.adj_matrix.buckets.0.key: "high_num" } +- match: { aggregations.adj_matrix.buckets.0.doc_count: 2 } +- match: { aggregations.adj_matrix.buckets.1.key: "low_num" } +- match: { aggregations.adj_matrix.buckets.1.doc_count: 3 } + +--- +"Test stats aggregation on derived_object.array_of_long": +- do: + search: + index: test + body: + size: 0 + aggs: + long_array_stats: + stats: + field: derived_object.array_of_long + +- match: { hits.total.value: 5 } +- match: { aggregations.long_array_stats.count: 10 } +- match: { aggregations.long_array_stats.min: 1 } +- match: { aggregations.long_array_stats.max: 6 } +- match: { aggregations.long_array_stats.avg: 3.5 } +- match: { aggregations.long_array_stats.sum: 35 } + +--- +"Test cardinality aggregation on derived_object_keyword": +- do: + search: + index: test + body: + size: 0 + aggs: + unique_keywords: + cardinality: + field: derived_object.keyword + +- match: { hits.total.value: 5 } +- match: { 
aggregations.unique_keywords.value: 5 } + +--- +"Test percentiles aggregation on derived_object.double": +- do: + search: + index: test + body: + size: 0 + aggs: + double_percentiles: + percentiles: + field: derived_object.double + percents: [ 25, 50, 75 ] + +- match: { hits.total.value: 5 } +- length: { aggregations.double_percentiles.values: 3} + +--- +"Test percentile ranks aggregation on derived_object.long": +- do: + search: + index: test + body: + size: 0 + aggs: + long_percentile_ranks: + percentile_ranks: + field: derived_object.long + values: [ 2, 4 ] + +- match: { hits.total.value: 5 } +- length: { aggregations.long_percentile_ranks.values: 2} + +--- +"Test top hits aggregation on derived_object.keyword": +- do: + search: + index: test + body: + size: 0 + aggs: + top_keywords: + terms: + field: derived_object.keyword + aggs: + top_hits: + top_hits: + size: 1 +- match: { hits.total.value: 5 } +- length: { aggregations.top_keywords.buckets: 5 } +- match: { aggregations.top_keywords.buckets.0.key: "bar" } +- match: { aggregations.top_keywords.buckets.0.doc_count: 1 } +- length: { aggregations.top_keywords.buckets.0.top_hits.hits.hits: 1 } + +--- +"Test matrix stats aggregation on derived_object.long and float": +- do: + search: + index: test + body: + size: 0 + aggs: + matrix_stats: + matrix_stats: + fields: [ derived_object.long, derived_object.float ] +- match: { hits.total.value: 5 } +- length: { aggregations.matrix_stats.fields: 2 } +- match: { aggregations.matrix_stats.fields.0.name: "derived_object.long" } +- match: { aggregations.matrix_stats.fields.0.count: 5 } +- match: { aggregations.matrix_stats.fields.1.name: "derived_object.float" } +- match: { aggregations.matrix_stats.fields.1.count: 5 } + +--- +"Test median absolute deviation aggregation on derived_object.long": +- do: + search: + index: test + body: + size: 0 + aggs: + mad_long: + median_absolute_deviation: + field: derived_object.long +- match: { hits.total.value: 5 } +- match: { aggregations.mad_long.value: 2.0 } + +--- +"Test simple pipeline agg derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + keywords: + terms: + field: derived_object.keyword + aggs: + sum_derived_longs: + sum: + field: derived_object.long + sum_total: + sum_bucket: + buckets_path: "keywords>sum_derived_longs" +- match: { hits.total.value: 5 } +- match: { aggregations.keywords.buckets.0.key: "bar" } +- match: { aggregations.keywords.buckets.0.sum_derived_longs.value: 2 } +- match: { aggregations.keywords.buckets.1.key: "baz" } +- match: { aggregations.keywords.buckets.1.sum_derived_longs.value: -3 } +- match: { aggregations.keywords.buckets.2.key: "foo" } +- match: { aggregations.keywords.buckets.2.sum_derived_longs.value: 1 } +- match: { aggregations.keywords.buckets.3.key: "quux" } +- match: { aggregations.keywords.buckets.3.sum_derived_longs.value: 5 } +- match: { aggregations.keywords.buckets.4.key: "qux" } +- match: { aggregations.keywords.buckets.4.sum_derived_longs.value: 4 } +- match: { aggregations.sum_total.value: 9 } + + +--- +"Test composite agg on derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + test_composite_agg: + composite: + size: 10 + sources: + - os: + terms: + field: derived_object.os + - keyword: + terms: + field: derived_object.keyword + - is_true: + terms: + field: derived_object.boolean + aggs: + avg_long: + avg: + field: derived_object.long +- length: { aggregations.test_composite_agg.buckets: 5 } +- match: { aggregations.test_composite_agg.buckets.0.key.os: "mac" } 
+- match: { aggregations.test_composite_agg.buckets.0.key.keyword: "baz" } +- match: { aggregations.test_composite_agg.buckets.0.key.is_true: true } +- match: { aggregations.test_composite_agg.buckets.0.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.0.avg_long.value: -3.0 } +- match: { aggregations.test_composite_agg.buckets.1.key.os: "mac" } +- match: { aggregations.test_composite_agg.buckets.1.key.keyword: "foo" } +- match: { aggregations.test_composite_agg.buckets.1.key.is_true: true } +- match: { aggregations.test_composite_agg.buckets.1.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.1.avg_long.value: 1.0 } +- match: { aggregations.test_composite_agg.buckets.2.key.os: "mac" } +- match: { aggregations.test_composite_agg.buckets.2.key.keyword: "quux" } +- match: { aggregations.test_composite_agg.buckets.2.key.is_true: true } +- match: { aggregations.test_composite_agg.buckets.2.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.2.avg_long.value: 5.0 } +- match: { aggregations.test_composite_agg.buckets.3.key.os: "windows" } +- match: { aggregations.test_composite_agg.buckets.3.key.keyword: "bar" } +- match: { aggregations.test_composite_agg.buckets.3.key.is_true: false } +- match: { aggregations.test_composite_agg.buckets.3.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.3.avg_long.value: 2.0 } +- match: { aggregations.test_composite_agg.buckets.4.key.os: "windows" } +- match: { aggregations.test_composite_agg.buckets.4.key.keyword: "qux" } +- match: { aggregations.test_composite_agg.buckets.4.key.is_true: false } +- match: { aggregations.test_composite_agg.buckets.4.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.4.avg_long.value: 4.0 } + +--- +"Test auto date histogram on derived_object": +- do: + search: + rest_total_hits_as_int: true + index: test + body: + size: 0 + aggs: + test_auto_date_histogram: + auto_date_histogram: + field: "derived_object.date" + buckets: 10 + format: "yyyy-MM-dd" + aggs: + avg_long: + avg: + field: derived_object.long +- match: { hits.total: 5 } +- length: { aggregations.test_auto_date_histogram.buckets: 9 } +- match: { aggregations.test_auto_date_histogram.buckets.0.key_as_string: "2017-01-01"} +- match: { aggregations.test_auto_date_histogram.buckets.0.avg_long.value: 1.0} + +--- +"Test variable_width_histogram aggregation on derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + var_width_hist: + variable_width_histogram: + field: derived_object.long + buckets: 3 + +- match: { hits.total.value: 5 } +- length: { aggregations.var_width_hist.buckets: 3 } + +--- +"Test extended_stats aggregation on derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + extended_stats_agg: + extended_stats: + field: derived_object.long + +- match: { hits.total.value: 5 } +- match: { aggregations.extended_stats_agg.count: 5 } +- match: { aggregations.extended_stats_agg.min: -3 } +- match: { aggregations.extended_stats_agg.max: 5 } +- is_true: aggregations.extended_stats_agg.avg +- is_true: aggregations.extended_stats_agg.sum +- is_true: aggregations.extended_stats_agg.sum_of_squares +- is_true: aggregations.extended_stats_agg.variance +- is_true: aggregations.extended_stats_agg.std_deviation + +--- +"Test rare_terms aggregation on derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + rare_terms_agg: + rare_terms: + field: derived_object.keyword + max_doc_count: 1 + +- match: { hits.total.value: 5 } +- length: { 
aggregations.rare_terms_agg.buckets: 5 } + +--- +"Test global aggregation on derived_object": +- do: + search: + index: test + body: + query: + term: + derived_object.keyword: "foo" + aggs: + all_docs: + global: {} + aggs: + avg_long: + avg: + field: derived_object.long + +- match: { hits.total.value: 1 } +- match: { aggregations.all_docs.doc_count: 5 } +- match: { aggregations.all_docs.avg_long.value: 1.8 } + +--- +"Test value_count aggregation on derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + value_count_agg: + value_count: + field: derived_object.long + +- match: { hits.total.value: 5 } +- match: { aggregations.value_count_agg.value: 5 } + +--- +"Test multi_terms aggregation on derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + multi_terms_agg: + multi_terms: + terms: + - field: derived_object.keyword + - field: derived_object.os + size: 10 + +- match: { hits.total.value: 5 } +- length: { aggregations.multi_terms_agg.buckets: 5 } + + +### IP specific tests +--- +"Test terms aggregation on derived_object_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + ip_terms: + terms: + field: derived_object.ip + +- match: { hits.total.value: 5 } +- length: { aggregations.ip_terms.buckets: 5 } +- match: { aggregations.ip_terms.buckets.0.key: "10.0.0.1" } +- match: { aggregations.ip_terms.buckets.0.doc_count: 1 } + +--- +"Test range aggregation on derived_object_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + ip_ranges: + ip_range: + field: derived_object.ip + ranges: + - to: "10.0.0.0" + - from: "10.0.0.0" + to: "172.16.0.0" + - from: "172.16.0.0" + +- match: { hits.total.value: 5 } +- length: { aggregations.ip_ranges.buckets: 3 } +- match: { aggregations.ip_ranges.buckets.0.doc_count: 0 } +- match: { aggregations.ip_ranges.buckets.1.doc_count: 2 } +- match: { aggregations.ip_ranges.buckets.2.doc_count: 3 } + +--- +"Test cardinality aggregation on derived_object_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + unique_ips: + cardinality: + field: derived_object.ip + +- match: { hits.total.value: 5 } +- match: { aggregations.unique_ips.value: 5 } + +--- +"Test missing aggregation on derived_object_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + missing_ips: + missing: + field: derived_object.ip + +- match: { hits.total.value: 5 } +- match: { aggregations.missing_ips.doc_count: 0 } + +--- +"Test value count aggregation on derived_object_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + ip_count: + value_count: + field: derived_object.ip + +- match: { hits.total.value: 5 } +- match: { aggregations.ip_count.value: 5 } + +### TEST UNSUPPORTED AGG TYPES +--- +"Test sig terms not supported": +- do: + catch: /illegal_argument_exception/ + search: + rest_total_hits_as_int: true + index: test + body: + query: + terms: + derived_keyword: ["foo"] + aggs: + significant_os: + significant_terms: + field: "derived_os" + min_doc_count: 1 + size: 10 + +--- +"Test significant text": +- do: + catch: /illegal_argument_exception/ + search: + rest_total_hits_as_int: true + index: test + body: + query: + terms: + derived_keyword: ["foo"] + aggs: + significant_words: + significant_text: + field: "derived_text" + size: 10 + min_doc_count: 1 + +--- +"Test scripted_metric aggregation": +- do: + catch: /A document doesn't have a value for a field/ + search: + index: test + body: + size: 0 + aggs: + scripted_metric_agg: + scripted_metric: + init_script: "state.arr = []" + map_script: 
"state.arr.add(doc.derived_long.value)" + combine_script: "return 0" + reduce_script: "return 0" + +--- +"Test geo_distance aggregation on derived_geo": +- do: + catch: /aggregation_execution_exception/ + search: + index: test + rest_total_hits_as_int: true + body: + size: 0 + aggs: + distance: + geo_distance: + field: derived_geo + origin: "35.7796, -78.6382" + ranges: + - to: 1000000 + - from: 1000000 + to: 5000000 + - from: 5000000 diff --git a/modules/percolator/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml b/modules/percolator/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml index 35ebb2b099139..61f79326dab06 100644 --- a/modules/percolator/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml +++ b/modules/percolator/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml @@ -83,3 +83,48 @@ index: documents_index id: some_id - match: { responses.0.hits.total: 1 } + + - do: + catch: bad_request + index: + index: queries_index + body: + query: + query_string: + query: "unmapped: *" + + - do: + catch: bad_request + index: + index: queries_index + body: + query: + query_string: + query: "_exists_: unmappedField" + + - do: + catch: bad_request + index: + index: queries_index + body: + query: + query_string: + query: "unmappedField: <100" + + - do: + catch: bad_request + index: + index: queries_index + body: + query: + query_string: + query: "unmappedField: test~" + + - do: + catch: bad_request + index: + index: queries_index + body: + query: + query_string: + query: "unmappedField: test*" diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java index 5378a6721efb2..488b9e632aa2a 100644 --- a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java @@ -8,24 +8,61 @@ package org.opensearch.search.pipeline.common; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.SearchPipelinePlugin; import org.opensearch.search.pipeline.Processor; +import org.opensearch.search.pipeline.SearchPhaseResultsProcessor; import org.opensearch.search.pipeline.SearchRequestProcessor; import org.opensearch.search.pipeline.SearchResponseProcessor; +import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; /** * Plugin providing common search request/response processors for use in search pipelines. 
*/ public class SearchPipelineCommonModulePlugin extends Plugin implements SearchPipelinePlugin { + static final Setting<List<String>> REQUEST_PROCESSORS_ALLOWLIST_SETTING = Setting.listSetting( + "search.pipeline.common.request.processors.allowed", + List.of(), + Function.identity(), + Setting.Property.NodeScope + ); + + static final Setting<List<String>> RESPONSE_PROCESSORS_ALLOWLIST_SETTING = Setting.listSetting( + "search.pipeline.common.response.processors.allowed", + List.of(), + Function.identity(), + Setting.Property.NodeScope + ); + + static final Setting<List<String>> SEARCH_PHASE_RESULTS_PROCESSORS_ALLOWLIST_SETTING = Setting.listSetting( + "search.pipeline.common.search.phase.results.processors.allowed", + List.of(), + Function.identity(), + Setting.Property.NodeScope + ); + /** * No constructor needed, but build complains if we don't have a constructor with JavaDoc. */ public SearchPipelineCommonModulePlugin() {} + @Override + public List<Setting<?>> getSettings() { + return List.of( + REQUEST_PROCESSORS_ALLOWLIST_SETTING, + RESPONSE_PROCESSORS_ALLOWLIST_SETTING, + SEARCH_PHASE_RESULTS_PROCESSORS_ALLOWLIST_SETTING + ); + } + /** * Returns a map of processor factories. * @@ -34,25 +71,66 @@ public SearchPipelineCommonModulePlugin() {} */ @Override public Map<String, Processor.Factory<SearchRequestProcessor>> getRequestProcessors(Parameters parameters) { - return Map.of( - FilterQueryRequestProcessor.TYPE, - new FilterQueryRequestProcessor.Factory(parameters.namedXContentRegistry), - ScriptRequestProcessor.TYPE, - new ScriptRequestProcessor.Factory(parameters.scriptService), - OversampleRequestProcessor.TYPE, - new OversampleRequestProcessor.Factory() + return filterForAllowlistSetting( + REQUEST_PROCESSORS_ALLOWLIST_SETTING, + parameters.env.settings(), + Map.of( + FilterQueryRequestProcessor.TYPE, + new FilterQueryRequestProcessor.Factory(parameters.namedXContentRegistry), + ScriptRequestProcessor.TYPE, + new ScriptRequestProcessor.Factory(parameters.scriptService), + OversampleRequestProcessor.TYPE, + new OversampleRequestProcessor.Factory() + ) ); } @Override public Map<String, Processor.Factory<SearchResponseProcessor>> getResponseProcessors(Parameters parameters) { - return Map.of( - RenameFieldResponseProcessor.TYPE, - new RenameFieldResponseProcessor.Factory(), - TruncateHitsResponseProcessor.TYPE, - new TruncateHitsResponseProcessor.Factory(), - CollapseResponseProcessor.TYPE, - new CollapseResponseProcessor.Factory() + return filterForAllowlistSetting( + RESPONSE_PROCESSORS_ALLOWLIST_SETTING, + parameters.env.settings(), + Map.of( + RenameFieldResponseProcessor.TYPE, + new RenameFieldResponseProcessor.Factory(), + TruncateHitsResponseProcessor.TYPE, + new TruncateHitsResponseProcessor.Factory(), + CollapseResponseProcessor.TYPE, + new CollapseResponseProcessor.Factory(), + SplitResponseProcessor.TYPE, + new SplitResponseProcessor.Factory(), + SortResponseProcessor.TYPE, + new SortResponseProcessor.Factory() + ) ); } + + @Override + public Map<String, Processor.Factory<SearchPhaseResultsProcessor>> getSearchPhaseResultsProcessors(Parameters parameters) { + return filterForAllowlistSetting(SEARCH_PHASE_RESULTS_PROCESSORS_ALLOWLIST_SETTING, parameters.env.settings(), Map.of()); + } + + private <T extends Processor> Map<String, Processor.Factory<T>> filterForAllowlistSetting( + Setting<List<String>> allowlistSetting, + Settings settings, + Map<String, Processor.Factory<T>> map + ) { + if (allowlistSetting.exists(settings) == false) { + return Map.copyOf(map); + } + final Set<String> allowlist = Set.copyOf(allowlistSetting.get(settings)); + // Assert that no unknown processors are defined in the allowlist + final Set<String> unknownAllowlistProcessors = allowlist.stream() + .filter(p -> map.containsKey(p) == false) + .collect(Collectors.toUnmodifiableSet()); + if (unknownAllowlistProcessors.isEmpty() == false) { + throw new IllegalArgumentException( + "Processor(s) " + unknownAllowlistProcessors + " were defined in [" + allowlistSetting.getKey() + "] but do not exist" + ); + } + return map.entrySet() + .stream() + .filter(e -> allowlist.contains(e.getKey())) + .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue)); + } }
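The three allowlist settings registered above are node-scoped, so an operator sets them in opensearch.yml rather than in a pipeline definition. A minimal sketch of such a configuration (the processor names come from the factories this plugin registers; the exact selection shown here is illustrative):

    # opensearch.yml -- restrict which common search pipeline processors may be used on this node
    search.pipeline.common.request.processors.allowed:
      - filter_query
      - oversample
    search.pipeline.common.response.processors.allowed:
      - rename_field
      - sort
    # An empty list disables all processors of that kind; leaving a setting
    # unset keeps the default of allowing every registered processor.
    search.pipeline.common.search.phase.results.processors.allowed: []

As filterForAllowlistSetting above shows, naming an unregistered processor (for example "foo") is rejected with an IllegalArgumentException rather than being silently ignored.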
diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SortResponseProcessor.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SortResponseProcessor.java new file mode 100644 index 0000000000000..e0bfd38b26376 --- /dev/null +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SortResponseProcessor.java @@ -0,0 +1,209 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.document.DocumentField; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.ingest.ConfigurationUtils; +import org.opensearch.search.SearchHit; +import org.opensearch.search.pipeline.AbstractProcessor; +import org.opensearch.search.pipeline.Processor; +import org.opensearch.search.pipeline.SearchResponseProcessor; + +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; + +/** + * Processor that sorts an array of items. + * Throws an exception if the specified field is not an array. + */ +public class SortResponseProcessor extends AbstractProcessor implements SearchResponseProcessor { + /** Key to reference this processor type from a search pipeline. */ + public static final String TYPE = "sort"; + /** Key defining the array field to be sorted. */ + public static final String SORT_FIELD = "field"; + /** Optional key defining the sort order. */ + public static final String SORT_ORDER = "order"; + /** Optional key to put the sorted values in a different field. */ + public static final String TARGET_FIELD = "target_field"; + /** Default sort order if not specified */ + public static final String DEFAULT_ORDER = "asc"; + + /** Enum defining how elements will be sorted */ + public enum SortOrder { + /** Sort in ascending (natural) order */ + ASCENDING("asc"), + /** Sort in descending (reverse) order */ + DESCENDING("desc"); + + private final String direction; + + SortOrder(String direction) { + this.direction = direction; + } + + @Override + public String toString() { + return this.direction; + } + + /** + * Converts the string representation of the enum value to the enum.
+ * @param value A string ("asc" or "desc") + * @return the corresponding enum value + */ + public static SortOrder fromString(String value) { + if (value == null) { + throw new IllegalArgumentException("Sort direction cannot be null"); + } + + if (value.equals(ASCENDING.toString())) { + return ASCENDING; + } else if (value.equals(DESCENDING.toString())) { + return DESCENDING; + } + throw new IllegalArgumentException("Sort direction [" + value + "] not recognized." + " Valid values are: [asc, desc]"); + } + } + + private final String sortField; + private final SortOrder sortOrder; + private final String targetField; + + SortResponseProcessor( + String tag, + String description, + boolean ignoreFailure, + String sortField, + SortOrder sortOrder, + String targetField + ) { + super(tag, description, ignoreFailure); + this.sortField = Objects.requireNonNull(sortField); + this.sortOrder = Objects.requireNonNull(sortOrder); + this.targetField = targetField == null ? sortField : targetField; + } + + /** + * Getter function for sortField + * @return sortField + */ + public String getSortField() { + return sortField; + } + + /** + * Getter function for targetField + * @return targetField + */ + public String getTargetField() { + return targetField; + } + + /** + * Getter function for sortOrder + * @return sortOrder + */ + public SortOrder getSortOrder() { + return sortOrder; + } + + @Override + public String getType() { + return TYPE; + } + + @Override + public SearchResponse processResponse(SearchRequest request, SearchResponse response) throws Exception { + SearchHit[] hits = response.getHits().getHits(); + for (SearchHit hit : hits) { + Map<String, DocumentField> fields = hit.getFields(); + if (fields.containsKey(sortField)) { + DocumentField docField = hit.getFields().get(sortField); + if (docField == null) { + throw new IllegalArgumentException("field [" + sortField + "] is null, cannot sort."); + } + hit.setDocumentField(targetField, new DocumentField(targetField, getSortedValues(docField.getValues()))); + } + if (hit.hasSource()) { + BytesReference sourceRef = hit.getSourceRef(); + Tuple<? extends MediaType, Map<String, Object>> typeAndSourceMap = XContentHelper.convertToMap( + sourceRef, + false, + (MediaType) null + ); + + Map<String, Object> sourceAsMap = typeAndSourceMap.v2(); + if (sourceAsMap.containsKey(sortField)) { + Object val = sourceAsMap.get(sortField); + if (val instanceof List) { + @SuppressWarnings("unchecked") + List<Object> listVal = (List<Object>) val; + sourceAsMap.put(targetField, getSortedValues(listVal)); + } + XContentBuilder builder = XContentBuilder.builder(typeAndSourceMap.v1().xContent()); + builder.map(sourceAsMap); + hit.sourceRef(BytesReference.bytes(builder)); + } + } + } + return response; + } + + private List<Object> getSortedValues(List<Object> values) { + return values.stream() + .map(this::downcastToComparable) + .sorted(sortOrder.equals(SortOrder.ASCENDING) ? Comparator.naturalOrder() : Comparator.reverseOrder()) + .collect(Collectors.toList()); + } + + @SuppressWarnings("unchecked") + private Comparable<Object> downcastToComparable(Object obj) { + if (obj instanceof Comparable) { + return (Comparable<Object>) obj; + } else if (obj == null) { + throw new IllegalArgumentException("field [" + sortField + "] contains a null value."); + } else { + throw new IllegalArgumentException("field [" + sortField + "] of type [" + obj.getClass().getName() + "] is not comparable."); + } + } + + static class Factory implements Processor.Factory<SearchResponseProcessor> { + + @Override + public SortResponseProcessor create( + Map<String, Processor.Factory<SearchResponseProcessor>> processorFactories, + String tag, + String description, + boolean ignoreFailure, + Map<String, Object> config, + PipelineContext pipelineContext + ) { + String sortField = ConfigurationUtils.readStringProperty(TYPE, tag, config, SORT_FIELD); + String targetField = ConfigurationUtils.readStringProperty(TYPE, tag, config, TARGET_FIELD, sortField); + try { + SortOrder sortOrder = SortOrder.fromString( + ConfigurationUtils.readStringProperty(TYPE, tag, config, SORT_ORDER, DEFAULT_ORDER) + ); + return new SortResponseProcessor(tag, description, ignoreFailure, sortField, sortOrder, targetField); + } catch (IllegalArgumentException e) { + throw ConfigurationUtils.newConfigurationException(TYPE, tag, SORT_ORDER, e.getMessage()); + } + } + } +}
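With the factory above registered under TYPE "sort", a pipeline using this processor could be defined as follows (the pipeline id and field names are illustrative, not part of this change):

    PUT /_search/pipeline/sort_example
    {
      "response_processors": [
        {
          "sort": {
            "field": "my_numbers",
            "order": "desc",
            "target_field": "my_numbers_sorted"
          }
        }
      ]
    }

Each hit carrying a my_numbers field then gets a my_numbers_sorted field (and, when the hit has a source, a matching source entry) with the same values sorted descending; null or non-Comparable values make processResponse throw unless ignore_failure is set on the processor.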
diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SplitResponseProcessor.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SplitResponseProcessor.java new file mode 100644 index 0000000000000..bb3db4d9bc2c1 --- /dev/null +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SplitResponseProcessor.java @@ -0,0 +1,162 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.document.DocumentField; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.ingest.ConfigurationUtils; +import org.opensearch.search.SearchHit; +import org.opensearch.search.pipeline.AbstractProcessor; +import org.opensearch.search.pipeline.Processor; +import org.opensearch.search.pipeline.SearchResponseProcessor; + +import java.util.Arrays; +import java.util.Map; +import java.util.Objects; + +/** + * Processor that splits a string field into an array of substrings. + * Throws an exception if the specified field is not a string. + */ +public class SplitResponseProcessor extends AbstractProcessor implements SearchResponseProcessor { + /** Key to reference this processor type from a search pipeline. */ + public static final String TYPE = "split"; + /** Key defining the string field to be split. */ + public static final String SPLIT_FIELD = "field"; + /** Key defining the delimiter used to split the string. This can be a regular expression pattern. */ + public static final String SEPARATOR = "separator"; + /** Optional key for handling empty trailing fields.
*/ + public static final String PRESERVE_TRAILING = "preserve_trailing"; + /** Optional key to put the split values in a different field. */ + public static final String TARGET_FIELD = "target_field"; + + private final String splitField; + private final String separator; + private final boolean preserveTrailing; + private final String targetField; + + SplitResponseProcessor( + String tag, + String description, + boolean ignoreFailure, + String splitField, + String separator, + boolean preserveTrailing, + String targetField + ) { + super(tag, description, ignoreFailure); + this.splitField = Objects.requireNonNull(splitField); + this.separator = Objects.requireNonNull(separator); + this.preserveTrailing = preserveTrailing; + this.targetField = targetField == null ? splitField : targetField; + } + + /** + * Getter function for splitField + * @return splitField + */ + public String getSplitField() { + return splitField; + } + + /** + * Getter function for separator + * @return separator + */ + public String getSeparator() { + return separator; + } + + /** + * Getter function for preserveTrailing + * @return preserveTrailing + */ + public boolean isPreserveTrailing() { + return preserveTrailing; + } + + /** + * Getter function for targetField + * @return targetField + */ + public String getTargetField() { + return targetField; + } + + @Override + public String getType() { + return TYPE; + } + + @Override + public SearchResponse processResponse(SearchRequest request, SearchResponse response) throws Exception { + SearchHit[] hits = response.getHits().getHits(); + for (SearchHit hit : hits) { + Map<String, DocumentField> fields = hit.getFields(); + if (fields.containsKey(splitField)) { + DocumentField docField = hit.getFields().get(splitField); + if (docField == null) { + throw new IllegalArgumentException("field [" + splitField + "] is null, cannot split."); + } + Object val = docField.getValue(); + if (!(val instanceof String)) { + throw new IllegalArgumentException("field [" + splitField + "] is not a string, cannot split"); + } + Object[] strings = ((String) val).split(separator, preserveTrailing ? -1 : 0); + hit.setDocumentField(targetField, new DocumentField(targetField, Arrays.asList(strings))); + } + if (hit.hasSource()) { + BytesReference sourceRef = hit.getSourceRef(); + Tuple<? extends MediaType, Map<String, Object>> typeAndSourceMap = XContentHelper.convertToMap( + sourceRef, + false, + (MediaType) null + ); + + Map<String, Object> sourceAsMap = typeAndSourceMap.v2(); + if (sourceAsMap.containsKey(splitField)) { + Object val = sourceAsMap.get(splitField); + if (val instanceof String) { + Object[] strings = ((String) val).split(separator, preserveTrailing ? -1 : 0); + sourceAsMap.put(targetField, Arrays.asList(strings)); + } + XContentBuilder builder = XContentBuilder.builder(typeAndSourceMap.v1().xContent()); + builder.map(sourceAsMap); + hit.sourceRef(BytesReference.bytes(builder)); + } + } + } + return response; + } + + static class Factory implements Processor.Factory<SearchResponseProcessor> { + + @Override + public SplitResponseProcessor create( + Map<String, Processor.Factory<SearchResponseProcessor>> processorFactories, + String tag, + String description, + boolean ignoreFailure, + Map<String, Object> config, + PipelineContext pipelineContext + ) { + String splitField = ConfigurationUtils.readStringProperty(TYPE, tag, config, SPLIT_FIELD); + String separator = ConfigurationUtils.readStringProperty(TYPE, tag, config, SEPARATOR); + boolean preserveTrailing = ConfigurationUtils.readBooleanProperty(TYPE, tag, config, PRESERVE_TRAILING, false); + String targetField = ConfigurationUtils.readStringProperty(TYPE, tag, config, TARGET_FIELD, splitField); + return new SplitResponseProcessor(tag, description, ignoreFailure, splitField, separator, preserveTrailing, targetField); + } + } +}
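A matching usage sketch for the split processor (pipeline id and field names again illustrative): turning a comma-separated string field into an array on each hit:

    PUT /_search/pipeline/split_example
    {
      "response_processors": [
        {
          "split": {
            "field": "tags_csv",
            "separator": ",",
            "preserve_trailing": true,
            "target_field": "tags"
          }
        }
      ]
    }

Since the separator is handed to String.split(), it is treated as a regular expression, and preserve_trailing maps to the limit argument: -1 keeps trailing empty strings, 0 drops them.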
diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java new file mode 100644 index 0000000000000..e10f06da29ba0 --- /dev/null +++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java @@ -0,0 +1,107 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common; + +import org.opensearch.common.settings.Settings; +import org.opensearch.env.TestEnvironment; +import org.opensearch.plugins.SearchPipelinePlugin; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.BiFunction; + +public class SearchPipelineCommonModulePluginTests extends OpenSearchTestCase { + + public void testRequestProcessorAllowlist() throws IOException { + final String key = SearchPipelineCommonModulePlugin.REQUEST_PROCESSORS_ALLOWLIST_SETTING.getKey(); + runAllowlistTest(key, List.of(), SearchPipelineCommonModulePlugin::getRequestProcessors); + runAllowlistTest(key, List.of("filter_query"), SearchPipelineCommonModulePlugin::getRequestProcessors); + runAllowlistTest(key, List.of("script"), SearchPipelineCommonModulePlugin::getRequestProcessors); + runAllowlistTest(key, List.of("oversample", "script"), SearchPipelineCommonModulePlugin::getRequestProcessors); + runAllowlistTest(key, List.of("filter_query", "script", "oversample"), SearchPipelineCommonModulePlugin::getRequestProcessors); + + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> runAllowlistTest(key, List.of("foo"), SearchPipelineCommonModulePlugin::getRequestProcessors) + ); + assertTrue(e.getMessage(), e.getMessage().contains("foo")); + } + + public void testResponseProcessorAllowlist() throws IOException { + final String key = SearchPipelineCommonModulePlugin.RESPONSE_PROCESSORS_ALLOWLIST_SETTING.getKey(); + runAllowlistTest(key, List.of(), SearchPipelineCommonModulePlugin::getResponseProcessors); + runAllowlistTest(key, List.of("rename_field"),
SearchPipelineCommonModulePlugin::getResponseProcessors); + runAllowlistTest(key, List.of("truncate_hits"), SearchPipelineCommonModulePlugin::getResponseProcessors); + runAllowlistTest(key, List.of("collapse", "truncate_hits"), SearchPipelineCommonModulePlugin::getResponseProcessors); + runAllowlistTest( + key, + List.of("rename_field", "truncate_hits", "collapse"), + SearchPipelineCommonModulePlugin::getResponseProcessors + ); + runAllowlistTest(key, List.of("split", "sort"), SearchPipelineCommonModulePlugin::getResponseProcessors); + + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> runAllowlistTest(key, List.of("foo"), SearchPipelineCommonModulePlugin::getResponseProcessors) + ); + assertTrue(e.getMessage(), e.getMessage().contains("foo")); + } + + public void testSearchPhaseResultsProcessorAllowlist() throws IOException { + final String key = SearchPipelineCommonModulePlugin.SEARCH_PHASE_RESULTS_PROCESSORS_ALLOWLIST_SETTING.getKey(); + runAllowlistTest(key, List.of(), SearchPipelineCommonModulePlugin::getSearchPhaseResultsProcessors); + + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> runAllowlistTest(key, List.of("foo"), SearchPipelineCommonModulePlugin::getSearchPhaseResultsProcessors) + ); + assertTrue(e.getMessage(), e.getMessage().contains("foo")); + } + + private void runAllowlistTest( + String settingKey, + List<String> allowlist, + BiFunction<SearchPipelineCommonModulePlugin, SearchPipelinePlugin.Parameters, Map<String, ?>> function + ) throws IOException { + final Settings settings = Settings.builder().putList(settingKey, allowlist).build(); + try (SearchPipelineCommonModulePlugin plugin = new SearchPipelineCommonModulePlugin()) { + assertEquals(Set.copyOf(allowlist), function.apply(plugin, createParameters(settings)).keySet()); + } + } + + public void testAllowlistNotSpecified() throws IOException { + final Settings settings = Settings.EMPTY; + try (SearchPipelineCommonModulePlugin plugin = new SearchPipelineCommonModulePlugin()) { + assertEquals(Set.of("oversample", "filter_query", "script"), plugin.getRequestProcessors(createParameters(settings)).keySet()); + assertEquals( + Set.of("rename_field", "truncate_hits", "collapse", "split", "sort"), + plugin.getResponseProcessors(createParameters(settings)).keySet() + ); + assertEquals(Set.of(), plugin.getSearchPhaseResultsProcessors(createParameters(settings)).keySet()); + } + } + + private static SearchPipelinePlugin.Parameters createParameters(Settings settings) { + return new SearchPipelinePlugin.Parameters( + TestEnvironment.newEnvironment(Settings.builder().put(settings).put("path.home", "").build()), + null, + null, + null, + () -> 0L, + (a, b) -> null, + null, + null, + $ -> {}, + null + ); + } +} diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SortResponseProcessorTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SortResponseProcessorTests.java new file mode 100644 index 0000000000000..c18c6b34b05d1 --- /dev/null +++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SortResponseProcessorTests.java @@ -0,0 +1,230 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.search.pipeline.common; + +import org.apache.lucene.search.TotalHits; +import org.opensearch.OpenSearchParseException; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.search.SearchResponseSections; +import org.opensearch.common.document.DocumentField; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.ingest.RandomDocumentPicks; +import org.opensearch.search.SearchHit; +import org.opensearch.search.SearchHits; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class SortResponseProcessorTests extends OpenSearchTestCase { + + private static final List PI = List.of(3, 1, 4, 1, 5, 9, 2, 6); + private static final List E = List.of(2, 7, 1, 8, 2, 8, 1, 8); + private static final List X; + static { + List x = new ArrayList<>(); + x.add(1); + x.add(null); + x.add(3); + X = x; + } + + private SearchRequest createDummyRequest() { + QueryBuilder query = new TermQueryBuilder("field", "value"); + SearchSourceBuilder source = new SearchSourceBuilder().query(query); + return new SearchRequest().source(source); + } + + private SearchResponse createTestResponse() { + SearchHit[] hits = new SearchHit[2]; + + // one response with source + Map piMap = new HashMap<>(); + piMap.put("digits", new DocumentField("digits", PI)); + hits[0] = new SearchHit(0, "doc 1", piMap, Collections.emptyMap()); + hits[0].sourceRef(new BytesArray("{ \"digits\" : " + PI + " }")); + hits[0].score((float) Math.PI); + + // one without source + Map eMap = new HashMap<>(); + eMap.put("digits", new DocumentField("digits", E)); + hits[1] = new SearchHit(1, "doc 2", eMap, Collections.emptyMap()); + hits[1].score((float) Math.E); + + SearchHits searchHits = new SearchHits(hits, new TotalHits(2, TotalHits.Relation.EQUAL_TO), 2); + SearchResponseSections searchResponseSections = new SearchResponseSections(searchHits, null, null, false, false, null, 0); + return new SearchResponse(searchResponseSections, null, 1, 1, 0, 10, null, null); + } + + private SearchResponse createTestResponseNullField() { + SearchHit[] hits = new SearchHit[1]; + + Map map = new HashMap<>(); + map.put("digits", null); + hits[0] = new SearchHit(0, "doc 1", map, Collections.emptyMap()); + hits[0].sourceRef(new BytesArray("{ \"digits\" : null }")); + hits[0].score((float) Math.PI); + + SearchHits searchHits = new SearchHits(hits, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1); + SearchResponseSections searchResponseSections = new SearchResponseSections(searchHits, null, null, false, false, null, 0); + return new SearchResponse(searchResponseSections, null, 1, 1, 0, 10, null, null); + } + + private SearchResponse createTestResponseNullListEntry() { + SearchHit[] hits = new SearchHit[1]; + + Map xMap = new HashMap<>(); + xMap.put("digits", new DocumentField("digits", X)); + hits[0] = new SearchHit(0, "doc 1", xMap, Collections.emptyMap()); + hits[0].sourceRef(new BytesArray("{ \"digits\" : " + X + " }")); + hits[0].score((float) Math.PI); + + SearchHits searchHits = new SearchHits(hits, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1); + SearchResponseSections searchResponseSections = new SearchResponseSections(searchHits, null, 
null, false, false, null, 0); + return new SearchResponse(searchResponseSections, null, 1, 1, 0, 10, null, null); + } + + private SearchResponse createTestResponseNotComparable() { + SearchHit[] hits = new SearchHit[1]; + + Map<String, DocumentField> piMap = new HashMap<>(); + piMap.put("maps", new DocumentField("maps", List.of(Map.of("foo", "I'm incomparable!")))); + hits[0] = new SearchHit(0, "doc 1", piMap, Collections.emptyMap()); + hits[0].sourceRef(new BytesArray("{ \"maps\" : [{ \"foo\" : \"I'm incomparable!\"}] }")); + hits[0].score((float) Math.PI); + + SearchHits searchHits = new SearchHits(hits, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1); + SearchResponseSections searchResponseSections = new SearchResponseSections(searchHits, null, null, false, false, null, 0); + return new SearchResponse(searchResponseSections, null, 1, 1, 0, 10, null, null); + } + + public void testSortResponse() throws Exception { + SearchRequest request = createDummyRequest(); + + SortResponseProcessor sortResponseProcessor = new SortResponseProcessor( + null, + null, + false, + "digits", + SortResponseProcessor.SortOrder.ASCENDING, + "sorted" + ); + SearchResponse response = createTestResponse(); + SearchResponse sortResponse = sortResponseProcessor.processResponse(request, response); + + assertEquals(response.getHits(), sortResponse.getHits()); + + assertEquals(PI, sortResponse.getHits().getHits()[0].field("digits").getValues()); + assertEquals(List.of(1, 1, 2, 3, 4, 5, 6, 9), sortResponse.getHits().getHits()[0].field("sorted").getValues()); + Map<String, Object> map = sortResponse.getHits().getHits()[0].getSourceAsMap(); + assertNotNull(map); + assertEquals(List.of(1, 1, 2, 3, 4, 5, 6, 9), map.get("sorted")); + + assertEquals(E, sortResponse.getHits().getHits()[1].field("digits").getValues()); + assertEquals(List.of(1, 1, 2, 2, 7, 8, 8, 8), sortResponse.getHits().getHits()[1].field("sorted").getValues()); + assertNull(sortResponse.getHits().getHits()[1].getSourceAsMap()); + } + + public void testSortResponseSameField() throws Exception { + SearchRequest request = createDummyRequest(); + + SortResponseProcessor sortResponseProcessor = new SortResponseProcessor( + null, + null, + false, + "digits", + SortResponseProcessor.SortOrder.DESCENDING, + null + ); + SearchResponse response = createTestResponse(); + SearchResponse sortResponse = sortResponseProcessor.processResponse(request, response); + + assertEquals(response.getHits(), sortResponse.getHits()); + assertEquals(List.of(9, 6, 5, 4, 3, 2, 1, 1), sortResponse.getHits().getHits()[0].field("digits").getValues()); + assertEquals(List.of(8, 8, 8, 7, 2, 2, 1, 1), sortResponse.getHits().getHits()[1].field("digits").getValues()); + } + + public void testSortResponseNullListEntry() { + SearchRequest request = createDummyRequest(); + + SortResponseProcessor sortResponseProcessor = new SortResponseProcessor( + null, + null, + false, + "digits", + SortResponseProcessor.SortOrder.ASCENDING, + null + ); + assertThrows( + IllegalArgumentException.class, + () -> sortResponseProcessor.processResponse(request, createTestResponseNullListEntry()) + ); + } + + public void testNullField() { + SearchRequest request = createDummyRequest(); + + SortResponseProcessor sortResponseProcessor = new SortResponseProcessor( + null, + null, + false, + "digits", + SortResponseProcessor.SortOrder.DESCENDING, + null + ); + + assertThrows(IllegalArgumentException.class, () -> sortResponseProcessor.processResponse(request, createTestResponseNullField())); + } + + public void testNotComparableField() { + SearchRequest
request = createDummyRequest(); + + SortResponseProcessor sortResponseProcessor = new SortResponseProcessor( + null, + null, + false, + "maps", + SortResponseProcessor.SortOrder.ASCENDING, + null + ); + + assertThrows( + IllegalArgumentException.class, + () -> sortResponseProcessor.processResponse(request, createTestResponseNotComparable()) + ); + } + + public void testFactory() { + String sortField = RandomDocumentPicks.randomFieldName(random()); + String targetField = RandomDocumentPicks.randomFieldName(random()); + Map<String, Object> config = new HashMap<>(); + config.put("field", sortField); + config.put("order", "desc"); + config.put("target_field", targetField); + + SortResponseProcessor.Factory factory = new SortResponseProcessor.Factory(); + SortResponseProcessor processor = factory.create(Collections.emptyMap(), null, null, false, config, null); + assertEquals("sort", processor.getType()); + assertEquals(sortField, processor.getSortField()); + assertEquals(targetField, processor.getTargetField()); + assertEquals(SortResponseProcessor.SortOrder.DESCENDING, processor.getSortOrder()); + + expectThrows( + OpenSearchParseException.class, + () -> factory.create(Collections.emptyMap(), null, null, false, Collections.emptyMap(), null) + ); + } +} diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SplitResponseProcessorTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SplitResponseProcessorTests.java new file mode 100644 index 0000000000000..fcbc8ccf43cff --- /dev/null +++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SplitResponseProcessorTests.java @@ -0,0 +1,213 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.search.pipeline.common; + +import org.apache.lucene.search.TotalHits; +import org.opensearch.OpenSearchParseException; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.search.SearchResponseSections; +import org.opensearch.common.document.DocumentField; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.ingest.RandomDocumentPicks; +import org.opensearch.search.SearchHit; +import org.opensearch.search.SearchHits; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class SplitResponseProcessorTests extends OpenSearchTestCase { + + private static final String NO_TRAILING = "one,two,three"; + private static final String TRAILING = "alpha,beta,gamma,"; + private static final String REGEX_DELIM = "one1two2three"; + + private SearchRequest createDummyRequest() { + QueryBuilder query = new TermQueryBuilder("field", "value"); + SearchSourceBuilder source = new SearchSourceBuilder().query(query); + return new SearchRequest().source(source); + } + + private SearchResponse createTestResponse() { + SearchHit[] hits = new SearchHit[2]; + + // one response with source + Map csvMap = new HashMap<>(); + csvMap.put("csv", new DocumentField("csv", List.of(NO_TRAILING))); + hits[0] = new SearchHit(0, "doc 1", csvMap, Collections.emptyMap()); + hits[0].sourceRef(new BytesArray("{ \"csv\" : \"" + NO_TRAILING + "\" }")); + hits[0].score(1f); + + // one without source + csvMap = new HashMap<>(); + csvMap.put("csv", new DocumentField("csv", List.of(TRAILING))); + hits[1] = new SearchHit(1, "doc 2", csvMap, Collections.emptyMap()); + hits[1].score(2f); + + SearchHits searchHits = new SearchHits(hits, new TotalHits(2, TotalHits.Relation.EQUAL_TO), 2); + SearchResponseSections searchResponseSections = new SearchResponseSections(searchHits, null, null, false, false, null, 0); + return new SearchResponse(searchResponseSections, null, 1, 1, 0, 10, null, null); + } + + private SearchResponse createTestResponseRegex() { + SearchHit[] hits = new SearchHit[1]; + + Map dsvMap = new HashMap<>(); + dsvMap.put("dsv", new DocumentField("dsv", List.of(REGEX_DELIM))); + hits[0] = new SearchHit(0, "doc 1", dsvMap, Collections.emptyMap()); + hits[0].sourceRef(new BytesArray("{ \"dsv\" : \"" + REGEX_DELIM + "\" }")); + hits[0].score(1f); + + SearchHits searchHits = new SearchHits(hits, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1); + SearchResponseSections searchResponseSections = new SearchResponseSections(searchHits, null, null, false, false, null, 0); + return new SearchResponse(searchResponseSections, null, 1, 1, 0, 10, null, null); + } + + private SearchResponse createTestResponseNullField() { + SearchHit[] hits = new SearchHit[1]; + + Map map = new HashMap<>(); + map.put("csv", null); + hits[0] = new SearchHit(0, "doc 1", map, Collections.emptyMap()); + hits[0].sourceRef(new BytesArray("{ \"csv\" : null }")); + hits[0].score(1f); + + SearchHits searchHits = new SearchHits(hits, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1); + SearchResponseSections searchResponseSections = new SearchResponseSections(searchHits, null, null, false, false, null, 0); + return new SearchResponse(searchResponseSections, null, 1, 1, 0, 
10, null, null); + } + + private SearchResponse createTestResponseEmptyList() { + SearchHit[] hits = new SearchHit[1]; + + Map<String, DocumentField> map = new HashMap<>(); + map.put("empty", new DocumentField("empty", List.of())); + hits[0] = new SearchHit(0, "doc 1", map, Collections.emptyMap()); + hits[0].sourceRef(new BytesArray("{ \"empty\" : [] }")); + hits[0].score(1f); + + SearchHits searchHits = new SearchHits(hits, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1); + SearchResponseSections searchResponseSections = new SearchResponseSections(searchHits, null, null, false, false, null, 0); + return new SearchResponse(searchResponseSections, null, 1, 1, 0, 10, null, null); + } + + private SearchResponse createTestResponseNotString() { + SearchHit[] hits = new SearchHit[1]; + + Map<String, DocumentField> piMap = new HashMap<>(); + piMap.put("maps", new DocumentField("maps", List.of(Map.of("foo", "I'm the Map!")))); + hits[0] = new SearchHit(0, "doc 1", piMap, Collections.emptyMap()); + hits[0].sourceRef(new BytesArray("{ \"maps\" : [{ \"foo\" : \"I'm the Map!\"}] }")); + hits[0].score(1f); + + SearchHits searchHits = new SearchHits(hits, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1); + SearchResponseSections searchResponseSections = new SearchResponseSections(searchHits, null, null, false, false, null, 0); + return new SearchResponse(searchResponseSections, null, 1, 1, 0, 10, null, null); + } + + public void testSplitResponse() throws Exception { + SearchRequest request = createDummyRequest(); + + SplitResponseProcessor splitResponseProcessor = new SplitResponseProcessor(null, null, false, "csv", ",", false, "split"); + SearchResponse response = createTestResponse(); + SearchResponse splitResponse = splitResponseProcessor.processResponse(request, response); + + assertEquals(response.getHits(), splitResponse.getHits()); + + assertEquals(NO_TRAILING, splitResponse.getHits().getHits()[0].field("csv").getValue()); + assertEquals(List.of("one", "two", "three"), splitResponse.getHits().getHits()[0].field("split").getValues()); + Map<String, Object> map = splitResponse.getHits().getHits()[0].getSourceAsMap(); + assertNotNull(map); + assertEquals(List.of("one", "two", "three"), map.get("split")); + + assertEquals(TRAILING, splitResponse.getHits().getHits()[1].field("csv").getValue()); + assertEquals(List.of("alpha", "beta", "gamma"), splitResponse.getHits().getHits()[1].field("split").getValues()); + assertNull(splitResponse.getHits().getHits()[1].getSourceAsMap()); + } + + public void testSplitResponseRegex() throws Exception { + SearchRequest request = createDummyRequest(); + + SplitResponseProcessor splitResponseProcessor = new SplitResponseProcessor(null, null, false, "dsv", "\\d", false, "split"); + SearchResponse response = createTestResponseRegex(); + SearchResponse splitResponse = splitResponseProcessor.processResponse(request, response); + + assertEquals(response.getHits(), splitResponse.getHits()); + + assertEquals(REGEX_DELIM, splitResponse.getHits().getHits()[0].field("dsv").getValue()); + assertEquals(List.of("one", "two", "three"), splitResponse.getHits().getHits()[0].field("split").getValues()); + Map<String, Object> map = splitResponse.getHits().getHits()[0].getSourceAsMap(); + assertNotNull(map); + assertEquals(List.of("one", "two", "three"), map.get("split")); + } + + public void testSplitResponseSameField() throws Exception { + SearchRequest request = createDummyRequest(); + + SplitResponseProcessor splitResponseProcessor = new SplitResponseProcessor(null, null, false, "csv", ",", true, null); + SearchResponse response = createTestResponse(); +
SearchResponse splitResponse = splitResponseProcessor.processResponse(request, response); + + assertEquals(response.getHits(), splitResponse.getHits()); + assertEquals(List.of("one", "two", "three"), splitResponse.getHits().getHits()[0].field("csv").getValues()); + assertEquals(List.of("alpha", "beta", "gamma", ""), splitResponse.getHits().getHits()[1].field("csv").getValues()); + } + + public void testSplitResponseEmptyList() { + SearchRequest request = createDummyRequest(); + + SplitResponseProcessor splitResponseProcessor = new SplitResponseProcessor(null, null, false, "empty", ",", false, null); + assertThrows(IllegalArgumentException.class, () -> splitResponseProcessor.processResponse(request, createTestResponseEmptyList())); + } + + public void testNullField() { + SearchRequest request = createDummyRequest(); + + SplitResponseProcessor splitResponseProcessor = new SplitResponseProcessor(null, null, false, "csv", ",", false, null); + + assertThrows(IllegalArgumentException.class, () -> splitResponseProcessor.processResponse(request, createTestResponseNullField())); + } + + public void testNotStringField() { + SearchRequest request = createDummyRequest(); + + SplitResponseProcessor splitResponseProcessor = new SplitResponseProcessor(null, null, false, "maps", ",", false, null); + + assertThrows(IllegalArgumentException.class, () -> splitResponseProcessor.processResponse(request, createTestResponseNotString())); + } + + public void testFactory() { + String splitField = RandomDocumentPicks.randomFieldName(random()); + String targetField = RandomDocumentPicks.randomFieldName(random()); + Map config = new HashMap<>(); + config.put("field", splitField); + config.put("separator", ","); + config.put("preserve_trailing", true); + config.put("target_field", targetField); + + SplitResponseProcessor.Factory factory = new SplitResponseProcessor.Factory(); + SplitResponseProcessor processor = factory.create(Collections.emptyMap(), null, null, false, config, null); + assertEquals("split", processor.getType()); + assertEquals(splitField, processor.getSplitField()); + assertEquals(",", processor.getSeparator()); + assertTrue(processor.isPreserveTrailing()); + assertEquals(targetField, processor.getTargetField()); + + expectThrows( + OpenSearchParseException.class, + () -> factory.create(Collections.emptyMap(), null, null, false, Collections.emptyMap(), null) + ); + } +} diff --git a/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/80_sort_response.yml b/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/80_sort_response.yml new file mode 100644 index 0000000000000..c160b550b2a6e --- /dev/null +++ b/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/80_sort_response.yml @@ -0,0 +1,152 @@ +--- +teardown: + - do: + search_pipeline.delete: + id: "my_pipeline" + ignore: 404 + +--- +"Test sort processor": + - do: + search_pipeline.put: + id: "my_pipeline" + body: > + { + "description": "test pipeline", + "response_processors": [ + { + "sort": + { + "field": "a", + "target_field": "b" + } + } + ] + } + - match: { acknowledged: true } + + - do: + search_pipeline.put: + id: "my_pipeline_2" + body: > + { + "description": "test pipeline with ignore failure true", + "response_processors": [ + { + "sort": + { + "field": "aa", + "ignore_failure": true + } + } + ] + } + - match: { acknowledged: true } + + - do: + search_pipeline.put: + id: "my_pipeline_3" + body: > + { + "description": 
"test pipeline", + "response_processors": [ + { + "sort": + { + "field": "a", + "order": "desc", + "target_field": "b" + } + } + ] + } + - match: { acknowledged: true } + + - do: + indices.create: + index: test + + - do: + indices.put_mapping: + index: test + body: + properties: + a: + type: integer + store: true + doc_values: true + + - do: + index: + index: test + id: 1 + body: { + "a": [ 3, 1, 4 ] + } + + - do: + indices.refresh: + index: test + + - do: + search: + body: { } + - match: { hits.total.value: 1 } + + - do: + search: + index: test + search_pipeline: "my_pipeline" + body: { } + - match: { hits.total.value: 1 } + - match: { hits.hits.0._source: { "a": [3, 1, 4], "b": [1, 3, 4] } } + + # Should also work with no search body specified + - do: + search: + index: test + search_pipeline: "my_pipeline" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._source: { "a": [3, 1, 4], "b": [1, 3, 4] } } + + # Pipeline with ignore_failure set to true + # Should return while catching error + - do: + search: + index: test + search_pipeline: "my_pipeline_2" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._source: { "a": [3, 1, 4] } } + + # Pipeline with desc sort order + - do: + search: + index: test + search_pipeline: "my_pipeline_3" + body: { } + - match: { hits.total.value: 1 } + - match: { hits.hits.0._source: { "a": [3, 1, 4], "b": [4, 3, 1] } } + + # No source, using stored_fields + - do: + search: + index: test + search_pipeline: "my_pipeline" + body: { + "_source": false, + "stored_fields": [ "a" ] + } + - match: { hits.hits.0.fields: { "a": [3, 1, 4], "b": [1, 3, 4] } } + + # No source, using docvalue_fields + - do: + search: + index: test + search_pipeline: "my_pipeline_3" + body: { + "_source": false, + "docvalue_fields": [ "a" ] + } + # a is stored sorted because docvalue_fields is pre-sorted to optimize aggregations + # this is poorly documented which makes it really hard to write "expected" values on tests + - match: { hits.hits.0.fields: { "a": [1, 3, 4], "b": [4, 3, 1] } } diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 deleted file mode 100644 index 6784ac6c3b64f..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b54863f578939e135d3b3aea610284ae57c188cf \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..5c26883046fed --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +bdc12df04bb6858890b8aa108060b5b365a26102 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 deleted file mode 100644 index 3d86194de9213..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6762ec00a6d268f9980741f5b755838bcd658bf \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..1fd224fdd0b44 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ 
+c87f2ec3d9a97bd2b793d16817abb2bab93a7fc3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 deleted file mode 100644 index 4ef1adb818300..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c6ecbc452321e632bf3cea0f9758839b650455c7 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..22d35128c3ad5 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +81af1040bfa977f98dd0e1bd9639513ea862ca04 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 deleted file mode 100644 index 06c86b8fda557..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f0cca5df75bfb4f858d0435f601d8b1cae1de054 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..d4767d06b22bf --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +7fa28b510f0f16f4d5d7188b86bef59e048f62f9 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 deleted file mode 100644 index 16cb1cce7f504..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58210befcb31adbcadd5724966a061444db91863 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..47af3100f0f2d --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +b2798069092a981a832b7510d0462ee9efb7a80e \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 deleted file mode 100644 index 2f70f791f65ed..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2bc6a58ad2e9e279634b6e55022e8dcd3c175cc4 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..8b30272861770 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +3d5e2d5bcc6baeeb8c13a230980c6132a778e036 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 deleted file mode 100644 index 621cbf58f3133..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-3493179999f211dc49714319f81da2be86523a3b \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..1a094fa19a623 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +58a631d9d44c4ed7cc0dcc9cffa6641da9374d72 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 deleted file mode 100644 index ac96e7545ed58..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -24e97cf14ea9d80afe4c5ab69066b587fccc154a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..5fbfde0836e0c --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +77cd136dd3843f5e7cbcf68c824975d745c49ddb \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 deleted file mode 100644 index 0847ac3034db7..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -acafc128cddafa021bc0b48b0788eb0e118add5e \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..8dad0e3104dc8 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +b50ff619cdcdc48e748cba3405c9988529f28f60 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.0-snapshot-847316d.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.0-snapshot-847316d.jar.sha1 new file mode 100644 index 0000000000000..80e254ed3d098 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.0-snapshot-847316d.jar.sha1 @@ -0,0 +1 @@ +04436942995a4952ce5654126dfb767d6335674e \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.0-snapshot-c896995.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.0-snapshot-c896995.jar.sha1 deleted file mode 100644 index 696803bf63b46..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.0-snapshot-c896995.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e6314f36fb29e208d58c0470f14269c9c36996ba \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.0-snapshot-847316d.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.0-snapshot-847316d.jar.sha1 new file mode 100644 index 0000000000000..3baed2a6e660b --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.0-snapshot-847316d.jar.sha1 @@ -0,0 +1 @@ +85918e24fc3bf63fcd953807ab2eb3fa55c987c2 \ No newline at end of file diff --git 
a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.0-snapshot-c896995.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.0-snapshot-c896995.jar.sha1 deleted file mode 100644 index 7a12077d7fc62..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.0-snapshot-c896995.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -77fbf1e37af79715f28f66d8cc5b50af2982fc54 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.0-snapshot-847316d.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.0-snapshot-847316d.jar.sha1 new file mode 100644 index 0000000000000..4e9327112d412 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.0-snapshot-847316d.jar.sha1 @@ -0,0 +1 @@ +15e425e9cc0ab9d65fac3c919199a24dfa3631eb \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.0-snapshot-c896995.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.0-snapshot-c896995.jar.sha1 deleted file mode 100644 index efed62c7e5e5b..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.0-snapshot-c896995.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a7a4e9c6004c72782e1002e1dcfaf4fbab7887d8 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.0-snapshot-847316d.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.0-snapshot-847316d.jar.sha1 new file mode 100644 index 0000000000000..7e7e9fe5b22b4 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.0-snapshot-847316d.jar.sha1 @@ -0,0 +1 @@ +3d16c18348e7d4a00cb83100c43f3e21239d224e \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.0-snapshot-c896995.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.0-snapshot-c896995.jar.sha1 deleted file mode 100644 index f2020abcb8ef7..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.0-snapshot-c896995.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -42ac148a3769d6eb880d7f184d1917bad48ca303 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.0-snapshot-847316d.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.0-snapshot-847316d.jar.sha1 new file mode 100644 index 0000000000000..98e0ecc9cbb89 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.0-snapshot-847316d.jar.sha1 @@ -0,0 +1 @@ +2ef6d9dffc6816d3cd04a54fe1ee43e13f850a37 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.0-snapshot-c896995.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.0-snapshot-c896995.jar.sha1 deleted file mode 100644 index b64e4061311e5..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.0-snapshot-c896995.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -adf2a25339ac8722647f8196288c1f5056bbf0de \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.0-snapshot-847316d.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.0-snapshot-847316d.jar.sha1 new file mode 100644 index 0000000000000..ef675f2b9702e --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.0-snapshot-847316d.jar.sha1 @@ -0,0 +1 @@ +e72b2262f5393d9ff255fb901297d4e6790e9102 \ No 
newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.0-snapshot-c896995.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.0-snapshot-c896995.jar.sha1 deleted file mode 100644 index f56e7fc5df766..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.0-snapshot-c896995.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a689e3af2015b21b7b4f41a1206b50c44519b6f7 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.0-snapshot-847316d.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.0-snapshot-847316d.jar.sha1 new file mode 100644 index 0000000000000..d8bbac27fd360 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.0-snapshot-847316d.jar.sha1 @@ -0,0 +1 @@ +416ac44b2e76592c9e85338798cae93c3cf5475e \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.0-snapshot-c896995.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.0-snapshot-c896995.jar.sha1 deleted file mode 100644 index 30732e3c4a688..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.0-snapshot-c896995.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c875f7706ee81b1fb0b3443767a8c9c52f30abc5 \ No newline at end of file diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java index b4c62fbf85cb8..4a95b04de3952 100644 --- a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java @@ -60,7 +60,6 @@ import java.util.function.ToLongBiFunction; import org.ehcache.Cache; -import org.ehcache.CachePersistenceException; import org.ehcache.PersistentCacheManager; import org.ehcache.config.builders.CacheConfigurationBuilder; import org.ehcache.config.builders.CacheEventListenerConfigurationBuilder; @@ -104,8 +103,6 @@ public class EhcacheDiskCache implements ICache { // Unique id associated with this cache. private final static String UNIQUE_ID = UUID.randomUUID().toString(); private final static String THREAD_POOL_ALIAS_PREFIX = "ehcachePool"; - private final static int MINIMUM_MAX_SIZE_IN_BYTES = 1024 * 100; // 100KB - // A Cache manager can create many caches. private final PersistentCacheManager cacheManager; @@ -127,13 +124,18 @@ public class EhcacheDiskCache implements ICache { private final Serializer keySerializer; private final Serializer valueSerializer; + final static int MINIMUM_MAX_SIZE_IN_BYTES = 1024 * 100; // 100KB + final static String CACHE_DATA_CLEANUP_DURING_INITIALIZATION_EXCEPTION = "Failed to delete ehcache disk cache under " + + "path: %s during initialization. Please clean this up manually and restart the process"; + /** * Used in computeIfAbsent to synchronize loading of a given key. This is needed as ehcache doesn't provide a * computeIfAbsent method. 
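 * As an illustrative sketch only (with assumed, simplified names and no error handling), the per-key pattern behaves roughly like: {@code CompletableFuture future = futureMap.computeIfAbsent(key, k -> new CompletableFuture<>());} whichever thread actually inserted the future loads the value, completes it, and removes the map entry, while concurrent callers for the same key simply wait on that same future.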
*/ Map, CompletableFuture, V>>> completableFutureMap = new ConcurrentHashMap<>(); - private EhcacheDiskCache(Builder builder) { + @SuppressForbidden(reason = "Ehcache uses File.io") + EhcacheDiskCache(Builder builder) { this.keyType = Objects.requireNonNull(builder.keyType, "Key type shouldn't be null"); this.valueType = Objects.requireNonNull(builder.valueType, "Value type shouldn't be null"); this.expireAfterAccess = Objects.requireNonNull(builder.getExpireAfterAcess(), "ExpireAfterAccess value shouldn't " + "be null"); @@ -151,6 +153,18 @@ private EhcacheDiskCache(Builder builder) { if (this.storagePath == null || this.storagePath.isBlank()) { throw new IllegalArgumentException("Storage path shouldn't be null or empty"); } + // Delete all the previous disk cache related files/data. We don't persist data between process restarts for + now, which is why we need to do this. Clean up in case there was a non-graceful restart and we had older disk + cache data still lying around. + Path ehcacheDirectory = Paths.get(this.storagePath); + if (Files.exists(ehcacheDirectory)) { + try { + logger.info("Found older disk cache data lying around during initialization under path: {}", this.storagePath); + IOUtils.rm(ehcacheDirectory); + } catch (IOException e) { + throw new OpenSearchException(String.format(CACHE_DATA_CLEANUP_DURING_INITIALIZATION_EXCEPTION, this.storagePath), e); + } + } if (builder.threadPoolAlias == null || builder.threadPoolAlias.isBlank()) { this.threadPoolAlias = THREAD_POOL_ALIAS_PREFIX + "DiskWrite#" + UNIQUE_ID; } else { @@ -175,6 +189,11 @@ private EhcacheDiskCache(Builder builder) { } } + + // Package private for testing + PersistentCacheManager getCacheManager() { + return this.cacheManager; + } + @SuppressWarnings({ "rawtypes" }) private Cache buildCache(Duration expireAfterAccess, Builder builder) { // Creating the cache requires permissions specified in plugin-security.policy @@ -255,7 +274,7 @@ Map, CompletableFuture, V>>> getCompletableFutur } @SuppressForbidden(reason = "Ehcache uses File.io") - private PersistentCacheManager buildCacheManager() { + PersistentCacheManager buildCacheManager() { // In case we use multiple ehCaches, we can define this cache manager at a global level. 
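// (Illustrative sketch, not part of this change: a shared manager could be built once via CacheManagerBuilder.newCacheManagerBuilder().with(CacheManagerBuilder.persistence(rootDir)).build(true), where rootDir is an assumed common directory, and each EhcacheDiskCache instance would then register its own cache alias against that single manager.)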
// Creating the cache manager also requires permissions specified in plugin-security.policy return AccessController.doPrivileged((PrivilegedAction) () -> { @@ -444,20 +463,21 @@ public void refresh() { @Override @SuppressForbidden(reason = "Ehcache uses File.io") public void close() { - cacheManager.removeCache(this.diskCacheAlias); - cacheManager.close(); try { - cacheManager.destroyCache(this.diskCacheAlias); - // Delete all the disk cache related files/data - Path ehcacheDirectory = Paths.get(this.storagePath); - if (Files.exists(ehcacheDirectory)) { + cacheManager.close(); + } catch (Exception e) { + logger.error(() -> new ParameterizedMessage("Exception occurred while trying to close ehcache manager"), e); + } + // Delete all the disk cache related files/data in case it is present + Path ehcacheDirectory = Paths.get(this.storagePath); + if (Files.exists(ehcacheDirectory)) { + try { IOUtils.rm(ehcacheDirectory); + } catch (IOException e) { + logger.error(() -> new ParameterizedMessage("Failed to delete ehcache disk cache data under path: {}", this.storagePath)); } - } catch (CachePersistenceException e) { - throw new OpenSearchException("Exception occurred while destroying ehcache and associated data", e); - } catch (IOException e) { - logger.error(() -> new ParameterizedMessage("Failed to delete ehcache disk cache data under path: {}", this.storagePath)); } + } /** diff --git a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java index 29551befd3e9f..2bc24227bb513 100644 --- a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java +++ b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java @@ -25,6 +25,7 @@ import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.bytes.CompositeBytesReference; @@ -34,6 +35,8 @@ import java.io.IOException; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; @@ -47,10 +50,17 @@ import java.util.concurrent.Phaser; import java.util.function.ToLongBiFunction; +import org.ehcache.PersistentCacheManager; + import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_LISTENER_MODE_SYNC_KEY; import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_MAX_SIZE_IN_BYTES_KEY; import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_STORAGE_PATH_KEY; +import static org.opensearch.cache.store.disk.EhcacheDiskCache.MINIMUM_MAX_SIZE_IN_BYTES; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; @ThreadLeakFilters(filters = { EhcacheThreadLeakFilter.class }) public class EhCacheDiskCacheTests extends OpenSearchSingleNodeTestCase { @@ -882,6 +892,289 @@ public void testStatsTrackingDisabled() throws Exception { } } + public void testDiskCacheFilesAreClearedUpDuringCloseAndInitialization() throws Exception { + Settings 
settings = Settings.builder().build(); + MockRemovalListener removalListener = new MockRemovalListener<>(); + ToLongBiFunction, String> weigher = getWeigher(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + String path = env.nodePaths()[0].path.toString() + "/request_cache"; + // Create a dummy file to simulate a scenario where the data is already in the disk cache storage path + // beforehand. + Files.createDirectory(Path.of(path)); + Path dummyFilePath = Files.createFile(Path.of(path + "/testing.txt")); + assertTrue(Files.exists(dummyFilePath)); + ICache ehcacheTest = new EhcacheDiskCache.Builder().setThreadPoolAlias("ehcacheTest") + .setStoragePath(path) + .setIsEventListenerModeSync(true) + .setKeyType(String.class) + .setValueType(String.class) + .setKeySerializer(new StringSerializer()) + .setDiskCacheAlias("test1") + .setValueSerializer(new StringSerializer()) + .setDimensionNames(List.of(dimensionName)) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setThreadPoolAlias("") + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .setWeigher(weigher) + .setStatsTrackingEnabled(false) + .build(); + int randomKeys = randomIntBetween(10, 100); + for (int i = 0; i < randomKeys; i++) { + ICacheKey iCacheKey = getICacheKey(UUID.randomUUID().toString()); + ehcacheTest.put(iCacheKey, UUID.randomUUID().toString()); + assertEquals(0, ehcacheTest.count()); // Expect count of 0 if NoopCacheStatsHolder is used + assertEquals(new ImmutableCacheStats(0, 0, 0, 0, 0), ehcacheTest.stats().getTotalStats()); + } + // Verify that older data was wiped out after initialization + assertFalse(Files.exists(dummyFilePath)); + + // Verify that there is data present under desired path by explicitly verifying the folder name by prefix + // (used from disk cache alias) + assertTrue(Files.exists(Path.of(path))); + boolean folderExists = Files.walk(Path.of(path)) + .filter(Files::isDirectory) + .anyMatch(path1 -> path1.getFileName().toString().startsWith("test1")); + assertTrue(folderExists); + ehcacheTest.close(); + assertFalse(Files.exists(Path.of(path))); // Verify everything is cleared up now after close() + } + } + + public void testDiskCacheCloseCalledTwiceAndVerifyDiskDataIsCleanedUp() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener removalListener = new MockRemovalListener<>(); + ToLongBiFunction, String> weigher = getWeigher(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + String path = env.nodePaths()[0].path.toString() + "/request_cache"; + ICache ehcacheTest = new EhcacheDiskCache.Builder().setThreadPoolAlias(null) + .setStoragePath(path) + .setIsEventListenerModeSync(true) + .setKeyType(String.class) + .setValueType(String.class) + .setKeySerializer(new StringSerializer()) + .setDiskCacheAlias("test1") + .setValueSerializer(new StringSerializer()) + .setDimensionNames(List.of(dimensionName)) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .setWeigher(weigher) + .setStatsTrackingEnabled(false) + .build(); + int randomKeys = randomIntBetween(10, 100); + for (int i = 0; i < randomKeys; i++) { + ICacheKey iCacheKey = getICacheKey(UUID.randomUUID().toString()); + ehcacheTest.put(iCacheKey, UUID.randomUUID().toString()); + assertEquals(0, ehcacheTest.count()); // Expect 
count of 0 if NoopCacheStatsHolder is used + assertEquals(new ImmutableCacheStats(0, 0, 0, 0, 0), ehcacheTest.stats().getTotalStats()); + } + ehcacheTest.close(); + assertFalse(Files.exists(Path.of(path))); // Verify everything is cleared up now after close() + // Call it again. This will not throw an exception. + ehcacheTest.close(); + } + } + + public void testDiskCacheCloseAfterCleaningUpFilesManually() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener removalListener = new MockRemovalListener<>(); + ToLongBiFunction, String> weigher = getWeigher(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + String path = env.nodePaths()[0].path.toString() + "/request_cache"; + ICache ehcacheTest = new EhcacheDiskCache.Builder().setThreadPoolAlias(null) + .setStoragePath(path) + .setIsEventListenerModeSync(true) + .setKeyType(String.class) + .setValueType(String.class) + .setKeySerializer(new StringSerializer()) + .setDiskCacheAlias("test1") + .setValueSerializer(new StringSerializer()) + .setDimensionNames(List.of(dimensionName)) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .setWeigher(weigher) + .setStatsTrackingEnabled(false) + .build(); + int randomKeys = randomIntBetween(10, 100); + for (int i = 0; i < randomKeys; i++) { + ICacheKey iCacheKey = getICacheKey(UUID.randomUUID().toString()); + ehcacheTest.put(iCacheKey, UUID.randomUUID().toString()); + assertEquals(0, ehcacheTest.count()); // Expect count of 0 if NoopCacheStatsHolder is used + assertEquals(new ImmutableCacheStats(0, 0, 0, 0, 0), ehcacheTest.stats().getTotalStats()); + } + IOUtils.rm(Path.of(path)); + ehcacheTest.close(); + } + } + + public void testEhcacheDiskCacheWithoutStoragePathDefined() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener removalListener = new MockRemovalListener<>(); + ToLongBiFunction, String> weigher = getWeigher(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + assertThrows( + IllegalArgumentException.class, + () -> new EhcacheDiskCache.Builder().setThreadPoolAlias("ehcacheTest") + .setIsEventListenerModeSync(true) + .setKeyType(String.class) + .setValueType(String.class) + .setKeySerializer(new StringSerializer()) + .setDiskCacheAlias("test1") + .setValueSerializer(new StringSerializer()) + .setDimensionNames(List.of(dimensionName)) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .setWeigher(weigher) + .setStatsTrackingEnabled(false) + .build() + ); + } + } + + public void testEhcacheDiskCacheWithoutStoragePathNull() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener removalListener = new MockRemovalListener<>(); + ToLongBiFunction, String> weigher = getWeigher(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + assertThrows( + IllegalArgumentException.class, + () -> new EhcacheDiskCache.Builder().setThreadPoolAlias("ehcacheTest") + .setStoragePath(null) + .setIsEventListenerModeSync(true) + .setKeyType(String.class) + .setValueType(String.class) + .setKeySerializer(new StringSerializer()) + .setDiskCacheAlias("test1") + .setValueSerializer(new StringSerializer()) + .setDimensionNames(List.of(dimensionName)) + 
.setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .setWeigher(weigher) + .setStatsTrackingEnabled(false) + .build() + ); + } + } + + public void testEhcacheWithStorageSizeLowerThanMinimumExpected() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener removalListener = new MockRemovalListener<>(); + ToLongBiFunction, String> weigher = getWeigher(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + assertThrows( + IllegalArgumentException.class, + () -> new EhcacheDiskCache.Builder().setThreadPoolAlias("ehcacheTest") + .setIsEventListenerModeSync(true) + .setKeyType(String.class) + .setValueType(String.class) + .setKeySerializer(new StringSerializer()) + .setDiskCacheAlias("test1") + .setValueSerializer(new StringSerializer()) + .setDimensionNames(List.of(dimensionName)) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(MINIMUM_MAX_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .setWeigher(weigher) + .setStatsTrackingEnabled(false) + .build() + ); + } + } + + public void testEhcacheWithStorageSizeZero() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener removalListener = new MockRemovalListener<>(); + ToLongBiFunction, String> weigher = getWeigher(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + assertThrows( + IllegalArgumentException.class, + () -> new EhcacheDiskCache.Builder().setThreadPoolAlias("ehcacheTest") + .setIsEventListenerModeSync(true) + .setKeyType(String.class) + .setValueType(String.class) + .setKeySerializer(new StringSerializer()) + .setDiskCacheAlias("test1") + .setValueSerializer(new StringSerializer()) + .setDimensionNames(List.of(dimensionName)) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(0) + .setRemovalListener(removalListener) + .setWeigher(weigher) + .setStatsTrackingEnabled(false) + .build() + ); + } + } + + public void testEhcacheCloseWithDestroyCacheMethodThrowingException() throws Exception { + EhcacheDiskCache ehcacheDiskCache = new MockEhcacheDiskCache(createDummyBuilder(null)); + PersistentCacheManager cacheManager = ehcacheDiskCache.getCacheManager(); + doNothing().when(cacheManager).removeCache(anyString()); + doNothing().when(cacheManager).close(); + doThrow(new RuntimeException("test")).when(cacheManager).destroyCache(anyString()); + ehcacheDiskCache.close(); + } + + static class MockEhcacheDiskCache extends EhcacheDiskCache { + + public MockEhcacheDiskCache(Builder builder) { + super(builder); + } + + @Override + PersistentCacheManager buildCacheManager() { + PersistentCacheManager cacheManager = mock(PersistentCacheManager.class); + return cacheManager; + } + } + + private EhcacheDiskCache.Builder createDummyBuilder(String storagePath) throws IOException { + Settings settings = Settings.builder().build(); + MockRemovalListener removalListener = new MockRemovalListener<>(); + ToLongBiFunction, String> weigher = getWeigher(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + if (storagePath == null || storagePath.isBlank()) { + storagePath = env.nodePaths()[0].path.toString() + "/request_cache"; + } + return (EhcacheDiskCache.Builder) new 
EhcacheDiskCache.Builder().setThreadPoolAlias( + "ehcacheTest" + ) + .setIsEventListenerModeSync(true) + .setStoragePath(storagePath) + .setKeyType(String.class) + .setValueType(String.class) + .setKeySerializer(new StringSerializer()) + .setDiskCacheAlias("test1") + .setValueSerializer(new StringSerializer()) + .setDimensionNames(List.of(dimensionName)) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .setWeigher(weigher) + .setStatsTrackingEnabled(false); + } + } + private List getRandomDimensions(List dimensionNames) { Random rand = Randomness.get(); int bound = 3; diff --git a/plugins/crypto-kms/licenses/jackson-annotations-2.17.1.jar.sha1 b/plugins/crypto-kms/licenses/jackson-annotations-2.17.1.jar.sha1 deleted file mode 100644 index 4ceead1b7ae4f..0000000000000 --- a/plugins/crypto-kms/licenses/jackson-annotations-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fca7ef6192c9ad05d07bc50da991bf937a84af3a \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-annotations-2.17.2.jar.sha1 b/plugins/crypto-kms/licenses/jackson-annotations-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..411e1d62459fd --- /dev/null +++ b/plugins/crypto-kms/licenses/jackson-annotations-2.17.2.jar.sha1 @@ -0,0 +1 @@ +147b7b9412ffff24339f8aba080b292448e08698 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-databind-2.17.1.jar.sha1 b/plugins/crypto-kms/licenses/jackson-databind-2.17.1.jar.sha1 deleted file mode 100644 index 7cf1ac1b60301..0000000000000 --- a/plugins/crypto-kms/licenses/jackson-databind-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0524dcbcccdde7d45a679dfc333e4763feb09079 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-databind-2.17.2.jar.sha1 b/plugins/crypto-kms/licenses/jackson-databind-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..f2b4dbdc5decb --- /dev/null +++ b/plugins/crypto-kms/licenses/jackson-databind-2.17.2.jar.sha1 @@ -0,0 +1 @@ +e6deb029e5901e027c129341fac39e515066b68c \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.17.1.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.17.1.jar.sha1 deleted file mode 100644 index 4ceead1b7ae4f..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-annotations-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fca7ef6192c9ad05d07bc50da991bf937a84af3a \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.17.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..411e1d62459fd --- /dev/null +++ b/plugins/discovery-ec2/licenses/jackson-annotations-2.17.2.jar.sha1 @@ -0,0 +1 @@ +147b7b9412ffff24339f8aba080b292448e08698 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.17.1.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.17.1.jar.sha1 deleted file mode 100644 index 7cf1ac1b60301..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-databind-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0524dcbcccdde7d45a679dfc333e4763feb09079 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.17.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..f2b4dbdc5decb --- /dev/null +++ 
b/plugins/discovery-ec2/licenses/jackson-databind-2.17.2.jar.sha1 @@ -0,0 +1 @@ +e6deb029e5901e027c129341fac39e515066b68c \ No newline at end of file diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index d631855013527..81ac52b97cefa 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -66,7 +66,7 @@ dependencies { runtimeOnly "com.optimaize.languagedetector:language-detector:0.6" runtimeOnly "com.google.guava:guava:${versions.guava}" // Other dependencies - api 'org.tukaani:xz:1.9' + api 'org.tukaani:xz:1.10' api "commons-io:commons-io:${versions.commonsio}" api "org.slf4j:slf4j-api:${versions.slf4j}" diff --git a/plugins/ingest-attachment/licenses/xz-1.10.jar.sha1 b/plugins/ingest-attachment/licenses/xz-1.10.jar.sha1 new file mode 100644 index 0000000000000..e3757c19ce5ab --- /dev/null +++ b/plugins/ingest-attachment/licenses/xz-1.10.jar.sha1 @@ -0,0 +1 @@ +1be8166f89e035a56c6bfc67dbc423996fe577e2 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/xz-1.9.jar.sha1 b/plugins/ingest-attachment/licenses/xz-1.9.jar.sha1 deleted file mode 100644 index c3e22d167212f..0000000000000 --- a/plugins/ingest-attachment/licenses/xz-1.9.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1ea4bec1a921180164852c65006d928617bd2caf \ No newline at end of file diff --git a/plugins/query-insights/src/internalClusterTest/java/org/opensearch/plugin/insights/QueryInsightsPluginTransportIT.java b/plugins/query-insights/src/internalClusterTest/java/org/opensearch/plugin/insights/QueryInsightsPluginTransportIT.java deleted file mode 100644 index 04e715444f50a..0000000000000 --- a/plugins/query-insights/src/internalClusterTest/java/org/opensearch/plugin/insights/QueryInsightsPluginTransportIT.java +++ /dev/null @@ -1,274 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.plugin.insights; - -import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; -import org.opensearch.action.admin.cluster.node.info.NodeInfo; -import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest; -import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; -import org.opensearch.action.admin.cluster.node.info.PluginsAndModules; -import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; -import org.opensearch.action.index.IndexResponse; -import org.opensearch.action.search.SearchResponse; -import org.opensearch.common.settings.Settings; -import org.opensearch.index.query.QueryBuilders; -import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesAction; -import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesRequest; -import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesResponse; -import org.opensearch.plugin.insights.rules.model.MetricType; -import org.opensearch.plugins.Plugin; -import org.opensearch.plugins.PluginInfo; -import org.opensearch.test.OpenSearchIntegTestCase; -import org.junit.Assert; - -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.concurrent.ExecutionException; -import java.util.function.Function; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED; -import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE; -import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; - -/** - * Transport Action tests for Query Insights Plugin - */ - -@OpenSearchIntegTestCase.ClusterScope(numDataNodes = 0, scope = OpenSearchIntegTestCase.Scope.TEST) -public class QueryInsightsPluginTransportIT extends OpenSearchIntegTestCase { - - private final int TOTAL_NUMBER_OF_NODES = 2; - private final int TOTAL_SEARCH_REQUESTS = 5; - - @Override - protected Collection> nodePlugins() { - return Arrays.asList(QueryInsightsPlugin.class); - } - - /** - * Test Query Insights Plugin is installed - */ - public void testQueryInsightPluginInstalled() { - NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); - nodesInfoRequest.addMetric(NodesInfoRequest.Metric.PLUGINS.metricName()); - NodesInfoResponse nodesInfoResponse = OpenSearchIntegTestCase.client().admin().cluster().nodesInfo(nodesInfoRequest).actionGet(); - List pluginInfos = nodesInfoResponse.getNodes() - .stream() - .flatMap( - (Function>) nodeInfo -> nodeInfo.getInfo(PluginsAndModules.class).getPluginInfos().stream() - ) - .collect(Collectors.toList()); - Assert.assertTrue( - pluginInfos.stream().anyMatch(pluginInfo -> pluginInfo.getName().equals("org.opensearch.plugin.insights.QueryInsightsPlugin")) - ); - } - - /** - * Test get top queries when feature disabled - */ - public void testGetTopQueriesWhenFeatureDisabled() { - TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY); - TopQueriesResponse response = OpenSearchIntegTestCase.client().execute(TopQueriesAction.INSTANCE, request).actionGet(); - Assert.assertNotEquals(0, response.failures().size()); - Assert.assertEquals( - "Cannot get top n queries for [latency] when it is not enabled.", - response.failures().get(0).getCause().getCause().getMessage() - ); - } - - /** - * Test 
update top query record when feature enabled - */ - public void testUpdateRecordWhenFeatureDisabledThenEnabled() throws ExecutionException, InterruptedException { - Settings commonSettings = Settings.builder().put(TOP_N_LATENCY_QUERIES_ENABLED.getKey(), "false").build(); - - logger.info("--> starting nodes for query insight testing"); - List nodes = internalCluster().startNodes(TOTAL_NUMBER_OF_NODES, Settings.builder().put(commonSettings).build()); - - logger.info("--> waiting for nodes to form a cluster"); - ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet(); - assertFalse(health.isTimedOut()); - - assertAcked( - prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 2)) - ); - ensureStableCluster(2); - logger.info("--> creating indices for query insight testing"); - for (int i = 0; i < 5; i++) { - IndexResponse response = client().prepareIndex("test_" + i).setId("" + i).setSource("field_" + i, "value_" + i).get(); - assertEquals("CREATED", response.status().toString()); - } - // making search requests to get top queries - for (int i = 0; i < TOTAL_SEARCH_REQUESTS; i++) { - SearchResponse searchResponse = internalCluster().client(randomFrom(nodes)) - .prepareSearch() - .setQuery(QueryBuilders.matchAllQuery()) - .get(); - assertEquals(searchResponse.getFailedShards(), 0); - } - - TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY); - TopQueriesResponse response = OpenSearchIntegTestCase.client().execute(TopQueriesAction.INSTANCE, request).actionGet(); - Assert.assertNotEquals(0, response.failures().size()); - Assert.assertEquals( - "Cannot get top n queries for [latency] when it is not enabled.", - response.failures().get(0).getCause().getCause().getMessage() - ); - - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest().persistentSettings( - Settings.builder().put(TOP_N_LATENCY_QUERIES_ENABLED.getKey(), "true").build() - ); - assertAcked(internalCluster().client().admin().cluster().updateSettings(updateSettingsRequest).get()); - TopQueriesRequest request2 = new TopQueriesRequest(MetricType.LATENCY); - TopQueriesResponse response2 = OpenSearchIntegTestCase.client().execute(TopQueriesAction.INSTANCE, request2).actionGet(); - Assert.assertEquals(0, response2.failures().size()); - Assert.assertEquals(TOTAL_NUMBER_OF_NODES, response2.getNodes().size()); - for (int i = 0; i < TOTAL_NUMBER_OF_NODES; i++) { - Assert.assertEquals(0, response2.getNodes().get(i).getTopQueriesRecord().size()); - } - - internalCluster().stopAllNodes(); - } - - /** - * Test get top queries when feature enabled - */ - public void testGetTopQueriesWhenFeatureEnabled() throws InterruptedException { - Settings commonSettings = Settings.builder() - .put(TOP_N_LATENCY_QUERIES_ENABLED.getKey(), "true") - .put(TOP_N_LATENCY_QUERIES_SIZE.getKey(), "100") - .put(TOP_N_LATENCY_QUERIES_WINDOW_SIZE.getKey(), "600s") - .build(); - - logger.info("--> starting nodes for query insight testing"); - List nodes = internalCluster().startNodes(TOTAL_NUMBER_OF_NODES, Settings.builder().put(commonSettings).build()); - - logger.info("--> waiting for nodes to form a cluster"); - ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet(); - assertFalse(health.isTimedOut()); - - assertAcked( - prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 
2)) - ); - ensureStableCluster(2); - logger.info("--> creating indices for query insight testing"); - for (int i = 0; i < 5; i++) { - IndexResponse response = client().prepareIndex("test_" + i).setId("" + i).setSource("field_" + i, "value_" + i).get(); - assertEquals("CREATED", response.status().toString()); - } - // making search requests to get top queries - for (int i = 0; i < TOTAL_SEARCH_REQUESTS; i++) { - SearchResponse searchResponse = internalCluster().client(randomFrom(nodes)) - .prepareSearch() - .setQuery(QueryBuilders.matchAllQuery()) - .get(); - assertEquals(searchResponse.getFailedShards(), 0); - } - // Sleep to wait for queue drained to top queries store - Thread.sleep(6000); - TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY); - TopQueriesResponse response = OpenSearchIntegTestCase.client().execute(TopQueriesAction.INSTANCE, request).actionGet(); - Assert.assertEquals(0, response.failures().size()); - Assert.assertEquals(TOTAL_NUMBER_OF_NODES, response.getNodes().size()); - Assert.assertEquals(TOTAL_SEARCH_REQUESTS, response.getNodes().stream().mapToInt(o -> o.getTopQueriesRecord().size()).sum()); - - internalCluster().stopAllNodes(); - } - - /** - * Test get top queries with small top n size - */ - public void testGetTopQueriesWithSmallTopN() throws InterruptedException { - Settings commonSettings = Settings.builder() - .put(TOP_N_LATENCY_QUERIES_ENABLED.getKey(), "true") - .put(TOP_N_LATENCY_QUERIES_SIZE.getKey(), "1") - .put(TOP_N_LATENCY_QUERIES_WINDOW_SIZE.getKey(), "600s") - .build(); - - logger.info("--> starting nodes for query insight testing"); - List nodes = internalCluster().startNodes(TOTAL_NUMBER_OF_NODES, Settings.builder().put(commonSettings).build()); - - logger.info("--> waiting for nodes to form a cluster"); - ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet(); - assertFalse(health.isTimedOut()); - - assertAcked( - prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 2)) - ); - ensureStableCluster(2); - logger.info("--> creating indices for query insight testing"); - for (int i = 0; i < 5; i++) { - IndexResponse response = client().prepareIndex("test_" + i).setId("" + i).setSource("field_" + i, "value_" + i).get(); - assertEquals("CREATED", response.status().toString()); - } - // making search requests to get top queries - for (int i = 0; i < TOTAL_SEARCH_REQUESTS; i++) { - SearchResponse searchResponse = internalCluster().client(randomFrom(nodes)) - .prepareSearch() - .setQuery(QueryBuilders.matchAllQuery()) - .get(); - assertEquals(searchResponse.getFailedShards(), 0); - } - Thread.sleep(6000); - TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY); - TopQueriesResponse response = OpenSearchIntegTestCase.client().execute(TopQueriesAction.INSTANCE, request).actionGet(); - Assert.assertEquals(0, response.failures().size()); - Assert.assertEquals(TOTAL_NUMBER_OF_NODES, response.getNodes().size()); - Assert.assertEquals(2, response.getNodes().stream().mapToInt(o -> o.getTopQueriesRecord().size()).sum()); - - internalCluster().stopAllNodes(); - } - - /** - * Test get top queries with small window size - */ - public void testGetTopQueriesWithSmallWindowSize() throws InterruptedException { - Settings commonSettings = Settings.builder() - .put(TOP_N_LATENCY_QUERIES_ENABLED.getKey(), "true") - .put(TOP_N_LATENCY_QUERIES_SIZE.getKey(), "100") - .put(TOP_N_LATENCY_QUERIES_WINDOW_SIZE.getKey(), "1m") 
- .build(); - - logger.info("--> starting nodes for query insight testing"); - List nodes = internalCluster().startNodes(TOTAL_NUMBER_OF_NODES, Settings.builder().put(commonSettings).build()); - - logger.info("--> waiting for nodes to form a cluster"); - ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet(); - assertFalse(health.isTimedOut()); - - assertAcked( - prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 2)) - ); - ensureStableCluster(2); - logger.info("--> creating indices for query insight testing"); - for (int i = 0; i < 5; i++) { - IndexResponse response = client().prepareIndex("test_" + i).setId("" + i).setSource("field_" + i, "value_" + i).get(); - assertEquals("CREATED", response.status().toString()); - } - // making search requests to get top queries - for (int i = 0; i < TOTAL_SEARCH_REQUESTS; i++) { - SearchResponse searchResponse = internalCluster().client(randomFrom(nodes)) - .prepareSearch() - .setQuery(QueryBuilders.matchAllQuery()) - .get(); - assertEquals(searchResponse.getFailedShards(), 0); - } - - TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY); - TopQueriesResponse response = OpenSearchIntegTestCase.client().execute(TopQueriesAction.INSTANCE, request).actionGet(); - Assert.assertEquals(0, response.failures().size()); - Assert.assertEquals(TOTAL_NUMBER_OF_NODES, response.getNodes().size()); - Thread.sleep(6000); - internalCluster().stopAllNodes(); - } -} diff --git a/plugins/query-insights/src/javaRestTest/java/org/opensearch/plugin/insights/TopQueriesRestIT.java b/plugins/query-insights/src/javaRestTest/java/org/opensearch/plugin/insights/TopQueriesRestIT.java deleted file mode 100644 index 57dea6ad8d5ff..0000000000000 --- a/plugins/query-insights/src/javaRestTest/java/org/opensearch/plugin/insights/TopQueriesRestIT.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.plugin.insights; - -import org.opensearch.client.Request; -import org.opensearch.client.Response; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.test.rest.OpenSearchRestTestCase; -import org.junit.Assert; - -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.List; -import java.util.Map; - -/** - * Rest Action tests for Query Insights - */ -public class TopQueriesRestIT extends OpenSearchRestTestCase { - - /** - * test Query Insights is installed - * @throws IOException IOException - */ - @SuppressWarnings("unchecked") - public void testQueryInsightsPluginInstalled() throws IOException { - Request request = new Request("GET", "/_cat/plugins?s=component&h=name,component,version,description&format=json"); - Response response = client().performRequest(request); - List pluginsList = JsonXContent.jsonXContent.createParser( - NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, - response.getEntity().getContent() - ).list(); - Assert.assertTrue( - pluginsList.stream().map(o -> (Map) o).anyMatch(plugin -> plugin.get("component").equals("query-insights")) - ); - } - - /** - * test enabling top queries - * @throws IOException IOException - */ - public void testTopQueriesResponses() throws IOException { - // Enable Top N Queries feature - Request request = new Request("PUT", "/_cluster/settings"); - request.setJsonEntity(defaultTopQueriesSettings()); - Response response = client().performRequest(request); - - Assert.assertEquals(200, response.getStatusLine().getStatusCode()); - - // Create documents for search - request = new Request("POST", "/my-index-0/_doc"); - request.setJsonEntity(createDocumentsBody()); - response = client().performRequest(request); - - Assert.assertEquals(201, response.getStatusLine().getStatusCode()); - - // Do Search - request = new Request("GET", "/my-index-0/_search?size=20&pretty"); - request.setJsonEntity(searchBody()); - response = client().performRequest(request); - Assert.assertEquals(200, response.getStatusLine().getStatusCode()); - response = client().performRequest(request); - Assert.assertEquals(200, response.getStatusLine().getStatusCode()); - - // Get Top Queries - request = new Request("GET", "/_insights/top_queries?pretty"); - response = client().performRequest(request); - - Assert.assertEquals(200, response.getStatusLine().getStatusCode()); - String top_requests = new String(response.getEntity().getContent().readAllBytes(), StandardCharsets.UTF_8); - Assert.assertTrue(top_requests.contains("top_queries")); - Assert.assertEquals(2, top_requests.split("searchType", -1).length - 1); - } - - private String defaultTopQueriesSettings() { - return "{\n" - + " \"persistent\" : {\n" - + " \"search.top_n_queries.latency.enabled\" : \"true\",\n" - + " \"search.top_n_queries.latency.window_size\" : \"600s\",\n" - + " \"search.top_n_queries.latency.top_n_size\" : 5\n" - + " }\n" - + "}"; - } - - private String createDocumentsBody() { - return "{\n" - + " \"@timestamp\": \"2099-11-15T13:12:00\",\n" - + " \"message\": \"this is document 1\",\n" - + " \"user\": {\n" - + " \"id\": \"cyji\"\n" - + " }\n" - + "}"; - } - - private String searchBody() { - return "{}"; - } -} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/QueryInsightsPlugin.java 
b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/QueryInsightsPlugin.java deleted file mode 100644 index bba676436c39a..0000000000000 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/QueryInsightsPlugin.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.insights; - -import org.opensearch.action.ActionRequest; -import org.opensearch.client.Client; -import org.opensearch.cluster.metadata.IndexNameExpressionResolver; -import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.IndexScopedSettings; -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.settings.SettingsFilter; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.OpenSearchExecutors; -import org.opensearch.core.action.ActionResponse; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.env.Environment; -import org.opensearch.env.NodeEnvironment; -import org.opensearch.plugin.insights.core.listener.QueryInsightsListener; -import org.opensearch.plugin.insights.core.service.QueryInsightsService; -import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesAction; -import org.opensearch.plugin.insights.rules.resthandler.top_queries.RestTopQueriesAction; -import org.opensearch.plugin.insights.rules.transport.top_queries.TransportTopQueriesAction; -import org.opensearch.plugin.insights.settings.QueryInsightsSettings; -import org.opensearch.plugins.ActionPlugin; -import org.opensearch.plugins.Plugin; -import org.opensearch.repositories.RepositoriesService; -import org.opensearch.rest.RestController; -import org.opensearch.rest.RestHandler; -import org.opensearch.script.ScriptService; -import org.opensearch.threadpool.ExecutorBuilder; -import org.opensearch.threadpool.ScalingExecutorBuilder; -import org.opensearch.threadpool.ThreadPool; -import org.opensearch.watcher.ResourceWatcherService; - -import java.util.Collection; -import java.util.List; -import java.util.function.Supplier; - -/** - * Plugin class for Query Insights. 
- */ -public class QueryInsightsPlugin extends Plugin implements ActionPlugin { - /** - * Default constructor - */ - public QueryInsightsPlugin() {} - - @Override - public Collection createComponents( - final Client client, - final ClusterService clusterService, - final ThreadPool threadPool, - final ResourceWatcherService resourceWatcherService, - final ScriptService scriptService, - final NamedXContentRegistry xContentRegistry, - final Environment environment, - final NodeEnvironment nodeEnvironment, - final NamedWriteableRegistry namedWriteableRegistry, - final IndexNameExpressionResolver indexNameExpressionResolver, - final Supplier repositoriesServiceSupplier - ) { - // create top n queries service - final QueryInsightsService queryInsightsService = new QueryInsightsService(clusterService.getClusterSettings(), threadPool, client); - return List.of(queryInsightsService, new QueryInsightsListener(clusterService, queryInsightsService)); - } - - @Override - public List> getExecutorBuilders(final Settings settings) { - return List.of( - new ScalingExecutorBuilder( - QueryInsightsSettings.QUERY_INSIGHTS_EXECUTOR, - 1, - Math.min((OpenSearchExecutors.allocatedProcessors(settings) + 1) / 2, QueryInsightsSettings.MAX_THREAD_COUNT), - TimeValue.timeValueMinutes(5) - ) - ); - } - - @Override - public List getRestHandlers( - final Settings settings, - final RestController restController, - final ClusterSettings clusterSettings, - final IndexScopedSettings indexScopedSettings, - final SettingsFilter settingsFilter, - final IndexNameExpressionResolver indexNameExpressionResolver, - final Supplier nodesInCluster - ) { - return List.of(new RestTopQueriesAction()); - } - - @Override - public List> getActions() { - return List.of(new ActionPlugin.ActionHandler<>(TopQueriesAction.INSTANCE, TransportTopQueriesAction.class)); - } - - @Override - public List> getSettings() { - return List.of( - // Settings for top N queries - QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED, - QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE, - QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE, - QueryInsightsSettings.TOP_N_LATENCY_EXPORTER_SETTINGS, - QueryInsightsSettings.TOP_N_CPU_QUERIES_ENABLED, - QueryInsightsSettings.TOP_N_CPU_QUERIES_SIZE, - QueryInsightsSettings.TOP_N_CPU_QUERIES_WINDOW_SIZE, - QueryInsightsSettings.TOP_N_CPU_EXPORTER_SETTINGS, - QueryInsightsSettings.TOP_N_MEMORY_QUERIES_ENABLED, - QueryInsightsSettings.TOP_N_MEMORY_QUERIES_SIZE, - QueryInsightsSettings.TOP_N_MEMORY_QUERIES_WINDOW_SIZE, - QueryInsightsSettings.TOP_N_MEMORY_EXPORTER_SETTINGS - ); - } -} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/DebugExporter.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/DebugExporter.java deleted file mode 100644 index 116bd26e1f9bc..0000000000000 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/DebugExporter.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
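// Illustrative sketch (not part of the patch): how the scaling executor declared in
// QueryInsightsPlugin#getExecutorBuilders above sizes its thread pool. ASSUMED_MAX_THREAD_COUNT
// is a hypothetical stand-in for QueryInsightsSettings.MAX_THREAD_COUNT.
public class ExecutorSizingSketch {
    static final int ASSUMED_MAX_THREAD_COUNT = 5; // hypothetical cap, for illustration only

    static int maxThreads(int allocatedProcessors) {
        // half the allocated processors, rounded up, but never above the cap
        return Math.min((allocatedProcessors + 1) / 2, ASSUMED_MAX_THREAD_COUNT);
    }

    public static void main(String[] args) {
        for (int p : new int[] { 1, 2, 8, 32 }) {
            System.out.println(p + " processors -> " + maxThreads(p) + " max threads");
        }
    }
}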
- */ - -package org.opensearch.plugin.insights.core.exporter; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; - -import java.util.List; - -/** - * Debug exporter for development purposes - */ -public final class DebugExporter implements QueryInsightsExporter { - /** - * Logger of the debug exporter - */ - private final Logger logger = LogManager.getLogger(); - - /** - * Constructor of DebugExporter - */ - private DebugExporter() {} - - private static class InstanceHolder { - private static final DebugExporter INSTANCE = new DebugExporter(); - } - - /** - * Get the singleton instance of DebugExporter - * - * @return DebugExporter instance - */ - public static DebugExporter getInstance() { - return InstanceHolder.INSTANCE; - } - - /** - * Write the list of SearchQueryRecord to debug log - * - * @param records list of {@link SearchQueryRecord} - */ - @Override - public void export(final List<SearchQueryRecord> records) { - logger.debug("QUERY_INSIGHTS_RECORDS: " + records.toString()); - } - - /** - * Close the debug exporter sink - */ - @Override - public void close() { - logger.debug("Closing the DebugExporter.."); - } -} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/LocalIndexExporter.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/LocalIndexExporter.java deleted file mode 100644 index c19fe3655098b..0000000000000 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/LocalIndexExporter.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ -public final class LocalIndexExporter implements QueryInsightsExporter { - /** - * Logger of the local index exporter - */ - private final Logger logger = LogManager.getLogger(); - private final Client client; - private DateTimeFormatter indexPattern; - - /** - * Constructor of LocalIndexExporter - * - * @param client OS client - * @param indexPattern the pattern of index to export to - */ - public LocalIndexExporter(final Client client, final DateTimeFormatter indexPattern) { - this.indexPattern = indexPattern; - this.client = client; - } - - /** - * Getter of indexPattern - * - * @return indexPattern - */ - public DateTimeFormatter getIndexPattern() { - return indexPattern; - } - - /** - * Setter of indexPattern - * - * @param indexPattern index pattern - * @return the current LocalIndexExporter - */ - public LocalIndexExporter setIndexPattern(DateTimeFormatter indexPattern) { - this.indexPattern = indexPattern; - return this; - } - - /** - * Export a list of SearchQueryRecord to a local index - * - * @param records list of {@link SearchQueryRecord} - */ - @Override - public void export(final List records) { - if (records == null || records.size() == 0) { - return; - } - try { - final String index = getDateTimeFromFormat(); - final BulkRequestBuilder bulkRequestBuilder = client.prepareBulk().setTimeout(TimeValue.timeValueMinutes(1)); - for (SearchQueryRecord record : records) { - bulkRequestBuilder.add( - new IndexRequest(index).source(record.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS)) - ); - } - bulkRequestBuilder.execute(new ActionListener() { - @Override - public void onResponse(BulkResponse bulkItemResponses) {} - - @Override - public void onFailure(Exception e) { - logger.error("Failed to execute bulk operation for query insights data: ", e); - } - }); - } catch (final Exception e) { - logger.error("Unable to index query insights data: ", e); - } - } - - /** - * Close the exporter sink - */ - @Override - public void close() { - logger.debug("Closing the LocalIndexExporter.."); - } - - private String getDateTimeFromFormat() { - return indexPattern.print(DateTime.now(DateTimeZone.UTC)); - } -} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporter.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporter.java deleted file mode 100644 index 42e5354eb1640..0000000000000 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporter.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
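// Illustrative sketch (not part of the patch): how LocalIndexExporter derives a concrete
// index name from its date pattern, as getDateTimeFromFormat() does with Joda-Time.
// java.time is used as a stand-in here, and the "top_queries-yyyy.MM.dd" pattern is only
// an assumed example, not the plugin's default.
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;

public class IndexPatternSketch {
    public static void main(String[] args) {
        DateTimeFormatter pattern = DateTimeFormatter.ofPattern("'top_queries-'yyyy.MM.dd");
        // one index per UTC day; each export bulk-writes into the current one
        String index = ZonedDateTime.now(ZoneOffset.UTC).format(pattern);
        System.out.println("export target index: " + index);
    }
}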
- */ - -package org.opensearch.plugin.insights.core.exporter; - -import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; - -import java.io.Closeable; -import java.util.List; - -/** - * Base interface for Query Insights exporters - */ -public interface QueryInsightsExporter extends Closeable { - /** - * Export a list of SearchQueryRecord to the exporter sink - * - * @param records list of {@link SearchQueryRecord} - */ - void export(final List records); -} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporterFactory.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporterFactory.java deleted file mode 100644 index 016911761a3d0..0000000000000 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporterFactory.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.insights.core.exporter; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.client.Client; -import org.opensearch.common.settings.Settings; -import org.joda.time.format.DateTimeFormat; - -import java.io.IOException; -import java.util.HashSet; -import java.util.Locale; -import java.util.Set; - -import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.DEFAULT_TOP_N_QUERIES_INDEX_PATTERN; -import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.DEFAULT_TOP_QUERIES_EXPORTER_TYPE; -import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.EXPORTER_TYPE; -import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.EXPORT_INDEX; - -/** - * Factory class for validating and creating exporters based on provided settings - */ -public class QueryInsightsExporterFactory { - /** - * Logger of the query insights exporter factory - */ - private final Logger logger = LogManager.getLogger(); - final private Client client; - final private Set exporters; - - /** - * Constructor of QueryInsightsExporterFactory - * - * @param client OS client - */ - public QueryInsightsExporterFactory(final Client client) { - this.client = client; - this.exporters = new HashSet<>(); - } - - /** - * Validate exporter sink config - * - * @param settings exporter sink config {@link Settings} - * @throws IllegalArgumentException if provided exporter sink config settings are invalid - */ - public void validateExporterConfig(final Settings settings) throws IllegalArgumentException { - // Disable exporter if the EXPORTER_TYPE setting is null - if (settings.get(EXPORTER_TYPE) == null) { - return; - } - SinkType type; - try { - type = SinkType.parse(settings.get(EXPORTER_TYPE, DEFAULT_TOP_QUERIES_EXPORTER_TYPE)); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - String.format( - Locale.ROOT, - "Invalid exporter type [%s], type should be one of %s", - settings.get(EXPORTER_TYPE), - SinkType.allSinkTypes() - ) - ); - } - switch (type) { - case LOCAL_INDEX: - final String indexPattern = settings.get(EXPORT_INDEX, DEFAULT_TOP_N_QUERIES_INDEX_PATTERN); - if (indexPattern.length() == 0) { - throw new IllegalArgumentException("Empty index pattern configured for the exporter"); - } - try { - 
DateTimeFormat.forPattern(indexPattern); - } catch (Exception e) { - throw new IllegalArgumentException( - String.format(Locale.ROOT, "Invalid index pattern [%s] configured for the exporter", indexPattern) - ); - } - } - } - - /** - * Create an exporter based on provided parameters - * - * @param type The type of exporter to create - * @param indexPattern the index pattern if creating an index exporter - * @return QueryInsightsExporter the created exporter sink - */ - public QueryInsightsExporter createExporter(SinkType type, String indexPattern) { - if (SinkType.LOCAL_INDEX.equals(type)) { - QueryInsightsExporter exporter = new LocalIndexExporter(client, DateTimeFormat.forPattern(indexPattern)); - this.exporters.add(exporter); - return exporter; - } - return DebugExporter.getInstance(); - } - - /** - * Update an exporter based on provided parameters - * - * @param exporter The exporter to update - * @param indexPattern the index pattern if updating an index exporter - * @return QueryInsightsExporter the updated exporter sink - */ - public QueryInsightsExporter updateExporter(QueryInsightsExporter exporter, String indexPattern) { - if (exporter.getClass() == LocalIndexExporter.class) { - ((LocalIndexExporter) exporter).setIndexPattern(DateTimeFormat.forPattern(indexPattern)); - } - return exporter; - } - - /** - * Close an exporter - * - * @param exporter the exporter to close - */ - public void closeExporter(QueryInsightsExporter exporter) throws IOException { - if (exporter != null) { - exporter.close(); - this.exporters.remove(exporter); - } - } - - /** - * Close all exporters - */ - public void closeAllExporters() { - for (QueryInsightsExporter exporter : exporters) { - try { - closeExporter(exporter); - } catch (IOException e) { - logger.error("Failed to close query insights exporter, error: ", e); - } - } - } -} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/SinkType.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/SinkType.java deleted file mode 100644 index c90c9c76b6706..0000000000000 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/SinkType.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
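// Illustrative sketch (not part of the patch): the QueryInsightsExporter contract above is
// small, so a custom sink is only a few lines. This class is hypothetical, assumes the
// plugin's QueryInsightsExporter and SearchQueryRecord types are on the classpath, and
// simply counts exported records.
import org.opensearch.plugin.insights.core.exporter.QueryInsightsExporter;
import org.opensearch.plugin.insights.rules.model.SearchQueryRecord;

import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

public class CountingExporterSketch implements QueryInsightsExporter {
    private final AtomicLong exported = new AtomicLong();

    @Override
    public void export(final List<SearchQueryRecord> records) {
        if (records != null) {
            exported.addAndGet(records.size()); // the sink's side effect goes here
        }
    }

    @Override
    public void close() {
        // release sink resources; nothing to do for a counter
    }
}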
- */ - -package org.opensearch.plugin.insights.core.exporter; - -import java.util.Arrays; -import java.util.Locale; -import java.util.Set; -import java.util.stream.Collectors; - -/** - * Type of supported sinks - */ -public enum SinkType { - /** debug exporter */ - DEBUG("debug"), - /** local index exporter */ - LOCAL_INDEX("local_index"); - - private final String type; - - SinkType(String type) { - this.type = type; - } - - @Override - public String toString() { - return type; - } - - /** - * Parse SinkType from String - * @param type the String representation of the SinkType - * @return SinkType - */ - public static SinkType parse(final String type) { - return valueOf(type.toUpperCase(Locale.ROOT)); - } - - /** - * Get all valid SinkTypes - * - * @return A set contains all valid SinkTypes - */ - public static Set allSinkTypes() { - return Arrays.stream(values()).collect(Collectors.toSet()); - } - - /** - * Get Sink type from exporter - * - * @param exporter the {@link QueryInsightsExporter} - * @return SinkType associated with this exporter - */ - public static SinkType getSinkTypeFromExporter(QueryInsightsExporter exporter) { - if (exporter.getClass().equals(LocalIndexExporter.class)) { - return SinkType.LOCAL_INDEX; - } - return SinkType.DEBUG; - } -} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java deleted file mode 100644 index a1f810ad5987c..0000000000000 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java +++ /dev/null @@ -1,202 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.insights.core.listener; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.action.search.SearchPhaseContext; -import org.opensearch.action.search.SearchRequest; -import org.opensearch.action.search.SearchRequestContext; -import org.opensearch.action.search.SearchRequestOperationsListener; -import org.opensearch.action.search.SearchTask; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.inject.Inject; -import org.opensearch.core.tasks.resourcetracker.TaskResourceInfo; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.plugin.insights.core.service.QueryInsightsService; -import org.opensearch.plugin.insights.rules.model.Attribute; -import org.opensearch.plugin.insights.rules.model.MetricType; -import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; -import org.opensearch.tasks.Task; - -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.concurrent.TimeUnit; - -import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.getTopNEnabledSetting; -import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.getTopNSizeSetting; -import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.getTopNWindowSizeSetting; - -/** - * The listener for query insights services. - * It forwards query-related data to the appropriate query insights stores, - * either for each request or for each phase. 
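// Illustrative sketch (not part of the patch): SinkType.parse above is a case-insensitive
// valueOf, so configuration values like "local_index" map to enum constants and unknown
// values throw the IllegalArgumentException that QueryInsightsExporterFactory's
// validateExporterConfig turns into a settings error. The enum is a reduced stand-in.
import java.util.Locale;

public class SinkTypeParseSketch {
    enum Sink { DEBUG, LOCAL_INDEX }

    static Sink parse(String type) {
        return Sink.valueOf(type.toUpperCase(Locale.ROOT));
    }

    public static void main(String[] args) {
        System.out.println(parse("local_index")); // LOCAL_INDEX
        try {
            parse("s3");
        } catch (IllegalArgumentException e) {
            System.out.println("invalid exporter type [s3]"); // surfaced to the user as a settings error
        }
    }
}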
- * - * @opensearch.internal - */ -public final class QueryInsightsListener extends SearchRequestOperationsListener { - private static final ToXContent.Params FORMAT_PARAMS = new ToXContent.MapParams(Collections.singletonMap("pretty", "false")); - - private static final Logger log = LogManager.getLogger(QueryInsightsListener.class); - - private final QueryInsightsService queryInsightsService; - private final ClusterService clusterService; - - /** - * Constructor for QueryInsightsListener - * - * @param clusterService The Node's cluster service. - * @param queryInsightsService The topQueriesByLatencyService associated with this listener - */ - @Inject - public QueryInsightsListener(final ClusterService clusterService, final QueryInsightsService queryInsightsService) { - this.clusterService = clusterService; - this.queryInsightsService = queryInsightsService; - // Setting endpoints set up for top n queries, including enabling top n queries, window size and top n size - // Expected metricTypes are Latency, CPU and Memory. - for (MetricType type : MetricType.allMetricTypes()) { - clusterService.getClusterSettings() - .addSettingsUpdateConsumer(getTopNEnabledSetting(type), v -> this.setEnableTopQueries(type, v)); - clusterService.getClusterSettings() - .addSettingsUpdateConsumer( - getTopNSizeSetting(type), - v -> this.queryInsightsService.setTopNSize(type, v), - v -> this.queryInsightsService.validateTopNSize(type, v) - ); - clusterService.getClusterSettings() - .addSettingsUpdateConsumer( - getTopNWindowSizeSetting(type), - v -> this.queryInsightsService.setWindowSize(type, v), - v -> this.queryInsightsService.validateWindowSize(type, v) - ); - - this.setEnableTopQueries(type, clusterService.getClusterSettings().get(getTopNEnabledSetting(type))); - this.queryInsightsService.validateTopNSize(type, clusterService.getClusterSettings().get(getTopNSizeSetting(type))); - this.queryInsightsService.setTopNSize(type, clusterService.getClusterSettings().get(getTopNSizeSetting(type))); - this.queryInsightsService.validateWindowSize(type, clusterService.getClusterSettings().get(getTopNWindowSizeSetting(type))); - this.queryInsightsService.setWindowSize(type, clusterService.getClusterSettings().get(getTopNWindowSizeSetting(type))); - } - } - - /** - * Enable or disable top queries insights collection for {@link MetricType} - * This function will enable or disable the corresponding listeners - * and query insights services. - * - * @param metricType {@link MetricType} - * @param enabled boolean - */ - public void setEnableTopQueries(final MetricType metricType, final boolean enabled) { - boolean isAllMetricsDisabled = !queryInsightsService.isEnabled(); - this.queryInsightsService.enableCollection(metricType, enabled); - if (!enabled) { - // disable QueryInsightsListener only if all metrics collections are disabled now. - if (!queryInsightsService.isEnabled()) { - super.setEnabled(false); - this.queryInsightsService.stop(); - } - } else { - super.setEnabled(true); - // restart QueryInsightsListener only if none of metrics collections is enabled before. 
- if (isAllMetricsDisabled) { - this.queryInsightsService.stop(); - this.queryInsightsService.start(); - } - } - - } - - @Override - public boolean isEnabled() { - return super.isEnabled(); - } - - @Override - public void onPhaseStart(SearchPhaseContext context) {} - - @Override - public void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} - - @Override - public void onPhaseFailure(SearchPhaseContext context, Throwable cause) {} - - @Override - public void onRequestStart(SearchRequestContext searchRequestContext) {} - - @Override - public void onRequestEnd(final SearchPhaseContext context, final SearchRequestContext searchRequestContext) { - constructSearchQueryRecord(context, searchRequestContext); - } - - @Override - public void onRequestFailure(final SearchPhaseContext context, final SearchRequestContext searchRequestContext) { - constructSearchQueryRecord(context, searchRequestContext); - } - - private void constructSearchQueryRecord(final SearchPhaseContext context, final SearchRequestContext searchRequestContext) { - SearchTask searchTask = context.getTask(); - List tasksResourceUsages = searchRequestContext.getPhaseResourceUsage(); - tasksResourceUsages.add( - new TaskResourceInfo( - searchTask.getAction(), - searchTask.getId(), - searchTask.getParentTaskId().getId(), - clusterService.localNode().getId(), - searchTask.getTotalResourceStats() - ) - ); - - final SearchRequest request = context.getRequest(); - try { - Map measurements = new HashMap<>(); - if (queryInsightsService.isCollectionEnabled(MetricType.LATENCY)) { - measurements.put( - MetricType.LATENCY, - TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - searchRequestContext.getAbsoluteStartNanos()) - ); - } - if (queryInsightsService.isCollectionEnabled(MetricType.CPU)) { - measurements.put( - MetricType.CPU, - tasksResourceUsages.stream().map(a -> a.getTaskResourceUsage().getCpuTimeInNanos()).mapToLong(Long::longValue).sum() - ); - } - if (queryInsightsService.isCollectionEnabled(MetricType.MEMORY)) { - measurements.put( - MetricType.MEMORY, - tasksResourceUsages.stream().map(a -> a.getTaskResourceUsage().getMemoryInBytes()).mapToLong(Long::longValue).sum() - ); - } - Map attributes = new HashMap<>(); - attributes.put(Attribute.SEARCH_TYPE, request.searchType().toString().toLowerCase(Locale.ROOT)); - attributes.put(Attribute.SOURCE, request.source().toString(FORMAT_PARAMS)); - attributes.put(Attribute.TOTAL_SHARDS, context.getNumShards()); - attributes.put(Attribute.INDICES, request.indices()); - attributes.put(Attribute.PHASE_LATENCY_MAP, searchRequestContext.phaseTookMap()); - attributes.put(Attribute.TASK_RESOURCE_USAGES, tasksResourceUsages); - - Map labels = new HashMap<>(); - // Retrieve user provided label if exists - String userProvidedLabel = context.getTask().getHeader(Task.X_OPAQUE_ID); - if (userProvidedLabel != null) { - labels.put(Task.X_OPAQUE_ID, userProvidedLabel); - } - attributes.put(Attribute.LABELS, labels); - // construct SearchQueryRecord from attributes and measurements - SearchQueryRecord record = new SearchQueryRecord(request.getOrCreateAbsoluteStartMillis(), measurements, attributes); - queryInsightsService.addRecord(record); - } catch (Exception e) { - log.error(String.format(Locale.ROOT, "fail to ingest query insight data, error: %s", e)); - } - } - -} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/QueryInsightsService.java 
b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/QueryInsightsService.java deleted file mode 100644 index c63430a1a726c..0000000000000 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/QueryInsightsService.java +++ /dev/null @@ -1,283 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.insights.core.service; - -import org.opensearch.client.Client; -import org.opensearch.common.inject.Inject; -import org.opensearch.common.lifecycle.AbstractLifecycleComponent; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.plugin.insights.core.exporter.QueryInsightsExporterFactory; -import org.opensearch.plugin.insights.rules.model.MetricType; -import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; -import org.opensearch.plugin.insights.settings.QueryInsightsSettings; -import org.opensearch.threadpool.Scheduler; -import org.opensearch.threadpool.ThreadPool; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Comparator; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.LinkedBlockingQueue; - -import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.getExporterSettings; - -/** - * Service responsible for gathering, analyzing, storing and exporting - * information related to search queries - * - * @opensearch.internal - */ -public class QueryInsightsService extends AbstractLifecycleComponent { - /** - * The internal OpenSearch thread pool that execute async processing and exporting tasks - */ - private final ThreadPool threadPool; - - /** - * Services to capture top n queries for different metric types - */ - private final Map topQueriesServices; - - /** - * Flags for enabling insight data collection for different metric types - */ - private final Map enableCollect; - - /** - * The internal thread-safe queue to ingest the search query data and subsequently forward to processors - */ - private final LinkedBlockingQueue queryRecordsQueue; - - /** - * Holds a reference to delayed operation {@link Scheduler.Cancellable} so it can be cancelled when - * the service closed concurrently. 
- */ - protected volatile Scheduler.Cancellable scheduledFuture; - - /** - * Query Insights exporter factory - */ - final QueryInsightsExporterFactory queryInsightsExporterFactory; - - /** - * Constructor of the QueryInsightsService - * - * @param clusterSettings OpenSearch cluster level settings - * @param threadPool The OpenSearch thread pool to run async tasks - * @param client OS client - */ - @Inject - public QueryInsightsService(final ClusterSettings clusterSettings, final ThreadPool threadPool, final Client client) { - enableCollect = new HashMap<>(); - queryRecordsQueue = new LinkedBlockingQueue<>(QueryInsightsSettings.QUERY_RECORD_QUEUE_CAPACITY); - this.threadPool = threadPool; - this.queryInsightsExporterFactory = new QueryInsightsExporterFactory(client); - // initialize top n queries services and configurations consumers - topQueriesServices = new HashMap<>(); - for (MetricType metricType : MetricType.allMetricTypes()) { - enableCollect.put(metricType, false); - topQueriesServices.put(metricType, new TopQueriesService(metricType, threadPool, queryInsightsExporterFactory)); - } - for (MetricType type : MetricType.allMetricTypes()) { - clusterSettings.addSettingsUpdateConsumer( - getExporterSettings(type), - (settings -> setExporter(type, settings)), - (settings -> validateExporterConfig(type, settings)) - ); - } - } - - /** - * Ingest the query data into in-memory stores - * - * @param record the record to ingest - */ - public boolean addRecord(final SearchQueryRecord record) { - boolean shouldAdd = false; - for (Map.Entry entry : topQueriesServices.entrySet()) { - if (!enableCollect.get(entry.getKey())) { - continue; - } - List currentSnapshot = entry.getValue().getTopQueriesCurrentSnapshot(); - // skip add to top N queries store if the incoming record is smaller than the Nth record - if (currentSnapshot.size() < entry.getValue().getTopNSize() - || SearchQueryRecord.compare(record, currentSnapshot.get(0), entry.getKey()) > 0) { - shouldAdd = true; - break; - } - } - if (shouldAdd) { - return queryRecordsQueue.offer(record); - } - return false; - } - - /** - * Drain the queryRecordsQueue into internal stores and services - */ - public void drainRecords() { - final List records = new ArrayList<>(); - queryRecordsQueue.drainTo(records); - records.sort(Comparator.comparingLong(SearchQueryRecord::getTimestamp)); - for (MetricType metricType : MetricType.allMetricTypes()) { - if (enableCollect.get(metricType)) { - // ingest the records into topQueriesService - topQueriesServices.get(metricType).consumeRecords(records); - } - } - } - - /** - * Get the top queries service based on metricType - * @param metricType {@link MetricType} - * @return {@link TopQueriesService} - */ - public TopQueriesService getTopQueriesService(final MetricType metricType) { - return topQueriesServices.get(metricType); - } - - /** - * Set flag to enable or disable Query Insights data collection - * - * @param metricType {@link MetricType} - * @param enable Flag to enable or disable Query Insights data collection - */ - public void enableCollection(final MetricType metricType, final boolean enable) { - this.enableCollect.put(metricType, enable); - this.topQueriesServices.get(metricType).setEnabled(enable); - } - - /** - * Get if the Query Insights data collection is enabled for a MetricType - * - * @param metricType {@link MetricType} - * @return if the Query Insights data collection is enabled - */ - public boolean isCollectionEnabled(final MetricType metricType) { - return this.enableCollect.get(metricType); 
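// Illustrative sketch (not part of the patch): the cheap admission check in addRecord above.
// The per-metric snapshot is sorted ascending, so element 0 is the smallest record still in
// the top N; a new record is queued only if the store is not yet full or it beats that
// smallest entry. Plain longs stand in for SearchQueryRecord measurements.
import java.util.List;

public class AdmissionCheckSketch {
    static boolean shouldAdd(List<Long> currentSnapshot, int topNSize, long candidate) {
        return currentSnapshot.size() < topNSize || candidate > currentSnapshot.get(0);
    }

    public static void main(String[] args) {
        List<Long> snapshot = List.of(80L, 120L, 300L); // sorted ascending, N = 3
        System.out.println(shouldAdd(snapshot, 3, 45L));  // false: cannot beat 80
        System.out.println(shouldAdd(snapshot, 3, 950L)); // true: beats the current minimum
    }
}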
- } - - /** - * Check if query insights service is enabled - * - * @return if query insights service is enabled - */ - public boolean isEnabled() { - for (MetricType t : MetricType.allMetricTypes()) { - if (isCollectionEnabled(t)) { - return true; - } - } - return false; - } - - /** - * Validate the window size config for a metricType - * - * @param type {@link MetricType} - * @param windowSize {@link TimeValue} - */ - public void validateWindowSize(final MetricType type, final TimeValue windowSize) { - if (topQueriesServices.containsKey(type)) { - topQueriesServices.get(type).validateWindowSize(windowSize); - } - } - - /** - * Set window size for a metricType - * - * @param type {@link MetricType} - * @param windowSize {@link TimeValue} - */ - public void setWindowSize(final MetricType type, final TimeValue windowSize) { - if (topQueriesServices.containsKey(type)) { - topQueriesServices.get(type).setWindowSize(windowSize); - } - } - - /** - * Validate the top n size config for a metricType - * - * @param type {@link MetricType} - * @param topNSize top n size - */ - public void validateTopNSize(final MetricType type, final int topNSize) { - if (topQueriesServices.containsKey(type)) { - topQueriesServices.get(type).validateTopNSize(topNSize); - } - } - - /** - * Set the top n size config for a metricType - * - * @param type {@link MetricType} - * @param topNSize top n size - */ - public void setTopNSize(final MetricType type, final int topNSize) { - if (topQueriesServices.containsKey(type)) { - topQueriesServices.get(type).setTopNSize(topNSize); - } - } - - /** - * Set the exporter config for a metricType - * - * @param type {@link MetricType} - * @param settings exporter settings - */ - public void setExporter(final MetricType type, final Settings settings) { - if (topQueriesServices.containsKey(type)) { - topQueriesServices.get(type).setExporter(settings); - } - } - - /** - * Validate the exporter config for a metricType - * - * @param type {@link MetricType} - * @param settings exporter settings - */ - public void validateExporterConfig(final MetricType type, final Settings settings) { - if (topQueriesServices.containsKey(type)) { - topQueriesServices.get(type).validateExporterConfig(settings); - } - } - - @Override - protected void doStart() { - if (isEnabled()) { - scheduledFuture = threadPool.scheduleWithFixedDelay( - this::drainRecords, - QueryInsightsSettings.QUERY_RECORD_QUEUE_DRAIN_INTERVAL, - QueryInsightsSettings.QUERY_INSIGHTS_EXECUTOR - ); - } - } - - @Override - protected void doStop() { - if (scheduledFuture != null) { - scheduledFuture.cancel(); - } - } - - @Override - protected void doClose() throws IOException { - // close all top n queries service - for (TopQueriesService topQueriesService : topQueriesServices.values()) { - topQueriesService.close(); - } - // close any unclosed resources - queryInsightsExporterFactory.closeAllExporters(); - } -} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java deleted file mode 100644 index c21b89be4dcca..0000000000000 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java +++ /dev/null @@ -1,374 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
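// Illustrative sketch (not part of the patch): the bounded offer/drainTo pattern
// QueryInsightsService uses so search threads never block on ingestion. The capacity and
// record type are simplified stand-ins; the real drain runs on the query insights executor
// via scheduleWithFixedDelay in doStart() above.
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;

public class QueueDrainSketch {
    public static void main(String[] args) {
        LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<>(4); // bounded capacity
        for (int i = 0; i < 6; i++) {
            boolean accepted = queue.offer("record-" + i); // non-blocking; overflow is dropped
            System.out.println("record-" + i + (accepted ? " queued" : " dropped"));
        }
        List<String> drained = new ArrayList<>();
        queue.drainTo(drained); // the periodic drain empties the queue in one pass
        System.out.println("drained: " + drained);
    }
}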
- */ - -package org.opensearch.plugin.insights.core.service; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.plugin.insights.core.exporter.QueryInsightsExporter; -import org.opensearch.plugin.insights.core.exporter.QueryInsightsExporterFactory; -import org.opensearch.plugin.insights.core.exporter.SinkType; -import org.opensearch.plugin.insights.rules.model.MetricType; -import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; -import org.opensearch.plugin.insights.settings.QueryInsightsSettings; -import org.opensearch.threadpool.ThreadPool; - -import java.io.IOException; -import java.time.Instant; -import java.time.LocalDateTime; -import java.time.ZoneId; -import java.time.ZoneOffset; -import java.time.temporal.ChronoUnit; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Locale; -import java.util.PriorityQueue; -import java.util.concurrent.atomic.AtomicReference; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.DEFAULT_TOP_N_QUERIES_INDEX_PATTERN; -import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.DEFAULT_TOP_QUERIES_EXPORTER_TYPE; -import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.EXPORTER_TYPE; -import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.EXPORT_INDEX; -import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.QUERY_INSIGHTS_EXECUTOR; - -/** - * Service responsible for gathering and storing top N queries - * with high latency or resource usage - * - * @opensearch.internal - */ -public class TopQueriesService { - /** - * Logger of the local index exporter - */ - private final Logger logger = LogManager.getLogger(); - private boolean enabled; - /** - * The metric type to measure top n queries - */ - private final MetricType metricType; - private int topNSize; - /** - * The window size to keep the top n queries - */ - private TimeValue windowSize; - /** - * The current window start timestamp - */ - private long windowStart; - /** - * The internal thread-safe store that holds the top n queries insight data - */ - private final PriorityQueue topQueriesStore; - - /** - * The AtomicReference of a snapshot of the current window top queries for getters to consume - */ - private final AtomicReference> topQueriesCurrentSnapshot; - - /** - * The AtomicReference of a snapshot of the last window top queries for getters to consume - */ - private final AtomicReference> topQueriesHistorySnapshot; - - /** - * Factory for validating and creating exporters - */ - private final QueryInsightsExporterFactory queryInsightsExporterFactory; - - /** - * The internal OpenSearch thread pool that execute async processing and exporting tasks - */ - private final ThreadPool threadPool; - - /** - * Exporter for exporting top queries data - */ - private QueryInsightsExporter exporter; - - TopQueriesService( - final MetricType metricType, - final ThreadPool threadPool, - final QueryInsightsExporterFactory queryInsightsExporterFactory - ) { - this.enabled = false; - this.metricType = metricType; - this.threadPool = threadPool; - this.queryInsightsExporterFactory = queryInsightsExporterFactory; - this.topNSize = QueryInsightsSettings.DEFAULT_TOP_N_SIZE; - this.windowSize = 
QueryInsightsSettings.DEFAULT_WINDOW_SIZE; - this.windowStart = -1L; - this.exporter = null; - topQueriesStore = new PriorityQueue<>(topNSize, (a, b) -> SearchQueryRecord.compare(a, b, metricType)); - topQueriesCurrentSnapshot = new AtomicReference<>(new ArrayList<>()); - topQueriesHistorySnapshot = new AtomicReference<>(new ArrayList<>()); - } - - /** - * Set the top N size for the TopQueriesService. - * - * @param topNSize the top N size to set - */ - public void setTopNSize(final int topNSize) { - this.topNSize = topNSize; - } - - /** - * Get the current configured top n size - * - * @return top n size - */ - public int getTopNSize() { - return topNSize; - } - - /** - * Validate the top N size based on the internal constraints - * - * @param size the wanted top N size - */ - public void validateTopNSize(final int size) { - if (size > QueryInsightsSettings.MAX_N_SIZE) { - throw new IllegalArgumentException( - "Top N size setting for [" - + metricType - + "]" - + " should be smaller than max top N size [" - + QueryInsightsSettings.MAX_N_SIZE - + "], was (" - + size - + " > " - + QueryInsightsSettings.MAX_N_SIZE - + ")" - ); - } - } - - /** - * Set enable flag for the service - * @param enabled boolean - */ - public void setEnabled(final boolean enabled) { - this.enabled = enabled; - } - - /** - * Set the window size for top N queries service - * - * @param windowSize window size to set - */ - public void setWindowSize(final TimeValue windowSize) { - this.windowSize = windowSize; - // reset the window start time since the window size has changed - this.windowStart = -1L; - } - - /** - * Validate the window size based on internal constraints. - * - * @param windowSize the window size to validate - */ - public void validateWindowSize(final TimeValue windowSize) { - if (windowSize.compareTo(QueryInsightsSettings.MAX_WINDOW_SIZE) > 0 - || windowSize.compareTo(QueryInsightsSettings.MIN_WINDOW_SIZE) < 0) { - throw new IllegalArgumentException( - "Window size setting for [" - + metricType - + "]" - + " should be between [" - + QueryInsightsSettings.MIN_WINDOW_SIZE - + "," - + QueryInsightsSettings.MAX_WINDOW_SIZE - + "]" - + ", was (" - + windowSize - + ")" - ); - } - if (!(QueryInsightsSettings.VALID_WINDOW_SIZES_IN_MINUTES.contains(windowSize) || windowSize.getMinutes() % 60 == 0)) { - throw new IllegalArgumentException( - "Window size setting for [" - + metricType - + "]" - + " should be a multiple of 1 hour, or one of " - + QueryInsightsSettings.VALID_WINDOW_SIZES_IN_MINUTES - + ", was (" - + windowSize - + ")" - ); - } - } - - /** - * Set up the top queries exporter based on provided settings - * - * @param settings exporter config {@link Settings} - */ - public void setExporter(final Settings settings) { - if (settings.get(EXPORTER_TYPE) != null) { - SinkType expectedType = SinkType.parse(settings.get(EXPORTER_TYPE, DEFAULT_TOP_QUERIES_EXPORTER_TYPE)); - if (exporter != null && expectedType == SinkType.getSinkTypeFromExporter(exporter)) { - queryInsightsExporterFactory.updateExporter(exporter, settings.get(EXPORT_INDEX, DEFAULT_TOP_N_QUERIES_INDEX_PATTERN)); - } else { - try { - queryInsightsExporterFactory.closeExporter(this.exporter); - } catch (IOException e) { - logger.error("Failed to close the current exporter when updating exporter, error: ", e); - } - this.exporter = queryInsightsExporterFactory.createExporter( - SinkType.parse(settings.get(EXPORTER_TYPE, DEFAULT_TOP_QUERIES_EXPORTER_TYPE)), - settings.get(EXPORT_INDEX, DEFAULT_TOP_N_QUERIES_INDEX_PATTERN) - ); - } - } else { 
- // Disable exporter if exporter type is set to null - try { - queryInsightsExporterFactory.closeExporter(this.exporter); - this.exporter = null; - } catch (IOException e) { - logger.error("Failed to close the current exporter when disabling exporter, error: ", e); - } - } - } - - /** - * Validate provided settings for top queries exporter - * - * @param settings exporter config {@link Settings} - */ - public void validateExporterConfig(Settings settings) { - queryInsightsExporterFactory.validateExporterConfig(settings); - } - - /** - * Get all top queries records that are in the current top n queries store. - * Optionally include top N records from the last window. - * - * Records are returned in descending order. - * - * @param includeLastWindow if the top N queries from the last window should be included - * @return List of the records that are in the query insight store - * @throws IllegalArgumentException if query insight is disabled in the cluster - */ - public List<SearchQueryRecord> getTopQueriesRecords(final boolean includeLastWindow) throws IllegalArgumentException { - if (!enabled) { - throw new IllegalArgumentException( - String.format(Locale.ROOT, "Cannot get top n queries for [%s] when it is not enabled.", metricType.toString()) - ); - } - // read from window snapshots - final List<SearchQueryRecord> queries = new ArrayList<>(topQueriesCurrentSnapshot.get()); - if (includeLastWindow) { - queries.addAll(topQueriesHistorySnapshot.get()); - } - return Stream.of(queries) - .flatMap(Collection::stream) - .sorted((a, b) -> SearchQueryRecord.compare(a, b, metricType) * -1) - .collect(Collectors.toList()); - } - - /** - * Consume records into the top queries store - * - * @param records a list of {@link SearchQueryRecord} - */ - void consumeRecords(final List<SearchQueryRecord> records) { - final long currentWindowStart = calculateWindowStart(System.currentTimeMillis()); - List<SearchQueryRecord> recordsInLastWindow = new ArrayList<>(); - List<SearchQueryRecord> recordsInThisWindow = new ArrayList<>(); - for (SearchQueryRecord record : records) { - // skip records that do not have the corresponding measurement - if (!record.getMeasurements().containsKey(metricType)) { - continue; - } - if (record.getTimestamp() < currentWindowStart) { - recordsInLastWindow.add(record); - } else { - recordsInThisWindow.add(record); - } - } - // add records in last window, if there are any, to the top n store - addToTopNStore(recordsInLastWindow); - // rotate window and reset window start if necessary - rotateWindowIfNecessary(currentWindowStart); - // add records in current window, if there are any, to the top n store - addToTopNStore(recordsInThisWindow); - // update the current window snapshot for getters to consume - final List<SearchQueryRecord> newSnapShot = new ArrayList<>(topQueriesStore); - newSnapShot.sort((a, b) -> SearchQueryRecord.compare(a, b, metricType)); - topQueriesCurrentSnapshot.set(newSnapShot); - } - - private void addToTopNStore(final List<SearchQueryRecord> records) { - topQueriesStore.addAll(records); - // poll the smallest records to keep the priority queue at its fixed top N size - while (topQueriesStore.size() > topNSize) { - topQueriesStore.poll(); - } - } - - /** - * Reset the current window and rotate the data to the history snapshot for top n queries. - * This function is invoked at most once per consumeRecords call. - * - * @param newWindowStart the new windowStart to set to - */ - private void rotateWindowIfNecessary(final long newWindowStart) { - // reset window if the current window is outdated - if (windowStart < newWindowStart) { - final List<SearchQueryRecord> history = new ArrayList<>(); - // rotate the 
current window to history store only if the data belongs to the last window - if (windowStart == newWindowStart - windowSize.getMillis()) { - history.addAll(topQueriesStore); - } - topQueriesHistorySnapshot.set(history); - topQueriesStore.clear(); - topQueriesCurrentSnapshot.set(new ArrayList<>()); - windowStart = newWindowStart; - // export to the configured sink - if (exporter != null) { - threadPool.executor(QUERY_INSIGHTS_EXECUTOR).execute(() -> exporter.export(history)); - } - } - } - - /** - * Calculate the window start for the given timestamp - * - * @param timestamp the given timestamp to calculate window start - */ - private long calculateWindowStart(final long timestamp) { - final LocalDateTime currentTime = LocalDateTime.ofInstant(Instant.ofEpochMilli(timestamp), ZoneId.of("UTC")); - LocalDateTime windowStartTime = currentTime.truncatedTo(ChronoUnit.HOURS); - while (!windowStartTime.plusMinutes(windowSize.getMinutes()).isAfter(currentTime)) { - windowStartTime = windowStartTime.plusMinutes(windowSize.getMinutes()); - } - return windowStartTime.toInstant(ZoneOffset.UTC).getEpochSecond() * 1000; - } - - /** - * Get the current top queries snapshot from the AtomicReference. - * - * @return a list of {@link SearchQueryRecord} - */ - public List getTopQueriesCurrentSnapshot() { - return topQueriesCurrentSnapshot.get(); - } - - /** - * Close the top n queries service - */ - public void close() throws IOException { - queryInsightsExporterFactory.closeExporter(this.exporter); - } -} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/package-info.java deleted file mode 100644 index 9b6b5856f7d27..0000000000000 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** - * Transport Actions, Requests and Responses for Query Insights - */ -package org.opensearch.plugin.insights.rules.action; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueries.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueries.java deleted file mode 100644 index 26cff82aae52e..0000000000000 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueries.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
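// Illustrative sketch (not part of the patch): the window alignment performed by
// calculateWindowStart above. Windows anchor at the top of the UTC hour and advance in
// windowSize-minute steps, so a 15-minute window at 10:38 UTC starts at 10:30.
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneOffset;
import java.time.temporal.ChronoUnit;

public class WindowStartSketch {
    static long windowStartMillis(long timestampMillis, long windowMinutes) {
        LocalDateTime now = LocalDateTime.ofInstant(Instant.ofEpochMilli(timestampMillis), ZoneOffset.UTC);
        LocalDateTime start = now.truncatedTo(ChronoUnit.HOURS);
        // step forward until the next step would pass the current time
        while (!start.plusMinutes(windowMinutes).isAfter(now)) {
            start = start.plusMinutes(windowMinutes);
        }
        return start.toInstant(ZoneOffset.UTC).toEpochMilli();
    }

    public static void main(String[] args) {
        long at1038 = LocalDateTime.of(2024, 8, 26, 10, 38).toInstant(ZoneOffset.UTC).toEpochMilli();
        System.out.println(Instant.ofEpochMilli(windowStartMillis(at1038, 15))); // 2024-08-26T10:30:00Z
    }
}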
- */ - -package org.opensearch.plugin.insights.rules.action.top_queries; - -import org.opensearch.action.support.nodes.BaseNodeResponse; -import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.xcontent.ToXContentObject; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; - -import java.io.IOException; -import java.util.List; - -/** - * Holds all top queries records by resource usage or latency on a node - * Mainly used in the top N queries node response workflow. - * - * @opensearch.internal - */ -public class TopQueries extends BaseNodeResponse implements ToXContentObject { - /** The store to keep the top queries records */ - private final List topQueriesRecords; - - /** - * Create the TopQueries Object from StreamInput - * @param in A {@link StreamInput} object. - * @throws IOException IOException - */ - public TopQueries(final StreamInput in) throws IOException { - super(in); - topQueriesRecords = in.readList(SearchQueryRecord::new); - } - - /** - * Create the TopQueries Object - * @param node A node that is part of the cluster. - * @param searchQueryRecords A list of SearchQueryRecord associated in this TopQueries. - */ - public TopQueries(final DiscoveryNode node, final List searchQueryRecords) { - super(node); - topQueriesRecords = searchQueryRecords; - } - - @Override - public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { - if (topQueriesRecords != null) { - for (SearchQueryRecord record : topQueriesRecords) { - record.toXContent(builder, params); - } - } - return builder; - } - - @Override - public void writeTo(final StreamOutput out) throws IOException { - super.writeTo(out); - out.writeList(topQueriesRecords); - - } - - /** - * Get all top queries records - * - * @return the top queries records in this node response - */ - public List getTopQueriesRecord() { - return topQueriesRecords; - } -} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesAction.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesAction.java deleted file mode 100644 index b8ed69fa5692b..0000000000000 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesAction.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.insights.rules.action.top_queries; - -import org.opensearch.action.ActionType; - -/** - * Transport action for cluster/node level top queries information. - * - * @opensearch.internal - */ -public class TopQueriesAction extends ActionType { - - /** - * The TopQueriesAction Instance. 
- */ - public static final TopQueriesAction INSTANCE = new TopQueriesAction(); - /** - * The name of this Action - */ - public static final String NAME = "cluster:admin/opensearch/insights/top_queries"; - - private TopQueriesAction() { - super(NAME, TopQueriesResponse::new); - } -} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesRequest.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesRequest.java deleted file mode 100644 index 3bdff2c403161..0000000000000 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesRequest.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.insights.rules.action.top_queries; - -import org.opensearch.action.support.nodes.BaseNodesRequest; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.plugin.insights.rules.model.MetricType; - -import java.io.IOException; - -/** - * A request to get cluster/node level top queries information. - * - * @opensearch.internal - */ -public class TopQueriesRequest extends BaseNodesRequest { - - final MetricType metricType; - - /** - * Constructor for TopQueriesRequest - * - * @param in A {@link StreamInput} object. - * @throws IOException if the stream cannot be deserialized. - */ - public TopQueriesRequest(final StreamInput in) throws IOException { - super(in); - this.metricType = MetricType.readFromStream(in); - } - - /** - * Get top queries from nodes based on the nodes ids specified. - * If none are passed, cluster level top queries will be returned. - * - * @param metricType {@link MetricType} - * @param nodesIds the nodeIds specified in the request - */ - public TopQueriesRequest(final MetricType metricType, final String... nodesIds) { - super(nodesIds); - this.metricType = metricType; - } - - /** - * Get the type of requested metrics - */ - public MetricType getMetricType() { - return metricType; - } - - @Override - public void writeTo(final StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(metricType.toString()); - } -} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesResponse.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesResponse.java deleted file mode 100644 index 2e66bb7f77baf..0000000000000 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesResponse.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.plugin.insights.rules.action.top_queries; - -import org.opensearch.action.FailedNodeException; -import org.opensearch.action.support.nodes.BaseNodesResponse; -import org.opensearch.cluster.ClusterName; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.xcontent.ToXContentFragment; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.plugin.insights.rules.model.Attribute; -import org.opensearch.plugin.insights.rules.model.MetricType; -import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; - -import java.io.IOException; -import java.util.Collection; -import java.util.List; -import java.util.stream.Collectors; - -/** - * Transport response for cluster/node level top queries information. - * - * @opensearch.internal - */ -public class TopQueriesResponse extends BaseNodesResponse<TopQueries> implements ToXContentFragment { - - private static final String CLUSTER_LEVEL_RESULTS_KEY = "top_queries"; - private final MetricType metricType; - private final int top_n_size; - - /** - * Constructor for TopQueriesResponse. - * - * @param in A {@link StreamInput} object. - * @throws IOException if the stream cannot be deserialized. - */ - public TopQueriesResponse(final StreamInput in) throws IOException { - super(in); - top_n_size = in.readInt(); - metricType = in.readEnum(MetricType.class); - } - - /** - * Constructor for TopQueriesResponse - * - * @param clusterName The current cluster name - * @param nodes A list that contains top queries results from all nodes - * @param failures A list that contains FailedNodeException - * @param top_n_size The top N size to return to the user - * @param metricType the {@link MetricType} to be returned in this response - */ - public TopQueriesResponse( - final ClusterName clusterName, - final List<TopQueries> nodes, - final List<FailedNodeException> failures, - final int top_n_size, - final MetricType metricType - ) { - super(clusterName, nodes, failures); - this.top_n_size = top_n_size; - this.metricType = metricType; - } - - @Override - protected List<TopQueries> readNodesFrom(final StreamInput in) throws IOException { - return in.readList(TopQueries::new); - } - - @Override - protected void writeNodesTo(final StreamOutput out, final List<TopQueries> nodes) throws IOException { - out.writeList(nodes); - out.writeInt(top_n_size); - out.writeEnum(metricType); - } - - @Override - public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { - final List<TopQueries> results = getNodes(); - postProcess(results); - builder.startObject(); - toClusterLevelResult(builder, params, results); - return builder.endObject(); - } - - @Override - public String toString() { - try { - final XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); - builder.startObject(); - this.toXContent(builder, EMPTY_PARAMS); - builder.endObject(); - return builder.toString(); - } catch (IOException e) { - return "{ \"error\" : \"" + e.getMessage() + "\"}"; - } - } - - /** - * Post process the top queries results to add customized attributes - * - * @param results the top queries results - */ - private void postProcess(final List<TopQueries> results) { - for (TopQueries topQueries : results) { - final String nodeId = topQueries.getNode().getId(); - for (SearchQueryRecord record : topQueries.getTopQueriesRecord()) { - record.addAttribute(Attribute.NODE_ID, nodeId); - } - } - } - - /** - * Merge top n queries 
results from nodes into cluster level results in XContent format. - * - * @param builder XContent builder - * @param params serialization parameters - * @param results top queries results from all nodes - * @throws IOException if an error occurs - */ - private void toClusterLevelResult(final XContentBuilder builder, final Params params, final List results) - throws IOException { - final List all_records = results.stream() - .map(TopQueries::getTopQueriesRecord) - .flatMap(Collection::stream) - .sorted((a, b) -> SearchQueryRecord.compare(a, b, metricType) * -1) - .limit(top_n_size) - .collect(Collectors.toList()); - builder.startArray(CLUSTER_LEVEL_RESULTS_KEY); - for (SearchQueryRecord record : all_records) { - record.toXContent(builder, params); - } - builder.endArray(); - } - -} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/Attribute.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/Attribute.java deleted file mode 100644 index dcdb085fdc6fa..0000000000000 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/Attribute.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.insights.rules.model; - -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; - -import java.io.IOException; -import java.util.Locale; - -/** - * Valid attributes for a search query record - * - * @opensearch.internal - */ -public enum Attribute { - /** - * The search query type - */ - SEARCH_TYPE, - /** - * The search query source - */ - SOURCE, - /** - * Total shards queried - */ - TOTAL_SHARDS, - /** - * The indices involved - */ - INDICES, - /** - * The per phase level latency map for a search query - */ - PHASE_LATENCY_MAP, - /** - * The node id for this request - */ - NODE_ID, - /** - * Tasks level resource usages in this request - */ - TASK_RESOURCE_USAGES, - /** - * Custom search request labels - */ - LABELS; - - /** - * Read an Attribute from a StreamInput - * - * @param in the StreamInput to read from - * @return Attribute - * @throws IOException IOException - */ - static Attribute readFromStream(final StreamInput in) throws IOException { - return Attribute.valueOf(in.readString().toUpperCase(Locale.ROOT)); - } - - /** - * Write Attribute to a StreamOutput - * - * @param out the StreamOutput to write - * @param attribute the Attribute to write - * @throws IOException IOException - */ - static void writeTo(final StreamOutput out, final Attribute attribute) throws IOException { - out.writeString(attribute.toString()); - } - - @Override - public String toString() { - return this.name().toLowerCase(Locale.ROOT); - } -} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/MetricType.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/MetricType.java deleted file mode 100644 index 4694c757f4ef2..0000000000000 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/MetricType.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
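toClusterLevelResult above is a classic scatter-gather merge: flatten the per-node lists, sort descending on the chosen metric (the * -1 flips the comparator), and cut off at top_n_size. The same shape on plain longs, as a self-contained sketch with latencies standing in for SearchQueryRecord measurements:

import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public static void main(String[] args) {
    // Two per-node result lists, merged into a single cluster-level top-3.
    List<Long> clusterTop = Stream.of(List.of(120L, 15L, 7L), List.of(300L, 45L))
        .flatMap(List::stream)
        .sorted(Comparator.reverseOrder())  // descending, like compare(a, b) * -1
        .limit(3)                           // top_n_size
        .collect(Collectors.toList());
    System.out.println(clusterTop);         // [300, 120, 45]
}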
- */
-
-package org.opensearch.plugin.insights.rules.model;
-
-import org.opensearch.core.common.io.stream.StreamInput;
-import org.opensearch.core.common.io.stream.StreamOutput;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Comparator;
-import java.util.Locale;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-/**
- * Valid metric types for a search query record
- *
- * @opensearch.internal
- */
-public enum MetricType implements Comparator<Number> {
-    /**
-     * Latency metric type
-     */
-    LATENCY,
-    /**
-     * CPU usage metric type
-     */
-    CPU,
-    /**
-     * JVM heap usage metric type
-     */
-    MEMORY;
-
-    /**
-     * Read a MetricType from a StreamInput
-     *
-     * @param in the StreamInput to read from
-     * @return MetricType
-     * @throws IOException IOException
-     */
-    public static MetricType readFromStream(final StreamInput in) throws IOException {
-        return fromString(in.readString());
-    }
-
-    /**
-     * Create MetricType from String
-     *
-     * @param metricType the String representation of MetricType
-     * @return MetricType
-     */
-    public static MetricType fromString(final String metricType) {
-        return MetricType.valueOf(metricType.toUpperCase(Locale.ROOT));
-    }
-
-    /**
-     * Write MetricType to a StreamOutput
-     *
-     * @param out the StreamOutput to write
-     * @param metricType the MetricType to write
-     * @throws IOException IOException
-     */
-    static void writeTo(final StreamOutput out, final MetricType metricType) throws IOException {
-        out.writeString(metricType.toString());
-    }
-
-    @Override
-    public String toString() {
-        return this.name().toLowerCase(Locale.ROOT);
-    }
-
-    /**
-     * Get all valid metrics
-     *
-     * @return A set of MetricType values that contains all valid metrics
-     */
-    public static Set<MetricType> allMetricTypes() {
-        return Arrays.stream(values()).collect(Collectors.toSet());
-    }
-
-    /**
-     * Compare two numbers based on the metric type
-     *
-     * @param a the first Number to be compared.
-     * @param b the second Number to be compared.
-     * @return a negative integer, zero, or a positive integer as the first argument is less than, equal to, or greater than the second
-     */
-    public int compare(final Number a, final Number b) {
-        switch (this) {
-            case LATENCY:
-            case CPU:
-            case MEMORY:
-                return Long.compare(a.longValue(), b.longValue());
-        }
-        return -1;
-    }
-
-    /**
-     * Parse a value with the correct type based on MetricType
-     *
-     * @param o the generic object to parse
-     * @return {@link Number}
-     */
-    Number parseValue(final Object o) {
-        switch (this) {
-            case LATENCY:
-            case CPU:
-            case MEMORY:
-                return (Long) o;
-            default:
-                return (Number) o;
        }
    }
-}
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecord.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecord.java
deleted file mode 100644
index fec00a680ae58..0000000000000
--- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecord.java
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
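Assuming the enum above, parsing is case-insensitive and comparison happens on the raw long values; a short usage sketch in a test-style method:

public void metricTypeUsage() {
    MetricType type = MetricType.fromString("Latency");  // case-insensitive -> LATENCY
    assert MetricType.allMetricTypes().contains(type);
    assert type.compare(1_500L, 2_000L) < 0;             // first measurement is smaller
    assert "latency".equals(type.toString());            // lower-cased for the wire and REST
}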
- */ - -package org.opensearch.plugin.insights.rules.model; - -import org.opensearch.core.common.Strings; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.xcontent.MediaTypeRegistry; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.ToXContentObject; -import org.opensearch.core.xcontent.XContentBuilder; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; -import java.util.Objects; - -/** - * SearchQueryRecord represents a minimal atomic record stored in the Query Insight Framework, - * which contains extensive information related to a search query. - * - * @opensearch.internal - */ -public class SearchQueryRecord implements ToXContentObject, Writeable { - private final long timestamp; - private final Map measurements; - private final Map attributes; - - /** - * Constructor of SearchQueryRecord - * - * @param in the StreamInput to read the SearchQueryRecord from - * @throws IOException IOException - * @throws ClassCastException ClassCastException - */ - public SearchQueryRecord(final StreamInput in) throws IOException, ClassCastException { - this.timestamp = in.readLong(); - measurements = new HashMap<>(); - in.readMap(MetricType::readFromStream, StreamInput::readGenericValue) - .forEach(((metricType, o) -> measurements.put(metricType, metricType.parseValue(o)))); - this.attributes = in.readMap(Attribute::readFromStream, StreamInput::readGenericValue); - } - - /** - * Constructor of SearchQueryRecord - * - * @param timestamp The timestamp of the query. - * @param measurements A list of Measurement associated with this query - * @param attributes A list of Attributes associated with this query - */ - public SearchQueryRecord(final long timestamp, Map measurements, final Map attributes) { - if (measurements == null) { - throw new IllegalArgumentException("Measurements cannot be null"); - } - this.measurements = measurements; - this.attributes = attributes; - this.timestamp = timestamp; - } - - /** - * Returns the observation time of the metric. - * - * @return the observation time in milliseconds - */ - public long getTimestamp() { - return timestamp; - } - - /** - * Returns the measurement associated with the specified name. - * - * @param name the name of the measurement - * @return the measurement object, or null if not found - */ - public Number getMeasurement(final MetricType name) { - return measurements.get(name); - } - - /** - * Returns a map of all the measurements associated with the metric. - * - * @return a map of measurement names to measurement objects - */ - public Map getMeasurements() { - return measurements; - } - - /** - * Returns a map of the attributes associated with the metric. 
- * - * @return a map of attribute keys to attribute values - */ - public Map getAttributes() { - return attributes; - } - - /** - * Add an attribute to this record - * - * @param attribute attribute to add - * @param value the value associated with the attribute - */ - public void addAttribute(final Attribute attribute, final Object value) { - attributes.put(attribute, value); - } - - @Override - public XContentBuilder toXContent(final XContentBuilder builder, final ToXContent.Params params) throws IOException { - builder.startObject(); - builder.field("timestamp", timestamp); - for (Map.Entry entry : attributes.entrySet()) { - builder.field(entry.getKey().toString(), entry.getValue()); - } - for (Map.Entry entry : measurements.entrySet()) { - builder.field(entry.getKey().toString(), entry.getValue()); - } - return builder.endObject(); - } - - /** - * Write a SearchQueryRecord to a StreamOutput - * - * @param out the StreamOutput to write - * @throws IOException IOException - */ - @Override - public void writeTo(final StreamOutput out) throws IOException { - out.writeLong(timestamp); - out.writeMap(measurements, (stream, metricType) -> MetricType.writeTo(out, metricType), StreamOutput::writeGenericValue); - out.writeMap(attributes, (stream, attribute) -> Attribute.writeTo(out, attribute), StreamOutput::writeGenericValue); - } - - /** - * Compare two SearchQueryRecord, based on the given MetricType - * - * @param a the first SearchQueryRecord to compare - * @param b the second SearchQueryRecord to compare - * @param metricType the MetricType to compare on - * @return 0 if the first SearchQueryRecord is numerically equal to the second SearchQueryRecord; - * -1 if the first SearchQueryRecord is numerically less than the second SearchQueryRecord; - * 1 if the first SearchQueryRecord is numerically greater than the second SearchQueryRecord. - */ - public static int compare(final SearchQueryRecord a, final SearchQueryRecord b, final MetricType metricType) { - return metricType.compare(a.getMeasurement(metricType), b.getMeasurement(metricType)); - } - - /** - * Check if a SearchQueryRecord is deep equal to another record - * - * @param o the other SearchQueryRecord record - * @return true if two records are deep equal, false otherwise. - */ - @Override - public boolean equals(final Object o) { - if (this == o) { - return true; - } - if (!(o instanceof SearchQueryRecord)) { - return false; - } - final SearchQueryRecord other = (SearchQueryRecord) o; - return timestamp == other.getTimestamp() - && measurements.equals(other.getMeasurements()) - && attributes.size() == other.getAttributes().size(); - } - - @Override - public int hashCode() { - return Objects.hash(timestamp, measurements, attributes); - } - - @Override - public String toString() { - return Strings.toString(MediaTypeRegistry.JSON, this); - } -} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/package-info.java deleted file mode 100644 index c59ec1550f54b..0000000000000 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
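One consequence of the design above: measurements are fixed at construction, but the attributes map must stay mutable, because postProcess in TopQueriesResponse tags each record with NODE_ID after the fact. A construction-and-compare sketch, assuming the deleted model classes:

import java.util.HashMap;
import java.util.Map;

public void buildAndCompareRecords() {
    Map<MetricType, Number> slow = Map.of(MetricType.LATENCY, 1_200L);
    Map<MetricType, Number> fast = Map.of(MetricType.LATENCY, 800L);
    // HashMap rather than Map.of: addAttribute(Attribute.NODE_ID, ...) mutates this later.
    SearchQueryRecord a = new SearchQueryRecord(System.currentTimeMillis(), slow, new HashMap<>());
    SearchQueryRecord b = new SearchQueryRecord(System.currentTimeMillis(), fast, new HashMap<>());
    assert SearchQueryRecord.compare(a, b, MetricType.LATENCY) > 0;  // a is the heavier query
}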
- */ - -/** - * Data Models for Query Insight Records - */ -package org.opensearch.plugin.insights.rules.model; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/package-info.java deleted file mode 100644 index 3787f05f65552..0000000000000 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** - * Rest Handlers for Query Insights - */ -package org.opensearch.plugin.insights.rules.resthandler; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/RestTopQueriesAction.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/RestTopQueriesAction.java deleted file mode 100644 index 6aa511c626ab1..0000000000000 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/RestTopQueriesAction.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.insights.rules.resthandler.top_queries; - -import org.opensearch.client.node.NodeClient; -import org.opensearch.common.settings.Settings; -import org.opensearch.core.common.Strings; -import org.opensearch.core.rest.RestStatus; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesAction; -import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesRequest; -import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesResponse; -import org.opensearch.plugin.insights.rules.model.MetricType; -import org.opensearch.rest.BaseRestHandler; -import org.opensearch.rest.BytesRestResponse; -import org.opensearch.rest.RestChannel; -import org.opensearch.rest.RestRequest; -import org.opensearch.rest.RestResponse; -import org.opensearch.rest.action.RestResponseListener; - -import java.util.List; -import java.util.Locale; -import java.util.Set; -import java.util.stream.Collectors; - -import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_QUERIES_BASE_URI; -import static org.opensearch.rest.RestRequest.Method.GET; - -/** - * Rest action to get Top N queries by certain metric type - * - * @opensearch.api - */ -public class RestTopQueriesAction extends BaseRestHandler { - /** The metric types that are allowed in top N queries */ - static final Set ALLOWED_METRICS = MetricType.allMetricTypes().stream().map(MetricType::toString).collect(Collectors.toSet()); - - /** - * Constructor for RestTopQueriesAction - */ - public RestTopQueriesAction() {} - - @Override - public List routes() { - return List.of( - new Route(GET, TOP_QUERIES_BASE_URI), - new Route(GET, String.format(Locale.ROOT, "%s/{nodeId}", TOP_QUERIES_BASE_URI)) - ); - } - - @Override - public String getName() { - return "query_insights_top_queries_action"; - } - - @Override - public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) { - final 
TopQueriesRequest topQueriesRequest = prepareRequest(request); - topQueriesRequest.timeout(request.param("timeout")); - - return channel -> client.execute(TopQueriesAction.INSTANCE, topQueriesRequest, topQueriesResponse(channel)); - } - - static TopQueriesRequest prepareRequest(final RestRequest request) { - final String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId")); - final String metricType = request.param("type", MetricType.LATENCY.toString()); - if (!ALLOWED_METRICS.contains(metricType)) { - throw new IllegalArgumentException( - String.format(Locale.ROOT, "request [%s] contains invalid metric type [%s]", request.path(), metricType) - ); - } - return new TopQueriesRequest(MetricType.fromString(metricType), nodesIds); - } - - @Override - protected Set responseParams() { - return Settings.FORMAT_PARAMS; - } - - @Override - public boolean canTripCircuitBreaker() { - return false; - } - - private RestResponseListener topQueriesResponse(final RestChannel channel) { - return new RestResponseListener<>(channel) { - @Override - public RestResponse buildResponse(final TopQueriesResponse response) throws Exception { - return new BytesRestResponse(RestStatus.OK, response.toXContent(channel.newBuilder(), ToXContent.EMPTY_PARAMS)); - } - }; - } -} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/package-info.java deleted file mode 100644 index 087cf7d765f8c..0000000000000 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** - * Rest Handlers for Top N Queries - */ -package org.opensearch.plugin.insights.rules.resthandler.top_queries; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/package-info.java deleted file mode 100644 index f3a1c70b9af57..0000000000000 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** - * Transport Actions for Query Insights. - */ -package org.opensearch.plugin.insights.rules.transport; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesAction.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesAction.java deleted file mode 100644 index 7949b70a16db6..0000000000000 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesAction.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
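With the routes and validation above, the endpoint takes an optional comma-separated node filter in the path and a type parameter that must be one of the registered metric types, latency being the default. Illustrative requests against the base path, /_insights/top_queries:

GET /_insights/top_queries
GET /_insights/top_queries?type=cpu
GET /_insights/top_queries/node-1,node-2?type=memory

Any value outside ALLOWED_METRICS, e.g. type=disk, fails fast with the IllegalArgumentException raised in prepareRequest.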
- */ - -package org.opensearch.plugin.insights.rules.transport.top_queries; - -import org.opensearch.action.FailedNodeException; -import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.TransportNodesAction; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.plugin.insights.core.service.QueryInsightsService; -import org.opensearch.plugin.insights.rules.action.top_queries.TopQueries; -import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesAction; -import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesRequest; -import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesResponse; -import org.opensearch.plugin.insights.settings.QueryInsightsSettings; -import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.TransportRequest; -import org.opensearch.transport.TransportService; - -import java.io.IOException; -import java.util.List; - -/** - * Transport action for cluster/node level top queries information. - * - * @opensearch.internal - */ -public class TransportTopQueriesAction extends TransportNodesAction< - TopQueriesRequest, - TopQueriesResponse, - TransportTopQueriesAction.NodeRequest, - TopQueries> { - - private final QueryInsightsService queryInsightsService; - - /** - * Create the TransportTopQueriesAction Object - - * @param threadPool The OpenSearch thread pool to run async tasks - * @param clusterService The clusterService of this node - * @param transportService The TransportService of this node - * @param queryInsightsService The topQueriesByLatencyService associated with this Transport Action - * @param actionFilters the action filters - */ - @Inject - public TransportTopQueriesAction( - final ThreadPool threadPool, - final ClusterService clusterService, - final TransportService transportService, - final QueryInsightsService queryInsightsService, - final ActionFilters actionFilters - ) { - super( - TopQueriesAction.NAME, - threadPool, - clusterService, - transportService, - actionFilters, - TopQueriesRequest::new, - NodeRequest::new, - ThreadPool.Names.GENERIC, - TopQueries.class - ); - this.queryInsightsService = queryInsightsService; - } - - @Override - protected TopQueriesResponse newResponse( - final TopQueriesRequest topQueriesRequest, - final List responses, - final List failures - ) { - int size; - switch (topQueriesRequest.getMetricType()) { - case CPU: - size = clusterService.getClusterSettings().get(QueryInsightsSettings.TOP_N_CPU_QUERIES_SIZE); - break; - case MEMORY: - size = clusterService.getClusterSettings().get(QueryInsightsSettings.TOP_N_MEMORY_QUERIES_SIZE); - break; - default: - size = clusterService.getClusterSettings().get(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE); - } - return new TopQueriesResponse(clusterService.getClusterName(), responses, failures, size, topQueriesRequest.getMetricType()); - } - - @Override - protected NodeRequest newNodeRequest(final TopQueriesRequest request) { - return new NodeRequest(request); - } - - @Override - protected TopQueries newNodeResponse(final StreamInput in) throws IOException { - return new TopQueries(in); - } - - @Override - protected TopQueries nodeOperation(final NodeRequest nodeRequest) { - final TopQueriesRequest request = nodeRequest.request; - return new TopQueries( - clusterService.localNode(), - 
queryInsightsService.getTopQueriesService(request.getMetricType()).getTopQueriesRecords(true) - ); - } - - /** - * Inner Node Top Queries Request - * - * @opensearch.internal - */ - public static class NodeRequest extends TransportRequest { - - final TopQueriesRequest request; - - /** - * Create the NodeResponse object from StreamInput - * - * @param in the StreamInput to read the object - * @throws IOException IOException - */ - public NodeRequest(StreamInput in) throws IOException { - super(in); - request = new TopQueriesRequest(in); - } - - /** - * Create the NodeResponse object from a TopQueriesRequest - * @param request the TopQueriesRequest object - */ - public NodeRequest(final TopQueriesRequest request) { - this.request = request; - } - - @Override - public void writeTo(final StreamOutput out) throws IOException { - super.writeTo(out); - request.writeTo(out); - } - } -} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/package-info.java deleted file mode 100644 index 54da0980deff8..0000000000000 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** - * Transport Actions for Top N Queries. - */ -package org.opensearch.plugin.insights.rules.transport.top_queries; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/QueryInsightsSettings.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/QueryInsightsSettings.java deleted file mode 100644 index 25309b5721792..0000000000000 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/QueryInsightsSettings.java +++ /dev/null @@ -1,304 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
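The metric-type switch in newResponse above resolves its size limit from dynamic cluster settings; the settings class that follows centralizes the same lookup in getTopNSizeSetting. A sketch of the equivalent single-call resolution, assuming an injected ClusterService:

private int resolveTopNSize(final ClusterService clusterService, final MetricType type) {
    // One lookup instead of the three-way switch in newResponse.
    return clusterService.getClusterSettings().get(QueryInsightsSettings.getTopNSizeSetting(type));
}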
- */ - -package org.opensearch.plugin.insights.settings; - -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.plugin.insights.core.exporter.SinkType; -import org.opensearch.plugin.insights.rules.model.MetricType; - -import java.util.Arrays; -import java.util.HashSet; -import java.util.Set; -import java.util.concurrent.TimeUnit; - -/** - * Settings for Query Insights Plugin - * - * @opensearch.api - * @opensearch.experimental - */ -public class QueryInsightsSettings { - /** - * Executors settings - */ - public static final String QUERY_INSIGHTS_EXECUTOR = "query_insights_executor"; - /** - * Max number of thread - */ - public static final int MAX_THREAD_COUNT = 5; - /** - * Max number of requests for the consumer to collect at one time - */ - public static final int QUERY_RECORD_QUEUE_CAPACITY = 1000; - /** - * Time interval for record queue consumer to run - */ - public static final TimeValue QUERY_RECORD_QUEUE_DRAIN_INTERVAL = new TimeValue(5, TimeUnit.SECONDS); - /** - * Default Values and Settings - */ - public static final TimeValue MAX_WINDOW_SIZE = new TimeValue(1, TimeUnit.DAYS); - /** - * Minimal window size - */ - public static final TimeValue MIN_WINDOW_SIZE = new TimeValue(1, TimeUnit.MINUTES); - /** - * Valid window sizes - */ - public static final Set VALID_WINDOW_SIZES_IN_MINUTES = new HashSet<>( - Arrays.asList( - new TimeValue(1, TimeUnit.MINUTES), - new TimeValue(5, TimeUnit.MINUTES), - new TimeValue(10, TimeUnit.MINUTES), - new TimeValue(30, TimeUnit.MINUTES) - ) - ); - - /** Default N size for top N queries */ - public static final int MAX_N_SIZE = 100; - /** Default window size in seconds to keep the top N queries with latency data in query insight store */ - public static final TimeValue DEFAULT_WINDOW_SIZE = new TimeValue(60, TimeUnit.SECONDS); - /** Default top N size to keep the data in query insight store */ - public static final int DEFAULT_TOP_N_SIZE = 3; - /** - * Query Insights base uri - */ - public static final String PLUGINS_BASE_URI = "/_insights"; - - /** - * Settings for Top Queries - * - */ - public static final String TOP_QUERIES_BASE_URI = PLUGINS_BASE_URI + "/top_queries"; - /** Default prefix for top N queries feature */ - public static final String TOP_N_QUERIES_SETTING_PREFIX = "search.insights.top_queries"; - /** Default prefix for top N queries by latency feature */ - public static final String TOP_N_LATENCY_QUERIES_PREFIX = TOP_N_QUERIES_SETTING_PREFIX + ".latency"; - /** Default prefix for top N queries by cpu feature */ - public static final String TOP_N_CPU_QUERIES_PREFIX = TOP_N_QUERIES_SETTING_PREFIX + ".cpu"; - /** Default prefix for top N queries by memory feature */ - public static final String TOP_N_MEMORY_QUERIES_PREFIX = TOP_N_QUERIES_SETTING_PREFIX + ".memory"; - /** - * Boolean setting for enabling top queries by latency. - */ - public static final Setting TOP_N_LATENCY_QUERIES_ENABLED = Setting.boolSetting( - TOP_N_LATENCY_QUERIES_PREFIX + ".enabled", - false, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - /** - * Int setting to define the top n size for top queries by latency. - */ - public static final Setting TOP_N_LATENCY_QUERIES_SIZE = Setting.intSetting( - TOP_N_LATENCY_QUERIES_PREFIX + ".top_n_size", - DEFAULT_TOP_N_SIZE, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - /** - * Time setting to define the window size in seconds for top queries by latency. 
- */ - public static final Setting TOP_N_LATENCY_QUERIES_WINDOW_SIZE = Setting.positiveTimeSetting( - TOP_N_LATENCY_QUERIES_PREFIX + ".window_size", - DEFAULT_WINDOW_SIZE, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ); - - /** - * Boolean setting for enabling top queries by cpu. - */ - public static final Setting TOP_N_CPU_QUERIES_ENABLED = Setting.boolSetting( - TOP_N_CPU_QUERIES_PREFIX + ".enabled", - false, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - /** - * Int setting to define the top n size for top queries by cpu. - */ - public static final Setting TOP_N_CPU_QUERIES_SIZE = Setting.intSetting( - TOP_N_CPU_QUERIES_PREFIX + ".top_n_size", - DEFAULT_TOP_N_SIZE, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - /** - * Time setting to define the window size in seconds for top queries by cpu. - */ - public static final Setting TOP_N_CPU_QUERIES_WINDOW_SIZE = Setting.positiveTimeSetting( - TOP_N_CPU_QUERIES_PREFIX + ".window_size", - DEFAULT_WINDOW_SIZE, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ); - - /** - * Boolean setting for enabling top queries by memory. - */ - public static final Setting TOP_N_MEMORY_QUERIES_ENABLED = Setting.boolSetting( - TOP_N_MEMORY_QUERIES_PREFIX + ".enabled", - false, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - /** - * Int setting to define the top n size for top queries by memory. - */ - public static final Setting TOP_N_MEMORY_QUERIES_SIZE = Setting.intSetting( - TOP_N_MEMORY_QUERIES_PREFIX + ".top_n_size", - DEFAULT_TOP_N_SIZE, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - /** - * Time setting to define the window size in seconds for top queries by memory. - */ - public static final Setting TOP_N_MEMORY_QUERIES_WINDOW_SIZE = Setting.positiveTimeSetting( - TOP_N_MEMORY_QUERIES_PREFIX + ".window_size", - DEFAULT_WINDOW_SIZE, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ); - - /** - * Config key for exporter type - */ - public static final String EXPORTER_TYPE = "type"; - /** - * Config key for export index - */ - public static final String EXPORT_INDEX = "config.index"; - - /** - * Settings and defaults for top queries exporters - */ - private static final String TOP_N_LATENCY_QUERIES_EXPORTER_PREFIX = TOP_N_LATENCY_QUERIES_PREFIX + ".exporter."; - /** - * Prefix for top n queries by cpu exporters - */ - private static final String TOP_N_CPU_QUERIES_EXPORTER_PREFIX = TOP_N_CPU_QUERIES_PREFIX + ".exporter."; - /** - * Prefix for top n queries by memory exporters - */ - private static final String TOP_N_MEMORY_QUERIES_EXPORTER_PREFIX = TOP_N_MEMORY_QUERIES_PREFIX + ".exporter."; - /** - * Default index pattern of top n queries - */ - public static final String DEFAULT_TOP_N_QUERIES_INDEX_PATTERN = "'top_queries-'YYYY.MM.dd"; - /** - * Default exporter type of top queries - */ - public static final String DEFAULT_TOP_QUERIES_EXPORTER_TYPE = SinkType.LOCAL_INDEX.toString(); - - /** - * Settings for the exporter of top latency queries - */ - public static final Setting TOP_N_LATENCY_EXPORTER_SETTINGS = Setting.groupSetting( - TOP_N_LATENCY_QUERIES_EXPORTER_PREFIX, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - /** - * Settings for the exporter of top cpu queries - */ - public static final Setting TOP_N_CPU_EXPORTER_SETTINGS = Setting.groupSetting( - TOP_N_CPU_QUERIES_EXPORTER_PREFIX, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - /** - * Settings for the exporter of top cpu queries - */ - public static 
final Setting TOP_N_MEMORY_EXPORTER_SETTINGS = Setting.groupSetting( - TOP_N_MEMORY_QUERIES_EXPORTER_PREFIX, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - /** - * Get the enabled setting based on type - * @param type MetricType - * @return enabled setting - */ - public static Setting getTopNEnabledSetting(MetricType type) { - switch (type) { - case CPU: - return TOP_N_CPU_QUERIES_ENABLED; - case MEMORY: - return TOP_N_MEMORY_QUERIES_ENABLED; - default: - return TOP_N_LATENCY_QUERIES_ENABLED; - } - } - - /** - * Get the top n size setting based on type - * @param type MetricType - * @return top n size setting - */ - public static Setting getTopNSizeSetting(MetricType type) { - switch (type) { - case CPU: - return TOP_N_CPU_QUERIES_SIZE; - case MEMORY: - return TOP_N_MEMORY_QUERIES_SIZE; - default: - return TOP_N_LATENCY_QUERIES_SIZE; - } - } - - /** - * Get the window size setting based on type - * @param type MetricType - * @return top n queries window size setting - */ - public static Setting getTopNWindowSizeSetting(MetricType type) { - switch (type) { - case CPU: - return TOP_N_CPU_QUERIES_WINDOW_SIZE; - case MEMORY: - return TOP_N_MEMORY_QUERIES_WINDOW_SIZE; - default: - return TOP_N_LATENCY_QUERIES_WINDOW_SIZE; - } - } - - /** - * Get the exporter settings based on type - * @param type MetricType - * @return exporter setting - */ - public static Setting getExporterSettings(MetricType type) { - switch (type) { - case CPU: - return TOP_N_CPU_EXPORTER_SETTINGS; - case MEMORY: - return TOP_N_MEMORY_EXPORTER_SETTINGS; - default: - return TOP_N_LATENCY_EXPORTER_SETTINGS; - } - } - - /** - * Default constructor - */ - public QueryInsightsSettings() {} -} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java deleted file mode 100644 index 2efe9085a39ee..0000000000000 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
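A sketch of how the dynamic node settings defined above resolve against a Settings object; keys that are not set fall back to their declared defaults (DEFAULT_TOP_N_SIZE = 3, DEFAULT_WINDOW_SIZE = 60s):

public void readTopNSettings() {
    Settings settings = Settings.builder()
        .put("search.insights.top_queries.latency.top_n_size", 10)
        .build();
    int latencyTopN = QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE.get(settings);  // 10
    int cpuTopN = QueryInsightsSettings.TOP_N_CPU_QUERIES_SIZE.get(Settings.EMPTY);    // 3, the default
}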
- */ - -package org.opensearch.plugin.insights; - -import org.opensearch.action.ActionRequest; -import org.opensearch.client.Client; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Settings; -import org.opensearch.core.action.ActionResponse; -import org.opensearch.plugin.insights.core.listener.QueryInsightsListener; -import org.opensearch.plugin.insights.core.service.QueryInsightsService; -import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesAction; -import org.opensearch.plugin.insights.rules.resthandler.top_queries.RestTopQueriesAction; -import org.opensearch.plugin.insights.settings.QueryInsightsSettings; -import org.opensearch.plugins.ActionPlugin; -import org.opensearch.rest.RestHandler; -import org.opensearch.test.ClusterServiceUtils; -import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.threadpool.ExecutorBuilder; -import org.opensearch.threadpool.ScalingExecutorBuilder; -import org.opensearch.threadpool.ThreadPool; -import org.junit.Before; - -import java.util.Arrays; -import java.util.List; - -import static org.mockito.Mockito.mock; - -public class QueryInsightsPluginTests extends OpenSearchTestCase { - - private QueryInsightsPlugin queryInsightsPlugin; - - private final Client client = mock(Client.class); - private ClusterService clusterService; - private final ThreadPool threadPool = mock(ThreadPool.class); - - @Before - public void setup() { - queryInsightsPlugin = new QueryInsightsPlugin(); - Settings.Builder settingsBuilder = Settings.builder(); - Settings settings = settingsBuilder.build(); - ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - QueryInsightsTestUtils.registerAllQueryInsightsSettings(clusterSettings); - clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, threadPool); - } - - public void testGetSettings() { - assertEquals( - Arrays.asList( - QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED, - QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE, - QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE, - QueryInsightsSettings.TOP_N_LATENCY_EXPORTER_SETTINGS, - QueryInsightsSettings.TOP_N_CPU_QUERIES_ENABLED, - QueryInsightsSettings.TOP_N_CPU_QUERIES_SIZE, - QueryInsightsSettings.TOP_N_CPU_QUERIES_WINDOW_SIZE, - QueryInsightsSettings.TOP_N_CPU_EXPORTER_SETTINGS, - QueryInsightsSettings.TOP_N_MEMORY_QUERIES_ENABLED, - QueryInsightsSettings.TOP_N_MEMORY_QUERIES_SIZE, - QueryInsightsSettings.TOP_N_MEMORY_QUERIES_WINDOW_SIZE, - QueryInsightsSettings.TOP_N_MEMORY_EXPORTER_SETTINGS - ), - queryInsightsPlugin.getSettings() - ); - } - - public void testCreateComponent() { - List components = (List) queryInsightsPlugin.createComponents( - client, - clusterService, - threadPool, - null, - null, - null, - null, - null, - null, - null, - null - ); - assertEquals(2, components.size()); - assertTrue(components.get(0) instanceof QueryInsightsService); - assertTrue(components.get(1) instanceof QueryInsightsListener); - } - - public void testGetExecutorBuilders() { - Settings.Builder settingsBuilder = Settings.builder(); - Settings settings = settingsBuilder.build(); - List> executorBuilders = queryInsightsPlugin.getExecutorBuilders(settings); - assertEquals(1, executorBuilders.size()); - assertTrue(executorBuilders.get(0) instanceof ScalingExecutorBuilder); - } - - public void testGetRestHandlers() { - List components = 
queryInsightsPlugin.getRestHandlers(Settings.EMPTY, null, null, null, null, null, null); - assertEquals(1, components.size()); - assertTrue(components.get(0) instanceof RestTopQueriesAction); - } - - public void testGetActions() { - List> components = queryInsightsPlugin.getActions(); - assertEquals(1, components.size()); - assertTrue(components.get(0).getAction() instanceof TopQueriesAction); - } - -} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsTestUtils.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsTestUtils.java deleted file mode 100644 index 7fa4e9841c20e..0000000000000 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsTestUtils.java +++ /dev/null @@ -1,205 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.insights; - -import org.opensearch.action.search.SearchType; -import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.util.Maps; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.plugin.insights.rules.action.top_queries.TopQueries; -import org.opensearch.plugin.insights.rules.model.Attribute; -import org.opensearch.plugin.insights.rules.model.MetricType; -import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; -import org.opensearch.plugin.insights.settings.QueryInsightsSettings; -import org.opensearch.test.VersionUtils; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Set; -import java.util.TreeSet; - -import static java.util.Collections.emptyMap; -import static java.util.Collections.emptySet; -import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.opensearch.test.OpenSearchTestCase.buildNewFakeTransportAddress; -import static org.opensearch.test.OpenSearchTestCase.random; -import static org.opensearch.test.OpenSearchTestCase.randomAlphaOfLengthBetween; -import static org.opensearch.test.OpenSearchTestCase.randomArray; -import static org.opensearch.test.OpenSearchTestCase.randomIntBetween; -import static org.opensearch.test.OpenSearchTestCase.randomLong; -import static org.opensearch.test.OpenSearchTestCase.randomLongBetween; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; - -final public class QueryInsightsTestUtils { - - public QueryInsightsTestUtils() {} - - public static List generateQueryInsightRecords(int count) { - return generateQueryInsightRecords(count, count, System.currentTimeMillis(), 0); - } - - /** - * Creates a List of random Query Insight Records for testing purpose - */ - public static List generateQueryInsightRecords(int lower, int upper, long startTimeStamp, long interval) { - List records = new ArrayList<>(); - int countOfRecords = randomIntBetween(lower, upper); - long timestamp = startTimeStamp; - for (int i = 0; i < countOfRecords; ++i) { - Map measurements = Map.of( - MetricType.LATENCY, - randomLongBetween(1000, 10000), - MetricType.CPU, - randomLongBetween(1000, 10000), - MetricType.MEMORY, - randomLongBetween(1000, 10000) - ); - - Map 
phaseLatencyMap = new HashMap<>(); - int countOfPhases = randomIntBetween(2, 5); - for (int j = 0; j < countOfPhases; ++j) { - phaseLatencyMap.put(randomAlphaOfLengthBetween(5, 10), randomLong()); - } - Map attributes = new HashMap<>(); - attributes.put(Attribute.SEARCH_TYPE, SearchType.QUERY_THEN_FETCH.toString().toLowerCase(Locale.ROOT)); - attributes.put(Attribute.SOURCE, "{\"size\":20}"); - attributes.put(Attribute.TOTAL_SHARDS, randomIntBetween(1, 100)); - attributes.put(Attribute.INDICES, randomArray(1, 3, Object[]::new, () -> randomAlphaOfLengthBetween(5, 10))); - attributes.put(Attribute.PHASE_LATENCY_MAP, phaseLatencyMap); - - records.add(new SearchQueryRecord(timestamp, measurements, attributes)); - timestamp += interval; - } - return records; - } - - public static TopQueries createRandomTopQueries() { - DiscoveryNode node = new DiscoveryNode( - "node_for_top_queries_test", - buildNewFakeTransportAddress(), - emptyMap(), - emptySet(), - VersionUtils.randomVersion(random()) - ); - List records = generateQueryInsightRecords(10); - - return new TopQueries(node, records); - } - - public static TopQueries createFixedTopQueries() { - DiscoveryNode node = new DiscoveryNode( - "node_for_top_queries_test", - buildNewFakeTransportAddress(), - emptyMap(), - emptySet(), - VersionUtils.randomVersion(random()) - ); - List records = new ArrayList<>(); - records.add(createFixedSearchQueryRecord()); - - return new TopQueries(node, records); - } - - public static SearchQueryRecord createFixedSearchQueryRecord() { - long timestamp = 1706574180000L; - Map measurements = Map.of(MetricType.LATENCY, 1L); - - Map phaseLatencyMap = new HashMap<>(); - Map attributes = new HashMap<>(); - attributes.put(Attribute.SEARCH_TYPE, SearchType.QUERY_THEN_FETCH.toString().toLowerCase(Locale.ROOT)); - - return new SearchQueryRecord(timestamp, measurements, attributes); - } - - public static void compareJson(ToXContent param1, ToXContent param2) throws IOException { - if (param1 == null || param2 == null) { - assertNull(param1); - assertNull(param2); - return; - } - - ToXContent.Params params = ToXContent.EMPTY_PARAMS; - XContentBuilder param1Builder = jsonBuilder(); - param1.toXContent(param1Builder, params); - - XContentBuilder param2Builder = jsonBuilder(); - param2.toXContent(param2Builder, params); - - assertEquals(param1Builder.toString(), param2Builder.toString()); - } - - @SuppressWarnings("unchecked") - public static boolean checkRecordsEquals(List records1, List records2) { - if (records1.size() != records2.size()) { - return false; - } - for (int i = 0; i < records1.size(); i++) { - if (!records1.get(i).equals(records2.get(i))) { - return false; - } - Map attributes1 = records1.get(i).getAttributes(); - Map attributes2 = records2.get(i).getAttributes(); - for (Map.Entry entry : attributes1.entrySet()) { - Attribute attribute = entry.getKey(); - Object value = entry.getValue(); - if (!attributes2.containsKey(attribute)) { - return false; - } - if (value instanceof Object[] && !Arrays.deepEquals((Object[]) value, (Object[]) attributes2.get(attribute))) { - return false; - } else if (value instanceof Map - && !Maps.deepEquals((Map) value, (Map) attributes2.get(attribute))) { - return false; - } - } - } - return true; - } - - public static boolean checkRecordsEqualsWithoutOrder( - List records1, - List records2, - MetricType metricType - ) { - Set set2 = new TreeSet<>((a, b) -> SearchQueryRecord.compare(a, b, metricType)); - set2.addAll(records2); - if (records1.size() != records2.size()) { - return false; - } 
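The array and map special-casing in checkRecordsEquals above exists because attribute values such as INDICES are Object[], and Java arrays compare by reference identity under equals; a minimal demonstration:

import java.util.Arrays;

public static void main(String[] args) {
    Object[] a = new Object[] { "index-1", "index-2" };
    Object[] b = new Object[] { "index-1", "index-2" };
    System.out.println(a.equals(b));              // false: identity comparison
    System.out.println(Arrays.deepEquals(a, b));  // true: element-wise comparison
}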
- for (int i = 0; i < records1.size(); i++) { - if (!set2.contains(records1.get(i))) { - return false; - } - } - return true; - } - - public static void registerAllQueryInsightsSettings(ClusterSettings clusterSettings) { - clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED); - clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE); - clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE); - clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_EXPORTER_SETTINGS); - clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_CPU_QUERIES_ENABLED); - clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_CPU_QUERIES_SIZE); - clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_CPU_QUERIES_WINDOW_SIZE); - clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_CPU_EXPORTER_SETTINGS); - clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_MEMORY_QUERIES_ENABLED); - clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_MEMORY_QUERIES_SIZE); - clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_MEMORY_QUERIES_WINDOW_SIZE); - clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_MEMORY_EXPORTER_SETTINGS); - } -} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/DebugExporterTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/DebugExporterTests.java deleted file mode 100644 index 736e406289b2c..0000000000000 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/DebugExporterTests.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.insights.core.exporter; - -import org.opensearch.plugin.insights.QueryInsightsTestUtils; -import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; -import org.opensearch.test.OpenSearchTestCase; -import org.junit.Before; - -import java.util.List; - -/** - * Granular tests for the {@link DebugExporterTests} class. - */ -public class DebugExporterTests extends OpenSearchTestCase { - private DebugExporter debugExporter; - - @Before - public void setup() { - debugExporter = DebugExporter.getInstance(); - } - - public void testExport() { - List records = QueryInsightsTestUtils.generateQueryInsightRecords(2); - try { - debugExporter.export(records); - } catch (Exception e) { - fail("No exception should be thrown when exporting query insights data"); - } - } -} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/LocalIndexExporterTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/LocalIndexExporterTests.java deleted file mode 100644 index 9ea864a7083f4..0000000000000 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/LocalIndexExporterTests.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.plugin.insights.core.exporter; - -import org.opensearch.action.bulk.BulkAction; -import org.opensearch.action.bulk.BulkRequestBuilder; -import org.opensearch.action.bulk.BulkResponse; -import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.client.Client; -import org.opensearch.plugin.insights.QueryInsightsTestUtils; -import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; -import org.opensearch.test.OpenSearchTestCase; -import org.joda.time.format.DateTimeFormat; -import org.joda.time.format.DateTimeFormatter; -import org.junit.Before; - -import java.util.List; - -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.when; - -/** - * Granular tests for the {@link LocalIndexExporterTests} class. - */ -public class LocalIndexExporterTests extends OpenSearchTestCase { - private final DateTimeFormatter format = DateTimeFormat.forPattern("YYYY.MM.dd"); - private final Client client = mock(Client.class); - private LocalIndexExporter localIndexExporter; - - @Before - public void setup() { - localIndexExporter = new LocalIndexExporter(client, format); - } - - public void testExportEmptyRecords() { - List records = List.of(); - try { - localIndexExporter.export(records); - } catch (Exception e) { - fail("No exception should be thrown when exporting empty query insights data"); - } - } - - @SuppressWarnings("unchecked") - public void testExportRecords() { - BulkRequestBuilder bulkRequestBuilder = spy(new BulkRequestBuilder(client, BulkAction.INSTANCE)); - final PlainActionFuture future = mock(PlainActionFuture.class); - when(future.actionGet()).thenReturn(null); - doAnswer(invocation -> future).when(bulkRequestBuilder).execute(); - when(client.prepareBulk()).thenReturn(bulkRequestBuilder); - - List records = QueryInsightsTestUtils.generateQueryInsightRecords(2); - try { - localIndexExporter.export(records); - } catch (Exception e) { - fail("No exception should be thrown when exporting query insights data"); - } - assertEquals(2, bulkRequestBuilder.numberOfActions()); - } - - @SuppressWarnings("unchecked") - public void testExportRecordsWithError() { - BulkRequestBuilder bulkRequestBuilder = spy(new BulkRequestBuilder(client, BulkAction.INSTANCE)); - final PlainActionFuture future = mock(PlainActionFuture.class); - when(future.actionGet()).thenReturn(null); - doThrow(new RuntimeException()).when(bulkRequestBuilder).execute(); - when(client.prepareBulk()).thenReturn(bulkRequestBuilder); - - List records = QueryInsightsTestUtils.generateQueryInsightRecords(2); - try { - localIndexExporter.export(records); - } catch (Exception e) { - fail("No exception should be thrown when exporting query insights data"); - } - } - - public void testClose() { - try { - localIndexExporter.close(); - } catch (Exception e) { - fail("No exception should be thrown when closing local index exporter"); - } - } - - public void testGetAndSetIndexPattern() { - DateTimeFormatter newFormatter = mock(DateTimeFormatter.class); - localIndexExporter.setIndexPattern(newFormatter); - assert (localIndexExporter.getIndexPattern() == newFormatter); - } -} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporterFactoryTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporterFactoryTests.java deleted file mode 
100644 index f01dd2c17509c..0000000000000 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporterFactoryTests.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.insights.core.exporter; - -import org.opensearch.client.Client; -import org.opensearch.common.settings.Settings; -import org.opensearch.test.OpenSearchTestCase; -import org.joda.time.format.DateTimeFormat; -import org.junit.Before; - -import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.DEFAULT_TOP_QUERIES_EXPORTER_TYPE; -import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.EXPORTER_TYPE; -import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.EXPORT_INDEX; -import static org.mockito.Mockito.mock; - -/** - * Granular tests for the {@link QueryInsightsExporterFactoryTests} class. - */ -public class QueryInsightsExporterFactoryTests extends OpenSearchTestCase { - private final String format = "YYYY.MM.dd"; - - private final Client client = mock(Client.class); - private QueryInsightsExporterFactory queryInsightsExporterFactory; - - @Before - public void setup() { - queryInsightsExporterFactory = new QueryInsightsExporterFactory(client); - } - - public void testValidateConfigWhenResetExporter() { - Settings.Builder settingsBuilder = Settings.builder(); - // empty settings - Settings settings = settingsBuilder.build(); - try { - queryInsightsExporterFactory.validateExporterConfig(settings); - } catch (Exception e) { - fail("No exception should be thrown when setting is null"); - } - } - - public void testInvalidExporterTypeConfig() { - Settings.Builder settingsBuilder = Settings.builder(); - Settings settings = settingsBuilder.put(EXPORTER_TYPE, "some_invalid_type").build(); - assertThrows(IllegalArgumentException.class, () -> { queryInsightsExporterFactory.validateExporterConfig(settings); }); - } - - public void testInvalidLocalIndexConfig() { - Settings.Builder settingsBuilder = Settings.builder(); - assertThrows(IllegalArgumentException.class, () -> { - queryInsightsExporterFactory.validateExporterConfig( - settingsBuilder.put(EXPORTER_TYPE, DEFAULT_TOP_QUERIES_EXPORTER_TYPE).put(EXPORT_INDEX, "").build() - ); - }); - assertThrows(IllegalArgumentException.class, () -> { - queryInsightsExporterFactory.validateExporterConfig( - settingsBuilder.put(EXPORTER_TYPE, DEFAULT_TOP_QUERIES_EXPORTER_TYPE).put(EXPORT_INDEX, "some_invalid_pattern").build() - ); - }); - } - - public void testCreateAndCloseExporter() { - QueryInsightsExporter exporter1 = queryInsightsExporterFactory.createExporter(SinkType.LOCAL_INDEX, format); - assertTrue(exporter1 instanceof LocalIndexExporter); - QueryInsightsExporter exporter2 = queryInsightsExporterFactory.createExporter(SinkType.DEBUG, format); - assertTrue(exporter2 instanceof DebugExporter); - QueryInsightsExporter exporter3 = queryInsightsExporterFactory.createExporter(SinkType.DEBUG, format); - assertTrue(exporter3 instanceof DebugExporter); - try { - queryInsightsExporterFactory.closeExporter(exporter1); - queryInsightsExporterFactory.closeExporter(exporter2); - queryInsightsExporterFactory.closeAllExporters(); - } catch (Exception e) { - fail("No exception should be thrown when closing exporter"); - } - } - - public void testUpdateExporter() { - 
LocalIndexExporter exporter = new LocalIndexExporter(client, DateTimeFormat.forPattern("yyyy-MM-dd")); - queryInsightsExporterFactory.updateExporter(exporter, "yyyy-MM-dd-HH"); - assertEquals(DateTimeFormat.forPattern("yyyy-MM-dd-HH"), exporter.getIndexPattern()); - } - -} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java deleted file mode 100644 index 86de44c680188..0000000000000 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java +++ /dev/null @@ -1,217 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.insights.core.listener; - -import org.opensearch.action.search.SearchPhaseContext; -import org.opensearch.action.search.SearchRequest; -import org.opensearch.action.search.SearchRequestContext; -import org.opensearch.action.search.SearchTask; -import org.opensearch.action.search.SearchType; -import org.opensearch.action.support.replication.ClusterStateCreationUtils; -import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.collect.Tuple; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.common.util.io.IOUtils; -import org.opensearch.core.tasks.TaskId; -import org.opensearch.plugin.insights.QueryInsightsTestUtils; -import org.opensearch.plugin.insights.core.service.QueryInsightsService; -import org.opensearch.plugin.insights.core.service.TopQueriesService; -import org.opensearch.plugin.insights.rules.model.Attribute; -import org.opensearch.plugin.insights.rules.model.MetricType; -import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; -import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; -import org.opensearch.search.aggregations.support.ValueType; -import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.tasks.Task; -import org.opensearch.test.ClusterServiceUtils; -import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.threadpool.TestThreadPool; -import org.opensearch.threadpool.ThreadPool; -import org.junit.Before; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.Phaser; -import java.util.concurrent.TimeUnit; - -import org.mockito.ArgumentCaptor; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -/** - * Unit Tests for {@link QueryInsightsListener}. 
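The "yyyy-MM-dd" patterns exercised above are Joda-Time format strings, the same mechanism behind the exporter's default index pattern 'top_queries-'YYYY.MM.dd, where single quotes mark literal text. A sketch of how a pattern becomes a concrete daily index name:

import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;

public void resolveIndexName() {
    DateTimeFormatter pattern = DateTimeFormat.forPattern("'top_queries-'YYYY.MM.dd");
    String indexName = pattern.print(new DateTime(2024, 8, 26, 0, 0));
    // indexName == "top_queries-2024.08.26"
}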
- */ -public class QueryInsightsListenerTests extends OpenSearchTestCase { - private final SearchRequestContext searchRequestContext = mock(SearchRequestContext.class); - private final SearchPhaseContext searchPhaseContext = mock(SearchPhaseContext.class); - private final SearchRequest searchRequest = mock(SearchRequest.class); - private final QueryInsightsService queryInsightsService = mock(QueryInsightsService.class); - private final TopQueriesService topQueriesService = mock(TopQueriesService.class); - private final ThreadPool threadPool = new TestThreadPool("QueryInsightsThreadPool"); - private ClusterService clusterService; - - @Before - public void setup() { - Settings.Builder settingsBuilder = Settings.builder(); - Settings settings = settingsBuilder.build(); - ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - QueryInsightsTestUtils.registerAllQueryInsightsSettings(clusterSettings); - ClusterState state = ClusterStateCreationUtils.stateWithActivePrimary("test", true, 1 + randomInt(3), randomInt(2)); - clusterService = ClusterServiceUtils.createClusterService(threadPool, state.getNodes().getLocalNode(), clusterSettings); - ClusterServiceUtils.setState(clusterService, state); - when(queryInsightsService.isCollectionEnabled(MetricType.LATENCY)).thenReturn(true); - when(queryInsightsService.getTopQueriesService(MetricType.LATENCY)).thenReturn(topQueriesService); - - ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - threadPool.getThreadContext().setHeaders(new Tuple<>(Collections.singletonMap(Task.X_OPAQUE_ID, "userLabel"), new HashMap<>())); - } - - @Override - public void tearDown() throws Exception { - super.tearDown(); - IOUtils.close(clusterService); - ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); - } - - @SuppressWarnings("unchecked") - public void testOnRequestEnd() throws InterruptedException { - Long timestamp = System.currentTimeMillis() - 100L; - SearchType searchType = SearchType.QUERY_THEN_FETCH; - - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - searchSourceBuilder.aggregation(new TermsAggregationBuilder("agg1").userValueTypeHint(ValueType.STRING).field("type.keyword")); - searchSourceBuilder.size(0); - SearchTask task = new SearchTask( - 0, - "n/a", - "n/a", - () -> "test", - TaskId.EMPTY_TASK_ID, - Collections.singletonMap(Task.X_OPAQUE_ID, "userLabel") - ); - - String[] indices = new String[] { "index-1", "index-2" }; - - Map<String, Long> phaseLatencyMap = new HashMap<>(); - phaseLatencyMap.put("expand", 0L); - phaseLatencyMap.put("query", 20L); - phaseLatencyMap.put("fetch", 1L); - - int numberOfShards = 10; - - QueryInsightsListener queryInsightsListener = new QueryInsightsListener(clusterService, queryInsightsService); - - when(searchRequest.getOrCreateAbsoluteStartMillis()).thenReturn(timestamp); - when(searchRequest.searchType()).thenReturn(searchType); - when(searchRequest.source()).thenReturn(searchSourceBuilder); - when(searchRequest.indices()).thenReturn(indices); - when(searchRequestContext.phaseTookMap()).thenReturn(phaseLatencyMap); - when(searchPhaseContext.getRequest()).thenReturn(searchRequest); - when(searchPhaseContext.getNumShards()).thenReturn(numberOfShards); - when(searchPhaseContext.getTask()).thenReturn(task); - ArgumentCaptor<SearchQueryRecord> captor = ArgumentCaptor.forClass(SearchQueryRecord.class); - - queryInsightsListener.onRequestEnd(searchPhaseContext, searchRequestContext); - - verify(queryInsightsService, times(1)).addRecord(captor.capture()); -
SearchQueryRecord generatedRecord = captor.getValue(); - assertEquals(timestamp.longValue(), generatedRecord.getTimestamp()); - assertEquals(numberOfShards, generatedRecord.getAttributes().get(Attribute.TOTAL_SHARDS)); - assertEquals(searchType.toString().toLowerCase(Locale.ROOT), generatedRecord.getAttributes().get(Attribute.SEARCH_TYPE)); - assertEquals(searchSourceBuilder.toString(), generatedRecord.getAttributes().get(Attribute.SOURCE)); - Map<String, Object> labels = (Map<String, Object>) generatedRecord.getAttributes().get(Attribute.LABELS); - assertEquals("userLabel", labels.get(Task.X_OPAQUE_ID)); - } - - public void testConcurrentOnRequestEnd() throws InterruptedException { - Long timestamp = System.currentTimeMillis() - 100L; - SearchType searchType = SearchType.QUERY_THEN_FETCH; - - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - searchSourceBuilder.aggregation(new TermsAggregationBuilder("agg1").userValueTypeHint(ValueType.STRING).field("type.keyword")); - searchSourceBuilder.size(0); - SearchTask task = new SearchTask( - 0, - "n/a", - "n/a", - () -> "test", - TaskId.EMPTY_TASK_ID, - Collections.singletonMap(Task.X_OPAQUE_ID, "userLabel") - ); - - String[] indices = new String[] { "index-1", "index-2" }; - - Map<String, Long> phaseLatencyMap = new HashMap<>(); - phaseLatencyMap.put("expand", 0L); - phaseLatencyMap.put("query", 20L); - phaseLatencyMap.put("fetch", 1L); - - int numberOfShards = 10; - - final List<QueryInsightsListener> searchListenersList = new ArrayList<>(); - - when(searchRequest.getOrCreateAbsoluteStartMillis()).thenReturn(timestamp); - when(searchRequest.searchType()).thenReturn(searchType); - when(searchRequest.source()).thenReturn(searchSourceBuilder); - when(searchRequest.indices()).thenReturn(indices); - when(searchRequestContext.phaseTookMap()).thenReturn(phaseLatencyMap); - when(searchPhaseContext.getRequest()).thenReturn(searchRequest); - when(searchPhaseContext.getNumShards()).thenReturn(numberOfShards); - when(searchPhaseContext.getTask()).thenReturn(task); - - int numRequests = 50; - Thread[] threads = new Thread[numRequests]; - Phaser phaser = new Phaser(numRequests + 1); - CountDownLatch countDownLatch = new CountDownLatch(numRequests); - - for (int i = 0; i < numRequests; i++) { - searchListenersList.add(new QueryInsightsListener(clusterService, queryInsightsService)); - } - - for (int i = 0; i < numRequests; i++) { - int finalI = i; - threads[i] = new Thread(() -> { - phaser.arriveAndAwaitAdvance(); - QueryInsightsListener thisListener = searchListenersList.get(finalI); - thisListener.onRequestEnd(searchPhaseContext, searchRequestContext); - countDownLatch.countDown(); - }); - threads[i].start(); - } - phaser.arriveAndAwaitAdvance(); - countDownLatch.await(); - - verify(queryInsightsService, times(numRequests)).addRecord(any()); - } - - public void testSetEnabled() { - when(queryInsightsService.isCollectionEnabled(MetricType.LATENCY)).thenReturn(true); - QueryInsightsListener queryInsightsListener = new QueryInsightsListener(clusterService, queryInsightsService); - queryInsightsListener.setEnableTopQueries(MetricType.LATENCY, true); - assertTrue(queryInsightsListener.isEnabled()); - - when(queryInsightsService.isCollectionEnabled(MetricType.LATENCY)).thenReturn(false); - when(queryInsightsService.isCollectionEnabled(MetricType.CPU)).thenReturn(false); - when(queryInsightsService.isCollectionEnabled(MetricType.MEMORY)).thenReturn(false); - queryInsightsListener.setEnableTopQueries(MetricType.LATENCY, false); - assertFalse(queryInsightsListener.isEnabled()); - } -} diff --git
a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/QueryInsightsServiceTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/QueryInsightsServiceTests.java deleted file mode 100644 index 75a5768f50681..0000000000000 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/QueryInsightsServiceTests.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.insights.core.service; - -import org.opensearch.client.Client; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Settings; -import org.opensearch.plugin.insights.QueryInsightsTestUtils; -import org.opensearch.plugin.insights.rules.model.MetricType; -import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; -import org.opensearch.plugin.insights.settings.QueryInsightsSettings; -import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.threadpool.ThreadPool; -import org.junit.Before; - -import static org.mockito.Mockito.mock; - -/** - * Unit Tests for {@link QueryInsightsService}. - */ -public class QueryInsightsServiceTests extends OpenSearchTestCase { - private final ThreadPool threadPool = mock(ThreadPool.class); - private final Client client = mock(Client.class); - private QueryInsightsService queryInsightsService; - - @Before - public void setup() { - Settings.Builder settingsBuilder = Settings.builder(); - Settings settings = settingsBuilder.build(); - ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - QueryInsightsTestUtils.registerAllQueryInsightsSettings(clusterSettings); - queryInsightsService = new QueryInsightsService(clusterSettings, threadPool, client); - queryInsightsService.enableCollection(MetricType.LATENCY, true); - queryInsightsService.enableCollection(MetricType.CPU, true); - queryInsightsService.enableCollection(MetricType.MEMORY, true); - } - - public void testAddRecordToLimitAndDrain() { - SearchQueryRecord record = QueryInsightsTestUtils.generateQueryInsightRecords(1, 1, System.currentTimeMillis(), 0).get(0); - for (int i = 0; i < QueryInsightsSettings.QUERY_RECORD_QUEUE_CAPACITY; i++) { - assertTrue(queryInsightsService.addRecord(record)); - } - // exceed capacity - assertFalse(queryInsightsService.addRecord(record)); - queryInsightsService.drainRecords(); - assertEquals( - QueryInsightsSettings.DEFAULT_TOP_N_SIZE, - queryInsightsService.getTopQueriesService(MetricType.LATENCY).getTopQueriesRecords(false).size() - ); - } - - public void testClose() { - try { - queryInsightsService.doClose(); - } catch (Exception e) { - fail("No exception expected when closing query insights service"); - } - } -} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java deleted file mode 100644 index 3efd4c86833cc..0000000000000 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the 
Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.insights.core.service; - -import org.opensearch.cluster.coordination.DeterministicTaskQueue; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.plugin.insights.QueryInsightsTestUtils; -import org.opensearch.plugin.insights.core.exporter.QueryInsightsExporterFactory; -import org.opensearch.plugin.insights.rules.model.MetricType; -import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; -import org.opensearch.plugin.insights.settings.QueryInsightsSettings; -import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.threadpool.ThreadPool; -import org.junit.Before; - -import java.util.List; -import java.util.concurrent.TimeUnit; - -import static org.mockito.Mockito.mock; - -/** - * Unit Tests for {@link QueryInsightsService}. - */ -public class TopQueriesServiceTests extends OpenSearchTestCase { - private TopQueriesService topQueriesService; - private final ThreadPool threadPool = mock(ThreadPool.class); - private final QueryInsightsExporterFactory queryInsightsExporterFactory = mock(QueryInsightsExporterFactory.class); - - @Before - public void setup() { - topQueriesService = new TopQueriesService(MetricType.LATENCY, threadPool, queryInsightsExporterFactory); - topQueriesService.setTopNSize(Integer.MAX_VALUE); - topQueriesService.setWindowSize(new TimeValue(Long.MAX_VALUE)); - topQueriesService.setEnabled(true); - } - - public void testIngestQueryDataWithLargeWindow() { - final List<SearchQueryRecord> records = QueryInsightsTestUtils.generateQueryInsightRecords(10); - topQueriesService.consumeRecords(records); - assertTrue( - QueryInsightsTestUtils.checkRecordsEqualsWithoutOrder( - topQueriesService.getTopQueriesRecords(false), - records, - MetricType.LATENCY - ) - ); - } - - public void testRollingWindows() { - List<SearchQueryRecord> records; - // Create 5 records at Now - 10 minutes to make sure they belong to the last window - records = QueryInsightsTestUtils.generateQueryInsightRecords(5, 5, System.currentTimeMillis() - 1000 * 60 * 10, 0); - topQueriesService.setWindowSize(TimeValue.timeValueMinutes(10)); - topQueriesService.consumeRecords(records); - assertEquals(0, topQueriesService.getTopQueriesRecords(true).size()); - - // Create 10 records at now + 1 minute, to make sure they belong to the current window - records = QueryInsightsTestUtils.generateQueryInsightRecords(10, 10, System.currentTimeMillis() + 1000 * 60, 0); - topQueriesService.setWindowSize(TimeValue.timeValueMinutes(10)); - topQueriesService.consumeRecords(records); - assertEquals(10, topQueriesService.getTopQueriesRecords(true).size()); - } - - public void testSmallNSize() { - final List<SearchQueryRecord> records = QueryInsightsTestUtils.generateQueryInsightRecords(10); - topQueriesService.setTopNSize(1); - topQueriesService.consumeRecords(records); - assertEquals(1, topQueriesService.getTopQueriesRecords(false).size()); - } - - public void testValidateTopNSize() { - assertThrows(IllegalArgumentException.class, () -> { topQueriesService.validateTopNSize(QueryInsightsSettings.MAX_N_SIZE + 1); }); - } - - public void testGetTopQueriesWhenNotEnabled() { - topQueriesService.setEnabled(false); - assertThrows(IllegalArgumentException.class, () -> { topQueriesService.getTopQueriesRecords(false); }); - } - - public void testValidateWindowSize() { - assertThrows(IllegalArgumentException.class, () -> { - topQueriesService.validateWindowSize(new TimeValue(QueryInsightsSettings.MAX_WINDOW_SIZE.getSeconds() + 1, TimeUnit.SECONDS)); - }); -
assertThrows(IllegalArgumentException.class, () -> { - topQueriesService.validateWindowSize(new TimeValue(QueryInsightsSettings.MIN_WINDOW_SIZE.getSeconds() - 1, TimeUnit.SECONDS)); - }); - assertThrows(IllegalArgumentException.class, () -> { topQueriesService.validateWindowSize(new TimeValue(2, TimeUnit.DAYS)); }); - assertThrows(IllegalArgumentException.class, () -> { topQueriesService.validateWindowSize(new TimeValue(7, TimeUnit.MINUTES)); }); - } - - private static void runUntilTimeoutOrFinish(DeterministicTaskQueue deterministicTaskQueue, long duration) { - final long endTime = deterministicTaskQueue.getCurrentTimeMillis() + duration; - while (deterministicTaskQueue.getCurrentTimeMillis() < endTime - && (deterministicTaskQueue.hasRunnableTasks() || deterministicTaskQueue.hasDeferredTasks())) { - if (deterministicTaskQueue.hasDeferredTasks() && randomBoolean()) { - deterministicTaskQueue.advanceTime(); - } else if (deterministicTaskQueue.hasRunnableTasks()) { - deterministicTaskQueue.runRandomTask(); - } - } - } -} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesRequestTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesRequestTests.java deleted file mode 100644 index 619fd4b33a3dc..0000000000000 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesRequestTests.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.insights.rules.action.top_queries; - -import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.plugin.insights.rules.model.MetricType; -import org.opensearch.test.OpenSearchTestCase; - -/** - * Granular tests for the {@link TopQueriesRequest} class. - */ -public class TopQueriesRequestTests extends OpenSearchTestCase { - - /** - * Check that we can set the metric type - */ - public void testSetMetricType() throws Exception { - TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY, randomAlphaOfLength(5)); - TopQueriesRequest deserializedRequest = roundTripRequest(request); - assertEquals(request.getMetricType(), deserializedRequest.getMetricType()); - } - - /** - * Serialize and deserialize a request. - * @param request A request to serialize. - * @return The deserialized, "round-tripped" request. 
- */ - private static TopQueriesRequest roundTripRequest(TopQueriesRequest request) throws Exception { - try (BytesStreamOutput out = new BytesStreamOutput()) { - request.writeTo(out); - try (StreamInput in = out.bytes().streamInput()) { - return new TopQueriesRequest(in); - } - } - } -} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesResponseTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesResponseTests.java deleted file mode 100644 index eeee50d3da703..0000000000000 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesResponseTests.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.insights.rules.action.top_queries; - -import org.opensearch.cluster.ClusterName; -import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.xcontent.MediaTypeRegistry; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.plugin.insights.QueryInsightsTestUtils; -import org.opensearch.plugin.insights.rules.model.MetricType; -import org.opensearch.test.OpenSearchTestCase; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -/** - * Granular tests for the {@link TopQueriesResponse} class. - */ -public class TopQueriesResponseTests extends OpenSearchTestCase { - - /** - * Check serialization and deserialization - */ - public void testSerialize() throws Exception { - TopQueries topQueries = QueryInsightsTestUtils.createRandomTopQueries(); - ClusterName clusterName = new ClusterName("test-cluster"); - TopQueriesResponse response = new TopQueriesResponse(clusterName, List.of(topQueries), new ArrayList<>(), 10, MetricType.LATENCY); - TopQueriesResponse deserializedResponse = roundTripResponse(response); - assertEquals(response.toString(), deserializedResponse.toString()); - } - - public void testToXContent() throws IOException { - char[] expectedXcontent = - "{\"top_queries\":[{\"timestamp\":1706574180000,\"node_id\":\"node_for_top_queries_test\",\"search_type\":\"query_then_fetch\",\"latency\":1}]}" - .toCharArray(); - TopQueries topQueries = QueryInsightsTestUtils.createFixedTopQueries(); - ClusterName clusterName = new ClusterName("test-cluster"); - TopQueriesResponse response = new TopQueriesResponse(clusterName, List.of(topQueries), new ArrayList<>(), 10, MetricType.LATENCY); - XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); - char[] xContent = BytesReference.bytes(response.toXContent(builder, ToXContent.EMPTY_PARAMS)).utf8ToString().toCharArray(); - Arrays.sort(expectedXcontent); - Arrays.sort(xContent); - - assertEquals(Arrays.hashCode(expectedXcontent), Arrays.hashCode(xContent)); - } - - /** - * Serialize and deserialize a TopQueriesResponse. - * @param response A response to serialize. - * @return The deserialized, "round-tripped" response. 
- */ - private static TopQueriesResponse roundTripResponse(TopQueriesResponse response) throws Exception { - try (BytesStreamOutput out = new BytesStreamOutput()) { - response.writeTo(out); - try (StreamInput in = out.bytes().streamInput()) { - return new TopQueriesResponse(in); - } - } - } -} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesTests.java deleted file mode 100644 index 7db08b53ad1df..0000000000000 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesTests.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.insights.rules.action.top_queries; - -import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.plugin.insights.QueryInsightsTestUtils; -import org.opensearch.test.OpenSearchTestCase; - -import java.io.IOException; - -/** - * Tests for {@link TopQueries}. - */ -public class TopQueriesTests extends OpenSearchTestCase { - - public void testTopQueries() throws IOException { - TopQueries topQueries = QueryInsightsTestUtils.createRandomTopQueries(); - try (BytesStreamOutput out = new BytesStreamOutput()) { - topQueries.writeTo(out); - try (StreamInput in = out.bytes().streamInput()) { - TopQueries readTopQueries = new TopQueries(in); - assertTrue( - QueryInsightsTestUtils.checkRecordsEquals(topQueries.getTopQueriesRecord(), readTopQueries.getTopQueriesRecord()) - ); - } - } - } -} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecordTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecordTests.java deleted file mode 100644 index ad45b53ec5363..0000000000000 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecordTests.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.insights.rules.model; - -import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.plugin.insights.QueryInsightsTestUtils; -import org.opensearch.test.OpenSearchTestCase; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -/** - * Granular tests for the {@link SearchQueryRecord} class. 
- */ -public class SearchQueryRecordTests extends OpenSearchTestCase { - - /** - * Check that if the serialization, deserialization and equals functions are working as expected - */ - public void testSerializationAndEquals() throws Exception { - List<SearchQueryRecord> records = QueryInsightsTestUtils.generateQueryInsightRecords(10); - List<SearchQueryRecord> copiedRecords = new ArrayList<>(); - for (SearchQueryRecord record : records) { - copiedRecords.add(roundTripRecord(record)); - } - assertTrue(QueryInsightsTestUtils.checkRecordsEquals(records, copiedRecords)); - - } - - public void testAllMetricTypes() { - Set<MetricType> allMetrics = MetricType.allMetricTypes(); - Set<MetricType> expected = new HashSet<>(Arrays.asList(MetricType.LATENCY, MetricType.CPU, MetricType.MEMORY)); - assertEquals(expected, allMetrics); - } - - public void testCompare() { - SearchQueryRecord record1 = QueryInsightsTestUtils.createFixedSearchQueryRecord(); - SearchQueryRecord record2 = QueryInsightsTestUtils.createFixedSearchQueryRecord(); - assertEquals(0, SearchQueryRecord.compare(record1, record2, MetricType.LATENCY)); - } - - public void testEqual() { - SearchQueryRecord record1 = QueryInsightsTestUtils.createFixedSearchQueryRecord(); - SearchQueryRecord record2 = QueryInsightsTestUtils.createFixedSearchQueryRecord(); - assertEquals(record1, record2); - } - - /** - * Serialize and deserialize a SearchQueryRecord. - * @param record A SearchQueryRecord to serialize. - * @return The deserialized, "round-tripped" record. - */ - private static SearchQueryRecord roundTripRecord(SearchQueryRecord record) throws Exception { - try (BytesStreamOutput out = new BytesStreamOutput()) { - record.writeTo(out); - try (StreamInput in = out.bytes().streamInput()) { - return new SearchQueryRecord(in); - } - } - } -} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/RestTopQueriesActionTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/RestTopQueriesActionTests.java deleted file mode 100644 index ac19fa2a7348f..0000000000000 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/RestTopQueriesActionTests.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license.
- */ - -package org.opensearch.plugin.insights.rules.resthandler.top_queries; - -import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesRequest; -import org.opensearch.rest.RestHandler; -import org.opensearch.rest.RestRequest; -import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.test.rest.FakeRestRequest; - -import java.util.HashMap; -import java.util.List; -import java.util.Locale; -import java.util.Map; - -import static org.opensearch.plugin.insights.rules.resthandler.top_queries.RestTopQueriesAction.ALLOWED_METRICS; - -public class RestTopQueriesActionTests extends OpenSearchTestCase { - - public void testEmptyNodeIdsValidType() { - Map<String, String> params = new HashMap<>(); - params.put("type", randomFrom(ALLOWED_METRICS)); - RestRequest restRequest = buildRestRequest(params); - TopQueriesRequest actual = RestTopQueriesAction.prepareRequest(restRequest); - assertEquals(0, actual.nodesIds().length); - } - - public void testNodeIdsValid() { - Map<String, String> params = new HashMap<>(); - params.put("type", randomFrom(ALLOWED_METRICS)); - String[] nodes = randomArray(1, 10, String[]::new, () -> randomAlphaOfLengthBetween(5, 10)); - params.put("nodeId", String.join(",", nodes)); - - RestRequest restRequest = buildRestRequest(params); - TopQueriesRequest actual = RestTopQueriesAction.prepareRequest(restRequest); - assertArrayEquals(nodes, actual.nodesIds()); - } - - public void testInValidType() { - Map<String, String> params = new HashMap<>(); - params.put("type", randomAlphaOfLengthBetween(5, 10).toUpperCase(Locale.ROOT)); - - RestRequest restRequest = buildRestRequest(params); - Exception exception = assertThrows(IllegalArgumentException.class, () -> { RestTopQueriesAction.prepareRequest(restRequest); }); - assertEquals( - String.format(Locale.ROOT, "request [/_insights/top_queries] contains invalid metric type [%s]", params.get("type")), - exception.getMessage() - ); - } - - public void testGetRoutes() { - RestTopQueriesAction action = new RestTopQueriesAction(); - List<RestHandler.Route> routes = action.routes(); - assertEquals(2, routes.size()); - assertEquals("query_insights_top_queries_action", action.getName()); - } - - private FakeRestRequest buildRestRequest(Map<String, String> params) { - return new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.GET) - .withPath("/_insights/top_queries") - .withParams(params) - .build(); - } -} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesActionTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesActionTests.java deleted file mode 100644 index d05cf7b6a636f..0000000000000 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesActionTests.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license.
- */ - -package org.opensearch.plugin.insights.rules.transport.top_queries; - -import org.opensearch.action.support.ActionFilters; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Settings; -import org.opensearch.plugin.insights.core.service.QueryInsightsService; -import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesRequest; -import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesResponse; -import org.opensearch.plugin.insights.rules.model.MetricType; -import org.opensearch.plugin.insights.settings.QueryInsightsSettings; -import org.opensearch.test.ClusterServiceUtils; -import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.TransportService; -import org.junit.Before; - -import java.util.List; - -import static org.mockito.Mockito.mock; - -public class TransportTopQueriesActionTests extends OpenSearchTestCase { - - private final ThreadPool threadPool = mock(ThreadPool.class); - - private final Settings.Builder settingsBuilder = Settings.builder(); - private final Settings settings = settingsBuilder.build(); - private final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - private final ClusterService clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, threadPool); - private final TransportService transportService = mock(TransportService.class); - private final QueryInsightsService topQueriesByLatencyService = mock(QueryInsightsService.class); - private final ActionFilters actionFilters = mock(ActionFilters.class); - private final TransportTopQueriesAction transportTopQueriesAction = new TransportTopQueriesAction( - threadPool, - clusterService, - transportService, - topQueriesByLatencyService, - actionFilters - ); - private final DummyParentAction dummyParentAction = new DummyParentAction( - threadPool, - clusterService, - transportService, - topQueriesByLatencyService, - actionFilters - ); - - class DummyParentAction extends TransportTopQueriesAction { - public DummyParentAction( - ThreadPool threadPool, - ClusterService clusterService, - TransportService transportService, - QueryInsightsService topQueriesByLatencyService, - ActionFilters actionFilters - ) { - super(threadPool, clusterService, transportService, topQueriesByLatencyService, actionFilters); - } - - public TopQueriesResponse createNewResponse() { - TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY); - return newResponse(request, List.of(), List.of()); - } - } - - @Before - public void setup() { - clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED); - clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE); - clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE); - } - - public void testNewResponse() { - TopQueriesResponse response = dummyParentAction.createNewResponse(); - assertNotNull(response); - } - -} diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index f3aa64316b667..6844311927db0 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -44,11 +44,11 @@ opensearchplugin { } dependencies { - api 'com.azure:azure-core:1.49.1' + api 'com.azure:azure-core:1.51.0' api 'com.azure:azure-json:1.1.0' - api 'com.azure:azure-xml:1.0.0' - api 
'com.azure:azure-storage-common:12.21.2' - api 'com.azure:azure-core-http-netty:1.15.1' + api 'com.azure:azure-xml:1.1.0' + api 'com.azure:azure-storage-common:12.25.1' + api 'com.azure:azure-core-http-netty:1.15.3' api "io.netty:netty-codec-dns:${versions.netty}" api "io.netty:netty-codec-socks:${versions.netty}" api "io.netty:netty-codec-http2:${versions.netty}" @@ -61,15 +61,15 @@ dependencies { // Start of transitive dependencies for azure-identity api 'com.microsoft.azure:msal4j-persistence-extension:1.3.0' api "net.java.dev.jna:jna-platform:${versions.jna}" - api 'com.microsoft.azure:msal4j:1.15.1' + api 'com.microsoft.azure:msal4j:1.16.2' api 'com.nimbusds:oauth2-oidc-sdk:11.9.1' api 'com.nimbusds:nimbus-jose-jwt:9.40' api 'com.nimbusds:content-type:2.3' api 'com.nimbusds:lang-tag:1.7' // Both msal4j:1.14.3 and oauth2-oidc-sdk:11.9.1 has compile dependency on different versions of json-smart, // selected the higher version which is 2.5.0 - api 'net.minidev:json-smart:2.5.0' - api 'net.minidev:accessors-smart:2.5.0' + api 'net.minidev:json-smart:2.5.1' + api 'net.minidev:accessors-smart:2.5.1' api "org.ow2.asm:asm:${versions.asm}" // End of transitive dependencies for azure-identity api "io.projectreactor.netty:reactor-netty-core:${versions.reactor_netty}" diff --git a/plugins/repository-azure/licenses/accessors-smart-2.5.0.jar.sha1 b/plugins/repository-azure/licenses/accessors-smart-2.5.0.jar.sha1 deleted file mode 100644 index 1578c94fcdc7b..0000000000000 --- a/plugins/repository-azure/licenses/accessors-smart-2.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -aca011492dfe9c26f4e0659028a4fe0970829dd8 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/accessors-smart-2.5.1.jar.sha1 b/plugins/repository-azure/licenses/accessors-smart-2.5.1.jar.sha1 new file mode 100644 index 0000000000000..8f7452437323d --- /dev/null +++ b/plugins/repository-azure/licenses/accessors-smart-2.5.1.jar.sha1 @@ -0,0 +1 @@ +19b820261eb2e7de7d5bde11d1c06e4501dd7e5f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-1.49.1.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.49.1.jar.sha1 deleted file mode 100644 index d487c08c26e94..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-1.49.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a7c44282eaa0f5a3be4b920d6a057509adfe8674 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-1.51.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.51.0.jar.sha1 new file mode 100644 index 0000000000000..7200f59af2f9a --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-1.51.0.jar.sha1 @@ -0,0 +1 @@ +ff5d0aedf75ca45ec0ace24673f790d2f7a57096 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.1.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.1.jar.sha1 deleted file mode 100644 index 3a0747a0daacb..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -036f7466a521aa99c79a491a9cf20444667df78b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.3.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.3.jar.sha1 new file mode 100644 index 0000000000000..3cea52ba67ce5 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.3.jar.sha1 @@ -0,0 +1 @@ +03b5bd5f5c16eea71f130119dbfb1fe5239f806a \ No newline at end of file diff --git 
a/plugins/repository-azure/licenses/azure-storage-common-12.21.2.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.21.2.jar.sha1 deleted file mode 100644 index b3c73774764df..0000000000000 --- a/plugins/repository-azure/licenses/azure-storage-common-12.21.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d2676d4fc40a501bd5d0437b8d2bfb9926022bea \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.25.1.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.25.1.jar.sha1 new file mode 100644 index 0000000000000..822a60d81ca27 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-storage-common-12.25.1.jar.sha1 @@ -0,0 +1 @@ +96e2df76ce9a8fa084ae289bb59295d565f2b8d5 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-xml-1.0.0.jar.sha1 b/plugins/repository-azure/licenses/azure-xml-1.0.0.jar.sha1 deleted file mode 100644 index 798ec5d95c6ac..0000000000000 --- a/plugins/repository-azure/licenses/azure-xml-1.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ba584703bd47e9e789343ee3332f0f5a64f7f187 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-xml-1.1.0.jar.sha1 b/plugins/repository-azure/licenses/azure-xml-1.1.0.jar.sha1 new file mode 100644 index 0000000000000..4f9cfcac02f6e --- /dev/null +++ b/plugins/repository-azure/licenses/azure-xml-1.1.0.jar.sha1 @@ -0,0 +1 @@ +8218a00c07f9f66d5dc7ae2ba613da6890867497 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-annotations-2.17.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-annotations-2.17.1.jar.sha1 deleted file mode 100644 index 4ceead1b7ae4f..0000000000000 --- a/plugins/repository-azure/licenses/jackson-annotations-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fca7ef6192c9ad05d07bc50da991bf937a84af3a \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-annotations-2.17.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-annotations-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..411e1d62459fd --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-annotations-2.17.2.jar.sha1 @@ -0,0 +1 @@ +147b7b9412ffff24339f8aba080b292448e08698 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.17.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.17.1.jar.sha1 deleted file mode 100644 index 7cf1ac1b60301..0000000000000 --- a/plugins/repository-azure/licenses/jackson-databind-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0524dcbcccdde7d45a679dfc333e4763feb09079 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.17.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..f2b4dbdc5decb --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-databind-2.17.2.jar.sha1 @@ -0,0 +1 @@ +e6deb029e5901e027c129341fac39e515066b68c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.1.jar.sha1 deleted file mode 100644 index 3915ab2616beb..0000000000000 --- a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e6a168dba62aa63743b9e2b83f4e0f0dfdc143d3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.2.jar.sha1 
b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..f9c31c168926d --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.2.jar.sha1 @@ -0,0 +1 @@ +ad58f5bd089e743ac6e5999b2d1e3cf8515cea9a \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.1.jar.sha1 deleted file mode 100644 index db26ebbf738f7..0000000000000 --- a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0969b0c3cb8c75d759e9a6c585c44c9b9f3a4f75 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..a61bf643d69e6 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.2.jar.sha1 @@ -0,0 +1 @@ +267b85e9ba2892a37be6d80aa9ca1438a0d8c210 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.1.jar.sha1 deleted file mode 100644 index bb8ecfe34d295..0000000000000 --- a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f77e7bf0e64dfcf53bfdcf2764ad7ab92b78a4da \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..d9d7975146c22 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.2.jar.sha1 @@ -0,0 +1 @@ +c2978b818ef2f2b2738b387c143624eab611d917 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/json-smart-2.5.0.jar.sha1 b/plugins/repository-azure/licenses/json-smart-2.5.0.jar.sha1 deleted file mode 100644 index 3ec055efa1255..0000000000000 --- a/plugins/repository-azure/licenses/json-smart-2.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -57a64f421b472849c40e77d2e7cce3a141b41e99 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/json-smart-2.5.1.jar.sha1 b/plugins/repository-azure/licenses/json-smart-2.5.1.jar.sha1 new file mode 100644 index 0000000000000..fe23968afce1e --- /dev/null +++ b/plugins/repository-azure/licenses/json-smart-2.5.1.jar.sha1 @@ -0,0 +1 @@ +4c11d2808d009132dfbbf947ebf37de6bf266c8e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/msal4j-1.15.1.jar.sha1 b/plugins/repository-azure/licenses/msal4j-1.15.1.jar.sha1 deleted file mode 100644 index 797f21d0d4995..0000000000000 --- a/plugins/repository-azure/licenses/msal4j-1.15.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cd1daa94b81bd97153536b661c31295f99cbb8e7 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/msal4j-1.16.2.jar.sha1 b/plugins/repository-azure/licenses/msal4j-1.16.2.jar.sha1 new file mode 100644 index 0000000000000..1363e5a0793d2 --- /dev/null +++ b/plugins/repository-azure/licenses/msal4j-1.16.2.jar.sha1 @@ -0,0 +1 @@ +b43ec4dd657f8ed5922bc0a8ccbe49000968bd15 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 deleted file mode 100644 index 
5e3f819012811..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f988dbb527efb0e7cf7d444cc50b0fc3f5f380ec \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.112.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..a42a41b6387c8 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +06724b184ee870ecc4d8fc36931beeb3c387b0ee \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 deleted file mode 100644 index 06c86b8fda557..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f0cca5df75bfb4f858d0435f601d8b1cae1de054 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..d4767d06b22bf --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +7fa28b510f0f16f4d5d7188b86bef59e048f62f9 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.111.Final.jar.sha1 deleted file mode 100644 index 226ee06d39d6c..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ea52ef6617a9b69b0baaebb7f0b80373527f9607 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.112.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..5291a16c10448 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +9aed7e78c467d06a47a45b5b27466380a6427e2f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.111.Final.jar.sha1 deleted file mode 100644 index dcc2b0c7ca923..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1e459c8630bb7c942b79a97e62dd728798de6a8c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.112.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..cf50574b87da0 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +b23c87a85451b3b0e7c3e8e89698cea6831a8418 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 deleted file mode 100644 index b22ad6784809b..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ac6a3d96935129ba45ea768ad30e31cad0d8c4d \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.112.Final.jar.sha1 
b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..24e8177190e04 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +375872f1c16bb51aac016ff6ee4f5d28b1288d4d \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 deleted file mode 100644 index 0847ac3034db7..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -acafc128cddafa021bc0b48b0788eb0e118add5e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..8dad0e3104dc8 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +b50ff619cdcdc48e748cba3405c9988529f28f60 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.20.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.20.jar.sha1 deleted file mode 100644 index 2f4d023c88c80..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.1.20.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1a5ef52a470a82d9313e2e1ad8ba064bdbd38948 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.22.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.22.jar.sha1 new file mode 100644 index 0000000000000..cc894568c5760 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.1.22.jar.sha1 @@ -0,0 +1 @@ +08356b59b29f86e7142c9daca0434653a64ae64b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.20.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.20.jar.sha1 deleted file mode 100644 index 6c031e00e39c1..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.1.20.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d4ee98405a5856cf0c9d7c1a70f3f14631e3c46 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.22.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.22.jar.sha1 new file mode 100644 index 0000000000000..2402813f831ce --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-http-1.1.22.jar.sha1 @@ -0,0 +1 @@ +2faf64b3822b0512f15d72a325e2826eb8564413 \ No newline at end of file diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java index f39ed185d8b35..4f30247f0af08 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java @@ -141,6 +141,9 @@ public Void run() { // - https://github.com/Azure/azure-sdk-for-java/pull/25004 // - https://github.com/Azure/azure-sdk-for-java/pull/24374 Configuration.getGlobalConfiguration().put("AZURE_JACKSON_ADAPTER_USE_ACCESS_HELPER", "true"); + // See please: + // - https://github.com/Azure/azure-sdk-for-java/issues/37464 + 
Configuration.getGlobalConfiguration().put("AZURE_ENABLE_SHUTDOWN_HOOK_WITH_PRIVILEGE", "true"); } public AzureStorageService(Settings settings) { diff --git a/plugins/repository-azure/src/main/plugin-metadata/plugin-security.policy b/plugins/repository-azure/src/main/plugin-metadata/plugin-security.policy index e8fbe35ebab1d..eedcfd98da150 100644 --- a/plugins/repository-azure/src/main/plugin-metadata/plugin-security.policy +++ b/plugins/repository-azure/src/main/plugin-metadata/plugin-security.policy @@ -38,6 +38,7 @@ grant { permission java.lang.RuntimePermission "accessDeclaredMembers"; permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; permission java.lang.RuntimePermission "setContextClassLoader"; + permission java.lang.RuntimePermission "shutdownHooks"; // azure client set Authenticator for proxy username/password permission java.net.NetPermission "setDefaultAuthenticator"; diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java index 71ffd0fd959f1..970388498ee26 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java @@ -157,7 +157,7 @@ private BlobContainer createBlobContainer(final int maxRetries) { + "/"; clientSettings.put(ENDPOINT_SUFFIX_SETTING.getConcreteSettingForNamespace(clientName).getKey(), endpoint); clientSettings.put(MAX_RETRIES_SETTING.getConcreteSettingForNamespace(clientName).getKey(), maxRetries); - clientSettings.put(TIMEOUT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), TimeValue.timeValueMillis(2000)); + clientSettings.put(TIMEOUT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), TimeValue.timeValueMillis(5000)); final MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString(ACCOUNT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), "account"); @@ -171,7 +171,7 @@ RequestRetryOptions createRetryPolicy(final AzureStorageSettings azureStorageSet return new RequestRetryOptions( RetryPolicyType.EXPONENTIAL, azureStorageSettings.getMaxRetries(), - 1, + 5, 10L, 100L, secondaryHost diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 63eb783649884..eeb5e2cd88317 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -66,17 +66,17 @@ dependencies { } api 'org.apache.htrace:htrace-core4:4.2.0-incubating' api "org.apache.logging.log4j:log4j-core:${versions.log4j}" - api 'org.apache.avro:avro:1.11.3' + api 'org.apache.avro:avro:1.12.0' api 'com.google.code.gson:gson:2.11.0' runtimeOnly "com.google.guava:guava:${versions.guava}" api "commons-logging:commons-logging:${versions.commonslogging}" - api 'commons-cli:commons-cli:1.8.0' + api 'commons-cli:commons-cli:1.9.0' api "commons-codec:commons-codec:${versions.commonscodec}" api 'commons-collections:commons-collections:3.2.2' api "org.apache.commons:commons-compress:${versions.commonscompress}" api 'org.apache.commons:commons-configuration2:2.11.0' api "commons-io:commons-io:${versions.commonsio}" - api 'org.apache.commons:commons-lang3:3.14.0' + api 'org.apache.commons:commons-lang3:3.16.0' implementation 'com.google.re2j:re2j:1.7' api 'javax.servlet:servlet-api:2.5' api 
"org.slf4j:slf4j-api:${versions.slf4j}" @@ -425,19 +425,6 @@ thirdPartyAudit { 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', 'org.apache.hadoop.shaded.org.xbill.DNS.spi.DNSJavaNameServiceDescriptor', - - 'org.apache.avro.reflect.FieldAccessUnsafe', - 'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeBooleanField', - 'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeByteField', - 'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeCachedField', - 'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeCharField', - 'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeCustomEncodedField', - 'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeDoubleField', - 'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeFloatField', - 'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeIntField', - 'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeLongField', - 'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeObjectField', - 'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeShortField', ) } diff --git a/plugins/repository-hdfs/licenses/avro-1.11.3.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.11.3.jar.sha1 deleted file mode 100644 index fb43ecbcf22c9..0000000000000 --- a/plugins/repository-hdfs/licenses/avro-1.11.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -02b463409b373bff9ece09f54a43d42da5cea55a \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/avro-1.12.0.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.12.0.jar.sha1 new file mode 100644 index 0000000000000..83f7bb3677159 --- /dev/null +++ b/plugins/repository-hdfs/licenses/avro-1.12.0.jar.sha1 @@ -0,0 +1 @@ +6e692a464b213f6df49f8e3e7fcf42df0dbb7639 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-cli-1.8.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-cli-1.8.0.jar.sha1 deleted file mode 100644 index 65102052409ea..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-cli-1.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -41a4bff12057eecb6daaf9c7f36c237815be3da1 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-cli-1.9.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-cli-1.9.0.jar.sha1 new file mode 100644 index 0000000000000..9a97a11dbe8d5 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-cli-1.9.0.jar.sha1 @@ -0,0 +1 @@ +e1cdfa8bf40ccbb7440b2d1232f9f45bb20a1844 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-lang3-3.14.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-lang3-3.14.0.jar.sha1 deleted file mode 100644 index d783e07e40902..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-lang3-3.14.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1ed471194b02f2c6cb734a0cd6f6f107c673afae \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-lang3-3.16.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-lang3-3.16.0.jar.sha1 new file mode 100644 index 0000000000000..ef4f1c1fc2002 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-lang3-3.16.0.jar.sha1 @@ -0,0 +1 @@ +3eb54effe40946dfb06dc5cd6c7ce4116cd51ea4 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.111.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.111.Final.jar.sha1 deleted file mode 100644 index 076124a7d1f89..0000000000000 --- 
a/plugins/repository-hdfs/licenses/netty-all-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8fba10bb4911517eb1bdcc05ef392499dda4d5ac \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.112.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..7c36b789e839c --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +d6b2e543749a86957777a46cf68aaa337cc558cb \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.17.1.jar.sha1 b/plugins/repository-s3/licenses/jackson-annotations-2.17.1.jar.sha1 deleted file mode 100644 index 4ceead1b7ae4f..0000000000000 --- a/plugins/repository-s3/licenses/jackson-annotations-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fca7ef6192c9ad05d07bc50da991bf937a84af3a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.17.2.jar.sha1 b/plugins/repository-s3/licenses/jackson-annotations-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..411e1d62459fd --- /dev/null +++ b/plugins/repository-s3/licenses/jackson-annotations-2.17.2.jar.sha1 @@ -0,0 +1 @@ +147b7b9412ffff24339f8aba080b292448e08698 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.17.1.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.17.1.jar.sha1 deleted file mode 100644 index 7cf1ac1b60301..0000000000000 --- a/plugins/repository-s3/licenses/jackson-databind-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0524dcbcccdde7d45a679dfc333e4763feb09079 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.17.2.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..f2b4dbdc5decb --- /dev/null +++ b/plugins/repository-s3/licenses/jackson-databind-2.17.2.jar.sha1 @@ -0,0 +1 @@ +e6deb029e5901e027c129341fac39e515066b68c \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.111.Final.jar.sha1 deleted file mode 100644 index 6784ac6c3b64f..0000000000000 --- a/plugins/repository-s3/licenses/netty-buffer-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b54863f578939e135d3b3aea610284ae57c188cf \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..5c26883046fed --- /dev/null +++ b/plugins/repository-s3/licenses/netty-buffer-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +bdc12df04bb6858890b8aa108060b5b365a26102 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.111.Final.jar.sha1 deleted file mode 100644 index 3d86194de9213..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6762ec00a6d268f9980741f5b755838bcd658bf \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..1fd224fdd0b44 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +c87f2ec3d9a97bd2b793d16817abb2bab93a7fc3 \ No newline at end of file diff --git 
a/plugins/repository-s3/licenses/netty-codec-http-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.111.Final.jar.sha1 deleted file mode 100644 index 4ef1adb818300..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c6ecbc452321e632bf3cea0f9758839b650455c7 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..22d35128c3ad5 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +81af1040bfa977f98dd0e1bd9639513ea862ca04 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 deleted file mode 100644 index 06c86b8fda557..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f0cca5df75bfb4f858d0435f601d8b1cae1de054 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..d4767d06b22bf --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +7fa28b510f0f16f4d5d7188b86bef59e048f62f9 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.111.Final.jar.sha1 deleted file mode 100644 index 16cb1cce7f504..0000000000000 --- a/plugins/repository-s3/licenses/netty-common-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58210befcb31adbcadd5724966a061444db91863 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..47af3100f0f2d --- /dev/null +++ b/plugins/repository-s3/licenses/netty-common-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +b2798069092a981a832b7510d0462ee9efb7a80e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.111.Final.jar.sha1 deleted file mode 100644 index 2f70f791f65ed..0000000000000 --- a/plugins/repository-s3/licenses/netty-handler-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2bc6a58ad2e9e279634b6e55022e8dcd3c175cc4 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..8b30272861770 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-handler-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +3d5e2d5bcc6baeeb8c13a230980c6132a778e036 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.111.Final.jar.sha1 deleted file mode 100644 index 621cbf58f3133..0000000000000 --- a/plugins/repository-s3/licenses/netty-resolver-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3493179999f211dc49714319f81da2be86523a3b \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.112.Final.jar.sha1 
b/plugins/repository-s3/licenses/netty-resolver-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..1a094fa19a623 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-resolver-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +58a631d9d44c4ed7cc0dcc9cffa6641da9374d72 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.111.Final.jar.sha1 deleted file mode 100644 index ac96e7545ed58..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -24e97cf14ea9d80afe4c5ab69066b587fccc154a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..5fbfde0836e0c --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +77cd136dd3843f5e7cbcf68c824975d745c49ddb \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.111.Final.jar.sha1 deleted file mode 100644 index 97001777eadf5..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8b97d32eb1489043e478deea99bd93ce487b82f6 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..0196dacfe92ba --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +67e590356eb53c20aaabd67f61ae66f628e62e3d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 deleted file mode 100644 index 0847ac3034db7..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -acafc128cddafa021bc0b48b0788eb0e118add5e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..8dad0e3104dc8 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +b50ff619cdcdc48e748cba3405c9988529f28f60 \ No newline at end of file diff --git a/plugins/telemetry-otel/build.gradle b/plugins/telemetry-otel/build.gradle index 66d172e3dc7f3..872d928aa093f 100644 --- a/plugins/telemetry-otel/build.gradle +++ b/plugins/telemetry-otel/build.gradle @@ -86,7 +86,9 @@ thirdPartyAudit { 'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider', 'kotlin.io.path.PathsKt', 'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider', - 'io.opentelemetry.sdk.autoconfigure.spi.internal.AutoConfigureListener' + 'io.opentelemetry.sdk.autoconfigure.spi.internal.AutoConfigureListener', + 'io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider', + 
'io.opentelemetry.sdk.autoconfigure.spi.internal.StructuredConfigProperties' ) } diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.39.0.jar.sha1 deleted file mode 100644 index 415fe8f3d8aaa..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -396b89a66526bd5694ad3bef4604b876177e0b44 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..ead8fb235fa12 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 @@ -0,0 +1 @@ +ec5ad3b420c9fba4b340e85a3199fd0f2accd023 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.39.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.39.0-alpha.jar.sha1 deleted file mode 100644 index 9c3c9f43d153c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.39.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1a1fd96155e1b58726300bbf8457630713035e51 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..b601a4fb5246f --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 @@ -0,0 +1 @@ +fd387313cc37a6e93062e9a80a2526634d22cb19 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.39.0.jar.sha1 deleted file mode 100644 index 115d4ccb1f34b..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-context-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f0601fb1c06f661afeffbc73a1dbe29797b2f13b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..74b7cb25cdfe5 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 @@ -0,0 +1 @@ +3d7cf15ef425053e24e825160ca7b4ac08d721aa \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.39.0.jar.sha1 deleted file mode 100644 index a10b92995becd..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -570d71e39e36fe2caad142557bde0c11fcdb3b92 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..d8d8f75850cb6 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 @@ -0,0 +1 @@ +cf92f4c1b60c2359c12f6f323f6a2a623c333910 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.39.0.jar.sha1 deleted file mode 100644 index f43393104296a..0000000000000 --- 
a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f5b528f8d6f8531836eabba698979516964b24ed \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..3e1212943f894 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 @@ -0,0 +1 @@ +8dee21440b811004ecc1c36c1cd44f9d3494546c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.39.0.jar.sha1 deleted file mode 100644 index 5adba2ba0f342..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -04fc0e4983253ea58430c3d24b6b3c5c95f84dc9 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..21a29cc8445e5 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 @@ -0,0 +1 @@ +d86e60b6d49e389ebe5797d42a7288a20d30c162 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.39.0.jar.sha1 deleted file mode 100644 index ea9c293f25025..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a2b8571e36b11c3153d31ec87ec69cc168af8036 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..ae522ac698aa8 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 @@ -0,0 +1 @@ +aeba3075b8dfd97779edadc0a3711d999bb0e396 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.39.0.jar.sha1 deleted file mode 100644 index dcf23f16ac89f..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1a8947a2e28924ad9374e319150a23837926ca4b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..a741d0a167d60 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 @@ -0,0 +1 @@ +368d7905d6a0a313c63e3a91f895a3a08500519e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.39.0.jar.sha1 deleted file mode 100644 index f603af04d8012..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ba9afdf3ef1ea51e42999fd68c959e3ceb219399 \ No newline at end of file diff --git 
a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..972e7de1c74be --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 @@ -0,0 +1 @@ +c740e8f7d0d914d6acd310ac53901bb8753c6e8d \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.39.0.jar.sha1 deleted file mode 100644 index f9419f6ccfbee..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fb8168627bf0059445f61081eaa47c4ab787fc2e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..c56ca0b9e8169 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 @@ -0,0 +1 @@ +b820861f85ba83db0ad896c47f723208d7473d5a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.39.0.jar.sha1 deleted file mode 100644 index 63269f239eacd..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b6b45155399bc9fa563945f3e3a77416d7165948 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..39db6cb73727f --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 @@ -0,0 +1 @@ +f88ee292f5605c87dfe85c8d90131bce9f0b3b8e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.39.0.jar.sha1 deleted file mode 100644 index f18c8259c1adc..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -522d46926cc06a4c18829da7e4c4340bdf5673c3 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..6dcd496e033d3 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 @@ -0,0 +1 @@ +9d1200befb28e3e9f61073ac3de23cc55e509dc7 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.39.0.jar.sha1 deleted file mode 100644 index 03b81424f46d5..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0b72722a5bbea5f46319bf08b2caed5b8f987a92 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..161e400f87077 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 @@ -0,0 +1 @@ +d9bbc2e2e800317d72fbf3141ae8391e95fa6229 \ No newline at end of file diff --git 
a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.25.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.25.0-alpha.jar.sha1 deleted file mode 100644 index 7cf8e7e8ede28..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.25.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -76b3d4ca0a8f20b27c1590ceece54f0c7fb5857e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..e986b4b53388e --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 @@ -0,0 +1 @@ +906d916bee46f60260c09314284b5948c54a0662 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.111.Final.jar.sha1 deleted file mode 100644 index 6784ac6c3b64f..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b54863f578939e135d3b3aea610284ae57c188cf \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.112.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..5c26883046fed --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +bdc12df04bb6858890b8aa108060b5b365a26102 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.111.Final.jar.sha1 deleted file mode 100644 index 3d86194de9213..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6762ec00a6d268f9980741f5b755838bcd658bf \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.112.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..1fd224fdd0b44 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +c87f2ec3d9a97bd2b793d16817abb2bab93a7fc3 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.111.Final.jar.sha1 deleted file mode 100644 index 4ef1adb818300..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c6ecbc452321e632bf3cea0f9758839b650455c7 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.112.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..22d35128c3ad5 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +81af1040bfa977f98dd0e1bd9639513ea862ca04 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.111.Final.jar.sha1 deleted file mode 100644 index 16cb1cce7f504..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58210befcb31adbcadd5724966a061444db91863 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.112.Final.jar.sha1 
b/plugins/transport-nio/licenses/netty-common-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..47af3100f0f2d --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +b2798069092a981a832b7510d0462ee9efb7a80e \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.111.Final.jar.sha1 deleted file mode 100644 index 2f70f791f65ed..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2bc6a58ad2e9e279634b6e55022e8dcd3c175cc4 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.112.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..8b30272861770 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +3d5e2d5bcc6baeeb8c13a230980c6132a778e036 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.111.Final.jar.sha1 deleted file mode 100644 index 621cbf58f3133..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3493179999f211dc49714319f81da2be86523a3b \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.112.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..1a094fa19a623 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +58a631d9d44c4ed7cc0dcc9cffa6641da9374d72 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.111.Final.jar.sha1 deleted file mode 100644 index ac96e7545ed58..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -24e97cf14ea9d80afe4c5ab69066b587fccc154a \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.112.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..5fbfde0836e0c --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +77cd136dd3843f5e7cbcf68c824975d745c49ddb \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/build.gradle b/plugins/transport-reactor-netty4/build.gradle index 1a94def3fdff1..089e57f062a9f 100644 --- a/plugins/transport-reactor-netty4/build.gradle +++ b/plugins/transport-reactor-netty4/build.gradle @@ -46,7 +46,7 @@ dependencies { api "io.projectreactor.netty:reactor-netty-core:${versions.reactor_netty}" testImplementation "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}" - testImplementation "io.projectreactor:reactor-test:${versions.reactor}" + javaRestTestImplementation "io.projectreactor:reactor-test:${versions.reactor}" testImplementation project(":modules:transport-netty4") } @@ -80,6 +80,10 @@ javaRestTest { systemProperty 'opensearch.set.netty.runtime.available.processors', 'false' } +testClusters.javaRestTest { + setting 'http.type', 'reactor-netty4' +} + thirdPartyAudit { ignoreMissingClasses( 'com.aayushatharva.brotli4j.Brotli4jLoader', diff --git 
a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 deleted file mode 100644 index 6784ac6c3b64f..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b54863f578939e135d3b3aea610284ae57c188cf \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..5c26883046fed --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +bdc12df04bb6858890b8aa108060b5b365a26102 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 deleted file mode 100644 index 3d86194de9213..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6762ec00a6d268f9980741f5b755838bcd658bf \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..1fd224fdd0b44 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +c87f2ec3d9a97bd2b793d16817abb2bab93a7fc3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 deleted file mode 100644 index 5e3f819012811..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f988dbb527efb0e7cf7d444cc50b0fc3f5f380ec \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..a42a41b6387c8 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +06724b184ee870ecc4d8fc36931beeb3c387b0ee \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 deleted file mode 100644 index 4ef1adb818300..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c6ecbc452321e632bf3cea0f9758839b650455c7 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..22d35128c3ad5 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +81af1040bfa977f98dd0e1bd9639513ea862ca04 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 deleted file mode 100644 index 06c86b8fda557..0000000000000 
--- a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f0cca5df75bfb4f858d0435f601d8b1cae1de054 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..d4767d06b22bf --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +7fa28b510f0f16f4d5d7188b86bef59e048f62f9 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 deleted file mode 100644 index 16cb1cce7f504..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58210befcb31adbcadd5724966a061444db91863 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..47af3100f0f2d --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +b2798069092a981a832b7510d0462ee9efb7a80e \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 deleted file mode 100644 index 2f70f791f65ed..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2bc6a58ad2e9e279634b6e55022e8dcd3c175cc4 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..8b30272861770 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +3d5e2d5bcc6baeeb8c13a230980c6132a778e036 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 deleted file mode 100644 index 621cbf58f3133..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3493179999f211dc49714319f81da2be86523a3b \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..1a094fa19a623 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +58a631d9d44c4ed7cc0dcc9cffa6641da9374d72 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 deleted file mode 100644 index b22ad6784809b..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ac6a3d96935129ba45ea768ad30e31cad0d8c4d \ No newline at end of file diff --git 
a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..24e8177190e04 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +375872f1c16bb51aac016ff6ee4f5d28b1288d4d \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 deleted file mode 100644 index ac96e7545ed58..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -24e97cf14ea9d80afe4c5ab69066b587fccc154a \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..5fbfde0836e0c --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +77cd136dd3843f5e7cbcf68c824975d745c49ddb \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 deleted file mode 100644 index 0847ac3034db7..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -acafc128cddafa021bc0b48b0788eb0e118add5e \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..8dad0e3104dc8 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +b50ff619cdcdc48e748cba3405c9988529f28f60 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.20.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.20.jar.sha1 deleted file mode 100644 index 2f4d023c88c80..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.20.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1a5ef52a470a82d9313e2e1ad8ba064bdbd38948 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.22.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.22.jar.sha1 new file mode 100644 index 0000000000000..cc894568c5760 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.22.jar.sha1 @@ -0,0 +1 @@ +08356b59b29f86e7142c9daca0434653a64ae64b \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.20.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.20.jar.sha1 deleted file mode 100644 index 6c031e00e39c1..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.20.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d4ee98405a5856cf0c9d7c1a70f3f14631e3c46 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.22.jar.sha1 
b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.22.jar.sha1 new file mode 100644 index 0000000000000..2402813f831ce --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.22.jar.sha1 @@ -0,0 +1 @@ +2faf64b3822b0512f15d72a325e2826eb8564413 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4BadRequestIT.java b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4BadRequestIT.java new file mode 100644 index 0000000000000..62834483b5e9b --- /dev/null +++ b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4BadRequestIT.java @@ -0,0 +1,115 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.rest; + +import org.opensearch.client.Request; +import org.opensearch.client.RequestOptions; +import org.opensearch.client.Response; +import org.opensearch.client.ResponseException; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.http.HttpTransportSettings; +import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.opensearch.test.rest.yaml.ObjectPath; + +import java.io.IOException; +import java.nio.charset.Charset; +import java.util.Map; + +import static org.opensearch.core.rest.RestStatus.REQUEST_URI_TOO_LONG; +import static org.hamcrest.Matchers.equalTo; + +public class ReactorNetty4BadRequestIT extends OpenSearchRestTestCase { + + public void testBadRequest() throws IOException { + final Response response = client().performRequest(new Request("GET", "/_nodes/settings")); + final ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Map<String, Object> map = objectPath.evaluate("nodes"); + int maxMaxInitialLineLength = Integer.MIN_VALUE; + final Setting<ByteSizeValue> httpMaxInitialLineLength = HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH; + final String key = httpMaxInitialLineLength.getKey().substring("http.".length()); + for (Map.Entry<String, Object> entry : map.entrySet()) { + @SuppressWarnings("unchecked") + final Map<String, Object> settings = (Map<String, Object>) ((Map<String, Object>) entry.getValue()).get("settings"); + final int maxInitialLineLength; + if (settings.containsKey("http")) { + @SuppressWarnings("unchecked") + final Map<String, Object> httpSettings = (Map<String, Object>) settings.get("http"); + if (httpSettings.containsKey(key)) { + maxInitialLineLength = ByteSizeValue.parseBytesSizeValue((String) httpSettings.get(key), key).bytesAsInt(); + } else { + maxInitialLineLength = httpMaxInitialLineLength.getDefault(Settings.EMPTY).bytesAsInt(); + } + } else { + maxInitialLineLength = httpMaxInitialLineLength.getDefault(Settings.EMPTY).bytesAsInt(); + } + maxMaxInitialLineLength = Math.max(maxMaxInitialLineLength, maxInitialLineLength); + } + + final String path = "/" + new String(new byte[maxMaxInitialLineLength], Charset.forName("UTF-8")).replace('\0', 'a'); + final ResponseException e = expectThrows( + ResponseException.class, + () -> client().performRequest(new Request(randomFrom("GET", "POST", "PUT"), path)) + ); + // The reactor-netty implementation does not provide a hook to customize or intercept request decoder errors at the moment (see + https://github.com/reactor/reactor-netty/issues/3327).
+ assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(REQUEST_URI_TOO_LONG.getStatus())); + } + + public void testInvalidParameterValue() throws IOException { + final Request request = new Request("GET", "/_cluster/settings"); + request.addParameter("pretty", "neither-true-nor-false"); + final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); + final Response response = e.getResponse(); + assertThat(response.getStatusLine().getStatusCode(), equalTo(400)); + final ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Map<String, Object> map = objectPath.evaluate("error"); + assertThat(map.get("type"), equalTo("illegal_argument_exception")); + assertThat(map.get("reason"), equalTo("Failed to parse value [neither-true-nor-false] as only [true] or [false] are allowed.")); + } + + public void testInvalidHeaderValue() throws IOException { + final Request request = new Request("GET", "/_cluster/settings"); + final RequestOptions.Builder options = request.getOptions().toBuilder(); + options.addHeader("Content-Type", "\t"); + request.setOptions(options); + final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); + final Response response = e.getResponse(); + assertThat(response.getStatusLine().getStatusCode(), equalTo(400)); + final ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Map<String, Object> map = objectPath.evaluate("error"); + assertThat(map.get("type"), equalTo("content_type_header_exception")); + assertThat(map.get("reason"), equalTo("java.lang.IllegalArgumentException: invalid Content-Type header []")); + } +} diff --git a/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4HeadBodyIsEmptyIT.java b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4HeadBodyIsEmptyIT.java new file mode 100644 index 0000000000000..663eb9ef6e946 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4HeadBodyIsEmptyIT.java @@ -0,0 +1,204 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details.
+ */ + +package org.opensearch.rest; + +import org.opensearch.client.Request; +import org.opensearch.client.Response; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.hamcrest.Matcher; + +import java.io.IOException; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.core.rest.RestStatus.NOT_FOUND; +import static org.opensearch.core.rest.RestStatus.OK; +import static org.hamcrest.Matchers.greaterThan; + +public class ReactorNetty4HeadBodyIsEmptyIT extends OpenSearchRestTestCase { + public void testHeadRoot() throws IOException { + headTestCase("/", emptyMap(), greaterThan(0)); + headTestCase("/", singletonMap("pretty", ""), greaterThan(0)); + headTestCase("/", singletonMap("pretty", "true"), greaterThan(0)); + } + + private void createTestDoc() throws IOException { + createTestDoc("test"); + } + + private void createTestDoc(final String indexName) throws IOException { + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.field("test", "test"); + } + builder.endObject(); + Request request = new Request("PUT", "/" + indexName + "/_doc/" + "1"); + request.setJsonEntity(builder.toString()); + client().performRequest(request); + } + } + + public void testDocumentExists() throws IOException { + createTestDoc(); + headTestCase("/test/_doc/1", emptyMap(), greaterThan(0)); + headTestCase("/test/_doc/1", singletonMap("pretty", "true"), greaterThan(0)); + headTestCase("/test/_doc/2", emptyMap(), NOT_FOUND.getStatus(), greaterThan(0)); + } + + public void testIndexExists() throws IOException { + createTestDoc(); + headTestCase("/test", emptyMap(), greaterThan(0)); + headTestCase("/test", singletonMap("pretty", "true"), greaterThan(0)); + } + + public void testAliasExists() throws IOException { + createTestDoc(); + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.startArray("actions"); + { + builder.startObject(); + { + builder.startObject("add"); + { + builder.field("index", "test"); + builder.field("alias", "test_alias"); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endArray(); + } + builder.endObject(); + + Request request = new Request("POST", "/_aliases"); + request.setJsonEntity(builder.toString()); + client().performRequest(request); + headTestCase("/_alias/test_alias", emptyMap(), greaterThan(0)); + headTestCase("/test/_alias/test_alias", emptyMap(), greaterThan(0)); + } + } + + public void testAliasDoesNotExist() throws IOException { + createTestDoc(); + headTestCase("/_alias/test_alias", emptyMap(), NOT_FOUND.getStatus(), greaterThan(0)); + headTestCase("/test/_alias/test_alias", emptyMap(), NOT_FOUND.getStatus(), greaterThan(0)); + } + + public void testTemplateExists() throws IOException { + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.array("index_patterns", "*"); + builder.startObject("settings"); + { + builder.field("number_of_replicas", 0); + } + builder.endObject(); + } + builder.endObject(); + + Request request = new Request("PUT", "/_template/template"); + request.setJsonEntity(builder.toString()); + client().performRequest(request); + headTestCase("/_template/template", emptyMap(), greaterThan(0)); + } + } + + public void testGetSourceAction() throws IOException { + createTestDoc(); + 
headTestCase("/test/_source/1", emptyMap(), greaterThan(0)); + headTestCase("/test/_source/2", emptyMap(), NOT_FOUND.getStatus(), greaterThan(0)); + + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.startObject("mappings"); + { + builder.startObject("_source"); + { + builder.field("enabled", false); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + + Request request = new Request("PUT", "/test-no-source"); + request.setJsonEntity(builder.toString()); + client().performRequest(request); + createTestDoc("test-no-source"); + headTestCase("/test-no-source/_source/1", emptyMap(), NOT_FOUND.getStatus(), greaterThan(0)); + } + } + + public void testException() throws IOException { + /* + * This will throw an index not found exception which will be sent on the channel; previously when handling HEAD requests that would + * throw an exception, the content was swallowed and a content length header of zero was returned. Instead of swallowing the content + * we now let it rise up to the upstream channel so that it can compute the content length that would be returned. This test case is + * a test for this situation. + */ + headTestCase("/index-not-found-exception", emptyMap(), NOT_FOUND.getStatus(), greaterThan(0)); + } + + private void headTestCase(final String url, final Map params, final Matcher matcher) throws IOException { + headTestCase(url, params, OK.getStatus(), matcher); + } + + private void headTestCase( + final String url, + final Map params, + final int expectedStatusCode, + final Matcher matcher, + final String... expectedWarnings + ) throws IOException { + Request request = new Request("HEAD", url); + for (Map.Entry param : params.entrySet()) { + request.addParameter(param.getKey(), param.getValue()); + } + request.setOptions(expectWarnings(expectedWarnings)); + Response response = client().performRequest(request); + assertEquals(expectedStatusCode, response.getStatusLine().getStatusCode()); + assertThat(Integer.valueOf(response.getHeader("Content-Length")), matcher); + assertNull("HEAD requests shouldn't have a response body but " + url + " did", response.getEntity()); + } + +} diff --git a/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingIT.java b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingIT.java new file mode 100644 index 0000000000000..c564e289e3f88 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingIT.java @@ -0,0 +1,139 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.rest; + +import org.opensearch.client.Request; +import org.opensearch.client.Response; +import org.opensearch.client.ResponseException; +import org.opensearch.client.StreamingRequest; +import org.opensearch.client.StreamingResponse; +import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.opensearch.test.rest.yaml.ObjectPath; +import org.junit.After; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import reactor.core.publisher.Flux; +import reactor.test.StepVerifier; +import reactor.test.scheduler.VirtualTimeScheduler; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.collection.IsEmptyCollection.empty; + +public class ReactorNetty4StreamingIT extends OpenSearchRestTestCase { + @After + @Override + public void tearDown() throws Exception { + final Request request = new Request("DELETE", "/test-streaming"); + request.addParameter("ignore_unavailable", "true"); + + final Response response = client().performRequest(request); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + + super.tearDown(); + } + + public void testStreamingRequest() throws IOException { + final VirtualTimeScheduler scheduler = VirtualTimeScheduler.create(true); + + final Stream<String> stream = IntStream.range(1, 6) + .mapToObj(id -> "{ \"index\": { \"_index\": \"test-streaming\", \"_id\": \"" + id + "\" } }\n" + "{ \"name\": \"josh\" }\n"); + + final Duration delay = Duration.ofMillis(1); + final StreamingRequest<ByteBuffer, ByteBuffer> streamingRequest = new StreamingRequest<>( + "POST", + "/_bulk/stream", + Flux.fromStream(stream).delayElements(delay, scheduler).map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) + ); + streamingRequest.addParameter("refresh", "true"); + + final StreamingResponse<ByteBuffer, ByteBuffer> streamingResponse = client().streamRequest(streamingRequest); + scheduler.advanceTimeBy(delay); /* emit first element */ + + StepVerifier.create(Flux.from(streamingResponse.getBody()).map(b -> new String(b.array(), StandardCharsets.UTF_8))) + .expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"1\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"2\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"3\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"4\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"5\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectComplete() + .verify(); + + assertThat(streamingResponse.getStatusLine().getStatusCode(), equalTo(200)); + assertThat(streamingResponse.getWarnings(), empty()); + + final Request request = new Request("GET", "/test-streaming/_count"); + final Response response = client().performRequest(request); + final ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Integer count = objectPath.evaluate("count"); + assertThat(count, equalTo(5)); + } + + public void testStreamingBadRequest() throws IOException { + final Stream<String> stream = Stream.of( + "{ \"index\": { \"_index\": \"test-streaming\", \"_id\": \"1\" } }\n" + "{ \"name\": \"josh\" }\n" + ); + + final
StreamingRequest<ByteBuffer, ByteBuffer> streamingRequest = new StreamingRequest<>( + "POST", + "/_bulk/stream", + Flux.fromStream(stream).map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) + ); + streamingRequest.addParameter("refresh", "not-supported-policy"); + + final StreamingResponse<ByteBuffer, ByteBuffer> streamingResponse = client().streamRequest(streamingRequest); + StepVerifier.create(Flux.from(streamingResponse.getBody()).map(b -> new String(b.array(), StandardCharsets.UTF_8))) + .expectErrorMatches( + ex -> ex instanceof ResponseException && ((ResponseException) ex).getResponse().getStatusLine().getStatusCode() == 400 + ) + .verify(Duration.ofSeconds(10)); + assertThat(streamingResponse.getStatusLine().getStatusCode(), equalTo(400)); + assertThat(streamingResponse.getWarnings(), empty()); + } + + public void testStreamingBadStream() throws IOException { + final VirtualTimeScheduler scheduler = VirtualTimeScheduler.create(true); + + final Stream<String> stream = Stream.of( + "{ \"index\": { \"_index\": \"test-streaming\", \"_id\": \"1\" } }\n" + "{ \"name\": \"josh\" }\n", + "{ \"name\": \"josh\" }\n" + ); + + final Duration delay = Duration.ofMillis(1); + final StreamingRequest<ByteBuffer, ByteBuffer> streamingRequest = new StreamingRequest<>( + "POST", + "/_bulk/stream", + Flux.fromStream(stream).delayElements(delay, scheduler).map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) + ); + + final StreamingResponse<ByteBuffer, ByteBuffer> streamingResponse = client().streamRequest(streamingRequest); + scheduler.advanceTimeBy(delay); /* emit first element */ + + StepVerifier.create(Flux.from(streamingResponse.getBody()).map(b -> new String(b.array(), StandardCharsets.UTF_8))) + .expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"1\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectNextMatches(s -> s.contains("\"type\":\"illegal_argument_exception\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectComplete() + .verify(); + + assertThat(streamingResponse.getStatusLine().getStatusCode(), equalTo(200)); + assertThat(streamingResponse.getWarnings(), empty()); + } +} diff --git a/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingStressIT.java b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingStressIT.java new file mode 100644 index 0000000000000..a978af1b11db4 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingStressIT.java @@ -0,0 +1,95 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
diff --git a/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingStressIT.java b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingStressIT.java
new file mode 100644
index 0000000000000..a978af1b11db4
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingStressIT.java
@@ -0,0 +1,95 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.rest;
+
+import org.apache.hc.core5.http.ConnectionClosedException;
+import org.opensearch.client.Request;
+import org.opensearch.client.Response;
+import org.opensearch.client.StreamingRequest;
+import org.opensearch.client.StreamingResponse;
+import org.opensearch.test.rest.OpenSearchRestTestCase;
+import org.junit.After;
+
+import java.io.InterruptedIOException;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.time.Duration;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Stream;
+
+import reactor.core.publisher.Flux;
+import reactor.test.subscriber.TestSubscriber;
+
+import static org.hamcrest.CoreMatchers.anyOf;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.hamcrest.CoreMatchers.not;
+import static org.hamcrest.collection.IsEmptyCollection.empty;
+
+public class ReactorNetty4StreamingStressIT extends OpenSearchRestTestCase {
+    @After
+    @Override
+    public void tearDown() throws Exception {
+        final Request request = new Request("DELETE", "/test-stress-streaming");
+        request.addParameter("ignore_unavailable", "true");
+
+        final Response response = adminClient().performRequest(request);
+        assertThat(response.getStatusLine().getStatusCode(), equalTo(200));
+
+        super.tearDown();
+    }
+
+    public void testCloseClientStreamingRequest() throws Exception {
+        final AtomicInteger id = new AtomicInteger(0);
+        final Stream<String> stream = Stream.generate(
+            () -> "{ \"index\": { \"_index\": \"test-stress-streaming\", \"_id\": \""
+                + id.incrementAndGet()
+                + "\" } }\n"
+                + "{ \"name\": \"josh\" }\n"
+        );
+
+        final StreamingRequest<ByteBuffer> streamingRequest = new StreamingRequest<>(
+            "POST",
+            "/_bulk/stream",
+            Flux.fromStream(stream).delayElements(Duration.ofMillis(500)).map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8)))
+        );
+        streamingRequest.addParameter("refresh", "true");
+
+        final StreamingResponse<ByteBuffer> streamingResponse = client().streamRequest(streamingRequest);
+        TestSubscriber<ByteBuffer> subscriber = TestSubscriber.create();
+        streamingResponse.getBody().subscribe(subscriber);
+
+        final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
+        try {
+            // Await for subscriber to receive at least one chunk
+            assertBusy(() -> assertThat(subscriber.getReceivedOnNext(), not(empty())));
+
+            // Close the client forcibly
+            executor.schedule(() -> {
+                client().close();
+                return null;
+            }, 2, TimeUnit.SECONDS);
+
+            // Await for subscriber to terminate
+            subscriber.block(Duration.ofSeconds(10));
+            assertThat(
+                subscriber.expectTerminalError(),
+                anyOf(instanceOf(InterruptedIOException.class), instanceOf(ConnectionClosedException.class))
+            );
+        } finally {
+            executor.shutdown();
+            if (executor.awaitTermination(1, TimeUnit.SECONDS) == false) {
+                executor.shutdownNow();
+            }
+        }
+    }
+}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java
index 906bbfd072da8..7f4a8f6cdef02 100644
--- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java
@@ -44,6 +44,7 @@
 import java.util.List;
 import java.util.Optional;
 
+import io.netty.buffer.ByteBuf;
 import io.netty.buffer.ByteBufAllocator;
 import io.netty.channel.ChannelOption;
 import io.netty.channel.socket.nio.NioChannelOption;
@@ -390,7 +391,9 @@ protected Publisher<Void> incomingRequest(HttpServerRequest request, HttpServerResponse response) {
             response.chunkedTransfer(false);
             response.compression(true);
             r.headers().forEach(h -> response.addHeader(h.getKey(), h.getValue()));
-            return Mono.from(response.sendObject(r.content()));
+
+            final ByteBuf content = r.content().copy();
+            return Mono.from(response.sendObject(content));
         });
     }
 }
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingHttpChannel.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingHttpChannel.java
index 7df0b3c0c35fe..3dae2d57cf6a6 100644
--- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingHttpChannel.java
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingHttpChannel.java
@@ -55,9 +55,14 @@ public void addCloseListener(ActionListener<Void> listener) {
 
     @Override
     public void sendResponse(HttpResponse response, ActionListener<Void> listener) {
-        emitter.next(createResponse(response));
-        listener.onResponse(null);
-        emitter.complete();
+        try {
+            emitter.next(createResponse(response));
+            listener.onResponse(null);
+            emitter.complete();
+        } catch (final Exception ex) {
+            emitter.error(ex);
+            listener.onFailure(ex);
+        }
     }
 
     @Override
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingHttpChannel.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingHttpChannel.java
index 56dadea0477c5..1aa03aa9967e2 100644
--- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingHttpChannel.java
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingHttpChannel.java
@@ -101,6 +101,8 @@ public void receiveChunk(HttpChunk message) {
                 lastChunkReceived = true;
                 producer.complete();
             }
+        } catch (final Exception ex) {
+            producer.error(ex);
         } finally {
             message.close();
         }
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingRequestConsumer.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingRequestConsumer.java
index f34f54e561021..8ed6710c8a1e3 100644
--- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingRequestConsumer.java
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingRequestConsumer.java
@@ -44,7 +44,7 @@ public void subscribe(Subscriber<? super HttpChunk> s) {
     }
 
     HttpChunk createChunk(HttpContent chunk, boolean last) {
-        return new ReactorNetty4HttpChunk(chunk.copy().content(), last);
+        return new ReactorNetty4HttpChunk(chunk.copy().content(), last);
     }
 
     StreamingHttpChannel httpChannel() {
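The `.copy()` changes above appear to address Netty reference counting: reactor-netty owns the request buffers and may release them once the handler returns, so handing the original `ByteBuf` to an asynchronous send risks a use-after-release. Copying detaches the bytes from the pipeline's lifecycle. A hypothetical illustration of the hazard (`process` is a placeholder for any async step, not an API from this patch):

    // Illustrative only: why copying a pipeline-owned buffer can be safer.
    ByteBuf original = request.content();   // ref-counted, owned by the Netty pipeline
    // process(original);                   // unsafe: the pipeline may release() it first
    ByteBuf detached = original.copy();     // independent buffer with its own lifecycle
    process(detached);                      // the caller now controls when it is released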
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingResponseProducer.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingResponseProducer.java
index 616edccdfc396..6aaccc500072b 100644
--- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingResponseProducer.java
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingResponseProducer.java
@@ -21,7 +21,11 @@ class ReactorNetty4StreamingResponseProducer implements StreamingHttpContentSender {
     private volatile FluxSink<HttpContent> emitter;
 
     ReactorNetty4StreamingResponseProducer() {
-        this.sender = Flux.create(emitter -> this.emitter = emitter);
+        this.sender = Flux.create(emitter -> register(emitter));
+    }
+
+    private void register(FluxSink<HttpContent> emitter) {
+        this.emitter = emitter;
     }
 
     @Override
diff --git a/plugins/query-insights/build.gradle b/plugins/workload-management/build.gradle
similarity index 61%
rename from plugins/query-insights/build.gradle
rename to plugins/workload-management/build.gradle
index eabbd395bd3bd..cb14d22ef149f 100644
--- a/plugins/query-insights/build.gradle
+++ b/plugins/workload-management/build.gradle
@@ -9,9 +9,12 @@
  * GitHub history for details.
  */
 
+apply plugin: 'opensearch.yaml-rest-test'
+apply plugin: 'opensearch.internal-cluster-test'
+
 opensearchplugin {
-  description 'OpenSearch Query Insights Plugin.'
-  classname 'org.opensearch.plugin.insights.QueryInsightsPlugin'
+  description 'OpenSearch Workload Management Plugin.'
+  classname 'org.opensearch.plugin.wlm.WorkloadManagementPlugin'
 }
 
 dependencies {
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java
new file mode 100644
index 0000000000000..64f510fa1db67
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java
@@ -0,0 +1,81 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm;
+
+import org.opensearch.action.ActionRequest;
+import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
+import org.opensearch.cluster.node.DiscoveryNodes;
+import org.opensearch.common.inject.Module;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.IndexScopedSettings;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.settings.SettingsFilter;
+import org.opensearch.core.action.ActionResponse;
+import org.opensearch.plugin.wlm.action.CreateQueryGroupAction;
+import org.opensearch.plugin.wlm.action.DeleteQueryGroupAction;
+import org.opensearch.plugin.wlm.action.GetQueryGroupAction;
+import org.opensearch.plugin.wlm.action.TransportCreateQueryGroupAction;
+import org.opensearch.plugin.wlm.action.TransportDeleteQueryGroupAction;
+import org.opensearch.plugin.wlm.action.TransportGetQueryGroupAction;
+import org.opensearch.plugin.wlm.rest.RestCreateQueryGroupAction;
+import org.opensearch.plugin.wlm.rest.RestDeleteQueryGroupAction;
+import org.opensearch.plugin.wlm.rest.RestGetQueryGroupAction;
+import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService;
+import org.opensearch.plugins.ActionPlugin;
+import org.opensearch.plugins.Plugin;
+import org.opensearch.rest.RestController;
+import org.opensearch.rest.RestHandler;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.function.Supplier;
+
+/**
+ * Plugin class for WorkloadManagement
+ */
+public class WorkloadManagementPlugin extends Plugin implements ActionPlugin {
+
+    /**
+     * Default constructor
+     */
+    public WorkloadManagementPlugin() {}
+
+    @Override
+    public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() {
+        return List.of(
+            new ActionPlugin.ActionHandler<>(CreateQueryGroupAction.INSTANCE, TransportCreateQueryGroupAction.class),
+            new ActionPlugin.ActionHandler<>(GetQueryGroupAction.INSTANCE, TransportGetQueryGroupAction.class),
+            new ActionPlugin.ActionHandler<>(DeleteQueryGroupAction.INSTANCE, TransportDeleteQueryGroupAction.class)
+        );
+    }
+
+    @Override
+    public List<RestHandler> getRestHandlers(
+        Settings settings,
+        RestController restController,
+        ClusterSettings clusterSettings,
+        IndexScopedSettings indexScopedSettings,
+        SettingsFilter settingsFilter,
+        IndexNameExpressionResolver indexNameExpressionResolver,
+        Supplier<DiscoveryNodes> nodesInCluster
+    ) {
+        return List.of(new RestCreateQueryGroupAction(), new RestGetQueryGroupAction(), new RestDeleteQueryGroupAction());
+    }
+
+    @Override
+    public List<Setting<?>> getSettings() {
+        return List.of(QueryGroupPersistenceService.MAX_QUERY_GROUP_COUNT);
+    }
+
+    @Override
+    public Collection<Module> createGuiceModules() {
+        return List.of(new WorkloadManagementPluginModule());
+    }
+}
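Besides the REST routes registered above, the actions this plugin wires up can be invoked programmatically through a node client, which is what the REST layer further down does internally. A hedged sketch (the listener bodies are illustrative, not part of this patch):

    // Illustrative: executing the create action registered by the plugin above.
    client.execute(CreateQueryGroupAction.INSTANCE, createQueryGroupRequest, new ActionListener<>() {
        @Override
        public void onResponse(CreateQueryGroupResponse response) {
            // carries the persisted QueryGroup plus a RestStatus
        }

        @Override
        public void onFailure(Exception e) {
            // e.g. a duplicate name or resource limits summing past 1.0
        }
    });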
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPluginModule.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPluginModule.java
new file mode 100644
index 0000000000000..b7c7805639eb2
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPluginModule.java
@@ -0,0 +1,31 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm;
+
+import org.opensearch.common.inject.AbstractModule;
+import org.opensearch.common.inject.Singleton;
+import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService;
+
+/**
+ * Guice Module to manage WorkloadManagement related objects
+ */
+public class WorkloadManagementPluginModule extends AbstractModule {
+
+    /**
+     * Constructor for WorkloadManagementPluginModule
+     */
+    public WorkloadManagementPluginModule() {}
+
+    @Override
+    protected void configure() {
+        // Bind QueryGroupPersistenceService as a singleton to ensure a single instance is used,
+        // preventing multiple throttling key registrations in the constructor.
+        bind(QueryGroupPersistenceService.class).in(Singleton.class);
+    }
+}
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupAction.java
new file mode 100644
index 0000000000000..14cb8cfcd125a
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupAction.java
@@ -0,0 +1,36 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.action;
+
+import org.opensearch.action.ActionType;
+
+/**
+ * Action type to create a QueryGroup
+ *
+ * @opensearch.experimental
+ */
+public class CreateQueryGroupAction extends ActionType<CreateQueryGroupResponse> {
+
+    /**
+     * An instance of CreateQueryGroupAction
+     */
+    public static final CreateQueryGroupAction INSTANCE = new CreateQueryGroupAction();
+
+    /**
+     * Name for CreateQueryGroupAction
+     */
+    public static final String NAME = "cluster:admin/opensearch/wlm/query_group/_create";
+
+    /**
+     * Default constructor
+     */
+    private CreateQueryGroupAction() {
+        super(NAME, CreateQueryGroupResponse::new);
+    }
+}
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequest.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequest.java
new file mode 100644
index 0000000000000..ff6422be36885
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequest.java
@@ -0,0 +1,82 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.action;
+
+import org.opensearch.action.ActionRequest;
+import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.cluster.metadata.QueryGroup;
+import org.opensearch.common.UUIDs;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.xcontent.XContentParser;
+import org.joda.time.Instant;
+
+import java.io.IOException;
+
+/**
+ * A request to create a QueryGroup
+ * User input schema:
+ *  {
+ *    "name": "analytics",
+ *    "resiliency_mode": "enforced",
+ *    "resource_limits": {
+ *           "cpu" : 0.4,
+ *           "memory" : 0.2
+ *     }
+ *  }
+ *
+ * @opensearch.experimental
+ */
+public class CreateQueryGroupRequest extends ActionRequest {
+    private final QueryGroup queryGroup;
+
+    /**
+     * Constructor for CreateQueryGroupRequest
+     * @param queryGroup - A {@link QueryGroup} object
+     */
+    public CreateQueryGroupRequest(QueryGroup queryGroup) {
+        this.queryGroup = queryGroup;
+    }
+
+    /**
+     * Constructor for CreateQueryGroupRequest
+     * @param in - A {@link StreamInput} object
+     */
+    public CreateQueryGroupRequest(StreamInput in) throws IOException {
+        super(in);
+        queryGroup = new QueryGroup(in);
+    }
+
+    /**
+     * Generate a CreateQueryGroupRequest from XContent
+     * @param parser - A {@link XContentParser} object
+     */
+    public static CreateQueryGroupRequest fromXContent(XContentParser parser) throws IOException {
+        QueryGroup.Builder builder = QueryGroup.Builder.fromXContent(parser);
+        return new CreateQueryGroupRequest(builder._id(UUIDs.randomBase64UUID()).updatedAt(Instant.now().getMillis()).build());
+    }
+
+    @Override
+    public ActionRequestValidationException validate() {
+        return null;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        queryGroup.writeTo(out);
+    }
+
+    /**
+     * QueryGroup getter
+     */
+    public QueryGroup getQueryGroup() {
+        return queryGroup;
+    }
+}
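A short sketch of how the documented input schema turns into a request via fromXContent above. The parser construction uses the generic XContent utilities and may differ slightly by version; the JSON mirrors the schema in the javadoc:

    // Illustrative: parse the documented user-input schema into a create request.
    String json = "{ \"name\": \"analytics\", \"resiliency_mode\": \"enforced\", "
        + "\"resource_limits\": { \"cpu\": 0.4, \"memory\": 0.2 } }";
    try (
        XContentParser parser = XContentType.JSON.xContent()
            .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)
    ) {
        // fromXContent assigns a random _id and stamps updatedAt with the current time
        CreateQueryGroupRequest request = CreateQueryGroupRequest.fromXContent(parser);
    }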
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponse.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponse.java
new file mode 100644
index 0000000000000..9a2a8178c0a29
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponse.java
@@ -0,0 +1,74 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.action;
+
+import org.opensearch.cluster.metadata.QueryGroup;
+import org.opensearch.core.action.ActionResponse;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.core.xcontent.ToXContentObject;
+import org.opensearch.core.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * Response for the create API for QueryGroup
+ *
+ * @opensearch.experimental
+ */
+public class CreateQueryGroupResponse extends ActionResponse implements ToXContent, ToXContentObject {
+    private final QueryGroup queryGroup;
+    private final RestStatus restStatus;
+
+    /**
+     * Constructor for CreateQueryGroupResponse
+     * @param queryGroup - The QueryGroup to be included in the response
+     * @param restStatus - The restStatus for the response
+     */
+    public CreateQueryGroupResponse(final QueryGroup queryGroup, RestStatus restStatus) {
+        this.queryGroup = queryGroup;
+        this.restStatus = restStatus;
+    }
+
+    /**
+     * Constructor for CreateQueryGroupResponse
+     * @param in - A {@link StreamInput} object
+     */
+    public CreateQueryGroupResponse(StreamInput in) throws IOException {
+        queryGroup = new QueryGroup(in);
+        restStatus = RestStatus.readFrom(in);
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        queryGroup.writeTo(out);
+        RestStatus.writeTo(out, restStatus);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        return queryGroup.toXContent(builder, params);
+    }
+
+    /**
+     * queryGroup getter
+     */
+    public QueryGroup getQueryGroup() {
+        return queryGroup;
+    }
+
+    /**
+     * restStatus getter
+     */
+    public RestStatus getRestStatus() {
+        return restStatus;
+    }
+}
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java
new file mode 100644
index 0000000000000..c78952a2f89ad
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java
@@ -0,0 +1,37 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.action;
+
+import org.opensearch.action.ActionType;
+import org.opensearch.action.support.master.AcknowledgedResponse;
+
+/**
+ * Action type to delete a QueryGroup
+ *
+ * @opensearch.experimental
+ */
+public class DeleteQueryGroupAction extends ActionType<AcknowledgedResponse> {
+
+    /**
+     * An instance of DeleteQueryGroupAction
+     */
+    public static final DeleteQueryGroupAction INSTANCE = new DeleteQueryGroupAction();
+
+    /**
+     * Name for DeleteQueryGroupAction
+     */
+    public static final String NAME = "cluster:admin/opensearch/wlm/query_group/_delete";
+
+    /**
+     * Default constructor
+     */
+    private DeleteQueryGroupAction() {
+        super(NAME, AcknowledgedResponse::new);
+    }
+}
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java
new file mode 100644
index 0000000000000..e514943c2c7e9
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java
@@ -0,0 +1,65 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.action;
+
+import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * A request to delete a QueryGroup
+ *
+ * @opensearch.experimental
+ */
+public class DeleteQueryGroupRequest extends AcknowledgedRequest<DeleteQueryGroupRequest> {
+    private final String name;
+
+    /**
+     * Default constructor for DeleteQueryGroupRequest
+     * @param name - name of the QueryGroup to delete
+     */
+    public DeleteQueryGroupRequest(String name) {
+        this.name = name;
+    }
+
+    /**
+     * Constructor for DeleteQueryGroupRequest
+     * @param in - A {@link StreamInput} object
+     */
+    public DeleteQueryGroupRequest(StreamInput in) throws IOException {
+        super(in);
+        name = in.readOptionalString();
+    }
+
+    @Override
+    public ActionRequestValidationException validate() {
+        if (name == null) {
+            ActionRequestValidationException actionRequestValidationException = new ActionRequestValidationException();
+            actionRequestValidationException.addValidationError("QueryGroup name is missing");
+            return actionRequestValidationException;
+        }
+        return null;
+    }
+
+    /**
+     * Name getter
+     */
+    public String getName() {
+        return name;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        out.writeOptionalString(name);
+    }
+}
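A small sketch of the validation contract above: since the name travels as an optional string on the wire, a null name is the only rejection path. The cast disambiguates between the String and StreamInput constructors; values are illustrative:

    // Illustrative: exercising DeleteQueryGroupRequest.validate().
    DeleteQueryGroupRequest valid = new DeleteQueryGroupRequest("analytics");
    assert valid.validate() == null;                  // a named request passes

    DeleteQueryGroupRequest invalid = new DeleteQueryGroupRequest((String) null);
    assert invalid.validate() != null;                // "QueryGroup name is missing"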
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupAction.java
new file mode 100644
index 0000000000000..0200185580f7d
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupAction.java
@@ -0,0 +1,36 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.action;
+
+import org.opensearch.action.ActionType;
+
+/**
+ * Action type to get a QueryGroup
+ *
+ * @opensearch.experimental
+ */
+public class GetQueryGroupAction extends ActionType<GetQueryGroupResponse> {
+
+    /**
+     * An instance of GetQueryGroupAction
+     */
+    public static final GetQueryGroupAction INSTANCE = new GetQueryGroupAction();
+
+    /**
+     * Name for GetQueryGroupAction
+     */
+    public static final String NAME = "cluster:admin/opensearch/wlm/query_group/_get";
+
+    /**
+     * Default constructor
+     */
+    private GetQueryGroupAction() {
+        super(NAME, GetQueryGroupResponse::new);
+    }
+}
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupRequest.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupRequest.java
new file mode 100644
index 0000000000000..0524c615a84e7
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupRequest.java
@@ -0,0 +1,64 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.action;
+
+import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest;
+import org.opensearch.cluster.metadata.QueryGroup;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * A request to get a QueryGroup
+ *
+ * @opensearch.experimental
+ */
+public class GetQueryGroupRequest extends ClusterManagerNodeReadRequest<GetQueryGroupRequest> {
+    final String name;
+
+    /**
+     * Default constructor for GetQueryGroupRequest
+     * @param name - name for the QueryGroup to get
+     */
+    public GetQueryGroupRequest(String name) {
+        this.name = name;
+    }
+
+    /**
+     * Constructor for GetQueryGroupRequest
+     * @param in - A {@link StreamInput} object
+     */
+    public GetQueryGroupRequest(StreamInput in) throws IOException {
+        super(in);
+        name = in.readOptionalString();
+    }
+
+    @Override
+    public ActionRequestValidationException validate() {
+        if (name != null) {
+            QueryGroup.validateName(name);
+        }
+        return null;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        out.writeOptionalString(name);
+    }
+
+    /**
+     * Name getter
+     */
+    public String getName() {
+        return name;
+    }
+}
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupResponse.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupResponse.java
new file mode 100644
index 0000000000000..547c501e6a28e
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupResponse.java
@@ -0,0 +1,82 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.action;
+
+import org.opensearch.cluster.metadata.QueryGroup;
+import org.opensearch.core.action.ActionResponse;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.core.xcontent.ToXContentObject;
+import org.opensearch.core.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Collection;
+
+/**
+ * Response for the get API for QueryGroup
+ *
+ * @opensearch.experimental
+ */
+public class GetQueryGroupResponse extends ActionResponse implements ToXContent, ToXContentObject {
+    private final Collection<QueryGroup> queryGroups;
+    private final RestStatus restStatus;
+
+    /**
+     * Constructor for GetQueryGroupResponse
+     * @param queryGroups - The QueryGroups to return in the response
+     * @param restStatus - The rest status of the request
+     */
+    public GetQueryGroupResponse(final Collection<QueryGroup> queryGroups, RestStatus restStatus) {
+        this.queryGroups = queryGroups;
+        this.restStatus = restStatus;
+    }
+
+    /**
+     * Constructor for GetQueryGroupResponse
+     * @param in - A {@link StreamInput} object
+     */
+    public GetQueryGroupResponse(StreamInput in) throws IOException {
+        this.queryGroups = in.readList(QueryGroup::new);
+        restStatus = RestStatus.readFrom(in);
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeCollection(queryGroups);
+        RestStatus.writeTo(out, restStatus);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.startArray("query_groups");
+        for (QueryGroup group : queryGroups) {
+            group.toXContent(builder, params);
+        }
+        builder.endArray();
+        builder.endObject();
+        return builder;
+    }
+
+    /**
+     * queryGroups getter
+     */
+    public Collection<QueryGroup> getQueryGroups() {
+        return queryGroups;
+    }
+
+    /**
+     * restStatus getter
+     */
+    public RestStatus getRestStatus() {
+        return restStatus;
+    }
+}
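For clarity, a sketch of the body shape the toXContent method above produces: the matching groups are wrapped in a single "query_groups" array. Assumes an existing QueryGroup instance named queryGroup; builder construction is the standard XContent utility:

    // Illustrative: rendering the get response to JSON.
    GetQueryGroupResponse response = new GetQueryGroupResponse(List.of(queryGroup), RestStatus.OK);
    XContentBuilder builder = XContentFactory.jsonBuilder();
    response.toXContent(builder, ToXContent.EMPTY_PARAMS);
    // builder now holds: { "query_groups": [ { ...one object per matching QueryGroup... } ] }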
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java
new file mode 100644
index 0000000000000..190ff17261bb4
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java
@@ -0,0 +1,51 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.action;
+
+import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.HandledTransportAction;
+import org.opensearch.common.inject.Inject;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService;
+import org.opensearch.tasks.Task;
+import org.opensearch.transport.TransportService;
+
+/**
+ * Transport action to create a QueryGroup
+ *
+ * @opensearch.experimental
+ */
+public class TransportCreateQueryGroupAction extends HandledTransportAction<CreateQueryGroupRequest, CreateQueryGroupResponse> {
+
+    private final QueryGroupPersistenceService queryGroupPersistenceService;
+
+    /**
+     * Constructor for TransportCreateQueryGroupAction
+     *
+     * @param actionName - action name
+     * @param transportService - a {@link TransportService} object
+     * @param actionFilters - a {@link ActionFilters} object
+     * @param queryGroupPersistenceService - a {@link QueryGroupPersistenceService} object
+     */
+    @Inject
+    public TransportCreateQueryGroupAction(
+        String actionName,
+        TransportService transportService,
+        ActionFilters actionFilters,
+        QueryGroupPersistenceService queryGroupPersistenceService
+    ) {
+        super(CreateQueryGroupAction.NAME, transportService, actionFilters, CreateQueryGroupRequest::new);
+        this.queryGroupPersistenceService = queryGroupPersistenceService;
+    }
+
+    @Override
+    protected void doExecute(Task task, CreateQueryGroupRequest request, ActionListener<CreateQueryGroupResponse> listener) {
+        queryGroupPersistenceService.persistInClusterStateMetadata(request.getQueryGroup(), listener);
+    }
+}
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java
new file mode 100644
index 0000000000000..e4d3908d4a208
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java
@@ -0,0 +1,91 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.action;
+
+import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
+import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.block.ClusterBlockException;
+import org.opensearch.cluster.block.ClusterBlockLevel;
+import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.inject.Inject;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService;
+import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportService;
+
+import java.io.IOException;
+
+/**
+ * Transport action to delete a QueryGroup
+ *
+ * @opensearch.experimental
+ */
+public class TransportDeleteQueryGroupAction extends TransportClusterManagerNodeAction<DeleteQueryGroupRequest, AcknowledgedResponse> {
+
+    private final QueryGroupPersistenceService queryGroupPersistenceService;
+
+    /**
+     * Constructor for TransportDeleteQueryGroupAction
+     *
+     * @param clusterService - a {@link ClusterService} object
+     * @param transportService - a {@link TransportService} object
+     * @param actionFilters - a {@link ActionFilters} object
+     * @param threadPool - a {@link ThreadPool} object
+     * @param indexNameExpressionResolver - a {@link IndexNameExpressionResolver} object
+     * @param queryGroupPersistenceService - a {@link QueryGroupPersistenceService} object
+     */
+    @Inject
+    public TransportDeleteQueryGroupAction(
+        ClusterService clusterService,
+        TransportService transportService,
+        ActionFilters actionFilters,
+        ThreadPool threadPool,
+        IndexNameExpressionResolver indexNameExpressionResolver,
+        QueryGroupPersistenceService queryGroupPersistenceService
+    ) {
+        super(
+            DeleteQueryGroupAction.NAME,
+            transportService,
+            clusterService,
+            threadPool,
+            actionFilters,
+            DeleteQueryGroupRequest::new,
+            indexNameExpressionResolver
+        );
+        this.queryGroupPersistenceService = queryGroupPersistenceService;
+    }
+
+    @Override
+    protected void clusterManagerOperation(
+        DeleteQueryGroupRequest request,
+        ClusterState state,
+        ActionListener<AcknowledgedResponse> listener
+    ) throws Exception {
+        queryGroupPersistenceService.deleteInClusterStateMetadata(request, listener);
+    }
+
+    @Override
+    protected String executor() {
+        return ThreadPool.Names.SAME;
+    }
+
+    @Override
+    protected AcknowledgedResponse read(StreamInput in) throws IOException {
+        return new AcknowledgedResponse(in);
+    }
+
+    @Override
+    protected ClusterBlockException checkBlock(DeleteQueryGroupRequest request, ClusterState state) {
+        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
+    }
+}
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportGetQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportGetQueryGroupAction.java
new file mode 100644
index 0000000000000..51bb21b255511
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportGetQueryGroupAction.java
@@ -0,0 +1,97 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.action;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.ResourceNotFoundException;
+import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.block.ClusterBlockException;
+import org.opensearch.cluster.block.ClusterBlockLevel;
+import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
+import org.opensearch.cluster.metadata.QueryGroup;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.inject.Inject;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService;
+import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.Collection;
+
+/**
+ * Transport action to get a QueryGroup
+ *
+ * @opensearch.experimental
+ */
+public class TransportGetQueryGroupAction extends TransportClusterManagerNodeReadAction<GetQueryGroupRequest, GetQueryGroupResponse> {
+    private static final Logger logger = LogManager.getLogger(TransportGetQueryGroupAction.class);
+
+    /**
+     * Constructor for TransportGetQueryGroupAction
+     *
+     * @param clusterService - a {@link ClusterService} object
+     * @param transportService - a {@link TransportService} object
+     * @param actionFilters - a {@link ActionFilters} object
+     * @param threadPool - a {@link ThreadPool} object
+     * @param indexNameExpressionResolver - a {@link IndexNameExpressionResolver} object
+     */
+    @Inject
+    public TransportGetQueryGroupAction(
+        ClusterService clusterService,
+        TransportService transportService,
+        ActionFilters actionFilters,
+        ThreadPool threadPool,
+        IndexNameExpressionResolver indexNameExpressionResolver
+    ) {
+        super(
+            GetQueryGroupAction.NAME,
+            transportService,
+            clusterService,
+            threadPool,
+            actionFilters,
+            GetQueryGroupRequest::new,
+            indexNameExpressionResolver,
+            true
+        );
+    }
+
+    @Override
+    protected String executor() {
+        return ThreadPool.Names.SAME;
+    }
+
+    @Override
+    protected GetQueryGroupResponse read(StreamInput in) throws IOException {
+        return new GetQueryGroupResponse(in);
+    }
+
+    @Override
+    protected ClusterBlockException checkBlock(GetQueryGroupRequest request, ClusterState state) {
+        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
+    }
+
+    @Override
+    protected void clusterManagerOperation(GetQueryGroupRequest request, ClusterState state, ActionListener<GetQueryGroupResponse> listener)
+        throws Exception {
+        final String name = request.getName();
+        final Collection<QueryGroup> resultGroups = QueryGroupPersistenceService.getFromClusterStateMetadata(name, state);
+
+        if (resultGroups.isEmpty() && name != null && !name.isEmpty()) {
+            logger.warn("No QueryGroup exists with the provided name: {}", name);
+            throw new ResourceNotFoundException("No QueryGroup exists with the provided name: " + name);
+        }
+        listener.onResponse(new GetQueryGroupResponse(resultGroups, RestStatus.OK));
+    }
+}
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/package-info.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/package-info.java
new file mode 100644
index 0000000000000..9921500df8a81
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * Package for the action classes of WorkloadManagementPlugin
+ */
+package org.opensearch.plugin.wlm.action;
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/package-info.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/package-info.java
new file mode 100644
index 0000000000000..84c99967b226b
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * Base package for WorkloadManagementPlugin
+ */
+package org.opensearch.plugin.wlm;
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestCreateQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestCreateQueryGroupAction.java
new file mode 100644
index 0000000000000..b0e0af4f9d17f
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestCreateQueryGroupAction.java
@@ -0,0 +1,72 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.rest;
+
+import org.opensearch.client.node.NodeClient;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.core.xcontent.XContentParser;
+import org.opensearch.plugin.wlm.action.CreateQueryGroupAction;
+import org.opensearch.plugin.wlm.action.CreateQueryGroupRequest;
+import org.opensearch.plugin.wlm.action.CreateQueryGroupResponse;
+import org.opensearch.rest.BaseRestHandler;
+import org.opensearch.rest.BytesRestResponse;
+import org.opensearch.rest.RestChannel;
+import org.opensearch.rest.RestRequest;
+import org.opensearch.rest.RestResponse;
+import org.opensearch.rest.action.RestResponseListener;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.opensearch.rest.RestRequest.Method.POST;
+import static org.opensearch.rest.RestRequest.Method.PUT;
+
+/**
+ * Rest action to create a QueryGroup
+ *
+ * @opensearch.experimental
+ */
+public class RestCreateQueryGroupAction extends BaseRestHandler {
+
+    /**
+     * Constructor for RestCreateQueryGroupAction
+     */
+    public RestCreateQueryGroupAction() {}
+
+    @Override
+    public String getName() {
+        return "create_query_group";
+    }
+
+    /**
+     * The list of {@link Route}s that this RestHandler is responsible for handling.
+     */
+    @Override
+    public List<Route> routes() {
+        return List.of(new Route(POST, "_wlm/query_group/"), new Route(PUT, "_wlm/query_group/"));
+    }
+
+    @Override
+    protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
+        try (XContentParser parser = request.contentParser()) {
+            CreateQueryGroupRequest createQueryGroupRequest = CreateQueryGroupRequest.fromXContent(parser);
+            return channel -> client.execute(CreateQueryGroupAction.INSTANCE, createQueryGroupRequest, createQueryGroupResponse(channel));
+        }
+    }
+
+    private RestResponseListener<CreateQueryGroupResponse> createQueryGroupResponse(final RestChannel channel) {
+        return new RestResponseListener<>(channel) {
+            @Override
+            public RestResponse buildResponse(final CreateQueryGroupResponse response) throws Exception {
+                return new BytesRestResponse(RestStatus.OK, response.toXContent(channel.newBuilder(), ToXContent.EMPTY_PARAMS));
+            }
+        };
+    }
+}
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupAction.java
new file mode 100644
index 0000000000000..8ad621cf8a1e4
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupAction.java
@@ -0,0 +1,57 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.rest;
+
+import org.opensearch.client.node.NodeClient;
+import org.opensearch.plugin.wlm.action.DeleteQueryGroupAction;
+import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest;
+import org.opensearch.rest.BaseRestHandler;
+import org.opensearch.rest.RestRequest;
+import org.opensearch.rest.action.RestToXContentListener;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.opensearch.rest.RestRequest.Method.DELETE;
+
+/**
+ * Rest action to delete a QueryGroup
+ *
+ * @opensearch.experimental
+ */
+public class RestDeleteQueryGroupAction extends BaseRestHandler {
+
+    /**
+     * Constructor for RestDeleteQueryGroupAction
+     */
+    public RestDeleteQueryGroupAction() {}
+
+    @Override
+    public String getName() {
+        return "delete_query_group";
+    }
+
+    /**
+     * The list of {@link Route}s that this RestHandler is responsible for handling.
+     */
+    @Override
+    public List<Route> routes() {
+        return List.of(new Route(DELETE, "_wlm/query_group/{name}"));
+    }
+
+    @Override
+    protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
+        DeleteQueryGroupRequest deleteQueryGroupRequest = new DeleteQueryGroupRequest(request.param("name"));
+        deleteQueryGroupRequest.clusterManagerNodeTimeout(
+            request.paramAsTime("cluster_manager_timeout", deleteQueryGroupRequest.clusterManagerNodeTimeout())
+        );
+        deleteQueryGroupRequest.timeout(request.paramAsTime("timeout", deleteQueryGroupRequest.timeout()));
+        return channel -> client.execute(DeleteQueryGroupAction.INSTANCE, deleteQueryGroupRequest, new RestToXContentListener<>(channel));
+    }
+}
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestGetQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestGetQueryGroupAction.java
new file mode 100644
index 0000000000000..c250bd2979e98
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestGetQueryGroupAction.java
@@ -0,0 +1,68 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.rest;
+
+import org.opensearch.client.node.NodeClient;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.plugin.wlm.action.GetQueryGroupAction;
+import org.opensearch.plugin.wlm.action.GetQueryGroupRequest;
+import org.opensearch.plugin.wlm.action.GetQueryGroupResponse;
+import org.opensearch.rest.BaseRestHandler;
+import org.opensearch.rest.BytesRestResponse;
+import org.opensearch.rest.RestChannel;
+import org.opensearch.rest.RestRequest;
+import org.opensearch.rest.RestResponse;
+import org.opensearch.rest.action.RestResponseListener;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.opensearch.rest.RestRequest.Method.GET;
+
+/**
+ * Rest action to get a QueryGroup
+ *
+ * @opensearch.experimental
+ */
+public class RestGetQueryGroupAction extends BaseRestHandler {
+
+    /**
+     * Constructor for RestGetQueryGroupAction
+     */
+    public RestGetQueryGroupAction() {}
+
+    @Override
+    public String getName() {
+        return "get_query_group";
+    }
+
+    /**
+     * The list of {@link Route}s that this RestHandler is responsible for handling.
+     */
+    @Override
+    public List<Route> routes() {
+        return List.of(new Route(GET, "_wlm/query_group/{name}"), new Route(GET, "_wlm/query_group/"));
+    }
+
+    @Override
+    protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
+        final GetQueryGroupRequest getQueryGroupRequest = new GetQueryGroupRequest(request.param("name"));
+        return channel -> client.execute(GetQueryGroupAction.INSTANCE, getQueryGroupRequest, getQueryGroupResponse(channel));
+    }
+
+    private RestResponseListener<GetQueryGroupResponse> getQueryGroupResponse(final RestChannel channel) {
+        return new RestResponseListener<>(channel) {
+            @Override
+            public RestResponse buildResponse(final GetQueryGroupResponse response) throws Exception {
+                return new BytesRestResponse(RestStatus.OK, response.toXContent(channel.newBuilder(), ToXContent.EMPTY_PARAMS));
+            }
+        };
+    }
+}
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/package-info.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/package-info.java
similarity index 68%
rename from plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/package-info.java
rename to plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/package-info.java
index 5068f28234f6d..7d7cb9028fdb8 100644
--- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/package-info.java
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/package-info.java
@@ -7,6 +7,6 @@
  */
 
 /**
- * Service Classes for Query Insights
+ * Package for the rest classes of WorkloadManagementPlugin
  */
-package org.opensearch.plugin.insights.core.service;
+package org.opensearch.plugin.wlm.rest;
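Taken together, the three handlers above expose a small CRUD surface under _wlm/query_group. A hedged sketch of driving it with the low-level REST client (paths come from the routes above; the JSON body mirrors the create schema, and "analytics" is a placeholder name):

    // Illustrative: exercising the _wlm/query_group endpoints registered above.
    Request create = new Request("PUT", "_wlm/query_group/");
    create.setJsonEntity(
        "{ \"name\": \"analytics\", \"resiliency_mode\": \"enforced\", "
            + "\"resource_limits\": { \"cpu\": 0.4, \"memory\": 0.2 } }"
    );
    restClient.performRequest(create);

    Request get = new Request("GET", "_wlm/query_group/analytics");   // omit the name to list all groups
    restClient.performRequest(get);

    Request delete = new Request("DELETE", "_wlm/query_group/analytics");
    restClient.performRequest(delete);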
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java
new file mode 100644
index 0000000000000..ba5161a2c855e
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java
@@ -0,0 +1,273 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.service;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.ResourceNotFoundException;
+import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.cluster.AckedClusterStateUpdateTask;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.ClusterStateUpdateTask;
+import org.opensearch.cluster.metadata.Metadata;
+import org.opensearch.cluster.metadata.QueryGroup;
+import org.opensearch.cluster.service.ClusterManagerTaskThrottler;
+import org.opensearch.cluster.service.ClusterManagerTaskThrottler.ThrottlingKey;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.Priority;
+import org.opensearch.common.inject.Inject;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.plugin.wlm.action.CreateQueryGroupResponse;
+import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest;
+import org.opensearch.search.ResourceType;
+
+import java.util.Collection;
+import java.util.EnumMap;
+import java.util.Map;
+import java.util.Optional;
+import java.util.stream.Collectors;
+
+/**
+ * This class defines the functions for QueryGroup persistence
+ */
+public class QueryGroupPersistenceService {
+    static final String SOURCE = "query-group-persistence-service";
+    private static final String CREATE_QUERY_GROUP_THROTTLING_KEY = "create-query-group";
+    private static final String DELETE_QUERY_GROUP_THROTTLING_KEY = "delete-query-group";
+    private static final Logger logger = LogManager.getLogger(QueryGroupPersistenceService.class);
+    /**
+     * max QueryGroup count setting name
+     */
+    public static final String QUERY_GROUP_COUNT_SETTING_NAME = "node.query_group.max_count";
+    /**
+     * default max queryGroup count on any node at any given point in time
+     */
+    private static final int DEFAULT_MAX_QUERY_GROUP_COUNT_VALUE = 100;
+    /**
+     * min queryGroup count on any node at any given point in time
+     */
+    private static final int MIN_QUERY_GROUP_COUNT_VALUE = 1;
+    /**
+     * max QueryGroup count setting
+     */
+    public static final Setting<Integer> MAX_QUERY_GROUP_COUNT = Setting.intSetting(
+        QUERY_GROUP_COUNT_SETTING_NAME,
+        DEFAULT_MAX_QUERY_GROUP_COUNT_VALUE,
+        0,
+        QueryGroupPersistenceService::validateMaxQueryGroupCount,
+        Setting.Property.Dynamic,
+        Setting.Property.NodeScope
+    );
+    private final ClusterService clusterService;
+    private volatile int maxQueryGroupCount;
+    final ThrottlingKey createQueryGroupThrottlingKey;
+    final ThrottlingKey deleteQueryGroupThrottlingKey;
+
+    /**
+     * Constructor for QueryGroupPersistenceService
+     *
+     * @param clusterService {@link ClusterService} - The cluster service to be used by QueryGroupPersistenceService
+     * @param settings {@link Settings} - The settings to be used by QueryGroupPersistenceService
+     * @param clusterSettings {@link ClusterSettings} - The cluster settings to be used by QueryGroupPersistenceService
+     */
+    @Inject
+    public QueryGroupPersistenceService(
+        final ClusterService clusterService,
+        final Settings settings,
+        final ClusterSettings clusterSettings
+    ) {
+        this.clusterService = clusterService;
+        this.createQueryGroupThrottlingKey = clusterService.registerClusterManagerTask(CREATE_QUERY_GROUP_THROTTLING_KEY, true);
+        this.deleteQueryGroupThrottlingKey = clusterService.registerClusterManagerTask(DELETE_QUERY_GROUP_THROTTLING_KEY, true);
+        setMaxQueryGroupCount(MAX_QUERY_GROUP_COUNT.get(settings));
+        clusterSettings.addSettingsUpdateConsumer(MAX_QUERY_GROUP_COUNT, this::setMaxQueryGroupCount);
+    }
+
+    /**
+     * Set maxQueryGroupCount to be newMaxQueryGroupCount
+     * @param newMaxQueryGroupCount - the max number of QueryGroup allowed
+     */
+    public void setMaxQueryGroupCount(int newMaxQueryGroupCount) {
+        validateMaxQueryGroupCount(newMaxQueryGroupCount);
+        this.maxQueryGroupCount = newMaxQueryGroupCount;
+    }
+
+    /**
+     * Validator for maxQueryGroupCount
+     * @param maxQueryGroupCount - the maxQueryGroupCount number to be verified
+     */
+    private static void validateMaxQueryGroupCount(int maxQueryGroupCount) {
+        if (maxQueryGroupCount > DEFAULT_MAX_QUERY_GROUP_COUNT_VALUE || maxQueryGroupCount < MIN_QUERY_GROUP_COUNT_VALUE) {
+            throw new IllegalArgumentException(QUERY_GROUP_COUNT_SETTING_NAME + " should be in range [1-100].");
+        }
+    }
+
+    /**
+     * Update cluster state to include the new QueryGroup
+     * @param queryGroup {@link QueryGroup} - the QueryGroup we're currently creating
+     * @param listener - ActionListener for CreateQueryGroupResponse
+     */
+    public void persistInClusterStateMetadata(QueryGroup queryGroup, ActionListener<CreateQueryGroupResponse> listener) {
+        clusterService.submitStateUpdateTask(SOURCE, new ClusterStateUpdateTask(Priority.NORMAL) {
+            @Override
+            public ClusterState execute(ClusterState currentState) throws Exception {
+                return saveQueryGroupInClusterState(queryGroup, currentState);
+            }
+
+            @Override
+            public ThrottlingKey getClusterManagerThrottlingKey() {
+                return createQueryGroupThrottlingKey;
+            }
+
+            @Override
+            public void onFailure(String source, Exception e) {
+                logger.warn("failed to save QueryGroup object due to error: {}, for source: {}.", e.getMessage(), source);
+                listener.onFailure(e);
+            }
+
+            @Override
+            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                CreateQueryGroupResponse response = new CreateQueryGroupResponse(queryGroup, RestStatus.OK);
+                listener.onResponse(response);
+            }
+        });
+    }
+
+    /**
+     * This method will be executed before we submit the new cluster state
+     * @param queryGroup - the QueryGroup we're currently creating
+     * @param currentClusterState - the cluster state before the update
+     */
+    ClusterState saveQueryGroupInClusterState(final QueryGroup queryGroup, final ClusterState currentClusterState) {
+        final Map<String, QueryGroup> existingQueryGroups = currentClusterState.metadata().queryGroups();
+        String groupName = queryGroup.getName();
+
+        // check if the maxQueryGroupCount limit would be breached
+        if (existingQueryGroups.size() == maxQueryGroupCount) {
+            logger.warn("{} value exceeded its assigned limit of {}.", QUERY_GROUP_COUNT_SETTING_NAME, maxQueryGroupCount);
+            throw new IllegalStateException("Can't create more than " + maxQueryGroupCount + " QueryGroups in the system.");
+        }
+
+        // check for duplicate name
+        Optional<QueryGroup> findExistingGroup = existingQueryGroups.values()
+            .stream()
+            .filter(group -> group.getName().equals(groupName))
+            .findFirst();
+        if (findExistingGroup.isPresent()) {
+            logger.warn("QueryGroup with name {} already exists. Not creating a new one.", groupName);
+            throw new IllegalArgumentException("QueryGroup with name " + groupName + " already exists. Not creating a new one.");
+        }
+
+        // check if any resource allocation would exceed the limit of 1.0
+        Map<ResourceType, Double> totalUsageMap = calculateTotalUsage(existingQueryGroups, queryGroup);
+        for (ResourceType resourceType : queryGroup.getResourceLimits().keySet()) {
+            if (totalUsageMap.get(resourceType) > 1) {
+                logger.warn("Total resource allocation for {} will go above the max limit of 1.0.", resourceType.getName());
+                throw new IllegalArgumentException(
+                    "Total resource allocation for " + resourceType.getName() + " will go above the max limit of 1.0."
+                );
+            }
+        }
+
+        return ClusterState.builder(currentClusterState)
+            .metadata(Metadata.builder(currentClusterState.metadata()).put(queryGroup).build())
+            .build();
+    }
+
+    /**
+     * This method calculates the existing total usage of all the resource limits
+     * @param existingQueryGroups - existing QueryGroups in the system
+     * @param queryGroup - the QueryGroup we're creating or updating
+     */
+    private Map<ResourceType, Double> calculateTotalUsage(Map<String, QueryGroup> existingQueryGroups, QueryGroup queryGroup) {
+        final Map<ResourceType, Double> map = new EnumMap<>(ResourceType.class);
+        map.putAll(queryGroup.getResourceLimits());
+        for (QueryGroup currGroup : existingQueryGroups.values()) {
+            if (!currGroup.getName().equals(queryGroup.getName())) {
+                for (ResourceType resourceType : queryGroup.getResourceLimits().keySet()) {
+                    map.compute(resourceType, (k, v) -> v + currGroup.getResourceLimits().get(resourceType));
+                }
+            }
+        }
+        return map;
+    }
+
+    /**
+     * Get the QueryGroups with the specified name from cluster state
+     * @param name - the QueryGroup name we are getting
+     * @param currentState - current cluster state
+     */
+    public static Collection<QueryGroup> getFromClusterStateMetadata(String name, ClusterState currentState) {
+        final Map<String, QueryGroup> currentGroups = currentState.getMetadata().queryGroups();
+        if (name == null || name.isEmpty()) {
+            return currentGroups.values();
+        }
+        return currentGroups.values()
+            .stream()
+            .filter(group -> group.getName().equals(name))
+            .findAny()
+            .stream()
+            .collect(Collectors.toList());
+    }
+
+    /**
+     * Modify cluster state to delete the QueryGroup
+     * @param deleteQueryGroupRequest - request to delete a QueryGroup
+     * @param listener - ActionListener for AcknowledgedResponse
+     */
+    public void deleteInClusterStateMetadata(
+        DeleteQueryGroupRequest deleteQueryGroupRequest,
+        ActionListener<AcknowledgedResponse> listener
+    ) {
+        clusterService.submitStateUpdateTask(SOURCE, new AckedClusterStateUpdateTask<>(deleteQueryGroupRequest, listener) {
+            @Override
+            public ClusterState execute(ClusterState currentState) {
+                return deleteQueryGroupInClusterState(deleteQueryGroupRequest.getName(), currentState);
+            }
+
+            @Override
+            public ClusterManagerTaskThrottler.ThrottlingKey getClusterManagerThrottlingKey() {
+                return deleteQueryGroupThrottlingKey;
+            }
+
+            @Override
+            protected AcknowledgedResponse newResponse(boolean acknowledged) {
+                return new AcknowledgedResponse(acknowledged);
+            }
+        });
+    }
+
+    /**
+     * Modify cluster state to delete the QueryGroup, and return the new cluster state
+     * @param name - the name for QueryGroup to be deleted
+     * @param currentClusterState - current cluster state
+     */
+    ClusterState deleteQueryGroupInClusterState(final String name, final ClusterState currentClusterState) {
+        final Metadata metadata = currentClusterState.metadata();
+        final QueryGroup queryGroupToRemove = metadata.queryGroups()
+            .values()
+            .stream()
+            .filter(queryGroup -> queryGroup.getName().equals(name))
+            .findAny()
+            .orElseThrow(() -> new ResourceNotFoundException("No QueryGroup exists with the provided name: " + name));
+
+        return ClusterState.builder(currentClusterState).metadata(Metadata.builder(metadata).remove(queryGroupToRemove).build()).build();
+    }
+
+    /**
+     * maxQueryGroupCount getter
+     */
+    public int getMaxQueryGroupCount() {
+        return maxQueryGroupCount;
+    }
+}
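A concrete instance of the 1.0 allocation check in saveQueryGroupInClusterState above: with existing groups holding memory limits of 0.3 and 0.6, a new group requesting 0.2 would bring the total to 1.1, so the create is rejected. The arithmetic, with illustrative values:

    // Illustrative arithmetic behind the rejection path.
    double existing = 0.3 + 0.6;        // memory already promised to other groups
    double requested = 0.2;             // memory requested by the new group
    assert existing + requested > 1.0;  // 1.1 > 1.0 -> IllegalArgumentException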
name: " + name)); + + return ClusterState.builder(currentClusterState).metadata(Metadata.builder(metadata).remove(queryGroupToRemove).build()).build(); + } + + /** + * maxQueryGroupCount getter + */ + public int getMaxQueryGroupCount() { + return maxQueryGroupCount; + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/package-info.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/package-info.java new file mode 100644 index 0000000000000..5848e9c936623 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Package for the service classes of WorkloadManagementPlugin + */ +package org.opensearch.plugin.wlm.service; diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/QueryGroupTestUtils.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/QueryGroupTestUtils.java new file mode 100644 index 0000000000000..5ba1ad5334712 --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/QueryGroupTestUtils.java @@ -0,0 +1,144 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm; + +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.service.ClusterApplierService; +import org.opensearch.cluster.service.ClusterManagerService; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; +import org.opensearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Comparator; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.opensearch.cluster.metadata.QueryGroup.builder; +import static org.opensearch.search.ResourceType.fromName; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; + +public class QueryGroupTestUtils { + public static final String NAME_ONE = "query_group_one"; + public static final String NAME_TWO = "query_group_two"; + public static final String _ID_ONE = "AgfUO5Ja9yfsYlONlYi3TQ=="; + public static final String _ID_TWO = "G5iIqHy4g7eK1qIAAAAIH53=1"; + public static final String NAME_NONE_EXISTED = "query_group_none_existed"; + public static final String MEMORY_STRING = "memory"; + public static final String MONITOR_STRING = "monitor"; + public static final long TIMESTAMP_ONE = 4513232413L; + public static final long TIMESTAMP_TWO = 4513232415L; + public static final QueryGroup queryGroupOne = builder().name(NAME_ONE) + ._id(_ID_ONE) + .mode(MONITOR_STRING) + 
.resourceLimits(Map.of(fromName(MEMORY_STRING), 0.3)) + .updatedAt(TIMESTAMP_ONE) + .build(); + + public static final QueryGroup queryGroupTwo = builder().name(NAME_TWO) + ._id(_ID_TWO) + .mode(MONITOR_STRING) + .resourceLimits(Map.of(fromName(MEMORY_STRING), 0.6)) + .updatedAt(TIMESTAMP_TWO) + .build(); + + public static List<QueryGroup> queryGroupList() { + List<QueryGroup> list = new ArrayList<>(); + list.add(queryGroupOne); + list.add(queryGroupTwo); + return list; + } + + public static ClusterState clusterState() { + final Metadata metadata = Metadata.builder().queryGroups(Map.of(_ID_ONE, queryGroupOne, _ID_TWO, queryGroupTwo)).build(); + return ClusterState.builder(new ClusterName("_name")).metadata(metadata).build(); + } + + public static Set<Setting<?>> clusterSettingsSet() { + Set<Setting<?>> set = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + set.add(QueryGroupPersistenceService.MAX_QUERY_GROUP_COUNT); + assertFalse(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.contains(QueryGroupPersistenceService.MAX_QUERY_GROUP_COUNT)); + return set; + } + + public static Settings settings() { + return Settings.builder().build(); + } + + public static ClusterSettings clusterSettings() { + return new ClusterSettings(settings(), clusterSettingsSet()); + } + + public static QueryGroupPersistenceService queryGroupPersistenceService() { + ClusterApplierService clusterApplierService = new ClusterApplierService( + "name", + settings(), + clusterSettings(), + mock(ThreadPool.class) + ); + clusterApplierService.setInitialState(clusterState()); + ClusterService clusterService = new ClusterService( + settings(), + clusterSettings(), + mock(ClusterManagerService.class), + clusterApplierService + ); + return new QueryGroupPersistenceService(clusterService, settings(), clusterSettings()); + } + + public static Tuple<QueryGroupPersistenceService, ClusterState> preparePersistenceServiceSetup(Map<String, QueryGroup> queryGroups) { + Metadata metadata = Metadata.builder().queryGroups(queryGroups).build(); + Settings settings = Settings.builder().build(); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).metadata(metadata).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, clusterSettingsSet()); + ClusterApplierService clusterApplierService = new ClusterApplierService( + "name", + settings(), + clusterSettings(), + mock(ThreadPool.class) + ); + clusterApplierService.setInitialState(clusterState); + ClusterService clusterService = new ClusterService( + settings(), + clusterSettings(), + mock(ClusterManagerService.class), + clusterApplierService + ); + QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( + clusterService, + settings, + clusterSettings + ); + return new Tuple<>(queryGroupPersistenceService, clusterState); + } + + public static void assertEqualQueryGroups(Collection<QueryGroup> collectionOne, Collection<QueryGroup> collectionTwo) { + assertEquals(collectionOne.size(), collectionTwo.size()); + List<QueryGroup> listOne = new ArrayList<>(collectionOne); + List<QueryGroup> listTwo = new ArrayList<>(collectionTwo); + listOne.sort(Comparator.comparing(QueryGroup::getName)); + listTwo.sort(Comparator.comparing(QueryGroup::getName)); + for (int i = 0; i < listOne.size(); i++) { + assertTrue(listOne.get(i).equals(listTwo.get(i))); + } + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequestTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequestTests.java new file mode 100644 index 0000000000000..b0fa96a46df80 --- /dev/null +++ 
b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequestTests.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.assertEqualQueryGroups; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupOne; + +public class CreateQueryGroupRequestTests extends OpenSearchTestCase { + + /** + * Test case to verify the serialization and deserialization of CreateQueryGroupRequest. + */ + public void testSerialization() throws IOException { + CreateQueryGroupRequest request = new CreateQueryGroupRequest(queryGroupOne); + BytesStreamOutput out = new BytesStreamOutput(); + request.writeTo(out); + StreamInput streamInput = out.bytes().streamInput(); + CreateQueryGroupRequest otherRequest = new CreateQueryGroupRequest(streamInput); + List<QueryGroup> list1 = new ArrayList<>(); + List<QueryGroup> list2 = new ArrayList<>(); + list1.add(queryGroupOne); + list2.add(otherRequest.getQueryGroup()); + assertEqualQueryGroups(list1, list2); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java new file mode 100644 index 0000000000000..ecb9a6b2dc0d2 --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java @@ -0,0 +1,66 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.wlm.QueryGroupTestUtils; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.mockito.Mockito.mock; + +public class CreateQueryGroupResponseTests extends OpenSearchTestCase { + + /** + * Test case to verify serialization and deserialization of CreateQueryGroupResponse.
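+ * A response written to a BytesStreamOutput and read back from the resulting StreamInput must keep the same RestStatus and an equal QueryGroup.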
+ */ + public void testSerialization() throws IOException { + CreateQueryGroupResponse response = new CreateQueryGroupResponse(QueryGroupTestUtils.queryGroupOne, RestStatus.OK); + BytesStreamOutput out = new BytesStreamOutput(); + response.writeTo(out); + StreamInput streamInput = out.bytes().streamInput(); + CreateQueryGroupResponse otherResponse = new CreateQueryGroupResponse(streamInput); + assertEquals(response.getRestStatus(), otherResponse.getRestStatus()); + QueryGroup responseGroup = response.getQueryGroup(); + QueryGroup otherResponseGroup = otherResponse.getQueryGroup(); + List<QueryGroup> listOne = new ArrayList<>(); + List<QueryGroup> listTwo = new ArrayList<>(); + listOne.add(responseGroup); + listTwo.add(otherResponseGroup); + QueryGroupTestUtils.assertEqualQueryGroups(listOne, listTwo); + } + + /** + * Test case to validate the toXContent method of CreateQueryGroupResponse. + */ + public void testToXContentCreateQueryGroup() throws IOException { + XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); + CreateQueryGroupResponse response = new CreateQueryGroupResponse(QueryGroupTestUtils.queryGroupOne, RestStatus.OK); + String actual = response.toXContent(builder, mock(ToXContent.Params.class)).toString(); + String expected = "{\n" + + " \"_id\" : \"AgfUO5Ja9yfsYlONlYi3TQ==\",\n" + + " \"name\" : \"query_group_one\",\n" + + " \"resiliency_mode\" : \"monitor\",\n" + + " \"updated_at\" : 4513232413,\n" + + " \"resource_limits\" : {\n" + + " \"memory\" : 0.3\n" + + " }\n" + + "}"; + assertEquals(expected, actual); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequestTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequestTests.java new file mode 100644 index 0000000000000..bc2e4f0faca4c --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequestTests.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.plugin.wlm.QueryGroupTestUtils; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class DeleteQueryGroupRequestTests extends OpenSearchTestCase { + + /** + * Test case to verify the serialization and deserialization of DeleteQueryGroupRequest. + */ + public void testSerialization() throws IOException { + DeleteQueryGroupRequest request = new DeleteQueryGroupRequest(QueryGroupTestUtils.NAME_ONE); + assertEquals(QueryGroupTestUtils.NAME_ONE, request.getName()); + BytesStreamOutput out = new BytesStreamOutput(); + request.writeTo(out); + StreamInput streamInput = out.bytes().streamInput(); + DeleteQueryGroupRequest otherRequest = new DeleteQueryGroupRequest(streamInput); + assertEquals(request.getName(), otherRequest.getName()); + } + + /** + * Test case to validate a DeleteQueryGroupRequest.
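+ * A request constructed with a null name must fail validation with a non-empty ActionRequestValidationException message.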
+ */ + public void testSerializationWithNull() throws IOException { + DeleteQueryGroupRequest request = new DeleteQueryGroupRequest((String) null); + ActionRequestValidationException actionRequestValidationException = request.validate(); + assertFalse(actionRequestValidationException.getMessage().isEmpty()); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetQueryGroupRequestTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetQueryGroupRequestTests.java new file mode 100644 index 0000000000000..32b5f7ec9e2c3 --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetQueryGroupRequestTests.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.plugin.wlm.QueryGroupTestUtils; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class GetQueryGroupRequestTests extends OpenSearchTestCase { + + /** + * Test case to verify the serialization and deserialization of GetQueryGroupRequest. + */ + public void testSerialization() throws IOException { + GetQueryGroupRequest request = new GetQueryGroupRequest(QueryGroupTestUtils.NAME_ONE); + assertEquals(QueryGroupTestUtils.NAME_ONE, request.getName()); + BytesStreamOutput out = new BytesStreamOutput(); + request.writeTo(out); + StreamInput streamInput = out.bytes().streamInput(); + GetQueryGroupRequest otherRequest = new GetQueryGroupRequest(streamInput); + assertEquals(request.getName(), otherRequest.getName()); + } + + /** + * Test case to verify the serialization and deserialization of GetQueryGroupRequest when name is null. + */ + public void testSerializationWithNull() throws IOException { + GetQueryGroupRequest request = new GetQueryGroupRequest((String) null); + assertNull(request.getName()); + BytesStreamOutput out = new BytesStreamOutput(); + request.writeTo(out); + StreamInput streamInput = out.bytes().streamInput(); + GetQueryGroupRequest otherRequest = new GetQueryGroupRequest(streamInput); + assertEquals(request.getName(), otherRequest.getName()); + } + + /** + * Test case to verify the validation function of GetQueryGroupRequest + */ + public void testValidation() { + GetQueryGroupRequest request = new GetQueryGroupRequest("a".repeat(51)); + assertThrows(IllegalArgumentException.class, request::validate); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetQueryGroupResponseTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetQueryGroupResponseTests.java new file mode 100644 index 0000000000000..774f4b2d8db52 --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetQueryGroupResponseTests.java @@ -0,0 +1,151 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.wlm.QueryGroupTestUtils; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.mockito.Mockito.mock; + +public class GetQueryGroupResponseTests extends OpenSearchTestCase { + + /** + * Test case to verify the serialization and deserialization of GetQueryGroupResponse. + */ + public void testSerializationSingleQueryGroup() throws IOException { + List<QueryGroup> list = new ArrayList<>(); + list.add(QueryGroupTestUtils.queryGroupOne); + GetQueryGroupResponse response = new GetQueryGroupResponse(list, RestStatus.OK); + assertEquals(response.getQueryGroups(), list); + + BytesStreamOutput out = new BytesStreamOutput(); + response.writeTo(out); + StreamInput streamInput = out.bytes().streamInput(); + + GetQueryGroupResponse otherResponse = new GetQueryGroupResponse(streamInput); + assertEquals(response.getRestStatus(), otherResponse.getRestStatus()); + QueryGroupTestUtils.assertEqualQueryGroups(response.getQueryGroups(), otherResponse.getQueryGroups()); + } + + /** + * Test case to verify the serialization and deserialization of GetQueryGroupResponse when the result contains multiple QueryGroups. + */ + public void testSerializationMultipleQueryGroup() throws IOException { + GetQueryGroupResponse response = new GetQueryGroupResponse(QueryGroupTestUtils.queryGroupList(), RestStatus.OK); + assertEquals(response.getQueryGroups(), QueryGroupTestUtils.queryGroupList()); + + BytesStreamOutput out = new BytesStreamOutput(); + response.writeTo(out); + StreamInput streamInput = out.bytes().streamInput(); + + GetQueryGroupResponse otherResponse = new GetQueryGroupResponse(streamInput); + assertEquals(response.getRestStatus(), otherResponse.getRestStatus()); + assertEquals(2, otherResponse.getQueryGroups().size()); + QueryGroupTestUtils.assertEqualQueryGroups(response.getQueryGroups(), otherResponse.getQueryGroups()); + } + + /** + * Test case to verify the serialization and deserialization of GetQueryGroupResponse when the result is empty. + */ + public void testSerializationNull() throws IOException { + List<QueryGroup> list = new ArrayList<>(); + GetQueryGroupResponse response = new GetQueryGroupResponse(list, RestStatus.OK); + assertEquals(response.getQueryGroups(), list); + + BytesStreamOutput out = new BytesStreamOutput(); + response.writeTo(out); + StreamInput streamInput = out.bytes().streamInput(); + + GetQueryGroupResponse otherResponse = new GetQueryGroupResponse(streamInput); + assertEquals(response.getRestStatus(), otherResponse.getRestStatus()); + assertEquals(0, otherResponse.getQueryGroups().size()); + } + + /** + * Test case to verify the toXContent of GetQueryGroupResponse.
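+ * Each QueryGroup in the response must be rendered as an entry in the top-level "query_groups" JSON array.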
+ */ + public void testToXContentGetSingleQueryGroup() throws IOException { + List<QueryGroup> queryGroupList = new ArrayList<>(); + queryGroupList.add(QueryGroupTestUtils.queryGroupOne); + XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); + GetQueryGroupResponse response = new GetQueryGroupResponse(queryGroupList, RestStatus.OK); + String actual = response.toXContent(builder, mock(ToXContent.Params.class)).toString(); + String expected = "{\n" + + " \"query_groups\" : [\n" + + " {\n" + + " \"_id\" : \"AgfUO5Ja9yfsYlONlYi3TQ==\",\n" + + " \"name\" : \"query_group_one\",\n" + + " \"resiliency_mode\" : \"monitor\",\n" + + " \"updated_at\" : 4513232413,\n" + + " \"resource_limits\" : {\n" + + " \"memory\" : 0.3\n" + + " }\n" + + " }\n" + + " ]\n" + + "}"; + assertEquals(expected, actual); + } + + /** + * Test case to verify the toXContent of GetQueryGroupResponse when the result contains multiple QueryGroups. + */ + public void testToXContentGetMultipleQueryGroup() throws IOException { + List<QueryGroup> queryGroupList = new ArrayList<>(); + queryGroupList.add(QueryGroupTestUtils.queryGroupOne); + queryGroupList.add(QueryGroupTestUtils.queryGroupTwo); + XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); + GetQueryGroupResponse response = new GetQueryGroupResponse(queryGroupList, RestStatus.OK); + String actual = response.toXContent(builder, mock(ToXContent.Params.class)).toString(); + String expected = "{\n" + + " \"query_groups\" : [\n" + + " {\n" + + " \"_id\" : \"AgfUO5Ja9yfsYlONlYi3TQ==\",\n" + + " \"name\" : \"query_group_one\",\n" + + " \"resiliency_mode\" : \"monitor\",\n" + + " \"updated_at\" : 4513232413,\n" + + " \"resource_limits\" : {\n" + + " \"memory\" : 0.3\n" + + " }\n" + + " },\n" + + " {\n" + + " \"_id\" : \"G5iIqHy4g7eK1qIAAAAIH53=1\",\n" + + " \"name\" : \"query_group_two\",\n" + + " \"resiliency_mode\" : \"monitor\",\n" + + " \"updated_at\" : 4513232415,\n" + + " \"resource_limits\" : {\n" + + " \"memory\" : 0.6\n" + + " }\n" + + " }\n" + + " ]\n" + + "}"; + assertEquals(expected, actual); + } + + /** + * Test case to verify toXContent of GetQueryGroupResponse when the result contains zero QueryGroup. + */ + public void testToXContentGetZeroQueryGroup() throws IOException { + XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); + GetQueryGroupResponse otherResponse = new GetQueryGroupResponse(new ArrayList<>(), RestStatus.OK); + String actual = otherResponse.toXContent(builder, mock(ToXContent.Params.class)).toString(); + String expected = "{\n" + " \"query_groups\" : [ ]\n" + "}"; + assertEquals(expected, actual); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java new file mode 100644 index 0000000000000..253d65f8da80f --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java @@ -0,0 +1,63 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.action.ActionListener; +import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + +public class TransportDeleteQueryGroupActionTests extends OpenSearchTestCase { + + ClusterService clusterService = mock(ClusterService.class); + TransportService transportService = mock(TransportService.class); + ActionFilters actionFilters = mock(ActionFilters.class); + ThreadPool threadPool = mock(ThreadPool.class); + IndexNameExpressionResolver indexNameExpressionResolver = mock(IndexNameExpressionResolver.class); + QueryGroupPersistenceService queryGroupPersistenceService = mock(QueryGroupPersistenceService.class); + + TransportDeleteQueryGroupAction action = new TransportDeleteQueryGroupAction( + clusterService, + transportService, + actionFilters, + threadPool, + indexNameExpressionResolver, + queryGroupPersistenceService + ); + + /** + * Test case to validate the construction for TransportDeleteQueryGroupAction + */ + public void testConstruction() { + assertNotNull(action); + assertEquals(ThreadPool.Names.SAME, action.executor()); + } + + /** + * Test case to validate the clusterManagerOperation function in TransportDeleteQueryGroupAction + */ + public void testClusterManagerOperation() throws Exception { + DeleteQueryGroupRequest request = new DeleteQueryGroupRequest("testGroup"); + @SuppressWarnings("unchecked") + ActionListener<AcknowledgedResponse> listener = mock(ActionListener.class); + ClusterState clusterState = mock(ClusterState.class); + action.clusterManagerOperation(request, clusterState, listener); + verify(queryGroupPersistenceService).deleteInClusterStateMetadata(eq(request), eq(listener)); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportGetQueryGroupActionTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportGetQueryGroupActionTests.java new file mode 100644 index 0000000000000..755b11a5f4b89 --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportGetQueryGroupActionTests.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.ResourceNotFoundException; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.action.ActionListener; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_NONE_EXISTED; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_ONE; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.clusterState; +import static org.mockito.Mockito.mock; + +public class TransportGetQueryGroupActionTests extends OpenSearchTestCase { + + /** + * Test case for ClusterManagerOperation function + */ + @SuppressWarnings("unchecked") + public void testClusterManagerOperation() throws Exception { + GetQueryGroupRequest getQueryGroupRequest1 = new GetQueryGroupRequest(NAME_NONE_EXISTED); + GetQueryGroupRequest getQueryGroupRequest2 = new GetQueryGroupRequest(NAME_ONE); + TransportGetQueryGroupAction transportGetQueryGroupAction = new TransportGetQueryGroupAction( + mock(ClusterService.class), + mock(TransportService.class), + mock(ActionFilters.class), + mock(ThreadPool.class), + mock(IndexNameExpressionResolver.class) + ); + assertThrows( + ResourceNotFoundException.class, + () -> transportGetQueryGroupAction.clusterManagerOperation(getQueryGroupRequest1, clusterState(), mock(ActionListener.class)) + ); + transportGetQueryGroupAction.clusterManagerOperation(getQueryGroupRequest2, clusterState(), mock(ActionListener.class)); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java new file mode 100644 index 0000000000000..72191e076bb87 --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java @@ -0,0 +1,85 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.wlm.rest; + +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.CheckedConsumer; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupAction; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.rest.FakeRestRequest; + +import java.util.List; + +import org.mockito.ArgumentCaptor; + +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_ONE; +import static org.opensearch.rest.RestRequest.Method.DELETE; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; + +public class RestDeleteQueryGroupActionTests extends OpenSearchTestCase { + /** + * Test case to validate the construction for RestDeleteQueryGroupAction + */ + public void testConstruction() { + RestDeleteQueryGroupAction action = new RestDeleteQueryGroupAction(); + assertNotNull(action); + assertEquals("delete_query_group", action.getName()); + List<RestHandler.Route> routes = action.routes(); + assertEquals(1, routes.size()); + RestHandler.Route route = routes.get(0); + assertEquals(DELETE, route.getMethod()); + assertEquals("_wlm/query_group/{name}", route.getPath()); + } + + /** + * Test case to validate the prepareRequest logic for RestDeleteQueryGroupAction + */ + @SuppressWarnings("unchecked") + public void testPrepareRequest() throws Exception { + RestDeleteQueryGroupAction restDeleteQueryGroupAction = new RestDeleteQueryGroupAction(); + NodeClient nodeClient = mock(NodeClient.class); + RestRequest realRequest = new FakeRestRequest(); + realRequest.params().put("name", NAME_ONE); + RestRequest spyRequest = spy(realRequest); + + doReturn(TimeValue.timeValueSeconds(30)).when(spyRequest).paramAsTime(eq("cluster_manager_timeout"), any(TimeValue.class)); + doReturn(TimeValue.timeValueSeconds(60)).when(spyRequest).paramAsTime(eq("timeout"), any(TimeValue.class)); + + CheckedConsumer<RestChannel, Exception> consumer = restDeleteQueryGroupAction.prepareRequest(spyRequest, nodeClient); + assertNotNull(consumer); + ArgumentCaptor<DeleteQueryGroupRequest> requestCaptor = ArgumentCaptor.forClass(DeleteQueryGroupRequest.class); + ArgumentCaptor<RestToXContentListener<AcknowledgedResponse>> listenerCaptor = ArgumentCaptor.forClass(RestToXContentListener.class); + doNothing().when(nodeClient).execute(eq(DeleteQueryGroupAction.INSTANCE), requestCaptor.capture(), listenerCaptor.capture()); + + consumer.accept(mock(RestChannel.class)); + DeleteQueryGroupRequest capturedRequest = requestCaptor.getValue(); + assertEquals(NAME_ONE, capturedRequest.getName()); + assertEquals(TimeValue.timeValueSeconds(30), capturedRequest.clusterManagerNodeTimeout()); + assertEquals(TimeValue.timeValueSeconds(60), capturedRequest.timeout()); + verify(nodeClient).execute( + eq(DeleteQueryGroupAction.INSTANCE), + any(DeleteQueryGroupRequest.class), + any(RestToXContentListener.class) + ); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java 
b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java new file mode 100644 index 0000000000000..a516ffdde839e --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java @@ -0,0 +1,359 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.service; + +import org.opensearch.ResourceNotFoundException; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.cluster.AckedClusterStateUpdateTask; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ClusterStateUpdateTask; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.plugin.wlm.QueryGroupTestUtils; +import org.opensearch.plugin.wlm.action.CreateQueryGroupResponse; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest; +import org.opensearch.search.ResourceType; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +import org.mockito.ArgumentCaptor; + +import static org.opensearch.cluster.metadata.QueryGroup.builder; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.MEMORY_STRING; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.MONITOR_STRING; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_NONE_EXISTED; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_ONE; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_TWO; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils._ID_ONE; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils._ID_TWO; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.assertEqualQueryGroups; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.clusterSettings; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.clusterSettingsSet; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.clusterState; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.preparePersistenceServiceSetup; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupList; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupOne; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupPersistenceService; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupTwo; +import static org.opensearch.plugin.wlm.service.QueryGroupPersistenceService.QUERY_GROUP_COUNT_SETTING_NAME; +import static org.opensearch.plugin.wlm.service.QueryGroupPersistenceService.SOURCE; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.argThat; +import static 
org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +public class QueryGroupPersistenceServiceTests extends OpenSearchTestCase { + + /** + * Test case to validate the creation logic of a QueryGroup + */ + public void testCreateQueryGroup() { + Tuple<QueryGroupPersistenceService, ClusterState> setup = preparePersistenceServiceSetup(new HashMap<>()); + QueryGroupPersistenceService queryGroupPersistenceService1 = setup.v1(); + ClusterState clusterState = setup.v2(); + ClusterState newClusterState = queryGroupPersistenceService1.saveQueryGroupInClusterState(queryGroupOne, clusterState); + Map<String, QueryGroup> updatedGroupsMap = newClusterState.getMetadata().queryGroups(); + assertEquals(1, updatedGroupsMap.size()); + assertTrue(updatedGroupsMap.containsKey(_ID_ONE)); + List<QueryGroup> listOne = new ArrayList<>(); + List<QueryGroup> listTwo = new ArrayList<>(); + listOne.add(queryGroupOne); + listTwo.add(updatedGroupsMap.get(_ID_ONE)); + assertEqualQueryGroups(listOne, listTwo); + } + + /** + * Test case to validate the logic for adding a new QueryGroup to a cluster state that already contains + * an existing QueryGroup + */ + public void testCreateAnotherQueryGroup() { + Tuple<QueryGroupPersistenceService, ClusterState> setup = preparePersistenceServiceSetup(Map.of(_ID_ONE, queryGroupOne)); + QueryGroupPersistenceService queryGroupPersistenceService1 = setup.v1(); + ClusterState clusterState = setup.v2(); + ClusterState newClusterState = queryGroupPersistenceService1.saveQueryGroupInClusterState(queryGroupTwo, clusterState); + Map<String, QueryGroup> updatedGroups = newClusterState.getMetadata().queryGroups(); + assertEquals(2, updatedGroups.size()); + assertTrue(updatedGroups.containsKey(_ID_TWO)); + Collection<QueryGroup> values = updatedGroups.values(); + assertEqualQueryGroups(queryGroupList(), new ArrayList<>(values)); + } + + /** + * Test case to ensure the error is thrown when we try to create another QueryGroup with a duplicate name + */ + public void testCreateQueryGroupDuplicateName() { + Tuple<QueryGroupPersistenceService, ClusterState> setup = preparePersistenceServiceSetup(Map.of(_ID_ONE, queryGroupOne)); + QueryGroupPersistenceService queryGroupPersistenceService1 = setup.v1(); + ClusterState clusterState = setup.v2(); + QueryGroup toCreate = builder().name(NAME_ONE) + ._id("W5iIqHyhgi4K1qIAAAAIHw==") + .mode(MONITOR_STRING) + .resourceLimits(Map.of(ResourceType.fromName(MEMORY_STRING), 0.3)) + .updatedAt(1690934400000L) + .build(); + assertThrows(RuntimeException.class, () -> queryGroupPersistenceService1.saveQueryGroupInClusterState(toCreate, clusterState)); + } + + /** + * Test case to ensure the error is thrown when we try to create another QueryGroup that would push + * the total resource limits above 1 + */ + public void testCreateQueryGroupOverflowAllocation() { + Tuple<QueryGroupPersistenceService, ClusterState> setup = preparePersistenceServiceSetup(Map.of(_ID_TWO, queryGroupTwo)); + QueryGroup toCreate = builder().name(NAME_ONE) + ._id("W5iIqHyhgi4K1qIAAAAIHw==") + .mode(MONITOR_STRING) + .resourceLimits(Map.of(ResourceType.fromName(MEMORY_STRING), 0.41)) + .updatedAt(1690934400000L) + .build(); + + QueryGroupPersistenceService queryGroupPersistenceService1 = setup.v1(); + ClusterState clusterState = setup.v2(); + assertThrows(RuntimeException.class, () -> queryGroupPersistenceService1.saveQueryGroupInClusterState(toCreate, clusterState)); + } + + /** + * Test case to ensure the error is thrown when we already have the max allowed number of QueryGroups, but + * we want to create another one + */ + public void testCreateQueryGroupOverflowCount() { + QueryGroup toCreate = builder().name(NAME_NONE_EXISTED) + 
._id("W5iIqHyhgi4K1qIAAAAIHw==") + .mode(MONITOR_STRING) + .resourceLimits(Map.of(ResourceType.fromName(MEMORY_STRING), 0.5)) + .updatedAt(1690934400000L) + .build(); + Metadata metadata = Metadata.builder().queryGroups(Map.of(_ID_ONE, queryGroupOne, _ID_TWO, queryGroupTwo)).build(); + Settings settings = Settings.builder().put(QUERY_GROUP_COUNT_SETTING_NAME, 2).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, clusterSettingsSet()); + ClusterService clusterService = new ClusterService(settings, clusterSettings, mock(ThreadPool.class)); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).metadata(metadata).build(); + QueryGroupPersistenceService queryGroupPersistenceService1 = new QueryGroupPersistenceService( + clusterService, + settings, + clusterSettings + ); + assertThrows(RuntimeException.class, () -> queryGroupPersistenceService1.saveQueryGroupInClusterState(toCreate, clusterState)); + } + + /** + * Tests the invalid value of {@code node.query_group.max_count} + */ + public void testInvalidMaxQueryGroupCount() { + Settings settings = Settings.builder().put(QUERY_GROUP_COUNT_SETTING_NAME, 2).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, clusterSettingsSet()); + ClusterService clusterService = new ClusterService(settings, clusterSettings, mock(ThreadPool.class)); + QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( + clusterService, + settings, + clusterSettings + ); + assertThrows(IllegalArgumentException.class, () -> queryGroupPersistenceService.setMaxQueryGroupCount(-1)); + } + + /** + * Tests the valid value of {@code node.query_group.max_count} + */ + public void testValidMaxSandboxCountSetting() { + Settings settings = Settings.builder().put(QUERY_GROUP_COUNT_SETTING_NAME, 100).build(); + ClusterService clusterService = new ClusterService(settings, clusterSettings(), mock(ThreadPool.class)); + QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( + clusterService, + settings, + clusterSettings() + ); + queryGroupPersistenceService.setMaxQueryGroupCount(50); + assertEquals(50, queryGroupPersistenceService.getMaxQueryGroupCount()); + } + + /** + * Tests PersistInClusterStateMetadata function + */ + public void testPersistInClusterStateMetadata() { + ClusterService clusterService = mock(ClusterService.class); + @SuppressWarnings("unchecked") + ActionListener listener = mock(ActionListener.class); + QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( + clusterService, + QueryGroupTestUtils.settings(), + clusterSettings() + ); + queryGroupPersistenceService.persistInClusterStateMetadata(queryGroupOne, listener); + verify(clusterService).submitStateUpdateTask(eq(SOURCE), any()); + } + + /** + * Tests PersistInClusterStateMetadata function with inner functions + */ + public void testPersistInClusterStateMetadataInner() { + ClusterService clusterService = mock(ClusterService.class); + @SuppressWarnings("unchecked") + ActionListener listener = mock(ActionListener.class); + QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( + clusterService, + QueryGroupTestUtils.settings(), + clusterSettings() + ); + ArgumentCaptor captor = ArgumentCaptor.forClass(ClusterStateUpdateTask.class); + queryGroupPersistenceService.persistInClusterStateMetadata(queryGroupOne, listener); + verify(clusterService, times(1)).submitStateUpdateTask(eq(SOURCE), 
captor.capture()); + ClusterStateUpdateTask capturedTask = captor.getValue(); + assertEquals(queryGroupPersistenceService.createQueryGroupThrottlingKey, capturedTask.getClusterManagerThrottlingKey()); + + doAnswer(invocation -> { + ClusterStateUpdateTask task = invocation.getArgument(1); + task.clusterStateProcessed(SOURCE, mock(ClusterState.class), mock(ClusterState.class)); + return null; + }).when(clusterService).submitStateUpdateTask(anyString(), any()); + queryGroupPersistenceService.persistInClusterStateMetadata(queryGroupOne, listener); + verify(listener).onResponse(any(CreateQueryGroupResponse.class)); + } + + /** + * Tests PersistInClusterStateMetadata function with failure + */ + public void testPersistInClusterStateMetadataFailure() { + ClusterService clusterService = mock(ClusterService.class); + @SuppressWarnings("unchecked") + ActionListener<CreateQueryGroupResponse> listener = mock(ActionListener.class); + QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( + clusterService, + QueryGroupTestUtils.settings(), + clusterSettings() + ); + doAnswer(invocation -> { + ClusterStateUpdateTask task = invocation.getArgument(1); + Exception exception = new RuntimeException("Test Exception"); + task.onFailure(SOURCE, exception); + return null; + }).when(clusterService).submitStateUpdateTask(anyString(), any()); + queryGroupPersistenceService.persistInClusterStateMetadata(queryGroupOne, listener); + verify(listener).onFailure(any(RuntimeException.class)); + } + + /** + * Tests getting a single QueryGroup + */ + public void testGetSingleQueryGroup() { + Collection<QueryGroup> groupsCollections = QueryGroupPersistenceService.getFromClusterStateMetadata(NAME_ONE, clusterState()); + List<QueryGroup> groups = new ArrayList<>(groupsCollections); + assertEquals(1, groups.size()); + QueryGroup queryGroup = groups.get(0); + List<QueryGroup> listOne = new ArrayList<>(); + List<QueryGroup> listTwo = new ArrayList<>(); + listOne.add(QueryGroupTestUtils.queryGroupOne); + listTwo.add(queryGroup); + QueryGroupTestUtils.assertEqualQueryGroups(listOne, listTwo); + } + + /** + * Tests getting all QueryGroups + */ + public void testGetAllQueryGroups() { + assertEquals(2, QueryGroupTestUtils.clusterState().metadata().queryGroups().size()); + Collection<QueryGroup> groupsCollections = QueryGroupPersistenceService.getFromClusterStateMetadata(null, clusterState()); + List<QueryGroup> res = new ArrayList<>(groupsCollections); + assertEquals(2, res.size()); + Set<String> currentNAME = res.stream().map(QueryGroup::getName).collect(Collectors.toSet()); + assertTrue(currentNAME.contains(QueryGroupTestUtils.NAME_ONE)); + assertTrue(currentNAME.contains(QueryGroupTestUtils.NAME_TWO)); + QueryGroupTestUtils.assertEqualQueryGroups(QueryGroupTestUtils.queryGroupList(), res); + } + + /** + * Tests getting a QueryGroup with an invalid name + */ + public void testGetNonExistedQueryGroups() { + Collection<QueryGroup> groupsCollections = QueryGroupPersistenceService.getFromClusterStateMetadata( + NAME_NONE_EXISTED, + clusterState() + ); + List<QueryGroup> groups = new ArrayList<>(groupsCollections); + assertEquals(0, groups.size()); + } + + /** + * Tests setting maxQueryGroupCount + */ + public void testMaxQueryGroupCount() { + assertThrows(IllegalArgumentException.class, () -> QueryGroupTestUtils.queryGroupPersistenceService().setMaxQueryGroupCount(-1)); + QueryGroupPersistenceService queryGroupPersistenceService = QueryGroupTestUtils.queryGroupPersistenceService(); + queryGroupPersistenceService.setMaxQueryGroupCount(50); + assertEquals(50, queryGroupPersistenceService.getMaxQueryGroupCount()); + } + + /** + * 
Tests deleting a single QueryGroup + */ + public void testDeleteSingleQueryGroup() { + ClusterState newClusterState = queryGroupPersistenceService().deleteQueryGroupInClusterState(NAME_TWO, clusterState()); + Map<String, QueryGroup> afterDeletionGroups = newClusterState.getMetadata().queryGroups(); + assertFalse(afterDeletionGroups.containsKey(_ID_TWO)); + assertEquals(1, afterDeletionGroups.size()); + List<QueryGroup> oldQueryGroups = new ArrayList<>(); + oldQueryGroups.add(queryGroupOne); + assertEqualQueryGroups(new ArrayList<>(afterDeletionGroups.values()), oldQueryGroups); + } + + /** + * Tests deleting a QueryGroup with an invalid name + */ + public void testDeleteNonExistedQueryGroup() { + assertThrows( + ResourceNotFoundException.class, + () -> queryGroupPersistenceService().deleteQueryGroupInClusterState(NAME_NONE_EXISTED, clusterState()) + ); + } + + /** + * Tests DeleteInClusterStateMetadata function + */ + @SuppressWarnings("unchecked") + public void testDeleteInClusterStateMetadata() throws Exception { + DeleteQueryGroupRequest request = new DeleteQueryGroupRequest(NAME_ONE); + ClusterService clusterService = mock(ClusterService.class); + + ActionListener<AcknowledgedResponse> listener = mock(ActionListener.class); + QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( + clusterService, + QueryGroupTestUtils.settings(), + clusterSettings() + ); + doAnswer(invocation -> { + AckedClusterStateUpdateTask<AcknowledgedResponse> task = invocation.getArgument(1); + ClusterState initialState = clusterState(); + ClusterState newState = task.execute(initialState); + assertNotNull(newState); + assertEquals(queryGroupPersistenceService.deleteQueryGroupThrottlingKey, task.getClusterManagerThrottlingKey()); + task.onAllNodesAcked(null); + verify(listener).onResponse(argThat(response -> response.isAcknowledged())); + return null; + }).when(clusterService).submitStateUpdateTask(anyString(), any()); + queryGroupPersistenceService.deleteInClusterStateMetadata(request, listener); + verify(clusterService).submitStateUpdateTask(eq(SOURCE), any(AckedClusterStateUpdateTask.class)); + } +} diff --git a/plugins/workload-management/src/yamlRestTest/java/org/opensearch/plugin/wlm/WorkloadManagementClientYamlTestSuiteIT.java b/plugins/workload-management/src/yamlRestTest/java/org/opensearch/plugin/wlm/WorkloadManagementClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..9ec4a36ff6a5b --- /dev/null +++ b/plugins/workload-management/src/yamlRestTest/java/org/opensearch/plugin/wlm/WorkloadManagementClientYamlTestSuiteIT.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.plugin.wlm; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.test.rest.yaml.ClientYamlTestCandidate; +import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase; + +/** Runs yaml rest tests */ +public class WorkloadManagementClientYamlTestSuiteIT extends OpenSearchClientYamlSuiteTestCase { + + public WorkloadManagementClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return OpenSearchClientYamlSuiteTestCase.createParameters(); + } +} diff --git a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/create_query_group_context.json b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/create_query_group_context.json new file mode 100644 index 0000000000000..bb4620c01f2d6 --- /dev/null +++ b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/create_query_group_context.json @@ -0,0 +1,18 @@ +{ + "create_query_group_context": { + "stability": "experimental", + "url": { + "paths": [ + { + "path": "/_wlm/query_group", + "methods": ["PUT", "POST"], + "parts": {} + } + ] + }, + "params":{}, + "body":{ + "description":"The QueryGroup schema" + } + } +} diff --git a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_query_group_context.json b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_query_group_context.json new file mode 100644 index 0000000000000..16930427fc2fe --- /dev/null +++ b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_query_group_context.json @@ -0,0 +1,22 @@ +{ + "delete_query_group_context": { + "stability": "experimental", + "url": { + "paths": [ + { + "path": "/_wlm/query_group/{name}", + "methods": [ + "DELETE" + ], + "parts": { + "name": { + "type": "string", + "description": "QueryGroup name" + } + } + } + ] + }, + "params":{} + } +} diff --git a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/get_query_group_context.json b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/get_query_group_context.json new file mode 100644 index 0000000000000..e0d552be616b2 --- /dev/null +++ b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/get_query_group_context.json @@ -0,0 +1,25 @@ +{ + "get_query_group_context": { + "stability": "experimental", + "url": { + "paths": [ + { + "path": "/_wlm/query_group", + "methods": ["GET"], + "parts": {} + }, + { + "path": "/_wlm/query_group/{name}", + "methods": ["GET"], + "parts": { + "name": { + "type": "string", + "description": "QueryGroup name" + } + } + } + ] + }, + "params":{} + } +} diff --git a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_query_group.yml b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_query_group.yml new file mode 100644 index 0000000000000..a00314986a5cf --- /dev/null +++ b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_query_group.yml @@ -0,0 +1,114 @@ +"test CRUD Operations for QueryGroup API ": + - skip: + version: " - 2.16.99" + reason: "QueryGroup WorkloadManagement feature was added in 2.17" + + - do: + 
create_query_group_context: + body: + { + "name": "analytics", + "resiliency_mode": "monitor", + "resource_limits": { + "cpu": 0.4, + "memory": 0.2 + } + } + + - match: { name: "analytics" } + - match: { resiliency_mode: "monitor" } + - match: { resource_limits.cpu: 0.4 } + - match: { resource_limits.memory: 0.2 } + + - do: + get_query_group_context: + name: "analytics" + + - match: { query_groups.0.name: "analytics" } + - match: { query_groups.0.resiliency_mode: "monitor" } + - match: { query_groups.0.resource_limits.cpu: 0.4 } + - match: { query_groups.0.resource_limits.memory: 0.2 } + + - do: + catch: /illegal_argument_exception/ + create_query_group_context: + body: + { + "name": "analytics", + "resiliency_mode": "monitor", + "resource_limits": { + "cpu": 0.4, + "memory": 0.2 + } + } + + - do: + catch: /illegal_argument_exception/ + create_query_group_context: + body: + { + "name": "analytics2", + "resiliency_mode": "monitor", + "resource_limits": { + "cpu": 0.61, + "memory": 0.2 + } + } + + - do: + catch: /illegal_argument_exception/ + create_query_group_context: + body: + { + "name": "analytics2", + "resiliency_mode": "monitor", + "resource_limits": { + "cpu": -0.1, + "memory": 0.2 + } + } + + - do: + catch: /illegal_argument_exception/ + create_query_group_context: + body: + { + "name": "", + "resiliency_mode": "monitor", + "resource_limits": { + "cpu": 0.1, + "memory": 0.2 + } + } + + - do: + create_query_group_context: + body: + { + "name": "analytics2", + "resiliency_mode": "monitor", + "resource_limits": { + "cpu": 0.35, + "memory": 0.25 + } + } + + - match: { name: "analytics2" } + - match: { resiliency_mode: "monitor" } + - match: { resource_limits.cpu: 0.35 } + - match: { resource_limits.memory: 0.25 } + + - do: + get_query_group_context: + name: "analytics2" + + - match: { query_groups.0.name: "analytics2" } + - match: { query_groups.0.resiliency_mode: "monitor" } + - match: { query_groups.0.resource_limits.cpu: 0.35 } + - match: { query_groups.0.resource_limits.memory: 0.25 } + + - do: + delete_query_group_context: + name: "analytics2" + + - match: { acknowledged: true } diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/FileUtils.java b/qa/os/src/test/java/org/opensearch/packaging/util/FileUtils.java index 5169ce18fff79..dd5248738569e 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/util/FileUtils.java +++ b/qa/os/src/test/java/org/opensearch/packaging/util/FileUtils.java @@ -380,7 +380,7 @@ public static String escapePath(Path path) { } /** - * Recursively copy the the source directory to the target directory, preserving permissions. + * Recursively copy the source directory to the target directory, preserving permissions. 
*/ public static void copyDirectory(Path source, Path target) throws IOException { Files.walkFileTree(source, new SimpleFileVisitor() { diff --git a/qa/smoke-test-http/build.gradle b/qa/smoke-test-http/build.gradle index f48ddc26d929b..496fda6bb717d 100644 --- a/qa/smoke-test-http/build.gradle +++ b/qa/smoke-test-http/build.gradle @@ -35,6 +35,7 @@ apply plugin: 'opensearch.test-with-dependencies' dependencies { testImplementation project(path: ':modules:transport-netty4') // for http + testImplementation project(path: ':plugins:transport-reactor-netty4') // for http testImplementation project(path: ':plugins:transport-nio') testImplementation project(path: ':plugins:identity-shiro') // for http } diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/DanglingIndicesRestIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/DanglingIndicesRestIT.java index 42c7fd667fd8f..741660f972bfb 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/DanglingIndicesRestIT.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/DanglingIndicesRestIT.java @@ -152,8 +152,8 @@ public void testDanglingIndicesCanBeImported() throws Exception { * 1, then create two indices and delete them both while one node in * the cluster is stopped. The deletion of the second pushes the deletion * of the first out of the graveyard. When the stopped node is resumed, - * only the second index will be found into the graveyard and the the - * other will be considered dangling, and can therefore be listed and + * only the second index will be found into the graveyard and the other + * will be considered dangling, and can therefore be listed and * deleted through the API */ public void testDanglingIndicesCanBeDeleted() throws Exception { diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpSmokeTestCase.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpSmokeTestCase.java index 08974b902c418..6d8e80a0a63ea 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpSmokeTestCase.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpSmokeTestCase.java @@ -38,6 +38,7 @@ import org.opensearch.transport.Netty4ModulePlugin; import org.opensearch.transport.nio.MockNioTransportPlugin; import org.opensearch.transport.nio.NioTransportPlugin; +import org.opensearch.transport.reactor.ReactorNetty4Plugin; import org.junit.BeforeClass; import java.util.Arrays; @@ -53,7 +54,7 @@ public abstract class HttpSmokeTestCase extends OpenSearchIntegTestCase { @BeforeClass public static void setUpTransport() { nodeTransportTypeKey = getTypeKey(randomFrom(getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class)); - nodeHttpTypeKey = getHttpTypeKey(randomFrom(Netty4ModulePlugin.class, NioTransportPlugin.class)); + nodeHttpTypeKey = getHttpTypeKey(randomFrom(Netty4ModulePlugin.class, NioTransportPlugin.class, ReactorNetty4Plugin.class)); clientTypeKey = getTypeKey(randomFrom(getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class)); } @@ -71,6 +72,8 @@ private static String getTypeKey(Class clazz) { private static String getHttpTypeKey(Class clazz) { if (clazz.equals(NioTransportPlugin.class)) { return NioTransportPlugin.NIO_HTTP_TRANSPORT_NAME; + } else if (clazz.equals(ReactorNetty4Plugin.class)) { + return ReactorNetty4Plugin.REACTOR_NETTY_HTTP_TRANSPORT_NAME; } else { assert clazz.equals(Netty4ModulePlugin.class); return Netty4ModulePlugin.NETTY_HTTP_TRANSPORT_NAME; @@ -92,7 +95,7 @@ protected Settings 
nodeSettings(int nodeOrdinal) { @Override protected Collection<Class<? extends Plugin>> nodePlugins() { - return Arrays.asList(getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class); + return Arrays.asList(getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class, ReactorNetty4Plugin.class); } @Override diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/IdentityAuthenticationIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/IdentityAuthenticationIT.java index 78398e10b9ce8..1a806b033eb8a 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/IdentityAuthenticationIT.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/IdentityAuthenticationIT.java @@ -26,6 +26,8 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.Netty4ModulePlugin; import org.opensearch.transport.nio.NioTransportPlugin; +import org.opensearch.transport.reactor.ReactorNetty4Plugin; + import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.core.StringContains.containsString; @@ -42,7 +44,7 @@ protected Settings nodeSettings(int nodeOrdinal) { @Override protected Collection<Class<? extends Plugin>> nodePlugins() { - return Arrays.asList(OpenSearchTestCase.getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class, ShiroIdentityPlugin.class); + return Arrays.asList(OpenSearchTestCase.getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class, ReactorNetty4Plugin.class, ShiroIdentityPlugin.class); } diff --git a/qa/wildfly/src/main/webapp/WEB-INF/jboss-deployment-structure.xml b/qa/wildfly/src/main/webapp/WEB-INF/jboss-deployment-structure.xml index a08090100989a..4fabd038cf915 100644 --- a/qa/wildfly/src/main/webapp/WEB-INF/jboss-deployment-structure.xml +++ b/qa/wildfly/src/main/webapp/WEB-INF/jboss-deployment-structure.xml @@ -3,5 +3,8 @@ + + + diff --git a/release-notes/opensearch.release-notes-1.3.18.md b/release-notes/opensearch.release-notes-1.3.18.md new file mode 100644 index 0000000000000..75c38dd285a63 --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.18.md @@ -0,0 +1,4 @@ +## 2024-07-09 Version 1.3.18 Release Notes + +### Upgrades +- Bump `netty` from 4.1.110.Final to 4.1.111.Final ([#14356](https://github.com/opensearch-project/OpenSearch/pull/14356)) diff --git a/release-notes/opensearch.release-notes-2.16.0.md b/release-notes/opensearch.release-notes-2.16.0.md new file mode 100644 index 0000000000000..193aa6b53714c --- /dev/null +++ b/release-notes/opensearch.release-notes-2.16.0.md @@ -0,0 +1,91 @@ +## 2024-07-24 Version 2.16.0 Release Notes + +## [2.16.0] ### Added +- Add fingerprint ingest processor ([#13724](https://github.com/opensearch-project/OpenSearch/pull/13724)) +- [Remote Store] Rate limiter for remote store low priority uploads ([#14374](https://github.com/opensearch-project/OpenSearch/pull/14374/)) +- Apply the date histogram rewrite optimization to range aggregation ([#13865](https://github.com/opensearch-project/OpenSearch/pull/13865)) +- [Writable Warm] Add composite directory implementation and integrate it with FileCache ([#12782](https://github.com/opensearch-project/OpenSearch/pull/12782)) +- [Workload Management] Add QueryGroup schema ([#13669](https://github.com/opensearch-project/OpenSearch/pull/13669)) +- Add batching supported processor base type AbstractBatchingProcessor ([#14554](https://github.com/opensearch-project/OpenSearch/pull/14554)) +- Fix race condition while parsing derived fields from search definition
([#14445](https://github.com/opensearch-project/OpenSearch/pull/14445)) +- Add `strict_allow_templates` dynamic mapping option ([#14555](https://github.com/opensearch-project/OpenSearch/pull/14555)) +- Add allowlist setting for ingest-common and search-pipeline-common processors ([#14439](https://github.com/opensearch-project/OpenSearch/issues/14439)) +- [Workload Management] add queryGroupId header propagator across requests and nodes ([#14614](https://github.com/opensearch-project/OpenSearch/pull/14614)) +- Create SystemIndexRegistry with helper method matchesSystemIndex ([#14415](https://github.com/opensearch-project/OpenSearch/pull/14415)) +- Print reason why parent task was cancelled ([#14604](https://github.com/opensearch-project/OpenSearch/issues/14604)) +- Add matchesPluginSystemIndexPattern to SystemIndexRegistry ([#14750](https://github.com/opensearch-project/OpenSearch/pull/14750)) +- Add Plugin interface for loading application based configuration templates ([#14659](https://github.com/opensearch-project/OpenSearch/issues/14659)) +- Refactor remote-routing-table service inline with remote state interfaces ([#14668](https://github.com/opensearch-project/OpenSearch/pull/14668)) +- Add shard-diff path to diff manifest to reduce number of read calls remote store ([#14684](https://github.com/opensearch-project/OpenSearch/pull/14684)) +- Add SortResponseProcessor to Search Pipelines ([#14785](https://github.com/opensearch-project/OpenSearch/issues/14785)) +- Add prefix mode verification setting for repository verification ([#14790](https://github.com/opensearch-project/OpenSearch/pull/14790)) +- Add SplitResponseProcessor to Search Pipelines ([#14800](https://github.com/opensearch-project/OpenSearch/issues/14800)) +- Optimize TransportNodesAction to not send DiscoveryNodes for NodeStats, NodesInfo and ClusterStats calls ([#14749](https://github.com/opensearch-project/OpenSearch/pull/14749)) +- Reduce logging in DEBUG for MasterService:run ([#14795](https://github.com/opensearch-project/OpenSearch/pull/14795)) +- Add rest, transport layer changes for hot to warm tiering - dedicated setup ([#13980](https://github.com/opensearch-project/OpenSearch/pull/13980)) +- Enabling term version check on local state for all ClusterManager Read Transport Actions ([#14273](https://github.com/opensearch-project/OpenSearch/pull/14273)) +- Optimize Cluster Stats Indices to precompute node level stats ([#14426](https://github.com/opensearch-project/OpenSearch/pull/14426)) +- Create listener to refresh search thread resource usage ([#14832](https://github.com/opensearch-project/OpenSearch/pull/14832)) +- Add logic to create index templates (v2) using context field ([#14811](https://github.com/opensearch-project/OpenSearch/pull/14811)) + +### Dependencies +- Update to Apache Lucene 9.11.1 ([#14042](https://github.com/opensearch-project/OpenSearch/pull/14042), [#14576](https://github.com/opensearch-project/OpenSearch/pull/14576)) +- Bump `netty` from 4.1.110.Final to 4.1.111.Final ([#14356](https://github.com/opensearch-project/OpenSearch/pull/14356)) +- Bump `org.wiremock:wiremock-standalone` from 3.3.1 to 3.6.0 ([#14361](https://github.com/opensearch-project/OpenSearch/pull/14361)) +- Bump `reactor` from 3.5.17 to 3.5.19 ([#14395](https://github.com/opensearch-project/OpenSearch/pull/14395),
[#14697](https://github.com/opensearch-project/OpenSearch/pull/14697)) +- Bump `reactor-netty` from 1.1.19 to 1.1.21 ([#14395](https://github.com/opensearch-project/OpenSearch/pull/14395), [#14697](https://github.com/opensearch-project/OpenSearch/pull/14697)) +- Bump `commons-net:commons-net` from 3.10.0 to 3.11.1 ([#14396](https://github.com/opensearch-project/OpenSearch/pull/14396)) +- Bump `com.nimbusds:nimbus-jose-jwt` from 9.37.3 to 9.40 ([#14398](https://github.com/opensearch-project/OpenSearch/pull/14398)) +- Bump `org.apache.commons:commons-configuration2` from 2.10.1 to 2.11.0 ([#14399](https://github.com/opensearch-project/OpenSearch/pull/14399)) +- Bump `com.gradle.develocity` from 3.17.4 to 3.17.5 ([#14397](https://github.com/opensearch-project/OpenSearch/pull/14397)) +- Bump `opentelemetry` from 1.36.0 to 1.40.0 ([#14457](https://github.com/opensearch-project/OpenSearch/pull/14457), [#14674](https://github.com/opensearch-project/OpenSearch/pull/14674)) +- Bump `opentelemetry-semconv` from 1.25.0-alpha to 1.26.0-alpha ([#14674](https://github.com/opensearch-project/OpenSearch/pull/14674)) +- Bump `azure-identity` from 1.11.4 to 1.13.0, Bump `msal4j` from 1.14.3 to 1.15.1, Bump `msal4j-persistence-extension` from 1.2.0 to 1.3.0 ([#14506](https://github.com/opensearch-project/OpenSearch/pull/14673)) +- Bump `com.azure:azure-storage-common` from 12.21.2 to 12.25.1 ([#14517](https://github.com/opensearch-project/OpenSearch/pull/14517)) +- Bump `com.microsoft.azure:msal4j` from 1.15.1 to 1.16.0 ([#14610](https://github.com/opensearch-project/OpenSearch/pull/14610)) +- Bump `com.github.spullara.mustache.java:compiler` from 0.9.13 to 0.9.14 ([#14672](https://github.com/opensearch-project/OpenSearch/pull/14672)) +- Bump `net.minidev:accessors-smart` from 2.5.0 to 2.5.1 ([#14673](https://github.com/opensearch-project/OpenSearch/pull/14673)) +- Bump `jackson` from 2.17.1 to 2.17.2 ([#14687](https://github.com/opensearch-project/OpenSearch/pull/14687)) +- Bump `net.minidev:json-smart` from 2.5.0 to 2.5.1 ([#14748](https://github.com/opensearch-project/OpenSearch/pull/14748)) + +### Changed +- [Tiered Caching] Move query recomputation logic outside write lock ([#14187](https://github.com/opensearch-project/OpenSearch/pull/14187)) +- unsignedLongRangeQuery now returns MatchNoDocsQuery if the lower bounds are greater than the upper bounds ([#14416](https://github.com/opensearch-project/OpenSearch/pull/14416)) +- Make the class CommunityIdProcessor final ([#14448](https://github.com/opensearch-project/OpenSearch/pull/14448)) +- Updated the `indices.query.bool.max_clause_count` setting from being static to dynamically updateable ([#13568](https://github.com/opensearch-project/OpenSearch/pull/13568)) +- Allow @InternalApi annotation on classes not meant to be constructed outside of the OpenSearch core ([#14575](https://github.com/opensearch-project/OpenSearch/pull/14575)) +- Add @InternalApi annotation to japicmp exclusions ([#14597](https://github.com/opensearch-project/OpenSearch/pull/14597)) +- Allow system index warning in OpenSearchRestTestCase.refreshAllIndices ([#14635](https://github.com/opensearch-project/OpenSearch/pull/14635)) +- Make reroute iteration time-bound for large shard allocations ([#14848](https://github.com/opensearch-project/OpenSearch/pull/14848)) + +### Deprecated +- Deprecate batch_size parameter on bulk API ([#14725](https://github.com/opensearch-project/OpenSearch/pull/14725)) + +### Removed +- Remove query categorization changes 
([#14759](https://github.com/opensearch-project/OpenSearch/pull/14759)) + +### Fixed +- Fix bug in SBP cancellation logic ([#13259](https://github.com/opensearch-project/OpenSearch/pull/13474)) +- Fix handling of Short and Byte data types in ScriptProcessor ingest pipeline ([#14379](https://github.com/opensearch-project/OpenSearch/issues/14379)) +- Switch to iterative version of WKT format parser ([#14086](https://github.com/opensearch-project/OpenSearch/pull/14086)) +- Fix match_phrase_prefix_query not working on text field with multiple values and index_prefixes ([#10959](https://github.com/opensearch-project/OpenSearch/pull/10959)) +- Fix the computed max shards of cluster to avoid int overflow ([#14155](https://github.com/opensearch-project/OpenSearch/pull/14155)) +- Fixed rest-high-level client searchTemplate & mtermVectors endpoints to have a leading slash ([#14465](https://github.com/opensearch-project/OpenSearch/pull/14465)) +- Write shard level metadata blob when snapshotting searchable snapshot indexes ([#13190](https://github.com/opensearch-project/OpenSearch/pull/13190)) +- Fix aggs result of NestedAggregator with sub NestedAggregator ([#13324](https://github.com/opensearch-project/OpenSearch/pull/13324)) +- Fix fs info reporting negative available size ([#11573](https://github.com/opensearch-project/OpenSearch/pull/11573)) +- Add ListPitInfo::getKeepAlive() getter ([#14495](https://github.com/opensearch-project/OpenSearch/pull/14495)) +- Fix FuzzyQuery in keyword field will use IndexOrDocValuesQuery when both of index and doc_value are true ([#14378](https://github.com/opensearch-project/OpenSearch/pull/14378)) +- Fix file cache initialization ([#14004](https://github.com/opensearch-project/OpenSearch/pull/14004)) +- Handle NPE in GetResult if "found" field is missing ([#14552](https://github.com/opensearch-project/OpenSearch/pull/14552)) +- Fix create or update alias API doesn't throw exception for unsupported parameters ([#14719](https://github.com/opensearch-project/OpenSearch/pull/14719)) +- Refactoring FilterPath.parse by using an iterative approach ([#14200](https://github.com/opensearch-project/OpenSearch/pull/14200)) +- Refactoring Grok.validatePatternBank by using an iterative approach ([#14206](https://github.com/opensearch-project/OpenSearch/pull/14206)) +- Fix NPE when creating index with index.number_of_replicas set to null ([#14812](https://github.com/opensearch-project/OpenSearch/pull/14812)) +- Update help output for _cat ([#14722](https://github.com/opensearch-project/OpenSearch/pull/14722)) +- Fix bulk upsert ignores the default_pipeline and final_pipeline when auto-created index matches the index template ([#12891](https://github.com/opensearch-project/OpenSearch/pull/12891)) +- Fix NPE in ReplicaShardAllocator ([#14385](https://github.com/opensearch-project/OpenSearch/pull/14385)) +- Use circuit breaker in InternalHistogram when adding empty buckets ([#14754](https://github.com/opensearch-project/OpenSearch/pull/14754)) +- Create new IndexInput for multi part upload ([#14888](https://github.com/opensearch-project/OpenSearch/pull/14888)) +- Fix searchable snapshot failure with scripted fields ([#14411](https://github.com/opensearch-project/OpenSearch/pull/14411)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_alias.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_alias.json index c3ccd25da9f86..14427b00f1bb3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_alias.json +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_alias.json @@ -1,7 +1,7 @@ { "indices.put_alias":{ "documentation":{ - "url":"https://opensearch.org/docs/latest/api-reference/index-apis/alias/", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/update-alias/", "description":"Creates or updates an alias." }, "stability":"stable", @@ -40,6 +40,62 @@ "description":"The name of the alias to be created or updated" } } + }, + { + "path":"/{index}/_alias", + "methods":[ + "PUT" + ], + "parts":{ + "index":{ + "type":"list", + "description":"A comma-separated list of index names the alias should point to (supports wildcards); use `_all` to perform the operation on all indices." + } + } + }, + { + "path":"/{index}/_aliases", + "methods":[ + "PUT" + ], + "parts":{ + "index":{ + "type":"list", + "description":"A comma-separated list of index names the alias should point to (supports wildcards); use `_all` to perform the operation on all indices." + } + } + }, + { + "path":"/_alias/{name}", + "methods":[ + "PUT", + "POST" + ], + "parts":{ + "name":{ + "type":"string", + "description":"The name of the alias to be created or updated" + } + } + }, + { + "path":"/_aliases/{name}", + "methods":[ + "PUT", + "POST" + ], + "parts":{ + "name":{ + "type":"string", + "description":"The name of the alias to be created or updated" + } + } + }, + { + "path":"/_alias", + "methods":[ + "PUT" + ] } ] }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_strict_allow_templates.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_strict_allow_templates.yml new file mode 100644 index 0000000000000..623cb97c37728 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_strict_allow_templates.yml @@ -0,0 +1,155 @@ +--- +"Index documents with setting dynamic parameter to strict_allow_templates in the mapping of the index": + - skip: + version: " - 2.15.99" + reason: "introduced in 2.16.0" + + - do: + indices.create: + index: test_1 + body: + mappings: + dynamic: strict_allow_templates + dynamic_templates: [ + { + strings: { + "match": "stringField*", + "match_mapping_type": "string", + "mapping": { + "type": "keyword" + } + } + }, + { + object: { + "match": "objectField*", + "match_mapping_type": "object", + "mapping": { + "type": "object", + "properties": { + "bar1": { + "type": "keyword" + }, + "bar2": { + "type": "text" + } + } + } + } + }, + { + boolean: { + "match": "booleanField*", + "match_mapping_type": "boolean", + "mapping": { + "type": "boolean" + } + } + }, + { + double: { + "match": "doubleField*", + "match_mapping_type": "double", + "mapping": { + "type": "double" + } + } + }, + { + long: { + "match": "longField*", + "match_mapping_type": "long", + "mapping": { + "type": "long" + } + } + }, + { + array: { + "match": "arrayField*", + "mapping": { + "type": "keyword" + } + } + }, + { + date: { + "match": "dateField*", + "match_mapping_type": "date", + "mapping": { + "type": "date" + } + } + } + ] + properties: + test1: + type: text + + - do: + catch: /mapping set to strict_allow_templates, dynamic introduction of \[test2\] within \[\_doc\] is not allowed/ + index: + index: test_1 + id: 1 + body: { + stringField: bar, + objectField: { + bar1: "bar1", + bar2: "bar2" + }, + test1: test1, + test2: test2 + } + + - do: + index: + index: test_1 + id: 1 + body: { + stringField: bar, + objectField: { + bar1: "bar1", + bar2: "bar2" + }, + booleanField: true, + doubleField: 1.0, + longField: 100, + arrayField: ["1","2"], + 
dateField: "2024-06-25T05:11:51.243Z", + test1: test1 + } + + - do: + get: + index: test_1 + id: 1 + - match: { _source: { + stringField: bar, + objectField: { + bar1: "bar1", + bar2: "bar2" + }, + booleanField: true, + doubleField: 1.0, + longField: 100, + arrayField: [ "1","2" ], + dateField: "2024-06-25T05:11:51.243Z", + test1: test1 + } + } + + - do: + indices.get_mapping: { + index: test_1 + } + + - match: {test_1.mappings.dynamic: strict_allow_templates} + - match: {test_1.mappings.properties.stringField.type: keyword} + - match: {test_1.mappings.properties.objectField.properties.bar1.type: keyword} + - match: {test_1.mappings.properties.objectField.properties.bar2.type: text} + - match: {test_1.mappings.properties.booleanField.type: boolean} + - match: {test_1.mappings.properties.doubleField.type: double} + - match: {test_1.mappings.properties.longField.type: long} + - match: {test_1.mappings.properties.arrayField.type: keyword} + - match: {test_1.mappings.properties.dateField.type: date} + - match: {test_1.mappings.properties.test1.type: text} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/115_constant_keyword.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/115_constant_keyword.yml new file mode 100644 index 0000000000000..e60981dbbf50c --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/115_constant_keyword.yml @@ -0,0 +1,332 @@ +# The test setup includes two parts: +# part1: test mapping and indexing +# part2: test query +--- +"Mappings and Indexing": + - skip: + version: " - 2.15.99" + reason: "fixed in 2.16.0" + + # Create indices with constant_keyword field type + - do: + indices.create: + index: test + body: + mappings: + properties: + genre: + type: "constant_keyword" + value: "1" + + # Index documents to test integer and string are both ok. + - do: + index: + index: test + id: 1 + body: { + "genre": "1" + } + + - do: + index: + index: test + id: 2 + body: { + "genre": 1 + } + + # Refresh + - do: + indices.refresh: + index: test + + # Check mapping + - do: + indices.get_mapping: + index: test + - is_true: test.mappings + - match: { test.mappings.properties.genre.type: constant_keyword } + - length: { test.mappings.properties.genre: 2 } + + # Verify Document Count + - do: + search: + index: test + body: { + query: { + match_all: {} + } + } + + - length: { hits.hits: 2 } + - match: { hits.hits.0._source.genre: "1" } + - match: { hits.hits.1._source.genre: 1 } + + # Delete Index when connection is teardown + - do: + indices.delete: + index: test + +--- +"Queries": + - skip: + version: " - 2.16.99" + reason: "rangeQuery and regexpQuery are introduced in 2.17.0" + + - do: + indices.create: + index: test1 + body: + mappings: + properties: + genre: + type: "constant_keyword" + value: "d3efault" + + # Index documents to test query. 
+ - do: + index: + index: test1 + id: 1 + body: { + "genre": "d3efault" + } + + # Refresh + - do: + indices.refresh: + index: test1 + + # Test rangeQuery + - do: + search: + index: test1 + body: { + query: { + range: { + genre: { + gte: "d3efault" + } + } + } + } + + - length: { hits.hits: 1 } + + - do: + search: + index: test1 + body: { + query: { + range: { + genre: { + from: "d3efault", + "include_lower": "false" + } + } + } + } + + - length: { hits.hits: 0 } + + - do: + search: + index: test1 + body: { + query: { + range: { + genre: { + lte: "d3efault" + } + } + } + } + + - length: { hits.hits: 1 } + + - do: + search: + index: test1 + body: { + query: { + range: { + genre: { + to: "d3efault", + include_upper: "false" + } + } + } + } + + - length: { hits.hits: 0 } + + - do: + search: + index: test1 + body: { + query: { + range: { + genre: { + from: "d3efault", + to: "d3efault", + include_lower: "false", + include_upper: "true" + } + } + } + } + + - length: { hits.hits: 0 } + + - do: + search: + index: test1 + body: { + query: { + range: { + genre: { + from: "d3efault", + to: "d3efault", + include_lower: "true", + include_upper: "false" + } + } + } + } + + - length: { hits.hits: 0 } + + - do: + search: + index: test1 + body: { + query: { + range: { + genre: { + from: null, + to: null + } + } + } + } + + - length: { hits.hits: 1 } + + - do: + search: + index: test1 + body: { + query: { + range: { + genre: { + from: "d3efault", + to: "d3efault", + include_lower: "true", + include_upper: "true" + } + } + } + } + + - length: { hits.hits: 1 } + + - do: + search: + index: test1 + body: { + query: { + range: { + genre: { + from: "d3efaul", + to: "d3efault1", + include_lower: "true", + include_upper: "true" + } + } + } + } + + - length: { hits.hits: 1 } + + # Test regexpQuery + - do: + search: + index: test1 + body: { + query: { + regexp: { + "genre":"d.*" + } + } + } + + - length: { hits.hits: 1 } + + - do: + search: + index: test1 + body: { + query: { + regexp: { + "genre":"d\\defau[a-z]?t" + } + } + } + + - length: { hits.hits: 1 } + + - do: + search: + index: test1 + body: { + query: { + regexp: { + "genre":"d\\defa[a-z]?t" + } + } + } + + - length: { hits.hits: 0 } + + - do: + search: + index: test1 + body: { + query: { + regexp: { + "genre":"d3efa[a-z]{3,3}" + } + } + } + + - length: { hits.hits: 1 } + + - do: + search: + index: test1 + body: { + query: { + regexp: { + "genre":"d3efa[a-z]{4,4}" + } + } + } + + - length: { hits.hits: 0 } + + - do: + search: + index: test1 + body: { + query: { + match_all: {} + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.genre: "d3efault" } + + # Delete Index when connection is teardown + - do: + indices.delete: + index: test1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/120_field_name.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/120_field_name.yml new file mode 100644 index 0000000000000..040e883b4a4c2 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/120_field_name.yml @@ -0,0 +1,38 @@ +--- +"Index documents with field name containing only dot fail with an IllegalArgumentException": + - skip: + version: " - 2.16.99" + reason: "introduced in 2.17.0" + + - do: + indices.create: + index: test_1 + + - do: + catch: /field name cannot contain only the character \[.\]/ + index: + index: test_1 + id: 1 + body: { + .: bar + } + + - do: + catch: /field name cannot contain only the character \[.\]/ + index: + index: test_1 + id: 1 + body: { + ..: bar + } + + - do: + 
catch: /field name cannot contain only the character \[.\]/ + index: + index: test_1 + id: 1 + body: { + foo: { + .: bar + } + } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_index_template/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_index_template/10_basic.yml new file mode 100644 index 0000000000000..6239eb7b8cd22 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_index_template/10_basic.yml @@ -0,0 +1,58 @@ +setup: + - skip: + features: allowed_warnings + - do: + allowed_warnings: + - "index template [test_template_1] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test_template_1] will take precedence during new index creation" + indices.put_index_template: + name: test_template_1 + body: + index_patterns: test-* + template: + settings: + number_of_shards: 1 + number_of_replicas: 0 + "priority": 50 + + - do: + allowed_warnings: + - "index template [test_template_2] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test_template_2] will take precedence during new index creation" + indices.put_index_template: + name: test_template_2 + body: + index_patterns: test-* + data_stream: {} + template: + settings: + number_of_shards: 1 + number_of_replicas: 0 + "priority": 51 + +--- +teardown: + - do: + indices.delete_data_stream: + name: test-1 + ignore: 404 + - do: + indices.delete_index_template: + name: test_template_1 + ignore: 404 + - do: + indices.delete_index_template: + name: test_template_2 + ignore: 404 + +--- +"Delete index template which is not used by data stream but index pattern matches": + - skip: + version: " - 2.16.99" + reason: "fixed in 2.17.0" + + - do: + indices.create_data_stream: + name: test-1 + + - do: + indices.delete_index_template: + name: test_template_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/10_basic.yml index 77338a6ddae0b..41f87c1df28ed 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/10_basic.yml @@ -28,6 +28,36 @@ - match: {test_index.aliases.test_alias: {}} + - do: + indices.put_alias: + index: test_index + body: {"alias": "test_alias_1"} + + - do: + indices.get_alias: + index: test_index + name: test_alias_1 + + - match: {test_index.aliases.test_alias_1: {}} + + - do: + indices.put_alias: + name: test_alias_2 + body: {"index": "test_index"} + + - do: + indices.get_alias: + index: test_index + name: test_alias_2 + + - match: {test_index.aliases.test_alias_2: {}} + + - do: + catch: bad_request + indices.put_alias: + index: null + name: null + --- "Can't create alias with invalid characters": @@ -102,3 +132,179 @@ index: test_index name: test_alias - match: {test_index.aliases.test_alias: {"filter": {"range": {"date_nanos_field": {"gt": "now-7d/d"}}}}} + +--- +"Can set index_routing": + - do: + indices.create: + index: test_index + + - do: + indices.put_alias: + index: test_index + name: test_alias + body: + index_routing: "test" + + - do: + indices.get_alias: + index: test_index + name: test_alias + - match: {test_index.aliases.test_alias: { 'index_routing': "test" }} + +--- +"Can set routing": + - do: + indices.create: + index: test_index + + - 
do: + indices.put_alias: + index: test_index + name: test_alias + body: + routing: "test" + + - do: + indices.get_alias: + index: test_index + name: test_alias + - match: {test_index.aliases.test_alias: { 'index_routing': "test", 'search_routing': "test" }} + +--- +"Can set search_routing": + - do: + indices.create: + index: test_index + + - do: + indices.put_alias: + index: test_index + name: test_alias + body: + search_routing: "test" + + - do: + indices.get_alias: + index: test_index + name: test_alias + - match: {test_index.aliases.test_alias: { 'search_routing': "test" }} + +--- +"Index parameter supports multiple values": + - do: + indices.create: + index: test_index + - do: + indices.create: + index: test_index1 + + - do: + indices.put_alias: + index: test_index,test_index1 + name: test_alias + + - do: + indices.get_alias: + index: test_index + name: test_alias + - match: {test_index.aliases.test_alias: { }} + - do: + indices.get_alias: + index: test_index1 + name: test_alias + - match: {test_index1.aliases.test_alias: { }} + + - do: + indices.put_alias: + body: {"index": "test_index,test_index1", "alias": "test_alias_1"} + + - do: + indices.get_alias: + index: test_index + name: test_alias_1 + - match: {test_index.aliases.test_alias_1: { }} + - do: + indices.get_alias: + index: test_index1 + name: test_alias_1 + - match: {test_index1.aliases.test_alias_1: { }} + +--- +"Index and alias in request body can override path parameters": + - do: + indices.create: + index: test_index + + - do: + indices.put_alias: + index: test_index_unknown + name: test_alias + body: {"index": "test_index"} + + - do: + indices.get_alias: + index: test_index + name: test_alias + - match: {test_index.aliases.test_alias: { }} + + - do: + indices.put_alias: + index: test_index + name: test_alias_unknown + body: {"alias": "test_alias_2"} + + - do: + indices.get_alias: + index: test_index + name: test_alias_2 + - match: {test_index.aliases.test_alias_2: { }} + + - do: + indices.put_alias: + body: {"index": "test_index", "alias": "test_alias_3"} + + - do: + indices.get_alias: + index: test_index + name: test_alias_3 + - match: {test_index.aliases.test_alias_3: { }} + +--- +"Can set is_hidden": + - skip: + version: " - 2.15.99" + reason: "Fix was introduced in 2.16.0" + - do: + indices.create: + index: test_index + + - do: + indices.put_alias: + index: test_index + name: test_alias + body: + is_hidden: true + + - do: + indices.get_alias: + index: test_index + name: test_alias + - match: {test_index.aliases.test_alias: { 'is_hidden': true }} + +--- +"Throws exception with invalid parameters": + - skip: + version: " - 2.15.99" + reason: "Fix was introduced in 2.16.0" + + - do: + indices.create: + index: test_index + + - do: + catch: /unknown field \[abc\]/ + indices.put_alias: + index: test_index + name: test_alias + body: {"abc": 1} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options.yml index ca7a21df20ea4..89b47fde2a72c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options.yml @@ -159,3 +159,34 @@ setup: indices.get_mapping: {} - match: {test_index1.mappings.properties.text.type: text} + +--- +"post a mapping with setting dynamic to strict_allow_templates": + - skip: + version: " - 2.15.99" + reason: "introduced in 2.16.0" + 
- do: + indices.put_mapping: + index: test_index1 + body: + dynamic: strict_allow_templates + dynamic_templates: [ + { + strings: { + "match": "foo*", + "match_mapping_type": "string", + "mapping": { + "type": "keyword" + } + } + } + ] + properties: + test1: + type: text + + - do: + indices.get_mapping: {} + + - match: {test_index1.mappings.dynamic: strict_allow_templates} + - match: {test_index1.mappings.properties.test1.type: text} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/360_date_histogram.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/360_date_histogram.yml index 0ea9d3de00926..8c8a98b2db22c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/360_date_histogram.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/360_date_histogram.yml @@ -61,3 +61,94 @@ setup: - match: { aggregations.histo.buckets.8.doc_count: 1 } - match: { aggregations.histo.buckets.12.key_as_string: "2016-06-01T00:00:00.000Z" } - match: { aggregations.histo.buckets.12.doc_count: 1 } + +--- +"Date histogram aggregation w/ filter query test": + - skip: + version: " - 2.99.99" + reason: Backport fix to 2.16 + + - do: + bulk: + refresh: true + index: dhisto-agg-w-query + body: + - '{"index": {}}' + - '{"routing": "route1", "date": "2024-08-12", "dow": "monday"}' + - '{"index": {}}' + - '{"routing": "route1", "date": "2024-08-14", "dow": "wednesday"}' + - '{"index": {}}' + - '{"routing": "route1", "date": "2024-08-19", "dow": "monday"}' + - '{"index": {}}' + - '{"routing": "route2", "date": "2024-08-13", "dow": "tuesday"}' + - '{"index": {}}' + - '{"routing": "route2", "date": "2024-08-15", "dow": "thursday"}' + + - do: + search: + index: dhisto-agg-w-query + body: + query: + bool: + must: + match_all: {} + filter: + - terms: + routing: + - "route1" + aggregations: + weekHisto: + date_histogram: + field: date + calendar_interval: week + _source: false + + - match: { hits.total.value: 3 } + - match: { aggregations.weekHisto.buckets.0.doc_count: 2 } + - match: { aggregations.weekHisto.buckets.1.doc_count: 1 } + +--- +"Date histogram aggregation w/ shared field range test": + - do: + bulk: + refresh: true + index: dhisto-agg-w-query + body: + - '{"index": {}}' + - '{"date": "2024-10-31"}' + - '{"index": {}}' + - '{"date": "2024-11-11"}' + - '{"index": {}}' + - '{"date": "2024-11-28"}' + - '{"index": {}}' + - '{"date": "2024-12-25"}' + - '{"index": {}}' + - '{"date": "2025-01-01"}' + - '{"index": {}}' + - '{"date": "2025-02-14"}' + + - do: + search: + index: dhisto-agg-w-query + body: + profile: true + query: + range: + date: + gte: "2024-01-01" + lt: "2025-01-01" + aggregations: + monthHisto: + date_histogram: + field: date + calendar_interval: month + _source: false + + - match: { hits.total.value: 4 } + - match: { aggregations.monthHisto.buckets.0.doc_count: 1 } + - match: { aggregations.monthHisto.buckets.1.doc_count: 2 } + - match: { aggregations.monthHisto.buckets.2.doc_count: 1 } + - match: { profile.shards.0.aggregations.0.debug.optimized_segments: 1 } + - match: { profile.shards.0.aggregations.0.debug.unoptimized_segments: 0 } + - match: { profile.shards.0.aggregations.0.debug.leaf_visited: 0 } + - match: { profile.shards.0.aggregations.0.debug.inner_visited: 0 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml index 80aad96ce1f6b..1e1d2b0706d6b 100644 
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml @@ -673,3 +673,82 @@ setup: - match: { aggregations.my_range.buckets.3.from: 1.5 } - is_false: aggregations.my_range.buckets.3.to - match: { aggregations.my_range.buckets.3.doc_count: 2 } + +--- +"Filter query w/ aggregation test": + - skip: + version: " - 2.99.99" + reason: Backport fix to 2.16 + + - do: + bulk: + refresh: true + index: range-agg-w-query + body: + - '{"index": {}}' + - '{"routing": "route1", "v": -10, "date": "2024-10-29"}' + - '{"index": {}}' + - '{"routing": "route1", "v": -5, "date": "2024-10-30"}' + - '{"index": {}}' + - '{"routing": "route1", "v": 10, "date": "2024-10-31"}' + - '{"index": {}}' + - '{"routing": "route2", "v": 15, "date": "2024-11-01"}' + - '{"index": {}}' + - '{"routing": "route2", "v": 20, "date": "2024-11-02"}' + + - do: + search: + index: range-agg-w-query + body: + query: + bool: + must: + match_all: {} + filter: + - terms: + routing: + - "route1" + aggregations: + NegPosAgg: + range: + field: v + keyed: true + ranges: + - to: 0 + key: "0" + - from: 0 + key: "1" + _source: false + + - match: { hits.total.value: 3 } + - match: { aggregations.NegPosAgg.buckets.0.doc_count: 2 } + - match: { aggregations.NegPosAgg.buckets.1.doc_count: 1 } + + - do: + search: + index: range-agg-w-query + body: + query: + bool: + must: + match_all: {} + filter: + - terms: + routing: + - "route1" + aggregations: + HalloweenAgg: + date_range: + field: date + format: "yyyy-MM-dd" + keyed: true + ranges: + - to: "2024-11-01" + key: "to-october" + - from: "2024-11-01" + key: "from-september" + _source: false + + - match: { hits.total.value: 3 } + - match: { aggregations.HalloweenAgg.buckets.to-october.doc_count: 3 } + - match: { aggregations.HalloweenAgg.buckets.from-september.doc_count: 0 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml index 25d3dd160e031..6a946fb264560 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml @@ -10,7 +10,12 @@ setup: index_prefixes: min_chars: 2 max_chars: 5 - + text_with_pos_inc_gap: + type: text + position_increment_gap: 201 + index_prefixes: + min_chars: 2 + max_chars: 5 - do: index: index: test @@ -23,6 +28,18 @@ setup: id: 2 body: { text: sentence with UPPERCASE WORDS } + - do: + index: + index: test + id: 3 + body: { text: ["foo", "b-12"] } + + - do: + index: + index: test + id: 4 + body: { text_with_pos_inc_gap: ["foo", "b-12"] } + - do: indices.refresh: index: [test] @@ -116,3 +133,36 @@ setup: ] - match: {hits.total: 1} + +# related issue: https://github.com/opensearch-project/OpenSearch/issues/9203 +--- +"search index prefixes with multiple values": + - skip: + version: " - 2.15.99" + reason: "the bug was fixed since 2.16.0" + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + text: "b-12" + + - match: {hits.total: 1} + +--- +"search index prefixes with multiple values and custom position_increment_gap": + - skip: + version: " - 2.15.99" + reason: "the bug was fixed since 2.16.0" + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + text_with_pos_inc_gap: "b-12" + + - match: 
{hits.total: 1} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/370_bitmap_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/370_bitmap_filtering.yml new file mode 100644 index 0000000000000..d728070adb188 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/370_bitmap_filtering.yml @@ -0,0 +1,184 @@ +--- +setup: + - skip: + version: " - 2.99.99" + reason: The bitmap filtering feature is available in 2.17 and later. + - do: + indices.create: + index: students + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + student_id: + type: integer + - do: + bulk: + refresh: true + body: + - { "index": { "_index": "students", "_id": "1" } } + - { "name": "Jane Doe", "student_id": 111 } + - { "index": { "_index": "students", "_id": "2" } } + - { "name": "Mary Major", "student_id": 222 } + - { "index": { "_index": "students", "_id": "3" } } + - { "name": "John Doe", "student_id": 333 } + - do: + indices.create: + index: classes + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + enrolled: + type: binary + store: true + - do: + bulk: + refresh: true + body: + - { "index": { "_index": "classes", "_id": "101" } } + - { "enrolled": "OjAAAAEAAAAAAAEAEAAAAG8A3gA=" } # 111,222 + - { "index": { "_index": "classes", "_id": "102" } } + - { "enrolled": "OjAAAAEAAAAAAAAAEAAAAG8A" } # 111 + - { "index": { "_index": "classes", "_id": "103" } } + - { "enrolled": "OjAAAAEAAAAAAAAAEAAAAE0B" } # 333 + - { "index": { "_index": "classes", "_id": "104" } } + - { "enrolled": "OjAAAAEAAAAAAAEAEAAAAN4ATQE=" } # 222,333 + - do: + cluster.health: + wait_for_status: green + +--- +"Terms lookup on a binary field with bitmap": + - do: + search: + rest_total_hits_as_int: true + index: students + body: { + "query": { + "terms": { + "student_id": { + "index": "classes", + "id": "101", + "path": "enrolled", + "store": true + }, + "value_type": "bitmap" + } + } + } + - match: { hits.total: 2 } + - match: { hits.hits.0._source.name: Jane Doe } + - match: { hits.hits.0._source.student_id: 111 } + - match: { hits.hits.1._source.name: Mary Major } + - match: { hits.hits.1._source.student_id: 222 } + +--- +"Terms query accepting bitmap as value": + - do: + search: + rest_total_hits_as_int: true + index: students + body: { + "query": { + "terms": { + "student_id": ["OjAAAAEAAAAAAAEAEAAAAG8A3gA="], + "value_type": "bitmap" + } + } + } + - match: { hits.total: 2 } + - match: { hits.hits.0._source.name: Jane Doe } + - match: { hits.hits.0._source.student_id: 111 } + - match: { hits.hits.1._source.name: Mary Major } + - match: { hits.hits.1._source.student_id: 222 } + +--- +"Boolean must bitmap filtering": + - do: + search: + rest_total_hits_as_int: true + index: students + body: { + "query": { + "bool": { + "must": [ + { + "terms": { + "student_id": { + "index": "classes", + "id": "101", + "path": "enrolled", + "store": true + }, + "value_type": "bitmap" + } + } + ], + "must_not": [ + { + "terms": { + "student_id": { + "index": "classes", + "id": "104", + "path": "enrolled", + "store": true + }, + "value_type": "bitmap" + } + } + ] + } + } + } + - match: { hits.total: 1 } + - match: { hits.hits.0._source.name: Jane Doe } + - match: { hits.hits.0._source.student_id: 111 } + +--- +"Boolean should bitmap filtering": + - do: + search: + rest_total_hits_as_int: true + index: students + body: { + "query": { + "bool": { + "should": [ + { + "terms": { + "student_id": { + "index": "classes", + 
"id": "101", + "path": "enrolled", + "store": true + }, + "value_type": "bitmap" + } + }, + { + "terms": { + "student_id": { + "index": "classes", + "id": "104", + "path": "enrolled", + "store": true + }, + "value_type": "bitmap" + } + } + ] + } + } + } + - match: { hits.total: 3 } + - match: { hits.hits.0._source.name: Mary Major } + - match: { hits.hits.0._source.student_id: 222 } + - match: { hits.hits.1._source.name: Jane Doe } + - match: { hits.hits.1._source.student_id: 111 } + - match: { hits.hits.2._source.name: John Doe } + - match: { hits.hits.2._source.student_id: 333 } diff --git a/server/build.gradle b/server/build.gradle index b8a99facbf964..d655796674001 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -68,6 +68,7 @@ dependencies { api project(':libs:opensearch-x-content') api project(":libs:opensearch-geo") api project(":libs:opensearch-telemetry") + api project(":libs:opensearch-task-commons") compileOnly project(':libs:opensearch-plugin-classloader') @@ -125,6 +126,9 @@ dependencies { api "com.google.protobuf:protobuf-java:${versions.protobuf}" api "jakarta.annotation:jakarta.annotation-api:${versions.jakarta_annotation}" + // https://mvnrepository.com/artifact/org.roaringbitmap/RoaringBitmap + implementation 'org.roaringbitmap:RoaringBitmap:1.1.0' + testImplementation(project(":test:framework")) { // tests use the locally compiled version of server exclude group: 'org.opensearch', module: 'server' @@ -409,6 +413,7 @@ tasks.register("japicmp", me.champeau.gradle.japicmp.JapicmpTask) { failOnModification = true ignoreMissingClasses = true annotationIncludes = ['@org.opensearch.common.annotation.PublicApi', '@org.opensearch.common.annotation.DeprecatedApi'] + annotationExcludes = ['@org.opensearch.common.annotation.InternalApi'] txtOutputFile = layout.buildDirectory.file("reports/java-compatibility/report.txt") htmlOutputFile = layout.buildDirectory.file("reports/java-compatibility/report.html") dependsOn downloadJapicmpCompareTarget diff --git a/server/licenses/RoaringBitmap-1.1.0.jar.sha1 b/server/licenses/RoaringBitmap-1.1.0.jar.sha1 new file mode 100644 index 0000000000000..bf34e11b92710 --- /dev/null +++ b/server/licenses/RoaringBitmap-1.1.0.jar.sha1 @@ -0,0 +1 @@ +9607213861158ae7060234d93ee9c9cb19f494d1 \ No newline at end of file diff --git a/server/licenses/RoaringBitmap-LICENSE.txt b/server/licenses/RoaringBitmap-LICENSE.txt new file mode 100644 index 0000000000000..3bdd0036295a6 --- /dev/null +++ b/server/licenses/RoaringBitmap-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2013-2016 the RoaringBitmap authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
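Note: the base64 values indexed into the binary `enrolled` field by the 370_bitmap_filtering.yml test above are serialized RoaringBitmap instances of the enrolled student_ids. A minimal sketch of how such a fixture could be produced with the org.roaringbitmap:RoaringBitmap dependency that server/build.gradle now declares (the class name is hypothetical and not part of this patch):

    import java.nio.ByteBuffer;
    import java.util.Base64;
    import org.roaringbitmap.RoaringBitmap;

    public class EnrolledBitmapFixture {
        public static void main(String[] args) {
            // Class 101 enrolls student_ids 111 and 222.
            RoaringBitmap enrolled = RoaringBitmap.bitmapOf(111, 222);

            // Serialize to the portable Roaring format, then base64-encode it;
            // the encoded string is what the test stores in the binary field.
            ByteBuffer buf = ByteBuffer.allocate(enrolled.serializedSizeInBytes());
            enrolled.serialize(buf);
            System.out.println(Base64.getEncoder().encodeToString(buf.array()));
            // Matches the fixture comment for class 101: OjAAAAEAAAAAAAEAEAAAAG8A3gA=
        }
    }

At search time, a `terms` query with `"value_type": "bitmap"` deserializes such a value (looked up from a stored field or passed inline as base64, as both test cases show) back into a bitmap and filters the numeric `student_id` field against it.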
diff --git a/server/licenses/RoaringBitmap-NOTICE.txt b/server/licenses/RoaringBitmap-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/server/licenses/jackson-core-2.17.1.jar.sha1 b/server/licenses/jackson-core-2.17.1.jar.sha1 deleted file mode 100644 index 82dab5981e652..0000000000000 --- a/server/licenses/jackson-core-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5e52a11644cd59a28ef79f02bddc2cc3bab45edb \ No newline at end of file diff --git a/server/licenses/jackson-core-2.17.2.jar.sha1 b/server/licenses/jackson-core-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..e15f2340980bc --- /dev/null +++ b/server/licenses/jackson-core-2.17.2.jar.sha1 @@ -0,0 +1 @@ +969a35cb35c86512acbadcdbbbfb044c877db814 \ No newline at end of file diff --git a/server/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 b/server/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 deleted file mode 100644 index ff42ed1f92cfe..0000000000000 --- a/server/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ba5d8e6ecc62aa0e49c0ce935b8696352dbebc71 \ No newline at end of file diff --git a/server/licenses/jackson-dataformat-cbor-2.17.2.jar.sha1 b/server/licenses/jackson-dataformat-cbor-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..069e088413ef1 --- /dev/null +++ b/server/licenses/jackson-dataformat-cbor-2.17.2.jar.sha1 @@ -0,0 +1 @@ +57fa7c1b5104bbc4599278d13933a937ee058e68 \ No newline at end of file diff --git a/server/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 b/server/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 deleted file mode 100644 index 47d19067cf2a6..0000000000000 --- a/server/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -89683ac4f0a0c2c4f69ea56b90480ed40266dac8 \ No newline at end of file diff --git a/server/licenses/jackson-dataformat-smile-2.17.2.jar.sha1 b/server/licenses/jackson-dataformat-smile-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..28d8c8382aed3 --- /dev/null +++ b/server/licenses/jackson-dataformat-smile-2.17.2.jar.sha1 @@ -0,0 +1 @@ +20e956b9b6f67138edd39fab7a506ded19638bcb \ No newline at end of file diff --git a/server/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 b/server/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 deleted file mode 100644 index 7946e994c7104..0000000000000 --- a/server/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b4c7b8a9ea3f398116a75c146b982b22afebc4ee \ No newline at end of file diff --git a/server/licenses/jackson-dataformat-yaml-2.17.2.jar.sha1 b/server/licenses/jackson-dataformat-yaml-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..f3e25b7eb253c --- /dev/null +++ b/server/licenses/jackson-dataformat-yaml-2.17.2.jar.sha1 @@ -0,0 +1 @@ +78d2c73dbec62044d7cf3b544b2e0d24a1a093b0 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-analysis-common-9.12.0-snapshot-847316d.jar.sha1 new file mode 100644 index 0000000000000..f1249066d10f2 --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.12.0-snapshot-847316d.jar.sha1 @@ -0,0 +1 @@ +7e282aab7388efc911348f1eacd90e661580dda7 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.12.0-snapshot-c896995.jar.sha1 b/server/licenses/lucene-analysis-common-9.12.0-snapshot-c896995.jar.sha1 deleted file mode 100644 index 4b545e061c52f..0000000000000 --- a/server/licenses/lucene-analysis-common-9.12.0-snapshot-c896995.jar.sha1 +++ /dev/null 
@@ -1 +0,0 @@ -73696492c6e59972974cd91e03ad9464e6b5bfcd \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-backward-codecs-9.12.0-snapshot-847316d.jar.sha1 new file mode 100644 index 0000000000000..ac50c5e110a72 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.12.0-snapshot-847316d.jar.sha1 @@ -0,0 +1 @@ +69e59ba4bed4c58836d2727d72b7f0095d2dcb92 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.12.0-snapshot-c896995.jar.sha1 b/server/licenses/lucene-backward-codecs-9.12.0-snapshot-c896995.jar.sha1 deleted file mode 100644 index ae4ffb2b1800b..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.12.0-snapshot-c896995.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3cbb29ecc873e8c880a6f32e739655551708dbcf \ No newline at end of file diff --git a/server/licenses/lucene-core-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-core-9.12.0-snapshot-847316d.jar.sha1 new file mode 100644 index 0000000000000..e3fd1708ea428 --- /dev/null +++ b/server/licenses/lucene-core-9.12.0-snapshot-847316d.jar.sha1 @@ -0,0 +1 @@ +51ff4940eb1024184bbaa5dae39695d2392c5bab \ No newline at end of file diff --git a/server/licenses/lucene-core-9.12.0-snapshot-c896995.jar.sha1 b/server/licenses/lucene-core-9.12.0-snapshot-c896995.jar.sha1 deleted file mode 100644 index 299283562fddc..0000000000000 --- a/server/licenses/lucene-core-9.12.0-snapshot-c896995.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -826b328c37ea7f27c05d685db03bf8d2b00457ff \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-grouping-9.12.0-snapshot-847316d.jar.sha1 new file mode 100644 index 0000000000000..cc5bf5bfd8ec0 --- /dev/null +++ b/server/licenses/lucene-grouping-9.12.0-snapshot-847316d.jar.sha1 @@ -0,0 +1 @@ +5847a7d47f13ecb7f039fb9adf6f3b8e4bddde77 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.12.0-snapshot-c896995.jar.sha1 b/server/licenses/lucene-grouping-9.12.0-snapshot-c896995.jar.sha1 deleted file mode 100644 index b0268c98167d3..0000000000000 --- a/server/licenses/lucene-grouping-9.12.0-snapshot-c896995.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a3a7003dc83197523e830f058a3748dbea96cab7 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-highlighter-9.12.0-snapshot-847316d.jar.sha1 new file mode 100644 index 0000000000000..eb14059d2cd8c --- /dev/null +++ b/server/licenses/lucene-highlighter-9.12.0-snapshot-847316d.jar.sha1 @@ -0,0 +1 @@ +7cc0a26777a479f06fbcfae7abc23e784e1a00dc \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.12.0-snapshot-c896995.jar.sha1 b/server/licenses/lucene-highlighter-9.12.0-snapshot-c896995.jar.sha1 deleted file mode 100644 index d87927364b5a8..0000000000000 --- a/server/licenses/lucene-highlighter-9.12.0-snapshot-c896995.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -00eb386915c3cffa9efcef2dc4c406f8a6776afe \ No newline at end of file diff --git a/server/licenses/lucene-join-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-join-9.12.0-snapshot-847316d.jar.sha1 new file mode 100644 index 0000000000000..b87170c39c78c --- /dev/null +++ b/server/licenses/lucene-join-9.12.0-snapshot-847316d.jar.sha1 @@ -0,0 +1 @@ +9cd99401c826d910da3c2beab8e42f1af8be6ea4 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.12.0-snapshot-c896995.jar.sha1 
b/server/licenses/lucene-join-9.12.0-snapshot-c896995.jar.sha1 deleted file mode 100644 index 25a95546ab544..0000000000000 --- a/server/licenses/lucene-join-9.12.0-snapshot-c896995.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bb1fc572da7d473bf39672fd8ac323b15a1ffff0 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-memory-9.12.0-snapshot-847316d.jar.sha1 new file mode 100644 index 0000000000000..de591dd659cb5 --- /dev/null +++ b/server/licenses/lucene-memory-9.12.0-snapshot-847316d.jar.sha1 @@ -0,0 +1 @@ +cfee136ecbc3df7adc38b38e020dca5e61c22773 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.12.0-snapshot-c896995.jar.sha1 b/server/licenses/lucene-memory-9.12.0-snapshot-c896995.jar.sha1 deleted file mode 100644 index a0b3fd812561c..0000000000000 --- a/server/licenses/lucene-memory-9.12.0-snapshot-c896995.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -05ebfcef0435f4870859a19c93020e24398bb939 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-misc-9.12.0-snapshot-847316d.jar.sha1 new file mode 100644 index 0000000000000..1a999bb9c6686 --- /dev/null +++ b/server/licenses/lucene-misc-9.12.0-snapshot-847316d.jar.sha1 @@ -0,0 +1 @@ +afbc5adf93d4eb1a1b109ad828d1968bf16ef292 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.12.0-snapshot-c896995.jar.sha1 b/server/licenses/lucene-misc-9.12.0-snapshot-c896995.jar.sha1 deleted file mode 100644 index 1e2cc97c37257..0000000000000 --- a/server/licenses/lucene-misc-9.12.0-snapshot-c896995.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d5747ed1be242b59aa36b0c32b0d3bd26b1d8fb8 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-queries-9.12.0-snapshot-847316d.jar.sha1 new file mode 100644 index 0000000000000..783a26551ae8c --- /dev/null +++ b/server/licenses/lucene-queries-9.12.0-snapshot-847316d.jar.sha1 @@ -0,0 +1 @@ +16907c36f6adb8ba8f260e05738c66afb37c72d3 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.12.0-snapshot-c896995.jar.sha1 b/server/licenses/lucene-queries-9.12.0-snapshot-c896995.jar.sha1 deleted file mode 100644 index 31d4fe2886fc1..0000000000000 --- a/server/licenses/lucene-queries-9.12.0-snapshot-c896995.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fb6678d7fe035e55c545450682b67be49457ef1b \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-queryparser-9.12.0-snapshot-847316d.jar.sha1 new file mode 100644 index 0000000000000..b3e9e4de96174 --- /dev/null +++ b/server/licenses/lucene-queryparser-9.12.0-snapshot-847316d.jar.sha1 @@ -0,0 +1 @@ +72baa9bddcf2efb71ffb695f1e9f548699ec13a0 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.12.0-snapshot-c896995.jar.sha1 b/server/licenses/lucene-queryparser-9.12.0-snapshot-c896995.jar.sha1 deleted file mode 100644 index 754e4ea20765f..0000000000000 --- a/server/licenses/lucene-queryparser-9.12.0-snapshot-c896995.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a11d7f56a9e78dc8e61f85b9b54ad94d73583bb3 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-sandbox-9.12.0-snapshot-847316d.jar.sha1 new file mode 100644 index 0000000000000..2aefa435b1e9a --- /dev/null +++ b/server/licenses/lucene-sandbox-9.12.0-snapshot-847316d.jar.sha1 @@ -0,0 +1 @@ 
+dd3c63066f583d90b563ebaa6fbe61c603403acb \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.12.0-snapshot-c896995.jar.sha1 b/server/licenses/lucene-sandbox-9.12.0-snapshot-c896995.jar.sha1 deleted file mode 100644 index 08c2bc48ae85b..0000000000000 --- a/server/licenses/lucene-sandbox-9.12.0-snapshot-c896995.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -75352855bcc052abfba821f878a27fd2b328fb1c \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-spatial-extras-9.12.0-snapshot-847316d.jar.sha1 new file mode 100644 index 0000000000000..d27112c6db6ab --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.12.0-snapshot-847316d.jar.sha1 @@ -0,0 +1 @@ +69b99530e0b05251c12863bee6a9325cafd5fdaa \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.12.0-snapshot-c896995.jar.sha1 b/server/licenses/lucene-spatial-extras-9.12.0-snapshot-c896995.jar.sha1 deleted file mode 100644 index 5e0b7196f48c2..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.12.0-snapshot-c896995.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -299be103216d67ca092bef177642b275224e77a6 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-spatial3d-9.12.0-snapshot-847316d.jar.sha1 new file mode 100644 index 0000000000000..29423ac0ababd --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.12.0-snapshot-847316d.jar.sha1 @@ -0,0 +1 @@ +a67d193b4b08790169db7cf005a2429991260287 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.12.0-snapshot-c896995.jar.sha1 b/server/licenses/lucene-spatial3d-9.12.0-snapshot-c896995.jar.sha1 deleted file mode 100644 index c79b34adea5e2..0000000000000 --- a/server/licenses/lucene-spatial3d-9.12.0-snapshot-c896995.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -29b4a76cd0bdabe0e067063831e661dedac6e503 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-suggest-9.12.0-snapshot-847316d.jar.sha1 new file mode 100644 index 0000000000000..6ce1f639ccbb7 --- /dev/null +++ b/server/licenses/lucene-suggest-9.12.0-snapshot-847316d.jar.sha1 @@ -0,0 +1 @@ +7a1625ae39071ccbfb3af11df5a74291758f4b47 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.12.0-snapshot-c896995.jar.sha1 b/server/licenses/lucene-suggest-9.12.0-snapshot-c896995.jar.sha1 deleted file mode 100644 index 8d5334f0c4619..0000000000000 --- a/server/licenses/lucene-suggest-9.12.0-snapshot-c896995.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -597edb659e9ea93398a816e6837da7d47ef53873 \ No newline at end of file diff --git a/server/licenses/reactor-core-3.5.18.jar.sha1 b/server/licenses/reactor-core-3.5.18.jar.sha1 deleted file mode 100644 index c503f768beafa..0000000000000 --- a/server/licenses/reactor-core-3.5.18.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3a8157f7d66d71a407eb77ba12bce72a38c5b4da \ No newline at end of file diff --git a/server/licenses/reactor-core-3.5.20.jar.sha1 b/server/licenses/reactor-core-3.5.20.jar.sha1 new file mode 100644 index 0000000000000..0c80be89f66c8 --- /dev/null +++ b/server/licenses/reactor-core-3.5.20.jar.sha1 @@ -0,0 +1 @@ +1fc0f91e2b93778a974339d2c24363d7f34f90b4 \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/action/IndicesRequestIT.java b/server/src/internalClusterTest/java/org/opensearch/action/IndicesRequestIT.java index 84d833569edcb..927a79d4884ef 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/action/IndicesRequestIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/IndicesRequestIT.java @@ -84,6 +84,8 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchTransportService; import org.opensearch.action.search.SearchType; +import org.opensearch.action.support.clustermanager.term.GetTermVersionAction; +import org.opensearch.action.support.clustermanager.term.GetTermVersionRequest; import org.opensearch.action.support.replication.TransportReplicationActionTests; import org.opensearch.action.termvectors.MultiTermVectorsAction; import org.opensearch.action.termvectors.MultiTermVectorsRequest; @@ -195,6 +197,7 @@ public void cleanUp() { } public void testGetFieldMappings() { + String getFieldMappingsShardAction = GetFieldMappingsAction.NAME + "[index][s]"; interceptTransportActions(getFieldMappingsShardAction); @@ -545,13 +548,14 @@ public void testDeleteIndex() { } public void testGetMappings() { - interceptTransportActions(GetMappingsAction.NAME); - + interceptTransportActions(GetTermVersionAction.NAME, GetMappingsAction.NAME); GetMappingsRequest getMappingsRequest = new GetMappingsRequest().indices(randomIndicesOrAliases()); internalCluster().coordOnlyNodeClient().admin().indices().getMappings(getMappingsRequest).actionGet(); clearInterceptedActions(); - assertSameIndices(getMappingsRequest, GetMappingsAction.NAME); + + assertActionInvocation(GetTermVersionAction.NAME, GetTermVersionRequest.class); + assertNoActionInvocation(GetMappingsAction.NAME); } public void testPutMapping() { @@ -565,8 +569,8 @@ public void testPutMapping() { } public void testGetSettings() { - interceptTransportActions(GetSettingsAction.NAME); + interceptTransportActions(GetSettingsAction.NAME); GetSettingsRequest getSettingsRequest = new GetSettingsRequest().indices(randomIndicesOrAliases()); internalCluster().coordOnlyNodeClient().admin().indices().getSettings(getSettingsRequest).actionGet(); @@ -662,6 +666,21 @@ private static void assertSameIndices(IndicesRequest originalRequest, boolean op } } + private static void assertActionInvocation(String action, Class<? extends TransportRequest> requestClass) { + List<TransportRequest> requests = consumeTransportRequests(action); + assertFalse(requests.isEmpty()); + for (TransportRequest internalRequest : requests) { + assertTrue(internalRequest.getClass() == requestClass); + } + } + + private static void assertNoActionInvocation(String... actions) { + for (String action : actions) { + List<TransportRequest> requests = consumeTransportRequests(action); + assertTrue(requests.isEmpty()); + } + } + private static void assertIndicesSubset(List<String> indices, String...
actions) { // indices returned by each bulk shard request need to be a subset of the original indices for (String action : actions) { @@ -781,7 +800,6 @@ public List<TransportInterceptor> getTransportInterceptors( } private final Set<String> actions = new HashSet<>(); - private final Map<String, List<TransportRequest>> requests = new HashMap<>(); @Override @@ -831,6 +849,7 @@ public void messageReceived(T request, TransportChannel channel, Task task) thro } } requestHandler.messageReceived(request, channel, task); + } } } diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksIT.java index bdb36b62ada21..d8a4bed4740bf 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksIT.java @@ -327,7 +327,7 @@ public void testFailedToStartChildTaskAfterCancelled() throws Exception { mainAction.startSubTask(taskId, subRequest, future); TransportException te = expectThrows(TransportException.class, future::actionGet); assertThat(te.getCause(), instanceOf(TaskCancelledException.class)); - assertThat(te.getCause().getMessage(), equalTo("The parent task was cancelled, shouldn't start any child tasks")); + assertThat(te.getCause().getMessage(), equalTo("The parent task was cancelled, shouldn't start any child tasks, by user request")); allowEntireRequest(rootRequest); waitForRootTask(rootTaskFuture); ensureAllBansRemoved(); @@ -386,7 +386,7 @@ static void waitForRootTask(ActionFuture<TestResponse> rootTask) { assertThat( cause.getMessage(), anyOf( - equalTo("The parent task was cancelled, shouldn't start any child tasks"), + equalTo("The parent task was cancelled, shouldn't start any child tasks, by user request"), containsString("Task cancelled before it started:"), equalTo("Task was cancelled while executing") ) diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java index 085a32593063a..f23cdbb50b37a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -88,7 +88,11 @@ public void testNodeCounts() { Map<String, Integer> expectedCounts = getExpectedCounts(1, 1, 1, 1, 1, 0, 0); int numNodes = randomIntBetween(1, 5); - ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get(); + ClusterStatsResponse response = client().admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .get(); assertCounts(response.getNodesStats().getCounts(), total, expectedCounts); for (int i = 0; i < numNodes; i++) { @@ -153,7 +157,11 @@ public void testNodeCountsWithDeprecatedMasterRole() throws ExecutionException, Map<String, Integer> expectedCounts = getExpectedCounts(0, 1, 1, 0, 0, 0, 0); Client client = client(); - ClusterStatsResponse response = client.admin().cluster().prepareClusterStats().get(); + ClusterStatsResponse response = client.admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .get(); assertCounts(response.getNodesStats().getCounts(), total, expectedCounts); Set<String> expectedRoles = Set.of(DiscoveryNodeRole.MASTER_ROLE.roleName()); @@ -176,15 +184,60 @@ private
void assertShardStats(ClusterStatsIndices.ShardStats stats, int indices, assertThat(stats.getReplication(), Matchers.equalTo(replicationFactor)); } - public void testIndicesShardStats() throws ExecutionException, InterruptedException { + public void testIndicesShardStatsWithoutNodeLevelAggregations() { + internalCluster().startNode(); + ensureGreen(); + ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().useAggregatedNodeLevelResponses(false).get(); + assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN)); + + prepareCreate("test1").setSettings(Settings.builder().put("number_of_shards", 2).put("number_of_replicas", 1)).get(); + + response = client().admin().cluster().prepareClusterStats().useAggregatedNodeLevelResponses(false).get(); + assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.YELLOW)); + assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(0L)); + assertThat(response.indicesStats.getIndexCount(), Matchers.equalTo(1)); + assertShardStats(response.getIndicesStats().getShards(), 1, 2, 2, 0.0); + + // add another node, replicas should get assigned + internalCluster().startNode(); + ensureGreen(); + index("test1", "type", "1", "f", "f"); + refresh(); // make the doc visible + response = client().admin().cluster().prepareClusterStats().useAggregatedNodeLevelResponses(false).get(); + assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN)); + assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(1L)); + assertShardStats(response.getIndicesStats().getShards(), 1, 4, 2, 1.0); + + prepareCreate("test2").setSettings(Settings.builder().put("number_of_shards", 3).put("number_of_replicas", 0)).get(); + ensureGreen(); + response = client().admin().cluster().prepareClusterStats().useAggregatedNodeLevelResponses(false).get(); + assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN)); + assertThat(response.indicesStats.getIndexCount(), Matchers.equalTo(2)); + assertShardStats(response.getIndicesStats().getShards(), 2, 7, 5, 2.0 / 5); + + assertThat(response.getIndicesStats().getShards().getAvgIndexPrimaryShards(), Matchers.equalTo(2.5)); + assertThat(response.getIndicesStats().getShards().getMinIndexPrimaryShards(), Matchers.equalTo(2)); + assertThat(response.getIndicesStats().getShards().getMaxIndexPrimaryShards(), Matchers.equalTo(3)); + + assertThat(response.getIndicesStats().getShards().getAvgIndexShards(), Matchers.equalTo(3.5)); + assertThat(response.getIndicesStats().getShards().getMinIndexShards(), Matchers.equalTo(3)); + assertThat(response.getIndicesStats().getShards().getMaxIndexShards(), Matchers.equalTo(4)); + + assertThat(response.getIndicesStats().getShards().getAvgIndexReplication(), Matchers.equalTo(0.5)); + assertThat(response.getIndicesStats().getShards().getMinIndexReplication(), Matchers.equalTo(0.0)); + assertThat(response.getIndicesStats().getShards().getMaxIndexReplication(), Matchers.equalTo(1.0)); + + } + + public void testIndicesShardStatsWithNodeLevelAggregations() { internalCluster().startNode(); ensureGreen(); - ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get(); + ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().useAggregatedNodeLevelResponses(true).get(); assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN)); prepareCreate("test1").setSettings(Settings.builder().put("number_of_shards", 2).put("number_of_replicas", 1)).get(); - 
response = client().admin().cluster().prepareClusterStats().get(); + response = client().admin().cluster().prepareClusterStats().useAggregatedNodeLevelResponses(true).get(); assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.YELLOW)); assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(0L)); assertThat(response.indicesStats.getIndexCount(), Matchers.equalTo(1)); @@ -195,14 +248,14 @@ public void testIndicesShardStats() throws ExecutionException, InterruptedExcept ensureGreen(); index("test1", "type", "1", "f", "f"); refresh(); // make the doc visible - response = client().admin().cluster().prepareClusterStats().get(); + response = client().admin().cluster().prepareClusterStats().useAggregatedNodeLevelResponses(true).get(); assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN)); assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(1L)); assertShardStats(response.getIndicesStats().getShards(), 1, 4, 2, 1.0); prepareCreate("test2").setSettings(Settings.builder().put("number_of_shards", 3).put("number_of_replicas", 0)).get(); ensureGreen(); - response = client().admin().cluster().prepareClusterStats().get(); + response = client().admin().cluster().prepareClusterStats().useAggregatedNodeLevelResponses(true).get(); assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN)); assertThat(response.indicesStats.getIndexCount(), Matchers.equalTo(2)); assertShardStats(response.getIndicesStats().getShards(), 2, 7, 5, 2.0 / 5); @@ -225,7 +278,11 @@ public void testValuesSmokeScreen() throws IOException, ExecutionException, Inte internalCluster().startNodes(randomIntBetween(1, 3)); index("test1", "type", "1", "f", "f"); - ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get(); + ClusterStatsResponse response = client().admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .get(); String msg = response.toString(); assertThat(msg, response.getTimestamp(), Matchers.greaterThan(946681200000L)); // 1 Jan 2000 assertThat(msg, response.indicesStats.getStore().getSizeInBytes(), Matchers.greaterThan(0L)); @@ -265,13 +322,21 @@ public void testAllocatedProcessors() throws Exception { internalCluster().startNode(Settings.builder().put(OpenSearchExecutors.NODE_PROCESSORS_SETTING.getKey(), 7).build()); waitForNodes(1); - ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get(); + ClusterStatsResponse response = client().admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .get(); assertThat(response.getNodesStats().getOs().getAllocatedProcessors(), equalTo(7)); } public void testClusterStatusWhenStateNotRecovered() throws Exception { internalCluster().startClusterManagerOnlyNode(Settings.builder().put("gateway.recover_after_nodes", 2).build()); - ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get(); + ClusterStatsResponse response = client().admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.RED)); if (randomBoolean()) { @@ -281,14 +346,18 @@ public void testClusterStatusWhenStateNotRecovered() throws Exception { } // wait for the cluster status to settle ensureGreen(); - response = client().admin().cluster().prepareClusterStats().get(); + response = 
client().admin().cluster().prepareClusterStats().get(); + response = client().admin().cluster().prepareClusterStats().useAggregatedNodeLevelResponses(randomBoolean()).get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); } public void testFieldTypes() { internalCluster().startNode(); ensureGreen(); - ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get(); + ClusterStatsResponse response = client().admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .get(); assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN)); assertTrue(response.getIndicesStats().getMappings().getFieldTypeStats().isEmpty()); @@ -301,7 +370,7 @@ public void testFieldTypes() { + "\"eggplant\":{\"type\":\"integer\"}}}}}" ) .get(); - response = client().admin().cluster().prepareClusterStats().get(); + response = client().admin().cluster().prepareClusterStats().useAggregatedNodeLevelResponses(randomBoolean()).get(); assertThat(response.getIndicesStats().getMappings().getFieldTypeStats().size(), equalTo(3)); Set<IndexFeatureStats> stats = response.getIndicesStats().getMappings().getFieldTypeStats(); for (IndexFeatureStats stat : stats) { @@ -329,7 +398,11 @@ public void testNodeRolesWithMasterLegacySettings() throws ExecutionException, I Map<String, Integer> expectedCounts = getExpectedCounts(0, 1, 1, 0, 1, 0, 0); Client client = client(); - ClusterStatsResponse clusterStatsResponse = client.admin().cluster().prepareClusterStats().get(); + ClusterStatsResponse clusterStatsResponse = client.admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .get(); assertCounts(clusterStatsResponse.getNodesStats().getCounts(), total, expectedCounts); Set<String> expectedRoles = Set.of( @@ -359,7 +432,11 @@ public void testNodeRolesWithClusterManagerRole() throws ExecutionException, Int Map<String, Integer> expectedCounts = getExpectedCounts(0, 1, 1, 0, 1, 0, 0); Client client = client(); - ClusterStatsResponse clusterStatsResponse = client.admin().cluster().prepareClusterStats().get(); + ClusterStatsResponse clusterStatsResponse = client.admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .get(); assertCounts(clusterStatsResponse.getNodesStats().getCounts(), total, expectedCounts); Set<String> expectedRoles = Set.of( @@ -383,7 +460,11 @@ public void testNodeRolesWithSeedDataNodeLegacySettings() throws ExecutionExcept Map<String, Integer> expectedRoleCounts = getExpectedCounts(1, 1, 1, 0, 1, 0, 0); Client client = client(); - ClusterStatsResponse clusterStatsResponse = client.admin().cluster().prepareClusterStats().get(); + ClusterStatsResponse clusterStatsResponse = client.admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .get(); assertCounts(clusterStatsResponse.getNodesStats().getCounts(), total, expectedRoleCounts); Set<String> expectedRoles = Set.of( @@ -410,7 +491,11 @@ public void testNodeRolesWithDataNodeLegacySettings() throws ExecutionException, Map<String, Integer> expectedRoleCounts = getExpectedCounts(1, 1, 1, 0, 1, 0, 0); Client client = client(); - ClusterStatsResponse clusterStatsResponse = client.admin().cluster().prepareClusterStats().get(); + ClusterStatsResponse clusterStatsResponse = client.admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .get(); assertCounts(clusterStatsResponse.getNodesStats().getCounts(), total, expectedRoleCounts); Set<Set<String>> expectedNodesRoles = Set.of( diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java
b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java index 1c182b05fa4a8..fbe713d9e22c4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java @@ -406,4 +406,28 @@ public void testIndexNameInResponse() { assertEquals("Should have index name in response", "foo", response.index()); } + public void testCreateIndexWithNullReplicaCountPickUpClusterReplica() { + int numReplicas = 3; + String indexName = "test-idx-1"; + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("cluster.default_number_of_replicas", numReplicas).build()) + .get() + ); + Settings settings = Settings.builder() + .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), (String) null) + .build(); + assertAcked(client().admin().indices().prepareCreate(indexName).setSettings(settings).get()); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, internalCluster().getClusterManagerName()); + for (IndexService indexService : indicesService) { + assertEquals(indexName, indexService.index().getName()); + assertEquals( + numReplicas, + (int) indexService.getIndexSettings().getSettings().getAsInt(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, null) + ); + } + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java index acbd68fff6dd0..009f5111078de 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java @@ -79,7 +79,7 @@ protected boolean forbidPrivateIndexSettings() { @Before public void setup() { - asyncUploadMockFsRepo = true; + asyncUploadMockFsRepo = false; } public void testCreateCloneIndex() { @@ -153,6 +153,7 @@ public void testCreateCloneIndex() { } + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/15056") public void testCreateCloneIndexLowPriorityRateLimit() { Version version = VersionUtils.randomIndexCompatibleVersion(random()); int numPrimaryShards = 1; @@ -280,7 +281,7 @@ public void testCreateCloneIndexFailure() throws ExecutionException, Interrupted throw new RuntimeException(e); } finally { setFailRate(REPOSITORY_NAME, 0); - ensureGreen(); + ensureGreen(TimeValue.timeValueSeconds(40)); // clean up client().admin() .cluster() diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java index dc3c8793a93f6..928c9e33e19cb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java @@ -46,6 +46,7 @@ import org.opensearch.action.index.IndexRequest; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.Client; import 
org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; @@ -69,12 +70,15 @@ import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; +import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.io.UncheckedIOException; import java.util.Arrays; import java.util.HashSet; import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.function.BiFunction; import java.util.stream.IntStream; @@ -89,12 +93,32 @@ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteSplitIndexIT extends RemoteStoreBaseIntegTestCase { + @Before + public void setup() { + asyncUploadMockFsRepo = false; + } @Override protected boolean forbidPrivateIndexSettings() { return false; } + @After + public void cleanUp() throws Exception { + // Delete is async. + assertAcked( + client().admin().indices().prepareDelete("*").setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN).get() + ); + assertBusy(() -> { + try { + assertEquals(0, getFileCount(translogRepoPath)); + } catch (IOException e) { + fail(); + } + }, 30, TimeUnit.SECONDS); + super.teardown(); + } + public Settings indexSettings() { return Settings.builder() .put(super.indexSettings()) diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java index 6296608c64d37..bcf23a37c0010 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java @@ -57,6 +57,7 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.cluster.routing.allocation.AllocateUnassignedDecision; import org.opensearch.cluster.routing.allocation.AllocationDecision; import org.opensearch.cluster.routing.allocation.ExistingShardsAllocator; import org.opensearch.cluster.service.ClusterService; @@ -769,7 +770,7 @@ public void testMessyElectionsStillMakeClusterGoGreen() throws Exception { ensureGreen("test"); } - public void testBatchModeEnabled() throws Exception { + public void testBatchModeEnabledWithoutTimeout() throws Exception { internalCluster().startClusterManagerOnlyNodes( 1, Settings.builder().put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.getKey(), true).build() @@ -797,11 +798,86 @@ public void testBatchModeEnabled() throws Exception { ); assertTrue(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.get(internalCluster().clusterService().getSettings())); assertEquals(1, gatewayAllocator.getNumberOfStartedShardBatches()); - assertEquals(1, gatewayAllocator.getNumberOfStoreShardBatches()); + // Replica shard would be marked ineligible since there are no data nodes. 
+ // It would then be removed from any batch and batches would get deleted, so we would have 0 replica batches + assertEquals(0, gatewayAllocator.getNumberOfStoreShardBatches()); - // Now start both data nodes and ensure batch mode is working - logger.info("--> restarting the stopped nodes"); + // Now start one data node + logger.info("--> restarting the first stopped node"); + internalCluster().startDataOnlyNode(Settings.builder().put("node.name", dataOnlyNodes.get(0)).put(node0DataPathSettings).build()); + ensureStableCluster(2); + ensureYellow("test"); + assertEquals(0, gatewayAllocator.getNumberOfStartedShardBatches()); + assertEquals(0, gatewayAllocator.getNumberOfStoreShardBatches()); + assertEquals(0, gatewayAllocator.getNumberOfInFlightFetches()); + + // calling reroute and asserting on reroute response + logger.info("--> calling reroute while cluster is yellow"); + clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + assertTrue(clusterRerouteResponse.isAcknowledged()); + + // Now start last data node and ensure batch mode is working and cluster goes green + logger.info("--> restarting the second stopped node"); + internalCluster().startDataOnlyNode(Settings.builder().put("node.name", dataOnlyNodes.get(1)).put(node1DataPathSettings).build()); + ensureStableCluster(3); + ensureGreen("test"); + assertEquals(0, gatewayAllocator.getNumberOfStartedShardBatches()); + assertEquals(0, gatewayAllocator.getNumberOfStoreShardBatches()); + assertEquals(0, gatewayAllocator.getNumberOfInFlightFetches()); + } + + public void testBatchModeEnabledWithSufficientTimeoutAndClusterGreen() throws Exception { + internalCluster().startClusterManagerOnlyNodes( + 1, + Settings.builder() + .put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.getKey(), true) + .put(ShardsBatchGatewayAllocator.PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey(), "20s") + .put(ShardsBatchGatewayAllocator.REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey(), "20s") + .build() + ); + List<String> dataOnlyNodes = internalCluster().startDataOnlyNodes(2); + createIndex( + "test", + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build() + ); + ensureGreen("test"); + Settings node0DataPathSettings = internalCluster().dataPathSettings(dataOnlyNodes.get(0)); + Settings node1DataPathSettings = internalCluster().dataPathSettings(dataOnlyNodes.get(1)); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(dataOnlyNodes.get(0))); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(dataOnlyNodes.get(1))); + ensureRed("test"); + ensureStableCluster(1); + + logger.info("--> Now do a protective reroute"); + ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + assertTrue(clusterRerouteResponse.isAcknowledged()); + + ShardsBatchGatewayAllocator gatewayAllocator = internalCluster().getInstance( + ShardsBatchGatewayAllocator.class, + internalCluster().getClusterManagerName() + ); + assertTrue(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.get(internalCluster().clusterService().getSettings())); + assertEquals(1, gatewayAllocator.getNumberOfStartedShardBatches()); + // Replica shard would be marked ineligible since there are no data nodes.
+ // It would then be removed from any batch and batches would get deleted, so we would have 0 replica batches + assertEquals(0, gatewayAllocator.getNumberOfStoreShardBatches()); + + // Now start one data node and ensure batch mode is working + logger.info("--> restarting the first stopped node"); internalCluster().startDataOnlyNode(Settings.builder().put("node.name", dataOnlyNodes.get(0)).put(node0DataPathSettings).build()); + ensureStableCluster(2); + ensureYellow("test"); + assertEquals(0, gatewayAllocator.getNumberOfStartedShardBatches()); + assertEquals(0, gatewayAllocator.getNumberOfStoreShardBatches()); + assertEquals(0, gatewayAllocator.getNumberOfInFlightFetches()); + + // calling reroute and asserting on reroute response + logger.info("--> calling reroute while cluster is yellow"); + clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + assertTrue(clusterRerouteResponse.isAcknowledged()); + + // Now start last data node and ensure batch mode is working and cluster goes green + logger.info("--> restarting the second stopped node"); internalCluster().startDataOnlyNode(Settings.builder().put("node.name", dataOnlyNodes.get(1)).put(node1DataPathSettings).build()); ensureStableCluster(3); ensureGreen("test"); @@ -810,6 +886,89 @@ public void testBatchModeEnabled() throws Exception { assertEquals(0, gatewayAllocator.getNumberOfInFlightFetches()); } + public void testBatchModeEnabledWithDisabledTimeoutAndClusterGreen() throws Exception { + + internalCluster().startClusterManagerOnlyNodes( + 1, + Settings.builder().put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.getKey(), true).build() + ); + List<String> dataOnlyNodes = internalCluster().startDataOnlyNodes(2); + createNIndices(50, "test"); // this will create 50p, 50r shards + ensureStableCluster(3); + IndicesStatsResponse indicesStats = dataNodeClient().admin().indices().prepareStats().get(); + assertThat(indicesStats.getSuccessfulShards(), equalTo(100)); + ClusterHealthResponse health = client().admin() + .cluster() + .health(Requests.clusterHealthRequest().waitForGreenStatus().timeout("1m")) + .actionGet(); + assertFalse(health.isTimedOut()); + assertEquals(GREEN, health.getStatus()); + + String clusterManagerName = internalCluster().getClusterManagerName(); + Settings clusterManagerDataPathSettings = internalCluster().dataPathSettings(clusterManagerName); + Settings node0DataPathSettings = internalCluster().dataPathSettings(dataOnlyNodes.get(0)); + Settings node1DataPathSettings = internalCluster().dataPathSettings(dataOnlyNodes.get(1)); + + internalCluster().stopCurrentClusterManagerNode(); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(dataOnlyNodes.get(0))); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(dataOnlyNodes.get(1))); + + // Now start cluster manager node and post that verify batches created + internalCluster().startClusterManagerOnlyNodes( + 1, + Settings.builder() + .put("node.name", clusterManagerName) + .put(clusterManagerDataPathSettings) + .put(ShardsBatchGatewayAllocator.GATEWAY_ALLOCATOR_BATCH_SIZE.getKey(), 5) + .put(ShardsBatchGatewayAllocator.PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey(), "-1") + .put(ShardsBatchGatewayAllocator.REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey(), "-1") + .put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.getKey(), true) + .build() + ); + ensureStableCluster(1); + + logger.info("--> Now do a protective reroute"); // to avoid any race condition in test +
ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + assertTrue(clusterRerouteResponse.isAcknowledged()); + + ShardsBatchGatewayAllocator gatewayAllocator = internalCluster().getInstance( + ShardsBatchGatewayAllocator.class, + internalCluster().getClusterManagerName() + ); + + assertTrue(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.get(internalCluster().clusterService().getSettings())); + assertEquals(10, gatewayAllocator.getNumberOfStartedShardBatches()); + // All replica shards would be marked ineligible since there are no data nodes. + // They would then be removed from any batch and batches would get deleted, so we would have 0 replica batches + assertEquals(0, gatewayAllocator.getNumberOfStoreShardBatches()); + health = client(internalCluster().getClusterManagerName()).admin().cluster().health(Requests.clusterHealthRequest()).actionGet(); + assertFalse(health.isTimedOut()); + assertEquals(RED, health.getStatus()); + assertEquals(100, health.getUnassignedShards()); + assertEquals(0, health.getInitializingShards()); + assertEquals(0, health.getActiveShards()); + assertEquals(0, health.getRelocatingShards()); + assertEquals(0, health.getNumberOfDataNodes()); + + // Now start both data nodes and ensure batch mode is working + logger.info("--> restarting the stopped nodes"); + internalCluster().startDataOnlyNode(Settings.builder().put("node.name", dataOnlyNodes.get(0)).put(node0DataPathSettings).build()); + internalCluster().startDataOnlyNode(Settings.builder().put("node.name", dataOnlyNodes.get(1)).put(node1DataPathSettings).build()); + ensureStableCluster(3); + + // wait for cluster to turn green + health = client().admin().cluster().health(Requests.clusterHealthRequest().waitForGreenStatus().timeout("5m")).actionGet(); + assertFalse(health.isTimedOut()); + assertEquals(GREEN, health.getStatus()); + assertEquals(0, health.getUnassignedShards()); + assertEquals(0, health.getInitializingShards()); + assertEquals(100, health.getActiveShards()); + assertEquals(0, health.getRelocatingShards()); + assertEquals(2, health.getNumberOfDataNodes()); + assertEquals(0, gatewayAllocator.getNumberOfStartedShardBatches()); + assertEquals(0, gatewayAllocator.getNumberOfStoreShardBatches()); + } + public void testBatchModeDisabled() throws Exception { internalCluster().startClusterManagerOnlyNodes( 1, @@ -925,6 +1084,18 @@ public void testMultipleReplicaShardAssignmentWithDelayedAllocationAndDifferentN ensureGreen("test"); } + public void testAllocationExplainReturnsNoWhenExtraReplicaShardInNonBatchMode() throws Exception { + // Non batch mode - This test is to validate that we don't return AWAITING_INFO in allocation explain API when the deciders are + // returning NO + this.allocationExplainReturnsNoWhenExtraReplicaShard(false); + } + + public void testAllocationExplainReturnsNoWhenExtraReplicaShardInBatchMode() throws Exception { + // Batch mode - This test is to validate that we don't return AWAITING_INFO in allocation explain API when the deciders are + // returning NO + this.allocationExplainReturnsNoWhenExtraReplicaShard(true); + } + public void testNBatchesCreationAndAssignment() throws Exception { // we will reduce batch size to 5 to make sure we have enough batches to test assignment // Total number of primary shards = 50 (50 indices*1) @@ -978,7 +1149,9 @@ public void testNBatchesCreationAndAssignment() throws Exception { ); 
assertTrue(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.get(internalCluster().clusterService().getSettings())); assertEquals(10, gatewayAllocator.getNumberOfStartedShardBatches()); - assertEquals(10, gatewayAllocator.getNumberOfStoreShardBatches()); + // All replica shards would be marked ineligible since there are no data nodes. + // They would then be removed from any batch and batches would get deleted, so we would have 0 replica batches + assertEquals(0, gatewayAllocator.getNumberOfStoreShardBatches()); health = client(internalCluster().getClusterManagerName()).admin().cluster().health(Requests.clusterHealthRequest()).actionGet(); assertFalse(health.isTimedOut()); assertEquals(RED, health.getStatus()); @@ -1067,7 +1240,9 @@ public void testCulpritShardInBatch() throws Exception { ); assertTrue(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.get(internalCluster().clusterService().getSettings())); assertEquals(1, gatewayAllocator.getNumberOfStartedShardBatches()); - assertEquals(1, gatewayAllocator.getNumberOfStoreShardBatches()); + // Replica shard would be marked ineligible since there are no data nodes. + // It would then be removed from any batch and batches would get deleted, so we would have 0 replica batches + assertEquals(0, gatewayAllocator.getNumberOfStoreShardBatches()); assertTrue(clusterRerouteResponse.isAcknowledged()); health = client(internalCluster().getClusterManagerName()).admin().cluster().health(Requests.clusterHealthRequest()).actionGet(); assertFalse(health.isTimedOut()); @@ -1385,4 +1560,97 @@ private List<String> findNodesWithShard(final boolean primary) { Collections.shuffle(requiredStartedShards, random()); return requiredStartedShards.stream().map(shard -> state.nodes().get(shard.currentNodeId()).getName()).collect(Collectors.toList()); } + + private void allocationExplainReturnsNoWhenExtraReplicaShard(boolean batchModeEnabled) throws Exception { + internalCluster().startClusterManagerOnlyNodes( + 1, + Settings.builder().put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.getKey(), batchModeEnabled).build() + ); + internalCluster().startDataOnlyNodes(5); + createIndex( + "test", + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 4).build() + ); + ensureGreen("test"); + ensureStableCluster(6); + + // Stop one of the nodes to make the cluster yellow + // We cannot directly create an index with replica = data node count because then the whole flow will get skipped due to + // INDEX_CREATED + List<String> nodesWithReplicaShards = findNodesWithShard(false); + Settings replicaNodeDataPathSettings = internalCluster().dataPathSettings(nodesWithReplicaShards.get(0)); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodesWithReplicaShards.get(0))); + + ensureStableCluster(5); + ensureYellow("test"); + + logger.info("--> calling allocation explain API"); + // shard should have decision NO because there is no valid node for the extra replica to go to + AllocateUnassignedDecision aud = client().admin() + .cluster() + .prepareAllocationExplain() + .setIndex("test") + .setShard(0) + .setPrimary(false) + .get() + .getExplanation() + .getShardAllocationDecision() + .getAllocateDecision(); + + assertEquals(AllocationDecision.NO, aud.getAllocationDecision()); + assertEquals("cannot allocate because allocation is not permitted to any of the nodes", aud.getExplanation()); + + // Now creating a new index with too many replicas and trying again + createIndex( + "test2",
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 5).build() + ); + + ensureYellowAndNoInitializingShards("test2"); + + logger.info("--> calling allocation explain API again"); + // shard should have decision NO because there are 6 shard copies (1 primary, 5 replicas) and only 4 data nodes + aud = client().admin() + .cluster() + .prepareAllocationExplain() + .setIndex("test2") + .setShard(0) + .setPrimary(false) + .get() + .getExplanation() + .getShardAllocationDecision() + .getAllocateDecision(); + + assertEquals(AllocationDecision.NO, aud.getAllocationDecision()); + assertEquals("cannot allocate because allocation is not permitted to any of the nodes", aud.getExplanation()); + + logger.info("--> restarting the stopped node"); + internalCluster().startDataOnlyNode( + Settings.builder().put("node.name", nodesWithReplicaShards.get(0)).put(replicaNodeDataPathSettings).build() + ); + + ensureStableCluster(6); + ensureGreen("test"); + + logger.info("--> calling allocation explain API 3rd time"); + // shard should still have decision NO because there are 6 shard copies and only 5 data nodes + aud = client().admin() + .cluster() + .prepareAllocationExplain() + .setIndex("test2") + .setShard(0) + .setPrimary(false) + .get() + .getExplanation() + .getShardAllocationDecision() + .getAllocateDecision(); + + assertEquals(AllocationDecision.NO, aud.getAllocationDecision()); + assertEquals("cannot allocate because allocation is not permitted to any of the nodes", aud.getExplanation()); + + internalCluster().startDataOnlyNodes(1); + + ensureStableCluster(7); + ensureGreen("test2"); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerIT.java index 5074971ab1a1f..7d2e24c777da3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerIT.java @@ -8,9 +8,17 @@ package org.opensearch.gateway.remote; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.opensearch.cluster.coordination.PersistedStateStats; +import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.settings.Settings; +import org.opensearch.discovery.DiscoveryStats; +import org.opensearch.gateway.remote.model.RemoteRoutingTableBlobStore; +import org.opensearch.index.remote.RemoteStoreEnums; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -18,21 +26,29 @@ import org.junit.Before; import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.util.ArrayList; import java.util.Base64; +import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL; import static org.opensearch.gateway.remote.RemoteClusterStateCleanupManager.CLUSTER_STATE_CLEANUP_INTERVAL_DEFAULT;
import static org.opensearch.gateway.remote.RemoteClusterStateCleanupManager.REMOTE_CLUSTER_STATE_CLEANUP_INTERVAL_SETTING; import static org.opensearch.gateway.remote.RemoteClusterStateCleanupManager.RETAINED_MANIFESTS; import static org.opensearch.gateway.remote.RemoteClusterStateCleanupManager.SKIP_CLEANUP_STATE_CHANGES; import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; +import static org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable.INDEX_ROUTING_TABLE; import static org.opensearch.indices.IndicesService.CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteClusterStateCleanupManagerIT extends RemoteStoreBaseIntegTestCase { private static final String INDEX_NAME = "test-index"; + private final RemoteStoreEnums.PathType pathType = RemoteStoreEnums.PathType.HASHED_PREFIX; @Before public void setup() { @@ -52,6 +68,11 @@ private Map<String, Long> initialTestSetup(int shardCount, int replicaCount, int return indexStats; } + private void initialTestSetup(int shardCount, int replicaCount, int dataNodeCount, int clusterManagerNodeCount, Settings settings) { + prepareCluster(clusterManagerNodeCount, dataNodeCount, INDEX_NAME, replicaCount, shardCount, settings); + ensureGreen(INDEX_NAME); + } + public void testRemoteCleanupTaskUpdated() { int shardCount = randomIntBetween(1, 2); int replicaCount = 1; @@ -144,6 +165,102 @@ public void testRemoteCleanupDeleteStale() throws Exception { assertTrue(response.isAcknowledged()); } + public void testRemoteCleanupDeleteStaleIndexRoutingFiles() throws Exception { + clusterSettingsSuppliedByTest = true; + Path segmentRepoPath = randomRepoPath(); + Path translogRepoPath = randomRepoPath(); + Path remoteRoutingTableRepoPath = randomRepoPath(); + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put( + buildRemoteStoreNodeAttributes( + REPOSITORY_NAME, + segmentRepoPath, + REPOSITORY_2_NAME, + translogRepoPath, + REMOTE_ROUTING_TABLE_REPO, + remoteRoutingTableRepoPath, + false + ) + ); + settingsBuilder.put( + RemoteRoutingTableBlobStore.REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING.getKey(), + RemoteStoreEnums.PathType.HASHED_PREFIX.toString() + ) + .put("node.attr."
+ REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, REMOTE_ROUTING_TABLE_REPO) + .put(REMOTE_PUBLICATION_EXPERIMENTAL, true); + + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 1; + initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount, settingsBuilder.build()); + + // update cluster state 21 times to ensure that clean up has run; this will upload 42 manifest files + // to the repository, and if there are fewer manifest files than that it means clean up has run + updateClusterStateNTimes(RETAINED_MANIFESTS + SKIP_CLEANUP_STATE_CHANGES + 1); + + RepositoriesService repositoriesService = internalCluster().getClusterManagerNodeInstance(RepositoriesService.class); + BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(REPOSITORY_NAME); + BlobPath baseMetadataPath = getBaseMetadataPath(repository); + + BlobStoreRepository routingTableRepository = (BlobStoreRepository) repositoriesService.repository(REMOTE_ROUTING_TABLE_REPO); + List<IndexRoutingTable> indexRoutingTables = new ArrayList<>(getClusterState().routingTable().indicesRouting().values()); + BlobPath indexRoutingPath = getIndexRoutingPath(baseMetadataPath, indexRoutingTables.get(0).getIndex().getUUID()); + assertBusy(() -> { + // There would be >=3 files as shards will transition from UNASSIGNED -> INIT -> STARTED state + assertTrue(routingTableRepository.blobStore().blobContainer(indexRoutingPath).listBlobs().size() >= 3); + }); + + RemoteClusterStateCleanupManager remoteClusterStateCleanupManager = internalCluster().getClusterManagerNodeInstance( + RemoteClusterStateCleanupManager.class + ); + + // set cleanup interval to 100 ms to make the test faster + ClusterUpdateSettingsResponse response = client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(REMOTE_CLUSTER_STATE_CLEANUP_INTERVAL_SETTING.getKey(), "100ms")) + .get(); + + assertTrue(response.isAcknowledged()); + assertBusy(() -> assertEquals(100, remoteClusterStateCleanupManager.getStaleFileDeletionTask().getInterval().getMillis())); + + String clusterManagerNode = internalCluster().getClusterManagerName(); + NodesStatsResponse nodesStatsResponse = client().admin() + .cluster() + .prepareNodesStats(clusterManagerNode) + .addMetric(NodesStatsRequest.Metric.DISCOVERY.metricName()) + .get(); + verifyIndexRoutingFilesDeletion(routingTableRepository, indexRoutingPath, nodesStatsResponse); + + // disable the clean up to avoid race condition during shutdown + response = client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(REMOTE_CLUSTER_STATE_CLEANUP_INTERVAL_SETTING.getKey(), "-1")) + .get(); + assertTrue(response.isAcknowledged()); + } + + private void verifyIndexRoutingFilesDeletion( + BlobStoreRepository routingTableRepository, + BlobPath indexRoutingPath, + NodesStatsResponse nodesStatsResponse + ) throws Exception { + assertBusy(() -> { assertEquals(1, routingTableRepository.blobStore().blobContainer(indexRoutingPath).listBlobs().size()); }); + + // Verify index routing files delete stats + DiscoveryStats discoveryStats = nodesStatsResponse.getNodes().get(0).getDiscoveryStats(); + assertNotNull(discoveryStats.getClusterStateStats()); + for (PersistedStateStats persistedStateStats : discoveryStats.getClusterStateStats().getPersistenceStats()) { + Map<String, AtomicLong> extendedFields = persistedStateStats.getExtendedFields(); +
assertTrue(extendedFields.containsKey(RemotePersistenceStats.INDEX_ROUTING_FILES_CLEANUP_ATTEMPT_FAILED_COUNT)); + long cleanupAttemptFailedCount = extendedFields.get(RemotePersistenceStats.INDEX_ROUTING_FILES_CLEANUP_ATTEMPT_FAILED_COUNT) + .get(); + assertEquals(0, cleanupAttemptFailedCount); + } + } + private void updateClusterStateNTimes(int n) { int newReplicaCount = randomIntBetween(0, 3); for (int i = n; i > 0; i--) { @@ -155,4 +272,22 @@ private void updateClusterStateNTimes(int n) { assertTrue(response.isAcknowledged()); } } + + private BlobPath getBaseMetadataPath(BlobStoreRepository repository) { + return repository.basePath() + .add( + Base64.getUrlEncoder() + .withoutPadding() + .encodeToString(getClusterState().getClusterName().value().getBytes(StandardCharsets.UTF_8)) + ) + .add("cluster-state") + .add(getClusterState().metadata().clusterUUID()); + } + + private BlobPath getIndexRoutingPath(BlobPath baseMetadataPath, String indexUUID) { + return pathType.path( + RemoteStorePathStrategy.PathInput.builder().basePath(baseMetadataPath.add(INDEX_ROUTING_TABLE)).indexUUID(indexUUID).build(), + RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64 + ); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteRoutingTableServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteRoutingTableServiceIT.java new file mode 100644 index 0000000000000..9b80e4f907070 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteRoutingTableServiceIT.java @@ -0,0 +1,435 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway.remote; + +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.settings.Settings; +import org.opensearch.gateway.remote.model.RemoteRoutingTableBlobStore; +import org.opensearch.index.remote.RemoteStoreEnums; +import org.opensearch.index.remote.RemoteStorePathStrategy; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Base64; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL; +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; +import static org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable.INDEX_ROUTING_TABLE; +import static org.opensearch.indices.IndicesService.CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteRoutingTableServiceIT extends RemoteStoreBaseIntegTestCase { + private static final String INDEX_NAME = "test-index"; + private static final String INDEX_NAME_1 = "test-index-1"; + List indexRoutingPaths; + AtomicInteger indexRoutingFiles = new AtomicInteger(); + private final RemoteStoreEnums.PathType pathType = RemoteStoreEnums.PathType.HASHED_PREFIX; + + @Before + public void setup() { + asyncUploadMockFsRepo = false; + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .put( + RemoteRoutingTableBlobStore.REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING.getKey(), + RemoteStoreEnums.PathType.HASHED_PREFIX.toString() + ) + .put("node.attr." 
+ REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, REMOTE_ROUTING_TABLE_REPO)
+            .put(REMOTE_PUBLICATION_EXPERIMENTAL, true)
+            .build();
+    }
+
+    public void testRemoteRoutingTableIndexLifecycle() throws Exception {
+        BlobStoreRepository repository = prepareClusterAndVerifyRepository();
+
+        RemoteClusterStateService remoteClusterStateService = internalCluster().getClusterManagerNodeInstance(
+            RemoteClusterStateService.class
+        );
+        RemoteManifestManager remoteManifestManager = remoteClusterStateService.getRemoteManifestManager();
+        Optional<ClusterMetadataManifest> latestManifest = remoteManifestManager.getLatestClusterMetadataManifest(
+            getClusterState().getClusterName().value(),
+            getClusterState().getMetadata().clusterUUID()
+        );
+        List<String> expectedIndexNames = new ArrayList<>();
+        List<String> deletedIndexNames = new ArrayList<>();
+        verifyUpdatesInManifestFile(latestManifest, expectedIndexNames, 1, deletedIndexNames, true);
+
+        List<RoutingTable> routingTableVersions = getRoutingTableFromAllNodes();
+        assertTrue(areRoutingTablesSame(routingTableVersions));
+
+        // Update index settings
+        updateIndexSettings(INDEX_NAME, IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2);
+        ensureGreen(INDEX_NAME);
+        assertBusy(() -> {
+            int indexRoutingFilesAfterUpdate = repository.blobStore().blobContainer(indexRoutingPaths.get(0)).listBlobs().size();
+            // At least 3 new index routing files will be created as shards transition from UNASSIGNED -> INIT -> STARTED state
+            assertTrue(indexRoutingFilesAfterUpdate >= indexRoutingFiles.get() + 3);
+        });
+
+        latestManifest = remoteManifestManager.getLatestClusterMetadataManifest(
+            getClusterState().getClusterName().value(),
+            getClusterState().getMetadata().clusterUUID()
+        );
+        verifyUpdatesInManifestFile(latestManifest, expectedIndexNames, 1, deletedIndexNames, true);
+
+        routingTableVersions = getRoutingTableFromAllNodes();
+        assertTrue(areRoutingTablesSame(routingTableVersions));
+
+        // Delete the index and assert its deletion
+        deleteIndexAndVerify(remoteManifestManager);
+
+        routingTableVersions = getRoutingTableFromAllNodes();
+        assertTrue(areRoutingTablesSame(routingTableVersions));
+    }
+
+    public void testRemoteRoutingTableWithMultipleIndex() throws Exception {
+        BlobStoreRepository repository = prepareClusterAndVerifyRepository();
+
+        RemoteClusterStateService remoteClusterStateService = internalCluster().getClusterManagerNodeInstance(
+            RemoteClusterStateService.class
+        );
+        RemoteManifestManager remoteManifestManager = remoteClusterStateService.getRemoteManifestManager();
+        Optional<ClusterMetadataManifest> latestManifest = remoteManifestManager.getLatestClusterMetadataManifest(
+            getClusterState().getClusterName().value(),
+            getClusterState().getMetadata().clusterUUID()
+        );
+        List<String> expectedIndexNames = new ArrayList<>();
+        List<String> deletedIndexNames = new ArrayList<>();
+        verifyUpdatesInManifestFile(latestManifest, expectedIndexNames, 1, deletedIndexNames, true);
+
+        List<RoutingTable> routingTables = getRoutingTableFromAllNodes();
+        // Verify indices in routing table
+        Set<String> expectedIndicesInRoutingTable = Set.of(INDEX_NAME);
+        assertEquals(routingTables.get(0).getIndicesRouting().keySet(), expectedIndicesInRoutingTable);
+        // Verify routing table across all nodes is equal
+        assertTrue(areRoutingTablesSame(routingTables));
+
+        // Create new index
+        createIndex(INDEX_NAME_1, remoteStoreIndexSettings(1, 5));
+        ensureGreen(INDEX_NAME_1);
+
+        latestManifest = remoteManifestManager.getLatestClusterMetadataManifest(
+            getClusterState().getClusterName().value(),
+            getClusterState().getMetadata().clusterUUID()
+        );
+
+
updateIndexRoutingPaths(repository); + verifyUpdatesInManifestFile(latestManifest, expectedIndexNames, 2, deletedIndexNames, true); + routingTables = getRoutingTableFromAllNodes(); + // Verify indices in routing table + expectedIndicesInRoutingTable = Set.of(INDEX_NAME, INDEX_NAME_1); + assertEquals(routingTables.get(0).getIndicesRouting().keySet(), expectedIndicesInRoutingTable); + // Verify routing table across all nodes is equal + assertTrue(areRoutingTablesSame(routingTables)); + } + + public void testRemoteRoutingTableEmptyRoutingTableDiff() throws Exception { + prepareClusterAndVerifyRepository(); + + RemoteClusterStateService remoteClusterStateService = internalCluster().getClusterManagerNodeInstance( + RemoteClusterStateService.class + ); + RemoteManifestManager remoteManifestManager = remoteClusterStateService.getRemoteManifestManager(); + Optional latestManifest = remoteManifestManager.getLatestClusterMetadataManifest( + getClusterState().getClusterName().value(), + getClusterState().getMetadata().clusterUUID() + ); + List expectedIndexNames = new ArrayList<>(); + List deletedIndexNames = new ArrayList<>(); + verifyUpdatesInManifestFile(latestManifest, expectedIndexNames, 1, deletedIndexNames, true); + + List routingTableVersions = getRoutingTableFromAllNodes(); + assertTrue(areRoutingTablesSame(routingTableVersions)); + + // Update cluster settings + ClusterUpdateSettingsResponse response = client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), 0, TimeUnit.SECONDS)) + .get(); + assertTrue(response.isAcknowledged()); + + latestManifest = remoteManifestManager.getLatestClusterMetadataManifest( + getClusterState().getClusterName().value(), + getClusterState().getMetadata().clusterUUID() + ); + verifyUpdatesInManifestFile(latestManifest, expectedIndexNames, 1, deletedIndexNames, false); + + routingTableVersions = getRoutingTableFromAllNodes(); + assertTrue(areRoutingTablesSame(routingTableVersions)); + } + + public void testRemoteRoutingTableIndexNodeRestart() throws Exception { + BlobStoreRepository repository = prepareClusterAndVerifyRepository(); + + List routingTableVersions = getRoutingTableFromAllNodes(); + assertTrue(areRoutingTablesSame(routingTableVersions)); + + // Ensure node comes healthy after restart + Set dataNodes = internalCluster().getDataNodeNames(); + internalCluster().restartNode(randomFrom(dataNodes)); + ensureGreen(); + ensureGreen(INDEX_NAME); + + // ensure restarted node joins and the cluster is stable + assertEquals(3, internalCluster().clusterService().state().nodes().getDataNodes().size()); + ensureStableCluster(4); + assertRemoteStoreRepositoryOnAllNodes(REMOTE_ROUTING_TABLE_REPO); + + assertBusy(() -> { + int indexRoutingFilesAfterNodeDrop = repository.blobStore().blobContainer(indexRoutingPaths.get(0)).listBlobs().size(); + assertTrue(indexRoutingFilesAfterNodeDrop > indexRoutingFiles.get()); + }); + + RemoteClusterStateService remoteClusterStateService = internalCluster().getClusterManagerNodeInstance( + RemoteClusterStateService.class + ); + RemoteManifestManager remoteManifestManager = remoteClusterStateService.getRemoteManifestManager(); + Optional latestManifest = remoteManifestManager.getLatestClusterMetadataManifest( + getClusterState().getClusterName().value(), + getClusterState().getMetadata().clusterUUID() + ); + List expectedIndexNames = new ArrayList<>(); + List deletedIndexNames = new ArrayList<>(); + 
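+        // After the data node restart, the latest manifest is expected to still reference a single
+        // index routing file and no deleted indices, i.e. the routing table was re-published intact.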
verifyUpdatesInManifestFile(latestManifest, expectedIndexNames, 1, deletedIndexNames, true); + } + + public void testRemoteRoutingTableIndexMasterRestart() throws Exception { + BlobStoreRepository repository = prepareClusterAndVerifyRepository(); + + List routingTableVersions = getRoutingTableFromAllNodes(); + assertTrue(areRoutingTablesSame(routingTableVersions)); + + // Ensure node comes healthy after restart + String clusterManagerName = internalCluster().getClusterManagerName(); + internalCluster().restartNode(clusterManagerName); + ensureGreen(); + ensureGreen(INDEX_NAME); + + // ensure master is elected and the cluster is stable + assertNotNull(internalCluster().clusterService().state().nodes().getClusterManagerNode()); + ensureStableCluster(4); + assertRemoteStoreRepositoryOnAllNodes(REMOTE_ROUTING_TABLE_REPO); + + assertBusy(() -> { + int indexRoutingFilesAfterNodeDrop = repository.blobStore().blobContainer(indexRoutingPaths.get(0)).listBlobs().size(); + assertTrue(indexRoutingFilesAfterNodeDrop > indexRoutingFiles.get()); + }); + + RemoteClusterStateService remoteClusterStateService = internalCluster().getClusterManagerNodeInstance( + RemoteClusterStateService.class + ); + RemoteManifestManager remoteManifestManager = remoteClusterStateService.getRemoteManifestManager(); + Optional latestManifest = remoteManifestManager.getLatestClusterMetadataManifest( + getClusterState().getClusterName().value(), + getClusterState().getMetadata().clusterUUID() + ); + List expectedIndexNames = new ArrayList<>(); + List deletedIndexNames = new ArrayList<>(); + verifyUpdatesInManifestFile(latestManifest, expectedIndexNames, 1, deletedIndexNames, true); + } + + private BlobStoreRepository prepareClusterAndVerifyRepository() throws Exception { + clusterSettingsSuppliedByTest = true; + Path segmentRepoPath = randomRepoPath(); + Path translogRepoPath = randomRepoPath(); + Path remoteRoutingTableRepoPath = randomRepoPath(); + Settings settings = buildRemoteStoreNodeAttributes( + REPOSITORY_NAME, + segmentRepoPath, + REPOSITORY_2_NAME, + translogRepoPath, + REMOTE_ROUTING_TABLE_REPO, + remoteRoutingTableRepoPath, + false + ); + prepareCluster(1, 3, INDEX_NAME, 1, 5, settings); + ensureGreen(INDEX_NAME); + + RepositoriesService repositoriesService = internalCluster().getClusterManagerNodeInstance(RepositoriesService.class); + BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(REMOTE_ROUTING_TABLE_REPO); + + BlobPath baseMetadataPath = getBaseMetadataPath(repository); + List indexRoutingTables = new ArrayList<>(getClusterState().routingTable().indicesRouting().values()); + indexRoutingPaths = new ArrayList<>(); + for (IndexRoutingTable indexRoutingTable : indexRoutingTables) { + indexRoutingPaths.add(getIndexRoutingPath(baseMetadataPath.add(INDEX_ROUTING_TABLE), indexRoutingTable.getIndex().getUUID())); + } + + assertBusy(() -> { + int totalRoutingFiles = calculateTotalRoutingFiles(repository); + indexRoutingFiles.set(totalRoutingFiles); + // There would be >=3 files as shards will transition from UNASSIGNED -> INIT -> STARTED state + assertTrue(indexRoutingFiles.get() >= 3); + }); + assertRemoteStoreRepositoryOnAllNodes(REMOTE_ROUTING_TABLE_REPO); + return repository; + } + + private BlobPath getBaseMetadataPath(BlobStoreRepository repository) { + return repository.basePath() + .add( + Base64.getUrlEncoder() + .withoutPadding() + .encodeToString(getClusterState().getClusterName().value().getBytes(StandardCharsets.UTF_8)) + ) + .add("cluster-state") + 
.add(getClusterState().metadata().clusterUUID()); + } + + private BlobPath getIndexRoutingPath(BlobPath indexRoutingPath, String indexUUID) { + RemoteStoreEnums.PathHashAlgorithm pathHashAlgo = RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64; + return pathType.path( + RemoteStorePathStrategy.PathInput.builder().basePath(indexRoutingPath).indexUUID(indexUUID).build(), + pathHashAlgo + ); + } + + private void verifyUpdatesInManifestFile( + Optional latestManifest, + List expectedIndexNames, + int expectedIndicesRoutingFilesInManifest, + List expectedDeletedIndex, + boolean isRoutingTableDiffFileExpected + ) { + assertTrue(latestManifest.isPresent()); + ClusterMetadataManifest manifest = latestManifest.get(); + + assertEquals(expectedDeletedIndex, manifest.getDiffManifest().getIndicesDeleted()); + assertEquals(expectedIndicesRoutingFilesInManifest, manifest.getIndicesRouting().size()); + + // Check if all paths in manifest.getIndicesRouting() are present in indexRoutingPaths + for (ClusterMetadataManifest.UploadedIndexMetadata uploadedFilename : manifest.getIndicesRouting()) { + boolean pathFound = false; + for (BlobPath indexRoutingPath : indexRoutingPaths) { + if (uploadedFilename.getUploadedFilename().contains(indexRoutingPath.buildAsString())) { + pathFound = true; + break; + } + } + assertTrue("Uploaded file not found in indexRoutingPaths: " + uploadedFilename.getUploadedFilename(), pathFound); + } + assertEquals(isRoutingTableDiffFileExpected, manifest.getDiffManifest().getIndicesRoutingDiffPath() != null); + } + + private List getRoutingTableFromAllNodes() throws ExecutionException, InterruptedException { + String[] allNodes = internalCluster().getNodeNames(); + List routingTables = new ArrayList<>(); + for (String node : allNodes) { + RoutingTable routingTable = internalCluster().client(node) + .admin() + .cluster() + .state(new ClusterStateRequest().local(true)) + .get() + .getState() + .routingTable(); + routingTables.add(routingTable); + } + return routingTables; + } + + private void updateIndexRoutingPaths(BlobStoreRepository repository) { + BlobPath baseMetadataPath = getBaseMetadataPath(repository); + List indexRoutingTables = new ArrayList<>(getClusterState().routingTable().indicesRouting().values()); + + indexRoutingPaths.clear(); // Clear the list to avoid stale data + for (IndexRoutingTable indexRoutingTable : indexRoutingTables) { + indexRoutingPaths.add(getIndexRoutingPath(baseMetadataPath.add(INDEX_ROUTING_TABLE), indexRoutingTable.getIndex().getUUID())); + } + } + + private int calculateTotalRoutingFiles(BlobStoreRepository repository) throws IOException { + int totalRoutingFiles = 0; + for (BlobPath path : indexRoutingPaths) { + totalRoutingFiles += repository.blobStore().blobContainer(path).listBlobs().size(); + } + return totalRoutingFiles; + } + + private boolean areRoutingTablesSame(List routingTables) { + if (routingTables == null || routingTables.isEmpty()) { + return false; + } + + RoutingTable firstRoutingTable = routingTables.get(0); + for (RoutingTable routingTable : routingTables) { + if (!compareRoutingTables(firstRoutingTable, routingTable)) { + logger.info("Responses are not the same: {} {}", firstRoutingTable, routingTable); + return false; + } + } + return true; + } + + private boolean compareRoutingTables(RoutingTable a, RoutingTable b) { + if (a == b) return true; + if (b == null || a.getClass() != b.getClass()) return false; + if (a.version() != b.version()) return false; + if (a.indicesRouting().size() != b.indicesRouting().size()) return false; + + 
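+        // Versions and sizes match; compare each index routing table entry individually below.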
for (Map.Entry<String, IndexRoutingTable> entry : a.indicesRouting().entrySet()) {
+            IndexRoutingTable thisIndexRoutingTable = entry.getValue();
+            IndexRoutingTable thatIndexRoutingTable = b.indicesRouting().get(entry.getKey());
+            if (!thisIndexRoutingTable.equals(thatIndexRoutingTable)) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    private void updateIndexSettings(String indexName, String settingKey, int settingValue) {
+        client().admin()
+            .indices()
+            .prepareUpdateSettings(indexName)
+            .setSettings(Settings.builder().put(settingKey, settingValue))
+            .execute()
+            .actionGet();
+    }
+
+    private void deleteIndexAndVerify(RemoteManifestManager remoteManifestManager) {
+        client().admin().indices().prepareDelete(INDEX_NAME).execute().actionGet();
+        assertFalse(client().admin().indices().prepareExists(INDEX_NAME).get().isExists());
+
+        // Verify index is marked deleted in manifest
+        Optional<ClusterMetadataManifest> latestManifest = remoteManifestManager.getLatestClusterMetadataManifest(
+            getClusterState().getClusterName().value(),
+            getClusterState().getMetadata().clusterUUID()
+        );
+        assertTrue(latestManifest.isPresent());
+        ClusterMetadataManifest manifest = latestManifest.get();
+        assertTrue(manifest.getDiffManifest().getIndicesDeleted().contains(INDEX_NAME));
+        assertTrue(manifest.getIndicesRouting().isEmpty());
+    }
+
+}
diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java
new file mode 100644
index 0000000000000..6a2e7ce4957ae
--- /dev/null
+++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java
@@ -0,0 +1,173 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.gateway.remote; + +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.client.Client; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.gateway.remote.model.RemoteClusterMetadataManifest; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.repositories.fs.ReloadableFsRepository; +import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.Locale; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.DISCOVERY_NODES; +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; +import static org.opensearch.gateway.remote.model.RemoteClusterBlocks.CLUSTER_BLOCKS; +import static org.opensearch.gateway.remote.model.RemoteCoordinationMetadata.COORDINATION_METADATA; +import static org.opensearch.gateway.remote.model.RemoteCustomMetadata.CUSTOM_METADATA; +import static org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata.SETTING_METADATA; +import static org.opensearch.gateway.remote.model.RemoteTemplatesMetadata.TEMPLATES_METADATA; +import static org.opensearch.gateway.remote.model.RemoteTransientSettingsMetadata.TRANSIENT_SETTING_METADATA; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@ClusterScope(scope = Scope.TEST, numDataNodes = 0) +public class RemoteStatePublicationIT extends RemoteStoreBaseIntegTestCase { + + private static String INDEX_NAME = "test-index"; + private boolean isRemoteStateEnabled = true; + private String isRemotePublicationEnabled = "true"; + + @Before + public void setup() { + asyncUploadMockFsRepo = false; + isRemoteStateEnabled = true; + isRemotePublicationEnabled = "true"; + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder() + .put(super.featureFlagSettings()) + .put(FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL, isRemotePublicationEnabled) + .build(); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + String routingTableRepoName = "remote-routing-repo"; + String routingTableRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + routingTableRepoName + ); + String routingTableRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." 
+ REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX,
+            routingTableRepoName
+        );
+        return Settings.builder()
+            .put(super.nodeSettings(nodeOrdinal))
+            .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), isRemoteStateEnabled)
+            .put("node.attr." + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, routingTableRepoName)
+            .put(routingTableRepoTypeAttributeKey, ReloadableFsRepository.TYPE)
+            .put(routingTableRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath)
+            .build();
+    }
+
+    public void testPublication() throws Exception {
+        // create a multi-node cluster (3 cluster-manager nodes + 2 data nodes)
+        prepareCluster(3, 2, INDEX_NAME, 1, 2);
+        ensureStableCluster(5);
+        ensureGreen(INDEX_NAME);
+        // update settings on a random node
+        assertAcked(
+            internalCluster().client()
+                .admin()
+                .cluster()
+                .updateSettings(
+                    new ClusterUpdateSettingsRequest().persistentSettings(
+                        Settings.builder().put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "10mb").build()
+                    )
+                )
+                .actionGet()
+        );
+
+        RepositoriesService repositoriesService = internalCluster().getClusterManagerNodeInstance(RepositoriesService.class);
+        BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(REPOSITORY_NAME);
+
+        Map<String, Integer> globalMetadataFiles = getMetadataFiles(repository, RemoteClusterStateUtils.GLOBAL_METADATA_PATH_TOKEN);
+
+        assertTrue(globalMetadataFiles.containsKey(COORDINATION_METADATA));
+        assertTrue(globalMetadataFiles.containsKey(SETTING_METADATA));
+        assertTrue(globalMetadataFiles.containsKey(TRANSIENT_SETTING_METADATA));
+        assertTrue(globalMetadataFiles.containsKey(TEMPLATES_METADATA));
+        assertTrue(globalMetadataFiles.keySet().stream().anyMatch(key -> key.startsWith(CUSTOM_METADATA)));
+
+        Map<String, Integer> ephemeralMetadataFiles = getMetadataFiles(
+            repository,
+            RemoteClusterStateUtils.CLUSTER_STATE_EPHEMERAL_PATH_TOKEN
+        );
+
+        assertTrue(ephemeralMetadataFiles.containsKey(CLUSTER_BLOCKS));
+        assertTrue(ephemeralMetadataFiles.containsKey(DISCOVERY_NODES));
+
+        Map<String, Integer> manifestFiles = getMetadataFiles(repository, RemoteClusterMetadataManifest.MANIFEST);
+        assertTrue(manifestFiles.containsKey(RemoteClusterMetadataManifest.MANIFEST));
+
+        // get settings from each node and verify that the update has been applied
+        Settings settings = clusterService().getSettings();
+        logger.info("settings : {}", settings);
+        for (Client client : clients()) {
+            ClusterStateResponse response = client.admin().cluster().prepareState().clear().setMetadata(true).get();
+            String recoverySetting = response.getState()
+                .metadata()
+                .settings()
+                .get(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey());
+            assertEquals("10mb", recoverySetting);
+        }
+    }
+
+    public void testRemotePublicationDisableIfRemoteStateDisabled() {
+        // only disable remote state
+        isRemoteStateEnabled = false;
+        // create a multi-node cluster with inconsistent settings
+        prepareCluster(3, 2, INDEX_NAME, 1, 2);
+        // assert cluster is stable, ensuring publication falls back to legacy transport with inconsistent settings
+        ensureStableCluster(5);
+        ensureGreen(INDEX_NAME);
+
+        assertNull(internalCluster().getCurrentClusterManagerNodeInstance(RemoteClusterStateService.class));
+    }
+
+    private Map<String, Integer> getMetadataFiles(BlobStoreRepository repository, String subDirectory) throws IOException {
+        BlobPath metadataPath = repository.basePath()
+            .add(
+                Base64.getUrlEncoder()
+                    .withoutPadding()
+                    .encodeToString(getClusterState().getClusterName().value().getBytes(StandardCharsets.UTF_8))
+            )
+
.add(RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN) + .add(getClusterState().metadata().clusterUUID()) + .add(subDirectory); + return repository.blobStore().blobContainer(metadataPath).listBlobs().keySet().stream().map(fileName -> { + logger.info(fileName); + return fileName.split(DELIMITER)[0]; + }).collect(Collectors.toMap(Function.identity(), key -> 1, Integer::sum)); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java new file mode 100644 index 0000000000000..c461f83657340 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java @@ -0,0 +1,432 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.Rounding; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.IndexService; +import org.opensearch.index.compositeindex.CompositeIndexSettings; +import org.opensearch.index.compositeindex.datacube.DateDimension; +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeIndexSettings; +import org.opensearch.indices.IndicesService; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Set; + +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +/** + * Integration tests for star tree mapper + */ +public class StarTreeMapperIT extends OpenSearchIntegTestCase { + private static final String TEST_INDEX = "test"; + + private static XContentBuilder createMinimalTestMapping(boolean invalidDim, boolean invalidMetric, boolean keywordDim) { + try { + return jsonBuilder().startObject() + .startObject("composite") + .startObject("startree-1") + .field("type", "star_tree") + .startObject("config") + .startArray("ordered_dimensions") + .startObject() + .field("name", "timestamp") + .endObject() + .startObject() + .field("name", getDim(invalidDim, keywordDim)) + .endObject() + .endArray() + .startArray("metrics") + .startObject() + .field("name", getDim(invalidMetric, false)) + .endObject() + .endArray() + .endObject() + .endObject() + .endObject() + .startObject("properties") + .startObject("timestamp") + .field("type", "date") + .endObject() + .startObject("numeric_dv") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .startObject("numeric") + .field("type", "integer") + .field("doc_values", false) + .endObject() + .startObject("keyword_dv") + .field("type", "keyword") + .field("doc_values", true) + .endObject() + .startObject("keyword") + .field("type", "keyword") + .field("doc_values", false) + .endObject() + .endObject() + .endObject(); + } catch (IOException e) { + throw new 
IllegalStateException(e); + } + } + + private static XContentBuilder createMaxDimTestMapping() { + try { + return jsonBuilder().startObject() + .startObject("composite") + .startObject("startree-1") + .field("type", "star_tree") + .startObject("config") + .startArray("ordered_dimensions") + .startObject() + .field("name", "timestamp") + .startArray("calendar_intervals") + .value("day") + .value("month") + .endArray() + .endObject() + .startObject() + .field("name", "dim2") + .endObject() + .startObject() + .field("name", "dim3") + .endObject() + .endArray() + .startArray("metrics") + .startObject() + .field("name", "dim2") + .endObject() + .endArray() + .endObject() + .endObject() + .endObject() + .startObject("properties") + .startObject("timestamp") + .field("type", "date") + .endObject() + .startObject("dim2") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .startObject("dim3") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .endObject() + .endObject(); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + private static XContentBuilder createTestMappingWithoutStarTree(boolean invalidDim, boolean invalidMetric, boolean keywordDim) { + try { + return jsonBuilder().startObject() + .startObject("properties") + .startObject("timestamp") + .field("type", "date") + .endObject() + .startObject("numeric_dv") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .startObject("numeric") + .field("type", "integer") + .field("doc_values", false) + .endObject() + .startObject("keyword_dv") + .field("type", "keyword") + .field("doc_values", true) + .endObject() + .startObject("keyword") + .field("type", "keyword") + .field("doc_values", false) + .endObject() + .endObject() + .endObject(); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + private static XContentBuilder createUpdateTestMapping(boolean changeDim, boolean sameStarTree) { + try { + return jsonBuilder().startObject() + .startObject("composite") + .startObject(sameStarTree ? "startree-1" : "startree-2") + .field("type", "star_tree") + .startObject("config") + .startArray("ordered_dimensions") + .startObject() + .field("name", "timestamp") + .endObject() + .startObject() + .field("name", changeDim ? 
"numeric_new" : getDim(false, false)) + .endObject() + .endArray() + .startArray("metrics") + .startObject() + .field("name", getDim(false, false)) + .endObject() + .endArray() + .endObject() + .endObject() + .endObject() + .startObject("properties") + .startObject("timestamp") + .field("type", "date") + .endObject() + .startObject("numeric_dv") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .startObject("numeric") + .field("type", "integer") + .field("doc_values", false) + .endObject() + .startObject("numeric_new") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .startObject("keyword_dv") + .field("type", "keyword") + .field("doc_values", true) + .endObject() + .startObject("keyword") + .field("type", "keyword") + .field("doc_values", false) + .endObject() + .endObject() + .endObject(); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + private static String getDim(boolean hasDocValues, boolean isKeyword) { + if (hasDocValues) { + return "numeric"; + } else if (isKeyword) { + return "keyword"; + } + return "numeric_dv"; + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.STAR_TREE_INDEX, "true").build(); + } + + @Before + public final void setupNodeSettings() { + Settings request = Settings.builder().put(CompositeIndexSettings.STAR_TREE_INDEX_ENABLED_SETTING.getKey(), true).build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); + } + + public void testValidCompositeIndex() { + prepareCreate(TEST_INDEX).setMapping(createMinimalTestMapping(false, false, false)).get(); + Iterable dataNodeInstances = internalCluster().getDataNodeInstances(IndicesService.class); + for (IndicesService service : dataNodeInstances) { + final Index index = resolveIndex("test"); + if (service.hasIndex(index)) { + IndexService indexService = service.indexService(index); + Set fts = indexService.mapperService().getCompositeFieldTypes(); + + for (CompositeMappedFieldType ft : fts) { + assertTrue(ft instanceof StarTreeMapper.StarTreeFieldType); + StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) ft; + assertEquals("timestamp", starTreeFieldType.getDimensions().get(0).getField()); + assertTrue(starTreeFieldType.getDimensions().get(0) instanceof DateDimension); + DateDimension dateDim = (DateDimension) starTreeFieldType.getDimensions().get(0); + List expectedTimeUnits = Arrays.asList( + Rounding.DateTimeUnit.MINUTES_OF_HOUR, + Rounding.DateTimeUnit.HOUR_OF_DAY + ); + assertEquals(expectedTimeUnits, dateDim.getIntervals()); + assertEquals("numeric_dv", starTreeFieldType.getDimensions().get(1).getField()); + assertEquals("numeric_dv", starTreeFieldType.getMetrics().get(0).getField()); + + // Assert default metrics + List expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG); + assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); + assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); + assertEquals( + StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, + starTreeFieldType.getStarTreeConfig().getBuildMode() + ); + assertEquals(Collections.emptySet(), starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims()); + } + } + } + } + + public void testUpdateIndexWithAdditionOfStarTree() { + prepareCreate(TEST_INDEX).setMapping(createMinimalTestMapping(false, false, 
false)).get(); + + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> client().admin().indices().preparePutMapping(TEST_INDEX).setSource(createUpdateTestMapping(false, false)).get() + ); + assertEquals("Index cannot have more than [1] star tree fields", ex.getMessage()); + } + + public void testUpdateIndexWithNewerStarTree() { + prepareCreate(TEST_INDEX).setMapping(createTestMappingWithoutStarTree(false, false, false)).get(); + + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> client().admin().indices().preparePutMapping(TEST_INDEX).setSource(createUpdateTestMapping(false, false)).get() + ); + assertEquals( + "Composite fields must be specified during index creation, addition of new composite fields during update is not supported", + ex.getMessage() + ); + } + + public void testUpdateIndexWhenMappingIsDifferent() { + prepareCreate(TEST_INDEX).setMapping(createMinimalTestMapping(false, false, false)).get(); + + // update some field in the mapping + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> client().admin().indices().preparePutMapping(TEST_INDEX).setSource(createUpdateTestMapping(true, true)).get() + ); + assertTrue(ex.getMessage().contains("Cannot update parameter [config] from")); + } + + public void testUpdateIndexWhenMappingIsSame() { + prepareCreate(TEST_INDEX).setMapping(createMinimalTestMapping(false, false, false)).get(); + + // update some field in the mapping + AcknowledgedResponse putMappingResponse = client().admin() + .indices() + .preparePutMapping(TEST_INDEX) + .setSource(createMinimalTestMapping(false, false, false)) + .get(); + assertAcked(putMappingResponse); + + Iterable dataNodeInstances = internalCluster().getDataNodeInstances(IndicesService.class); + for (IndicesService service : dataNodeInstances) { + final Index index = resolveIndex("test"); + if (service.hasIndex(index)) { + IndexService indexService = service.indexService(index); + Set fts = indexService.mapperService().getCompositeFieldTypes(); + + for (CompositeMappedFieldType ft : fts) { + assertTrue(ft instanceof StarTreeMapper.StarTreeFieldType); + StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) ft; + assertEquals("timestamp", starTreeFieldType.getDimensions().get(0).getField()); + assertTrue(starTreeFieldType.getDimensions().get(0) instanceof DateDimension); + DateDimension dateDim = (DateDimension) starTreeFieldType.getDimensions().get(0); + List expectedTimeUnits = Arrays.asList( + Rounding.DateTimeUnit.MINUTES_OF_HOUR, + Rounding.DateTimeUnit.HOUR_OF_DAY + ); + assertEquals(expectedTimeUnits, dateDim.getIntervals()); + assertEquals("numeric_dv", starTreeFieldType.getDimensions().get(1).getField()); + assertEquals("numeric_dv", starTreeFieldType.getMetrics().get(0).getField()); + + // Assert default metrics + List expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG); + assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); + assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); + assertEquals( + StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, + starTreeFieldType.getStarTreeConfig().getBuildMode() + ); + assertEquals(Collections.emptySet(), starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims()); + } + } + } + } + + public void testInvalidDimCompositeIndex() { + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> 
prepareCreate(TEST_INDEX).setMapping(createMinimalTestMapping(true, false, false)).get() + ); + assertEquals( + "Aggregations not supported for the dimension field [numeric] with field type [integer] as part of star tree field", + ex.getMessage() + ); + } + + public void testMaxDimsCompositeIndex() { + MapperParsingException ex = expectThrows( + MapperParsingException.class, + () -> prepareCreate(TEST_INDEX).setMapping(createMaxDimTestMapping()) + .setSettings(Settings.builder().put(StarTreeIndexSettings.STAR_TREE_MAX_DIMENSIONS_SETTING.getKey(), 2)) + .get() + ); + assertEquals( + "Failed to parse mapping [_doc]: ordered_dimensions cannot have more than 2 dimensions for star tree field [startree-1]", + ex.getMessage() + ); + } + + public void testMaxCalendarIntervalsCompositeIndex() { + MapperParsingException ex = expectThrows( + MapperParsingException.class, + () -> prepareCreate(TEST_INDEX).setMapping(createMaxDimTestMapping()) + .setSettings(Settings.builder().put(StarTreeIndexSettings.STAR_TREE_MAX_DATE_INTERVALS_SETTING.getKey(), 1)) + .get() + ); + assertEquals( + "Failed to parse mapping [_doc]: At most [1] calendar intervals are allowed in dimension [timestamp]", + ex.getMessage() + ); + } + + public void testUnsupportedDim() { + MapperParsingException ex = expectThrows( + MapperParsingException.class, + () -> prepareCreate(TEST_INDEX).setMapping(createMinimalTestMapping(false, false, true)).get() + ); + assertEquals( + "Failed to parse mapping [_doc]: unsupported field type associated with dimension [keyword] as part of star tree field [startree-1]", + ex.getMessage() + ); + } + + public void testInvalidMetric() { + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> prepareCreate(TEST_INDEX).setMapping(createMinimalTestMapping(false, true, false)).get() + ); + assertEquals( + "Aggregations not supported for the metrics field [numeric] with field type [integer] as part of star tree field", + ex.getMessage() + ); + } + + @After + public final void cleanupNodeSettings() { + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull("*")) + .setTransientSettings(Settings.builder().putNull("*")) + ); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java index 09d5c208a8756..108ef14f0fcb4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java @@ -34,6 +34,12 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.Weight; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -56,7 +62,10 @@ import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexSettings; import org.opensearch.index.cache.request.RequestCacheStats; +import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.query.QueryShardContext; +import 
org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.search.aggregations.bucket.histogram.Histogram; @@ -65,6 +74,7 @@ import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; +import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.time.ZoneId; @@ -768,6 +778,59 @@ public void testDeleteAndCreateSameIndexShardOnSameNode() throws Exception { assertTrue(stats.getMemorySizeInBytes() == 0); } + public void testTimedOutQuery() throws Exception { + // A timed out query should be cached and then invalidated + Client client = client(); + String index = "index"; + assertAcked( + client.admin() + .indices() + .prepareCreate(index) + .setMapping("k", "type=keyword") + .setSettings( + Settings.builder() + .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + // Disable index refreshing to avoid cache being invalidated mid-test + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(-1)) + ) + .get() + ); + indexRandom(true, client.prepareIndex(index).setSource("k", "hello")); + ensureSearchable(index); + // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache + forceMerge(client, index); + + QueryBuilder timeoutQueryBuilder = new TermQueryBuilder("k", "hello") { + @Override + protected Query doToQuery(QueryShardContext context) { + return new TermQuery(new Term("k", "hello")) { + @Override + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + // Create the weight before sleeping. Otherwise, TermStates.build() (in the call to super.createWeight()) will + // sometimes throw an exception on timeout, rather than timing out gracefully. 
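+                        // Note: the 500 ms sleep below is much longer than the TimeValue.ZERO timeout set on
+                        // the search request, so the query reliably times out while this weight executes.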
+ Weight result = super.createWeight(searcher, scoreMode, boost); + try { + Thread.sleep(500); + } catch (InterruptedException ignored) {} + return result; + } + }; + } + }; + + SearchResponse resp = client.prepareSearch(index) + .setRequestCache(true) + .setQuery(timeoutQueryBuilder) + .setTimeout(TimeValue.ZERO) + .get(); + assertTrue(resp.isTimedOut()); + RequestCacheStats requestCacheStats = getRequestCacheStats(client, index); + // The cache should be empty as the timed-out query was invalidated + assertEquals(0, requestCacheStats.getMemorySizeInBytes()); + } + private Path[] shardDirectory(String server, Index index, int shard) { NodeEnvironment env = internalCluster().getInstance(NodeEnvironment.class, server); final Path[] paths = env.availableShardPaths(new ShardId(index, shard)); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/DanglingIndicesIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/DanglingIndicesIT.java index 8fd7961cab3a7..7bdf33edf1534 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/DanglingIndicesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/DanglingIndicesIT.java @@ -298,8 +298,8 @@ public void testMustAcceptDataLossToImportDanglingIndex() throws Exception { * 1, then create two indices and delete them both while one node in * the cluster is stopped. The deletion of the second pushes the deletion * of the first out of the graveyard. When the stopped node is resumed, - * only the second index will be found into the graveyard and the the - * other will be considered dangling, and can therefore be listed and + * only the second index will be found into the graveyard and the other + * will be considered dangling, and can therefore be listed and * deleted through the API */ public void testDanglingIndexCanBeDeleted() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java index 657d0f178e096..0eb37a7b25618 100644 --- a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java @@ -315,6 +315,87 @@ public void testBulkWithUpsert() throws Exception { assertThat(upserted.get("processed"), equalTo(true)); } + public void testSingleDocIngestFailure() throws Exception { + createIndex("test"); + BytesReference source = BytesReference.bytes( + jsonBuilder().startObject() + .field("description", "my_pipeline") + .startArray("processors") + .startObject() + .startObject("test") + .endObject() + .endObject() + .endArray() + .endObject() + ); + PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source, MediaTypeRegistry.JSON); + client().admin().cluster().putPipeline(putPipelineRequest).get(); + + GetPipelineRequest getPipelineRequest = new GetPipelineRequest("_id"); + GetPipelineResponse getResponse = client().admin().cluster().getPipeline(getPipelineRequest).get(); + assertThat(getResponse.isFound(), is(true)); + assertThat(getResponse.pipelines().size(), equalTo(1)); + assertThat(getResponse.pipelines().get(0).getId(), equalTo("_id")); + + assertThrows( + IllegalArgumentException.class, + () -> client().prepareIndex("test") + .setId("1") + .setPipeline("_id") + .setSource(Requests.INDEX_CONTENT_TYPE, "field", "value", "fail", true) + .get() + ); + + DeletePipelineRequest deletePipelineRequest = new 
DeletePipelineRequest("_id"); + AcknowledgedResponse response = client().admin().cluster().deletePipeline(deletePipelineRequest).get(); + assertThat(response.isAcknowledged(), is(true)); + + getResponse = client().admin().cluster().prepareGetPipeline("_id").get(); + assertThat(getResponse.isFound(), is(false)); + assertThat(getResponse.pipelines().size(), equalTo(0)); + } + + public void testSingleDocIngestDrop() throws Exception { + createIndex("test"); + BytesReference source = BytesReference.bytes( + jsonBuilder().startObject() + .field("description", "my_pipeline") + .startArray("processors") + .startObject() + .startObject("test") + .endObject() + .endObject() + .endArray() + .endObject() + ); + PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source, MediaTypeRegistry.JSON); + client().admin().cluster().putPipeline(putPipelineRequest).get(); + + GetPipelineRequest getPipelineRequest = new GetPipelineRequest("_id"); + GetPipelineResponse getResponse = client().admin().cluster().getPipeline(getPipelineRequest).get(); + assertThat(getResponse.isFound(), is(true)); + assertThat(getResponse.pipelines().size(), equalTo(1)); + assertThat(getResponse.pipelines().get(0).getId(), equalTo("_id")); + + DocWriteResponse indexResponse = client().prepareIndex("test") + .setId("1") + .setPipeline("_id") + .setSource(Requests.INDEX_CONTENT_TYPE, "field", "value", "drop", true) + .get(); + assertEquals(DocWriteResponse.Result.NOOP, indexResponse.getResult()); + + Map doc = client().prepareGet("test", "1").get().getSourceAsMap(); + assertNull(doc); + + DeletePipelineRequest deletePipelineRequest = new DeletePipelineRequest("_id"); + AcknowledgedResponse response = client().admin().cluster().deletePipeline(deletePipelineRequest).get(); + assertThat(response.isAcknowledged(), is(true)); + + getResponse = client().admin().cluster().prepareGetPipeline("_id").get(); + assertThat(getResponse.isFound(), is(false)); + assertThat(getResponse.pipelines().size(), equalTo(0)); + } + public void test() throws Exception { BytesReference source = BytesReference.bytes( jsonBuilder().startObject() diff --git a/server/src/internalClusterTest/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionForClusterManagerIT.java b/server/src/internalClusterTest/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionForClusterManagerIT.java index b9da5ffb86af0..e3a4216e772fb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionForClusterManagerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionForClusterManagerIT.java @@ -12,7 +12,11 @@ import org.apache.logging.log4j.Logger; import org.opensearch.action.admin.indices.alias.get.GetAliasesRequest; import org.opensearch.action.admin.indices.alias.get.GetAliasesResponse; +import org.opensearch.action.support.clustermanager.term.GetTermVersionAction; +import org.opensearch.action.support.clustermanager.term.GetTermVersionResponse; import org.opensearch.client.node.NodeClient; +import org.opensearch.cluster.coordination.ClusterStateTermVersion; +import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; @@ -20,6 +24,7 @@ import org.opensearch.node.IoUsageStats; import org.opensearch.node.ResourceUsageCollectorService; import org.opensearch.node.resource.tracker.ResourceTrackerSettings; 
+import org.opensearch.plugins.Plugin; import org.opensearch.ratelimitting.admissioncontrol.controllers.CpuBasedAdmissionController; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; @@ -29,9 +34,13 @@ import org.opensearch.rest.action.admin.indices.RestGetAliasesAction; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.rest.FakeRestRequest; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.transport.TransportService; import org.junit.Before; +import java.util.Collection; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; @@ -62,6 +71,10 @@ public class AdmissionForClusterManagerIT extends OpenSearchIntegTestCase { .put(CLUSTER_ADMIN_CPU_USAGE_LIMIT.getKey(), 50) .build(); + protected Collection> nodePlugins() { + return List.of(MockTransportService.TestPlugin.class); + } + @Before public void init() { String clusterManagerNode = internalCluster().startClusterManagerOnlyNode( @@ -79,6 +92,25 @@ public void init() { // Enable admission control client().admin().cluster().prepareUpdateSettings().setTransientSettings(ENFORCE_ADMISSION_CONTROL).execute().actionGet(); + MockTransportService primaryService = (MockTransportService) internalCluster().getInstance( + TransportService.class, + clusterManagerNode + ); + + // Force always fetch from ClusterManager + ClusterService clusterService = internalCluster().clusterService(); + GetTermVersionResponse oosTerm = new GetTermVersionResponse( + new ClusterStateTermVersion( + clusterService.state().getClusterName(), + clusterService.state().metadata().clusterUUID(), + clusterService.state().term() - 1, + clusterService.state().version() - 1 + ) + ); + primaryService.addRequestHandlingBehavior( + GetTermVersionAction.NAME, + (handler, request, channel, task) -> channel.sendResponse(oosTerm) + ); } public void testAdmissionControlEnforced() throws Exception { @@ -86,8 +118,8 @@ public void testAdmissionControlEnforced() throws Exception { // Write API on ClusterManager assertAcked(prepareCreate("test").setMapping("field", "type=text").setAliases("{\"alias1\" : {}}")); - // Read API on ClusterManager + GetAliasesRequest aliasesRequest = new GetAliasesRequest(); aliasesRequest.aliases("alias1"); try { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java index 5be9b25512704..e4e681a5433b5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java @@ -13,6 +13,8 @@ import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.opensearch.action.admin.indices.get.GetIndexRequest; +import org.opensearch.action.admin.indices.get.GetIndexResponse; import org.opensearch.action.bulk.BulkRequest; import org.opensearch.action.bulk.BulkResponse; import org.opensearch.action.delete.DeleteResponse; @@ -21,12 +23,17 @@ import org.opensearch.client.Requests; import 
org.opensearch.cluster.ClusterState; import org.opensearch.cluster.health.ClusterHealthStatus; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.common.Priority; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.index.Index; +import org.opensearch.index.IndexService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; import org.opensearch.repositories.fs.ReloadableFsRepository; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.Before; @@ -39,6 +46,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING; import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; @@ -261,4 +269,39 @@ public ClusterHealthStatus waitForRelocation(TimeValue t) { } return actionGet.getStatus(); } + + protected IndexShard getIndexShard(String dataNode, String indexName) throws ExecutionException, InterruptedException { + String clusterManagerName = internalCluster().getClusterManagerName(); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNode); + GetIndexResponse getIndexResponse = client(clusterManagerName).admin().indices().getIndex(new GetIndexRequest()).get(); + String uuid = getIndexResponse.getSettings().get(indexName).get(IndexMetadata.SETTING_INDEX_UUID); + IndexService indexService = indicesService.indexService(new Index(indexName, uuid)); + return indexService.getShard(0); + } + + public void changeReplicaCountAndEnsureGreen(int replicaCount, String indexName) { + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(indexName) + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, replicaCount)) + ); + ensureGreen(indexName); + } + + public void completeDocRepToRemoteMigration() { + assertTrue( + internalCluster().client() + .admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + .putNull(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey()) + .putNull(MIGRATION_DIRECTION_SETTING.getKey()) + ) + .get() + .isAcknowledged() + ); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java index 6885d37c4aab0..b55219e1cb37f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java @@ -275,7 +275,6 @@ initalMetadataVersion < internalCluster().client() * After shard relocation completes, shuts down the docrep nodes and asserts remote * index settings are applied even when the index is in YELLOW state */ - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13737") public void 
testIndexSettingsUpdatedEvenForMisconfiguredReplicas() throws Exception { internalCluster().startClusterManagerOnlyNode(); @@ -332,7 +331,6 @@ public void testIndexSettingsUpdatedEvenForMisconfiguredReplicas() throws Except * After shard relocation completes, restarts the docrep node holding extra replica shard copy * and asserts remote index settings are applied as soon as the docrep replica copy is unassigned */ - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13871") public void testIndexSettingsUpdatedWhenDocrepNodeIsRestarted() throws Exception { internalCluster().startClusterManagerOnlyNode(); @@ -548,6 +546,73 @@ public void testRemoteIndexPathFileExistsAfterMigration() throws Exception { assertTrue(Arrays.stream(files).anyMatch(file -> file.toString().contains(fileNamePrefix))); } + /** + * Scenario: + * Creates an index with a 1 primary, 1 replica setup on 3 docrep nodes (1 cluster manager + 2 data nodes), + * initiates migration, creates 3 remote nodes (1 cluster manager + 2 data nodes) and moves over + * only the primary shard copy of the index. + * After the primary shard copy is relocated, decreases the replica count to 0, stops all docrep nodes + * and concludes migration. Remote store index settings should be applied to the index at this point. + */ + public void testIndexSettingsUpdateDuringReplicaCountDecrement() throws Exception { + String indexName = "migration-index-replica-decrement"; + String docrepClusterManager = internalCluster().startClusterManagerOnlyNode(); + + logger.info("---> Starting 2 docrep nodes"); + List<String> docrepNodeNames = internalCluster().startDataOnlyNodes(2); + internalCluster().validateClusterFormed(); + + logger.info("---> Creating index with 1 primary and 1 replica"); + Settings oneReplica = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .build(); + createIndexAndAssertDocrepProperties(indexName, oneReplica); + + int docsToIndex = randomIntBetween(10, 100); + logger.info("---> Indexing {} docs", docsToIndex); + indexBulk(indexName, docsToIndex); + + logger.info( + "---> Stopping shard rebalancing to ensure shards do not automatically move over to newer nodes after they are launched" + ); + stopShardRebalancing(); + + logger.info("---> Starting 3 remote store enabled nodes"); + initDocRepToRemoteMigration(); + setAddRemote(true); + internalCluster().startClusterManagerOnlyNode(); + List<String> remoteNodeNames = internalCluster().startDataOnlyNodes(2); + internalCluster().validateClusterFormed(); + + String primaryNode = primaryNodeName(indexName); + + logger.info("---> Moving over primary to remote store enabled nodes"); + assertAcked( + client().admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(indexName, 0, primaryNode, remoteNodeNames.get(0))) + .execute() + .actionGet() + ); + waitForRelocation(); + waitNoPendingTasksOnAll(); + + logger.info("---> Reducing replica count to 0 for the index"); + changeReplicaCountAndEnsureGreen(0, indexName); + + logger.info("---> Stopping all docrep nodes"); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(docrepClusterManager)); + for (String node : docrepNodeNames) { + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node)); + } + internalCluster().validateClusterFormed(); + completeDocRepToRemoteMigration(); + waitNoPendingTasksOnAll(); + assertRemoteProperties(indexName); + } + private void createIndexAndAssertDocrepProperties(String index, Settings settings) {
createIndexAssertHealthAndDocrepProperties(index, settings, this::ensureGreen); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java index e0e25db4ca722..4d37b2a1feb88 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java @@ -17,6 +17,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.shard.IndexShard; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.snapshots.SnapshotInfo; import org.opensearch.test.OpenSearchIntegTestCase; @@ -216,4 +217,12 @@ public void testEndToEndRemoteMigration() throws Exception { asyncIndexingService.getIndexedDocs() ); } + + public void testRemoteSettingPropagatedToIndexShardAfterMigration() throws Exception { + testEndToEndRemoteMigration(); + IndexShard indexShard = getIndexShard(primaryNodeName("test"), "test"); + assertTrue(indexShard.indexSettings().isRemoteStoreEnabled()); + assertEquals(MigrationBaseTestCase.REPOSITORY_NAME, indexShard.indexSettings().getRemoteStoreRepository()); + assertEquals(MigrationBaseTestCase.REPOSITORY_2_NAME, indexShard.indexSettings().getRemoteStoreTranslogRepository()); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index 64efcee6ef1b5..f83ae3e0ca820 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -69,6 +69,7 @@ public class RemoteStoreBaseIntegTestCase extends OpenSearchIntegTestCase { protected static final String REPOSITORY_NAME = "test-remote-store-repo"; protected static final String REPOSITORY_2_NAME = "test-remote-store-repo-2"; + protected static final String REMOTE_ROUTING_TABLE_REPO = "remote-routing-table-repo"; protected static final int SHARD_COUNT = 1; protected static int REPLICA_COUNT = 1; protected static final String TOTAL_OPERATIONS = "total-operations"; @@ -312,7 +313,7 @@ public void assertRemoteStoreRepositoryOnAllNodes(String repositoryName) { } } - public static int getFileCount(Path path) throws Exception { + public static int getFileCount(Path path) throws IOException { final AtomicInteger filesExisting = new AtomicInteger(0); Files.walkFileTree(path, new SimpleFileVisitor<>() { @Override @@ -360,4 +361,20 @@ protected void prepareCluster(int numClusterManagerNodes, int numDataOnlyNodes, ensureGreen(index); } } + + protected void prepareCluster( + int numClusterManagerNodes, + int numDataOnlyNodes, + String indices, + int replicaCount, + int shardCount, + Settings settings + ) { + internalCluster().startClusterManagerOnlyNodes(numClusterManagerNodes, settings); + internalCluster().startDataOnlyNodes(numDataOnlyNodes, settings); + for (String index : indices.split(",")) { + createIndex(index, remoteStoreIndexSettings(replicaCount, shardCount)); + ensureGreen(index); + } + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java 
b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index 96d6338e5913b..194dce5f4a57a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -65,7 +65,6 @@ import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; import static org.opensearch.index.shard.IndexShardTestCase.getTranslog; import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; -import static org.opensearch.test.OpenSearchTestCase.getShardLevelBlobPath; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.comparesEqualTo; @@ -133,6 +132,21 @@ private void testPeerRecovery(int numberOfIterations, boolean invokeFlush) throw ); } + public void testRemoteStoreIndexCreationAndDeletionWithReferencedStore() throws InterruptedException, ExecutionException { + String dataNode = internalCluster().startNodes(1).get(0); + createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureGreen(INDEX_NAME); + + IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME); + + // Simulating a condition where the store is already in use by increasing its ref count; this helps in testing index + // deletion while a refresh is in progress. + indexShard.store().incRef(); + assertAcked(client().admin().indices().prepareDelete(INDEX_NAME)); + indexShard.store().decRef(); + } + public void testPeerRecoveryWithRemoteStoreAndRemoteTranslogNoDataFlush() throws Exception { testPeerRecovery(1, true); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java new file mode 100644 index 0000000000000..05ff738d2df0b --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
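(Aside: the incRef()/decRef() pair in testRemoteStoreIndexCreationAndDeletionWithReferencedStore above relies on the shard Store being reference counted, so a referenced store outlives the index deletion. A minimal sketch of the usual guard pattern, assuming the same getIndexShard helper and constants from the test:)

// Keep the shard's Store alive across work that can race with index deletion;
// the try/finally makes the release unconditional, unlike the bare pair above.
IndexShard shard = getIndexShard(dataNode, INDEX_NAME);
shard.store().incRef();
try {
    // ... operate on the store, e.g. while a refresh is still in flight ...
} finally {
    shard.store().decRef();
}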
+ */ + +package org.opensearch.remotestore; + +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.node.remotestore.RemoteStorePinnedTimestampService; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.Set; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteStorePinnedTimestampsIT extends RemoteStoreBaseIntegTestCase { + static final String INDEX_NAME = "remote-store-test-idx-1"; + + ActionListener<Void> noOpActionListener = new ActionListener<>() { + @Override + public void onResponse(Void unused) {} + + @Override + public void onFailure(Exception e) {} + }; + + public void testTimestampPinUnpin() throws Exception { + prepareCluster(1, 1, INDEX_NAME, 0, 2); + ensureGreen(INDEX_NAME); + + RemoteStorePinnedTimestampService remoteStorePinnedTimestampService = internalCluster().getInstance( + RemoteStorePinnedTimestampService.class, + primaryNodeName(INDEX_NAME) + ); + + Tuple<Long, Set<Long>> pinnedTimestampWithFetchTimestamp = RemoteStorePinnedTimestampService.getPinnedTimestamps(); + long lastFetchTimestamp = pinnedTimestampWithFetchTimestamp.v1(); + assertEquals(-1L, lastFetchTimestamp); + assertEquals(Set.of(), pinnedTimestampWithFetchTimestamp.v2()); + + assertThrows( + IllegalArgumentException.class, + () -> remoteStorePinnedTimestampService.pinTimestamp(1234L, "ss1", noOpActionListener) + ); + + long timestamp1 = System.currentTimeMillis() + 30000L; + long timestamp2 = System.currentTimeMillis() + 60000L; + long timestamp3 = System.currentTimeMillis() + 900000L; + remoteStorePinnedTimestampService.pinTimestamp(timestamp1, "ss2", noOpActionListener); + remoteStorePinnedTimestampService.pinTimestamp(timestamp2, "ss3", noOpActionListener); + remoteStorePinnedTimestampService.pinTimestamp(timestamp3, "ss4", noOpActionListener); + + remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueSeconds(1)); + + assertBusy(() -> { + Tuple<Long, Set<Long>> pinnedTimestampWithFetchTimestamp_2 = RemoteStorePinnedTimestampService.getPinnedTimestamps(); + long lastFetchTimestamp_2 = pinnedTimestampWithFetchTimestamp_2.v1(); + assertTrue(lastFetchTimestamp_2 != -1); + assertEquals(Set.of(timestamp1, timestamp2, timestamp3), pinnedTimestampWithFetchTimestamp_2.v2()); + }); + + remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueMinutes(3)); + + // This should be a no-op as the pinning entity is different + remoteStorePinnedTimestampService.unpinTimestamp(timestamp1, "no-snapshot", noOpActionListener); + // Unpinning an already pinned entity + remoteStorePinnedTimestampService.unpinTimestamp(timestamp2, "ss3", noOpActionListener); + // Adding a different entity to an already pinned timestamp + remoteStorePinnedTimestampService.pinTimestamp(timestamp3, "ss5", noOpActionListener); + + remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueSeconds(1)); + + assertBusy(() -> { + Tuple<Long, Set<Long>> pinnedTimestampWithFetchTimestamp_3 = RemoteStorePinnedTimestampService.getPinnedTimestamps(); + long lastFetchTimestamp_3 = pinnedTimestampWithFetchTimestamp_3.v1(); + assertTrue(lastFetchTimestamp_3 != -1); + assertEquals(Set.of(timestamp1, timestamp3), pinnedTimestampWithFetchTimestamp_3.v2()); + }); + + remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueMinutes(3)); + } +} diff --git 
a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java index 31c73e2fc03ae..86d586cd17146 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java @@ -250,7 +250,7 @@ public void testStatsResponseFromLocalNode() { } } - @TestLogging(reason = "Getting trace logs from remote store package", value = "org.opensearch.remotestore:TRACE") + @TestLogging(reason = "Getting trace logs from remote store package", value = "org.opensearch.index.shard:TRACE") public void testDownloadStatsCorrectnessSinglePrimarySingleReplica() throws Exception { setup(); // Scenario: @@ -280,11 +280,13 @@ public void testDownloadStatsCorrectnessSinglePrimarySingleReplica() throws Exce .get(0) .getSegmentStats(); logger.info( - "Zero state primary stats: {}ms refresh time lag, {}b bytes lag, {}b upload bytes started and {}b upload bytes failed.", + "Zero state primary stats: {}ms refresh time lag, {}b bytes lag, {}b upload bytes started, {}b upload bytes failed, {} uploads succeeded, {} upload bytes succeeded.", zeroStatePrimaryStats.refreshTimeLagMs, zeroStatePrimaryStats.bytesLag, zeroStatePrimaryStats.uploadBytesStarted, - zeroStatePrimaryStats.uploadBytesFailed + zeroStatePrimaryStats.uploadBytesFailed, + zeroStatePrimaryStats.totalUploadsSucceeded, + zeroStatePrimaryStats.uploadBytesSucceeded ); assertTrue( zeroStatePrimaryStats.totalUploadsStarted == zeroStatePrimaryStats.totalUploadsSucceeded @@ -348,7 +350,7 @@ public void testDownloadStatsCorrectnessSinglePrimarySingleReplica() throws Exce } } - @TestLogging(reason = "Getting trace logs from remote store package", value = "org.opensearch.remotestore:TRACE") + @TestLogging(reason = "Getting trace logs from remote store package", value = "org.opensearch.index.shard:TRACE") public void testDownloadStatsCorrectnessSinglePrimaryMultipleReplicaShards() throws Exception { setup(); // Scenario: @@ -382,11 +384,13 @@ public void testDownloadStatsCorrectnessSinglePrimaryMultipleReplicaShards() thr .get(0) .getSegmentStats(); logger.info( - "Zero state primary stats: {}ms refresh time lag, {}b bytes lag, {}b upload bytes started and {}b upload bytes failed.", + "Zero state primary stats: {}ms refresh time lag, {}b bytes lag, {}b upload bytes started, {}b upload bytes failed, {} uploads succeeded, {} upload bytes succeeded.", zeroStatePrimaryStats.refreshTimeLagMs, zeroStatePrimaryStats.bytesLag, zeroStatePrimaryStats.uploadBytesStarted, - zeroStatePrimaryStats.uploadBytesFailed + zeroStatePrimaryStats.uploadBytesFailed, + zeroStatePrimaryStats.totalUploadsSucceeded, + zeroStatePrimaryStats.uploadBytesSucceeded ); assertTrue( zeroStatePrimaryStats.totalUploadsStarted == zeroStatePrimaryStats.totalUploadsSucceeded @@ -617,7 +621,7 @@ public void testNonZeroPrimaryStatsOnNewlyCreatedIndexWithZeroDocs() throws Exce } assertZeroTranslogDownloadStats(translogStats); }); - }, 5, TimeUnit.SECONDS); + }, 10, TimeUnit.SECONDS); } public void testStatsCorrectnessOnFailover() { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java index a51bd6b20fff0..88c9ae436e85f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java @@ -20,6 +20,8 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.index.IndexModule; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.shard.IndexShard; @@ -65,11 +67,20 @@ protected Settings featureFlagSettings() { return featureSettings.build(); } + @Override + protected Settings nodeSettings(int nodeOrdinal) { + ByteSizeValue cacheSize = new ByteSizeValue(16, ByteSizeUnit.GB); + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(Node.NODE_SEARCH_CACHE_SIZE_SETTING.getKey(), cacheSize.toString()) + .build(); + } + public void testWritableWarmFeatureFlagDisabled() { Settings clusterSettings = Settings.builder().put(super.nodeSettings(0)).put(FeatureFlags.TIERED_REMOTE_INDEX, false).build(); InternalTestCluster internalTestCluster = internalCluster(); internalTestCluster.startClusterManagerOnlyNode(clusterSettings); - internalTestCluster.startDataOnlyNode(clusterSettings); + internalTestCluster.startDataAndSearchNodes(1); Settings indexSettings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) @@ -94,7 +105,7 @@ public void testWritableWarmFeatureFlagDisabled() { public void testWritableWarmBasic() throws Exception { InternalTestCluster internalTestCluster = internalCluster(); internalTestCluster.startClusterManagerOnlyNode(); - internalTestCluster.startDataOnlyNode(); + internalTestCluster.startDataAndSearchNodes(1); Settings settings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java index 01ad06757640c..3cf63e2f19a16 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java @@ -52,6 +52,7 @@ import org.opensearch.common.time.DateFormatter; import org.opensearch.common.unit.Fuzziness; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; @@ -66,6 +67,7 @@ import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.query.RangeQueryBuilder; import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.index.query.TermsQueryBuilder; import org.opensearch.index.query.WildcardQueryBuilder; import org.opensearch.index.query.WrapperQueryBuilder; import org.opensearch.index.query.functionscore.ScoreFunctionBuilders; @@ -84,6 +86,7 @@ import java.io.IOException; import java.io.Reader; +import java.nio.ByteBuffer; import java.time.Instant; import java.time.ZoneId; import java.time.ZoneOffset; @@ -98,6 +101,8 @@ import java.util.concurrent.ExecutionException; import java.util.regex.Pattern; +import org.roaringbitmap.RoaringBitmap; + import static java.util.Collections.singletonMap; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static 
org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; @@ -1157,6 +1162,41 @@ public void testTermsQuery() throws Exception { assertHitCount(searchResponse, 0L); } + public void testTermsQueryWithBitmapDocValuesQuery() throws Exception { + assertAcked( + prepareCreate("products").setMapping( + jsonBuilder().startObject() + .startObject("properties") + .startObject("product") + .field("type", "integer") + .field("index", false) + .endObject() + .endObject() + .endObject() + ) + ); + indexRandom( + true, + client().prepareIndex("products").setId("1").setSource("product", 1), + client().prepareIndex("products").setId("2").setSource("product", 2), + client().prepareIndex("products").setId("3").setSource("product", new int[] { 1, 3 }), + client().prepareIndex("products").setId("4").setSource("product", 4) + ); + + RoaringBitmap r = new RoaringBitmap(); + r.add(1); + r.add(4); + byte[] array = new byte[r.serializedSizeInBytes()]; + r.serialize(ByteBuffer.wrap(array)); + BytesArray bitmap = new BytesArray(array); + // directly building the terms query builder, so pass in the bitmap value as BytesArray + SearchResponse searchResponse = client().prepareSearch("products") + .setQuery(constantScoreQuery(termsQuery("product", bitmap).valueType(TermsQueryBuilder.ValueType.BITMAP))) + .get(); + assertHitCount(searchResponse, 3L); + assertSearchHits(searchResponse, "1", "3", "4"); + } + public void testTermsLookupFilter() throws Exception { assertAcked(prepareCreate("lookup").setMapping("terms", "type=text", "other", "type=text")); indexRandomForConcurrentSearch("lookup"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java index e40928f15e8a8..fdb12639c65be 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java @@ -42,6 +42,7 @@ import org.opensearch.action.bulk.BulkRequestBuilder; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.cluster.metadata.IndexMetadata; @@ -90,6 +91,7 @@ import static org.opensearch.script.MockScriptPlugin.NAME; import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFirstHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; @@ -919,7 +921,7 @@ public void testSortMissingNumbers() throws Exception { client().prepareIndex("test") .setId("3") .setSource( - jsonBuilder().startObject().field("id", "3").field("i_value", 2).field("d_value", 2.2).field("u_value", 2).endObject() + jsonBuilder().startObject().field("id", "3").field("i_value", 2).field("d_value", 2.2).field("u_value", 3).endObject() ) .get(); @@ -964,6 +966,18 @@ public void testSortMissingNumbers() throws Exception { assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("1")); 
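(Aside: the bitmap terms query added above works by handing RoaringBitmap's portable serialization to the terms query as an opaque byte payload. A minimal sketch of that construction, reusing the imports added to SearchQueryIT in this patch; the "product" field name mirrors the test and is otherwise arbitrary:)

import java.nio.ByteBuffer;
import org.opensearch.core.common.bytes.BytesArray;
import org.opensearch.index.query.TermsQueryBuilder;
import org.roaringbitmap.RoaringBitmap;
import static org.opensearch.index.query.QueryBuilders.termsQuery;

// Collect the matching integer values in a bitmap, serialize it into the
// byte layout the query expects, then flag the value as a bitmap payload.
RoaringBitmap bitmap = new RoaringBitmap();
bitmap.add(1);
bitmap.add(4);
byte[] bytes = new byte[bitmap.serializedSizeInBytes()];
bitmap.serialize(ByteBuffer.wrap(bytes));
TermsQueryBuilder query = termsQuery("product", new BytesArray(bytes)).valueType(TermsQueryBuilder.ValueType.BITMAP);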
assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + logger.info("--> sort with custom missing value"); + searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC).missing(randomBoolean() ? 1 : "1")) + .get(); + assertNoFailures(searchResponse); + + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); + assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + // FLOAT logger.info("--> sort with no missing (same as missing _last)"); searchResponse = client().prepareSearch() @@ -1001,6 +1015,18 @@ public void testSortMissingNumbers() throws Exception { assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("1")); assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + logger.info("--> sort with custom missing value"); + searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("d_value").order(SortOrder.ASC).missing(randomBoolean() ? 1.1 : "1.1")) + .get(); + assertNoFailures(searchResponse); + + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); + assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + // UNSIGNED_LONG logger.info("--> sort with no missing (same as missing _last)"); searchResponse = client().prepareSearch() @@ -1037,6 +1063,24 @@ public void testSortMissingNumbers() throws Exception { assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("2")); assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("1")); assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + + logger.info("--> sort with custom missing value"); + searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("u_value").order(SortOrder.ASC).missing(randomBoolean() ? 2 : "2")) + .get(); + assertNoFailures(searchResponse); + + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); + assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + + logger.info("--> sort with negative missing value"); + SearchRequestBuilder searchRequestBuilder = client().prepareSearch() + .setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("u_value").order(SortOrder.ASC).missing(randomBoolean() ? 
-1 : "-1")); + assertFailures(searchRequestBuilder, RestStatus.BAD_REQUEST, containsString("Value [-1] is out of range for an unsigned long")); } public void testSortMissingNumbersMinMax() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java index 1c199df4d548e..a19bbe49ad340 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java @@ -67,7 +67,6 @@ import java.util.stream.StreamSupport; import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.FS; -import static org.opensearch.common.util.FeatureFlags.TIERED_REMOTE_INDEX; import static org.opensearch.core.common.util.CollectionUtils.iterableAsArrayList; import static org.opensearch.index.store.remote.filecache.FileCacheSettings.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING; import static org.opensearch.test.NodeRoles.clusterManagerOnlyNode; @@ -1019,11 +1018,12 @@ public void testStartSearchNode() throws Exception { internalCluster().startNode(Settings.builder().put(onlyRole(DiscoveryNodeRole.SEARCH_ROLE))); // test start node without search role internalCluster().startNode(Settings.builder().put(onlyRole(DiscoveryNodeRole.DATA_ROLE))); - // test start non-dedicated search node with TIERED_REMOTE_INDEX feature enabled - internalCluster().startNode( - Settings.builder() - .put(onlyRoles(Set.of(DiscoveryNodeRole.SEARCH_ROLE, DiscoveryNodeRole.DATA_ROLE))) - .put(TIERED_REMOTE_INDEX, true) + // test start non-dedicated search node; if the user doesn't configure the cache size, it fails + assertThrows( + SettingsException.class, + () -> internalCluster().startNode( + Settings.builder().put(onlyRoles(Set.of(DiscoveryNodeRole.SEARCH_ROLE, DiscoveryNodeRole.DATA_ROLE))) + ) ); // test start non-dedicated search node assertThrows( diff --git a/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesConsumerWrapper.java b/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesConsumerWrapper.java new file mode 100644 index 0000000000000..67ee45f4c9306 --- /dev/null +++ b/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesConsumerWrapper.java @@ -0,0 +1,46 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.apache.lucene.codecs.lucene90; + +import org.apache.lucene.codecs.DocValuesConsumer; +import org.apache.lucene.index.SegmentWriteState; + +import java.io.Closeable; +import java.io.IOException; + +/** + * This class is an abstraction of the {@link DocValuesConsumer} for the Star Tree index structure. + * It is responsible for consuming various types of document values (numeric, binary, sorted, sorted numeric, 
+ * + * @opensearch.experimental + */ +public class Lucene90DocValuesConsumerWrapper implements Closeable { + + private final Lucene90DocValuesConsumer lucene90DocValuesConsumer; + + public Lucene90DocValuesConsumerWrapper( + SegmentWriteState state, + String dataCodec, + String dataExtension, + String metaCodec, + String metaExtension + ) throws IOException { + lucene90DocValuesConsumer = new Lucene90DocValuesConsumer(state, dataCodec, dataExtension, metaCodec, metaExtension); + } + + public Lucene90DocValuesConsumer getLucene90DocValuesConsumer() { + return lucene90DocValuesConsumer; + } + + @Override + public void close() throws IOException { + lucene90DocValuesConsumer.close(); + } +} diff --git a/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesProducerWrapper.java b/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesProducerWrapper.java new file mode 100644 index 0000000000000..a213852c59094 --- /dev/null +++ b/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesProducerWrapper.java @@ -0,0 +1,46 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.apache.lucene.codecs.lucene90; + +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.index.SegmentReadState; + +import java.io.Closeable; +import java.io.IOException; + +/** + * This class is a custom abstraction of the {@link DocValuesProducer} for the Star Tree index structure. + * It is responsible for providing access to various types of document values (numeric, binary, sorted, sorted numeric, + * and sorted set) for fields in the Star Tree index. + * + * @opensearch.experimental + */ +public class Lucene90DocValuesProducerWrapper implements Closeable { + + private final Lucene90DocValuesProducer lucene90DocValuesProducer; + + public Lucene90DocValuesProducerWrapper( + SegmentReadState state, + String dataCodec, + String dataExtension, + String metaCodec, + String metaExtension + ) throws IOException { + lucene90DocValuesProducer = new Lucene90DocValuesProducer(state, dataCodec, dataExtension, metaCodec, metaExtension); + } + + public DocValuesProducer getLucene90DocValuesProducer() { + return lucene90DocValuesProducer; + } + + @Override + public void close() throws IOException { + lucene90DocValuesProducer.close(); + } +} diff --git a/server/src/main/java/org/apache/lucene/index/SortedNumericDocValuesWriterWrapper.java b/server/src/main/java/org/apache/lucene/index/SortedNumericDocValuesWriterWrapper.java new file mode 100644 index 0000000000000..f7759fcced284 --- /dev/null +++ b/server/src/main/java/org/apache/lucene/index/SortedNumericDocValuesWriterWrapper.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.apache.lucene.index; + +import org.apache.lucene.util.Counter; + +/** + * A wrapper class for writing sorted numeric doc values. + *

    + * This class provides a convenient way to add sorted numeric doc values to a field + * and retrieve the corresponding {@link SortedNumericDocValues} instance. + * + * @opensearch.experimental + */ +public class SortedNumericDocValuesWriterWrapper { + + private final SortedNumericDocValuesWriter sortedNumericDocValuesWriter; + + /** + * Sole constructor. Constructs a new {@link SortedNumericDocValuesWriterWrapper} instance. + * + * @param fieldInfo the field information for the field being written + * @param counter a counter for tracking memory usage + */ + public SortedNumericDocValuesWriterWrapper(FieldInfo fieldInfo, Counter counter) { + sortedNumericDocValuesWriter = new SortedNumericDocValuesWriter(fieldInfo, counter); + } + + /** + * Adds a value to the sorted numeric doc values for the specified document. + * + * @param docID the document ID + * @param value the value to add + */ + public void addValue(int docID, long value) { + sortedNumericDocValuesWriter.addValue(docID, value); + } + + /** + * Returns the {@link SortedNumericDocValues} instance containing the sorted numeric doc values + * + * @return the {@link SortedNumericDocValues} instance + */ + public SortedNumericDocValues getDocValues() { + return sortedNumericDocValuesWriter.getDocValues(); + } +} diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 16c15f553951c..c86e6580122d5 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -216,6 +216,9 @@ import org.opensearch.action.admin.indices.template.put.TransportPutComponentTemplateAction; import org.opensearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.opensearch.action.admin.indices.template.put.TransportPutIndexTemplateAction; +import org.opensearch.action.admin.indices.tiering.HotToWarmTieringAction; +import org.opensearch.action.admin.indices.tiering.RestWarmTieringAction; +import org.opensearch.action.admin.indices.tiering.TransportHotToWarmTieringAction; import org.opensearch.action.admin.indices.upgrade.get.TransportUpgradeStatusAction; import org.opensearch.action.admin.indices.upgrade.get.UpgradeStatusAction; import org.opensearch.action.admin.indices.upgrade.post.TransportUpgradeAction; @@ -634,6 +637,9 @@ public void reg actions.register(CreateSnapshotAction.INSTANCE, TransportCreateSnapshotAction.class); actions.register(CloneSnapshotAction.INSTANCE, TransportCloneSnapshotAction.class); actions.register(RestoreSnapshotAction.INSTANCE, TransportRestoreSnapshotAction.class); + if (FeatureFlags.isEnabled(FeatureFlags.TIERED_REMOTE_INDEX)) { + actions.register(HotToWarmTieringAction.INSTANCE, TransportHotToWarmTieringAction.class); + } actions.register(SnapshotsStatusAction.INSTANCE, TransportSnapshotsStatusAction.class); actions.register(ClusterAddWeightedRoutingAction.INSTANCE, TransportAddWeightedRoutingAction.class); @@ -966,6 +972,9 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestNodeAttrsAction()); registerHandler.accept(new RestRepositoriesAction()); registerHandler.accept(new RestSnapshotAction()); + if (FeatureFlags.isEnabled(FeatureFlags.TIERED_REMOTE_INDEX)) { + registerHandler.accept(new RestWarmTieringAction()); + } registerHandler.accept(new RestTemplatesAction()); // Point in time API @@ -1191,9 +1200,12 @@ public void unregisterDynamicRoute(NamedRoute route) { * @param route The 
{@link RestHandler.Route}. * @return the corresponding {@link RestSendToExtensionAction} if it is registered, null otherwise. */ - @SuppressWarnings("unchecked") public RestSendToExtensionAction get(RestHandler.Route route) { - return routeRegistry.get(route); + if (route instanceof NamedRoute) { + return routeRegistry.get((NamedRoute) route); + } + // Only NamedRoutes are map keys so any other route is not in the map + return null; } } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java index 22feb4d99297a..c8a3be78a790e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java @@ -48,7 +48,8 @@ public TransportGetDecommissionStateAction( threadPool, actionFilters, GetDecommissionStateRequest::new, - indexNameExpressionResolver + indexNameExpressionResolver, + true ); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java index 1cc357a4c20f4..f69f462372888 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -534,4 +534,9 @@ private ClusterHealthResponse clusterHealth( pendingTaskTimeInQueue ); } + + @Override + protected boolean localExecuteSupportedByAction() { + return false; + } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java index 2c4f8522a5a5c..dda54cce334ec 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java @@ -129,7 +129,7 @@ protected NodeInfo nodeOperation(NodeInfoRequest nodeRequest) { */ public static class NodeInfoRequest extends TransportRequest { - NodesInfoRequest request; + protected NodesInfoRequest request; public NodeInfoRequest(StreamInput in) throws IOException { super(in); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 2e93e5e7841cb..2c808adc97c7a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -140,7 +140,7 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) { */ public static class NodeStatsRequest extends TransportRequest { - NodesStatsRequest request; + protected NodesStatsRequest request; public NodeStatsRequest(StreamInput in) throws IOException { super(in); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java index c7d784dbc96e7..c99b52dfe34f4 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java @@ -79,7 +79,8 @@ public TransportGetRepositoriesAction( threadPool, actionFilters, GetRepositoriesRequest::new, - indexNameExpressionResolver + indexNameExpressionResolver, + true ); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index 216e1fb2ed1cc..3988d50b2ce1e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -42,7 +42,6 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; -import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -59,17 +58,13 @@ import org.opensearch.common.settings.SettingsException; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.index.remote.RemoteMigrationIndexMetadataUpdater; import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; import java.io.IOException; -import java.util.Collection; -import java.util.Set; -import java.util.stream.Collectors; -import static org.opensearch.index.remote.RemoteMigrationIndexMetadataUpdater.indexHasAllRemoteStoreRelatedMetadata; +import static org.opensearch.index.remote.RemoteStoreUtils.checkAndFinalizeRemoteStoreMigration; /** * Transport action for updating cluster settings @@ -262,13 +257,14 @@ public void onFailure(String source, Exception e) { @Override public ClusterState execute(final ClusterState currentState) { - validateCompatibilityModeSettingRequest(request, state); - final ClusterState clusterState = updater.updateSettings( + boolean isCompatibilityModeChanging = validateCompatibilityModeSettingRequest(request, state); + ClusterState clusterState = updater.updateSettings( currentState, clusterSettings.upgradeSettings(request.transientSettings()), clusterSettings.upgradeSettings(request.persistentSettings()), logger ); + clusterState = checkAndFinalizeRemoteStoreMigration(isCompatibilityModeChanging, request, clusterState, logger); changed = clusterState != currentState; return clusterState; } @@ -278,19 +274,23 @@ public ClusterState execute(final ClusterState currentState) { /** * Runs various checks associated with changing cluster compatibility mode + * * @param request cluster settings update request, for settings to be updated and new values * @param clusterState current state of cluster, for information on nodes + * @return true if the incoming cluster settings update request is switching compatibility modes */ - public void validateCompatibilityModeSettingRequest(ClusterUpdateSettingsRequest request, ClusterState 
clusterState) { + public boolean validateCompatibilityModeSettingRequest(ClusterUpdateSettingsRequest request, ClusterState clusterState) { Settings settings = Settings.builder().put(request.persistentSettings()).put(request.transientSettings()).build(); if (RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING.exists(settings)) { - String value = RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING.get(settings).mode; validateAllNodesOfSameVersion(clusterState.nodes()); - if (RemoteStoreNodeService.CompatibilityMode.STRICT.mode.equals(value)) { + if (RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING.get( + settings + ) == RemoteStoreNodeService.CompatibilityMode.STRICT) { validateAllNodesOfSameType(clusterState.nodes()); - validateIndexSettings(clusterState); } + return true; } + return false; } /** @@ -310,31 +310,18 @@ private void validateAllNodesOfSameVersion(DiscoveryNodes discoveryNodes) { * @param discoveryNodes current discovery nodes in the cluster */ private void validateAllNodesOfSameType(DiscoveryNodes discoveryNodes) { - Set nodeTypes = discoveryNodes.getNodes() + boolean allNodesDocrepEnabled = discoveryNodes.getNodes() .values() .stream() - .map(DiscoveryNode::isRemoteStoreNode) - .collect(Collectors.toSet()); - if (nodeTypes.size() != 1) { + .allMatch(discoveryNode -> discoveryNode.isRemoteStoreNode() == false); + boolean allNodesRemoteStoreEnabled = discoveryNodes.getNodes() + .values() + .stream() + .allMatch(discoveryNode -> discoveryNode.isRemoteStoreNode()); + if (allNodesDocrepEnabled == false && allNodesRemoteStoreEnabled == false) { throw new SettingsException( "can not switch to STRICT compatibility mode when the cluster contains both remote and non-remote nodes" ); } } - - /** - * Verifies that while trying to switch to STRICT compatibility mode, - * all indices in the cluster have {@link RemoteMigrationIndexMetadataUpdater#indexHasAllRemoteStoreRelatedMetadata(IndexMetadata)} as true. 
- * If not, throws {@link SettingsException} - * @param clusterState current cluster state - */ - private void validateIndexSettings(ClusterState clusterState) { - Collection allIndicesMetadata = clusterState.metadata().indices().values(); - if (allIndicesMetadata.isEmpty() == false - && allIndicesMetadata.stream().anyMatch(indexMetadata -> indexHasAllRemoteStoreRelatedMetadata(indexMetadata) == false)) { - throw new SettingsException( - "can not switch to STRICT compatibility mode since all indices in the cluster does not have remote store based index settings" - ); - } - } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java index a2a65b6400c97..83e104236f640 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java @@ -85,7 +85,8 @@ public TransportClusterSearchShardsAction( threadPool, actionFilters, ClusterSearchShardsRequest::new, - indexNameExpressionResolver + indexNameExpressionResolver, + true ); this.indicesService = indicesService; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/TransportGetWeightedRoutingAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/TransportGetWeightedRoutingAction.java index 50368d85e0011..6c110c0ea2a73 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/TransportGetWeightedRoutingAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/TransportGetWeightedRoutingAction.java @@ -55,7 +55,8 @@ public TransportGetWeightedRoutingAction( threadPool, actionFilters, ClusterGetWeightedRoutingRequest::new, - indexNameExpressionResolver + indexNameExpressionResolver, + true ); this.weightedRoutingService = weightedRoutingService; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java index cae465a90446e..13ea7eaa43bf8 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -92,6 +92,7 @@ public TransportClusterStateAction( ClusterStateRequest::new, indexNameExpressionResolver ); + this.localExecuteSupported = true; } @Override @@ -233,9 +234,4 @@ private ClusterStateResponse buildResponse(final ClusterStateRequest request, fi return new ClusterStateResponse(currentState.getClusterName(), builder.build(), false); } - - @Override - protected boolean localExecuteSupportedByAction() { - return true; - } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java index 26e554f44fca1..03a73f45ffe81 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java @@ -78,26 +78,49 @@ public ClusterStatsIndices(List nodeResponses, Mapping this.segments = new SegmentsStats(); for 
(ClusterStatsNodeResponse r : nodeResponses) { - for (org.opensearch.action.admin.indices.stats.ShardStats shardStats : r.shardsStats()) { - ShardStats indexShardStats = countsPerIndex.get(shardStats.getShardRouting().getIndexName()); - if (indexShardStats == null) { - indexShardStats = new ShardStats(); - countsPerIndex.put(shardStats.getShardRouting().getIndexName(), indexShardStats); + // Aggregated response from the node + if (r.getAggregatedNodeLevelStats() != null) { + + for (Map.Entry entry : r.getAggregatedNodeLevelStats().indexStatsMap + .entrySet()) { + ShardStats indexShardStats = countsPerIndex.get(entry.getKey()); + if (indexShardStats == null) { + indexShardStats = new ShardStats(entry.getValue()); + countsPerIndex.put(entry.getKey(), indexShardStats); + } else { + indexShardStats.addStatsFrom(entry.getValue()); + } } - indexShardStats.total++; - - CommonStats shardCommonStats = shardStats.getStats(); - - if (shardStats.getShardRouting().primary()) { - indexShardStats.primaries++; - docs.add(shardCommonStats.docs); + docs.add(r.getAggregatedNodeLevelStats().commonStats.docs); + store.add(r.getAggregatedNodeLevelStats().commonStats.store); + fieldData.add(r.getAggregatedNodeLevelStats().commonStats.fieldData); + queryCache.add(r.getAggregatedNodeLevelStats().commonStats.queryCache); + completion.add(r.getAggregatedNodeLevelStats().commonStats.completion); + segments.add(r.getAggregatedNodeLevelStats().commonStats.segments); + } else { + // Default response from the node + for (org.opensearch.action.admin.indices.stats.ShardStats shardStats : r.shardsStats()) { + ShardStats indexShardStats = countsPerIndex.get(shardStats.getShardRouting().getIndexName()); + if (indexShardStats == null) { + indexShardStats = new ShardStats(); + countsPerIndex.put(shardStats.getShardRouting().getIndexName(), indexShardStats); + } + + indexShardStats.total++; + + CommonStats shardCommonStats = shardStats.getStats(); + + if (shardStats.getShardRouting().primary()) { + indexShardStats.primaries++; + docs.add(shardCommonStats.docs); + } + store.add(shardCommonStats.store); + fieldData.add(shardCommonStats.fieldData); + queryCache.add(shardCommonStats.queryCache); + completion.add(shardCommonStats.completion); + segments.add(shardCommonStats.segments); } - store.add(shardCommonStats.store); - fieldData.add(shardCommonStats.fieldData); - queryCache.add(shardCommonStats.queryCache); - completion.add(shardCommonStats.completion); - segments.add(shardCommonStats.segments); } } @@ -202,6 +225,11 @@ public static class ShardStats implements ToXContentFragment { public ShardStats() {} + public ShardStats(ClusterStatsNodeResponse.AggregatedIndexStats aggregatedIndexStats) { + this.total = aggregatedIndexStats.total; + this.primaries = aggregatedIndexStats.primaries; + } + /** * number of indices in the cluster */ @@ -329,6 +357,11 @@ public void addIndexShardCount(ShardStats indexShardCount) { } } + public void addStatsFrom(ClusterStatsNodeResponse.AggregatedIndexStats incomingStats) { + this.total += incomingStats.total; + this.primaries += incomingStats.primaries; + } + /** * Inner Fields used for creating XContent and parsing * diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java index 1b25bf84356d6..6ed3ca7c409e7 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java +++ 
b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java @@ -32,17 +32,29 @@ package org.opensearch.action.admin.cluster.stats; +import org.opensearch.Version; import org.opensearch.action.admin.cluster.node.info.NodeInfo; import org.opensearch.action.admin.cluster.node.stats.NodeStats; +import org.opensearch.action.admin.indices.stats.CommonStats; import org.opensearch.action.admin.indices.stats.ShardStats; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.index.cache.query.QueryCacheStats; +import org.opensearch.index.engine.SegmentsStats; +import org.opensearch.index.fielddata.FieldDataStats; +import org.opensearch.index.shard.DocsStats; +import org.opensearch.index.store.StoreStats; +import org.opensearch.search.suggest.completion.CompletionStats; import java.io.IOException; +import java.util.HashMap; +import java.util.Map; /** * Transport action for obtaining cluster stats from node level @@ -55,6 +67,7 @@ public class ClusterStatsNodeResponse extends BaseNodeResponse { private final NodeStats nodeStats; private final ShardStats[] shardsStats; private ClusterHealthStatus clusterStatus; + private AggregatedNodeLevelStats aggregatedNodeLevelStats; public ClusterStatsNodeResponse(StreamInput in) throws IOException { super(in); @@ -64,7 +77,12 @@ public ClusterStatsNodeResponse(StreamInput in) throws IOException { } this.nodeInfo = new NodeInfo(in); this.nodeStats = new NodeStats(in); - shardsStats = in.readArray(ShardStats::new, ShardStats[]::new); + if (in.getVersion().onOrAfter(Version.V_2_16_0)) { + this.shardsStats = in.readOptionalArray(ShardStats::new, ShardStats[]::new); + this.aggregatedNodeLevelStats = in.readOptionalWriteable(AggregatedNodeLevelStats::new); + } else { + this.shardsStats = in.readArray(ShardStats::new, ShardStats[]::new); + } } public ClusterStatsNodeResponse( @@ -81,6 +99,24 @@ public ClusterStatsNodeResponse( this.clusterStatus = clusterStatus; } + public ClusterStatsNodeResponse( + DiscoveryNode node, + @Nullable ClusterHealthStatus clusterStatus, + NodeInfo nodeInfo, + NodeStats nodeStats, + ShardStats[] shardsStats, + boolean useAggregatedNodeLevelResponses + ) { + super(node); + this.nodeInfo = nodeInfo; + this.nodeStats = nodeStats; + if (useAggregatedNodeLevelResponses) { + this.aggregatedNodeLevelStats = new AggregatedNodeLevelStats(node, shardsStats); + } + this.shardsStats = shardsStats; + this.clusterStatus = clusterStatus; + } + public NodeInfo nodeInfo() { return this.nodeInfo; } @@ -101,6 +137,10 @@ public ShardStats[] shardsStats() { return this.shardsStats; } + public AggregatedNodeLevelStats getAggregatedNodeLevelStats() { + return aggregatedNodeLevelStats; + } + public static ClusterStatsNodeResponse readNodeResponse(StreamInput in) throws IOException { return new ClusterStatsNodeResponse(in); } @@ -116,6 +156,95 @@ public void writeTo(StreamOutput out) throws IOException { } nodeInfo.writeTo(out); nodeStats.writeTo(out); - out.writeArray(shardsStats); + if (out.getVersion().onOrAfter(Version.V_2_16_0)) { + if (aggregatedNodeLevelStats != null) { + out.writeOptionalArray(null); + 
out.writeOptionalWriteable(aggregatedNodeLevelStats); + } else { + out.writeOptionalArray(shardsStats); + out.writeOptionalWriteable(null); + } + } else { + out.writeArray(shardsStats); + } + } + + /** + * Node level statistics used for ClusterStatsIndices for _cluster/stats call. + */ + public class AggregatedNodeLevelStats extends BaseNodeResponse { + + CommonStats commonStats; + Map indexStatsMap; + + protected AggregatedNodeLevelStats(StreamInput in) throws IOException { + super(in); + commonStats = in.readOptionalWriteable(CommonStats::new); + indexStatsMap = in.readMap(StreamInput::readString, AggregatedIndexStats::new); + } + + protected AggregatedNodeLevelStats(DiscoveryNode node, ShardStats[] indexShardsStats) { + super(node); + this.commonStats = new CommonStats(); + this.commonStats.docs = new DocsStats(); + this.commonStats.store = new StoreStats(); + this.commonStats.fieldData = new FieldDataStats(); + this.commonStats.queryCache = new QueryCacheStats(); + this.commonStats.completion = new CompletionStats(); + this.commonStats.segments = new SegmentsStats(); + this.indexStatsMap = new HashMap<>(); + + // Index Level Stats + for (org.opensearch.action.admin.indices.stats.ShardStats shardStats : indexShardsStats) { + AggregatedIndexStats indexShardStats = this.indexStatsMap.get(shardStats.getShardRouting().getIndexName()); + if (indexShardStats == null) { + indexShardStats = new AggregatedIndexStats(); + this.indexStatsMap.put(shardStats.getShardRouting().getIndexName(), indexShardStats); + } + + indexShardStats.total++; + + CommonStats shardCommonStats = shardStats.getStats(); + + if (shardStats.getShardRouting().primary()) { + indexShardStats.primaries++; + this.commonStats.docs.add(shardCommonStats.docs); + } + this.commonStats.store.add(shardCommonStats.store); + this.commonStats.fieldData.add(shardCommonStats.fieldData); + this.commonStats.queryCache.add(shardCommonStats.queryCache); + this.commonStats.completion.add(shardCommonStats.completion); + this.commonStats.segments.add(shardCommonStats.segments); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalWriteable(commonStats); + out.writeMap(indexStatsMap, StreamOutput::writeString, (stream, stats) -> stats.writeTo(stream)); + } + } + + /** + * Node level statistics used for ClusterStatsIndices for _cluster/stats call. 
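AggregatedNodeLevelStats serializes its per-index map with writeMap, passing one writer per key type and one per value type. A generic sketch of that codec shape, with JDK streams and hand-rolled Writer/Reader interfaces standing in for the OpenSearch Writeable machinery:

    import java.io.*;
    import java.util.*;

    // The caller supplies the key and value codecs, so the map codec stays generic.
    public class MapCodec {
        interface Writer<T> { void write(DataOutputStream out, T value) throws IOException; }
        interface Reader<T> { T read(DataInputStream in) throws IOException; }

        static <K, V> void writeMap(DataOutputStream out, Map<K, V> map,
                                    Writer<K> keyWriter, Writer<V> valueWriter) throws IOException {
            out.writeInt(map.size());
            for (Map.Entry<K, V> e : map.entrySet()) {
                keyWriter.write(out, e.getKey());
                valueWriter.write(out, e.getValue());
            }
        }

        static <K, V> Map<K, V> readMap(DataInputStream in,
                                        Reader<K> keyReader, Reader<V> valueReader) throws IOException {
            int size = in.readInt();
            Map<K, V> map = new HashMap<>(size);
            for (int i = 0; i < size; i++) {
                map.put(keyReader.read(in), valueReader.read(in));
            }
            return map;
        }
    }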
+     */
+    @PublicApi(since = "2.16.0")
+    public static class AggregatedIndexStats implements Writeable {
+        public int total = 0;
+        public int primaries = 0;
+
+        public AggregatedIndexStats(StreamInput in) throws IOException {
+            total = in.readVInt();
+            primaries = in.readVInt();
+        }
+
+        public AggregatedIndexStats() {}
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeVInt(total);
+            out.writeVInt(primaries);
+        }
+    }
 }
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java
index 6a99451c596ed..bd75b2210e474 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java
@@ -32,6 +32,7 @@
 package org.opensearch.action.admin.cluster.stats;
 
+import org.opensearch.Version;
 import org.opensearch.action.support.nodes.BaseNodesRequest;
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -49,8 +50,13 @@ public class ClusterStatsRequest extends BaseNodesRequest<ClusterStatsRequest> {
 
     public ClusterStatsRequest(StreamInput in) throws IOException {
         super(in);
+        if (in.getVersion().onOrAfter(Version.V_2_16_0)) {
+            useAggregatedNodeLevelResponses = in.readOptionalBoolean();
+        }
     }
 
+    private Boolean useAggregatedNodeLevelResponses = false;
+
     /**
      * Get stats from nodes based on the nodes ids specified. If none are passed, stats
      * based on all nodes will be returned.
@@ -59,9 +65,20 @@ public ClusterStatsRequest(String... nodesIds) {
         super(nodesIds);
     }
 
+    public boolean useAggregatedNodeLevelResponses() {
+        return useAggregatedNodeLevelResponses;
+    }
+
+    public void useAggregatedNodeLevelResponses(boolean useAggregatedNodeLevelResponses) {
+        this.useAggregatedNodeLevelResponses = useAggregatedNodeLevelResponses;
+    }
+
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
+        if (out.getVersion().onOrAfter(Version.V_2_16_0)) {
+            out.writeOptionalBoolean(useAggregatedNodeLevelResponses);
+        }
     }
 }
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java
index 0dcb03dc26d0e..4d0932bd3927d 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java
@@ -50,4 +50,9 @@ public class ClusterStatsRequestBuilder extends NodesOperationRequestBuilder<
 
     public ClusterStatsRequestBuilder(OpenSearchClient client, ClusterStatsAction action) {
         super(client, action, new ClusterStatsRequest());
     }
+
+    public final ClusterStatsRequestBuilder useAggregatedNodeLevelResponses(boolean useAggregatedNodeLevelResponses) {
+        request.useAggregatedNodeLevelResponses(useAggregatedNodeLevelResponses);
+        return this;
+    }
 }
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java
index e4f483f796f44..be7d41a7ba75e 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java
@@ -212,8 +212,14 @@
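Before the TransportClusterStatsAction hunk continues: AggregatedIndexStats above stores its counters with writeVInt/readVInt. A sketch of the variable-length integer encoding this implies, assuming the usual LSB-first layout with seven payload bits per byte and a continuation flag in the high bit:

    import java.io.*;

    public class VIntCodec {
        static void writeVInt(OutputStream out, int value) throws IOException {
            while ((value & ~0x7F) != 0) {         // more than 7 bits remain
                out.write((value & 0x7F) | 0x80);  // emit 7 bits, set continuation flag
                value >>>= 7;
            }
            out.write(value);                      // final byte, continuation flag clear
        }

        static int readVInt(InputStream in) throws IOException {
            int value = 0;
            for (int shift = 0; shift < 35; shift += 7) {
                int b = in.read();
                value |= (b & 0x7F) << shift;
                if ((b & 0x80) == 0) return value; // continuation flag clear: done
            }
            throw new IOException("vint too long");
        }
    }

Small counts such as per-index shard totals therefore serialize in a single byte.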
protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq clusterStatus = new ClusterStateHealth(clusterService.state()).getStatus(); } - return new ClusterStatsNodeResponse(nodeInfo.getNode(), clusterStatus, nodeInfo, nodeStats, shardsStats.toArray(new ShardStats[0])); - + return new ClusterStatsNodeResponse( + nodeInfo.getNode(), + clusterStatus, + nodeInfo, + nodeStats, + shardsStats.toArray(new ShardStats[0]), + nodeRequest.request.useAggregatedNodeLevelResponses() + ); } /** @@ -223,7 +229,7 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq */ public static class ClusterStatsNodeRequest extends TransportRequest { - ClusterStatsRequest request; + protected ClusterStatsRequest request; public ClusterStatsNodeRequest(StreamInput in) throws IOException { super(in); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java index db1f1edde2812..c34ec49406802 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java @@ -73,7 +73,8 @@ public TransportGetStoredScriptAction( threadPool, actionFilters, GetStoredScriptRequest::new, - indexNameExpressionResolver + indexNameExpressionResolver, + true ); this.scriptService = scriptService; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java index 5d5053cc80738..01846ef46c1ed 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java @@ -110,4 +110,9 @@ protected void clusterManagerOperation( logger.trace("done fetching pending tasks from cluster service"); listener.onResponse(new PendingClusterTasksResponse(pendingTasks)); } + + @Override + protected boolean localExecuteSupportedByAction() { + return false; + } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index 3aca9c1976f16..4f4e3bd481ee7 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -86,7 +86,8 @@ public TransportGetAliasesAction( threadPool, actionFilters, GetAliasesRequest::new, - indexNameExpressionResolver + indexNameExpressionResolver, + true ); this.systemIndices = systemIndices; } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java index 428a0eb35513d..a298eae1aa865 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java @@ -71,7 +71,8 @@ public TransportIndicesExistsAction( threadPool, 
actionFilters, IndicesExistsRequest::new, - indexNameExpressionResolver + indexNameExpressionResolver, + true ); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java index 3fbf9ac1bb570..a8b97d0f344ae 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java @@ -105,7 +105,8 @@ public TransportIndicesShardStoresAction( threadPool, actionFilters, IndicesShardStoresRequest::new, - indexNameExpressionResolver + indexNameExpressionResolver, + true ); this.listShardStoresInfo = listShardStoresInfo; } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java index e2594cd792cd3..c3217d109044d 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java @@ -76,7 +76,8 @@ public TransportGetComponentTemplateAction( threadPool, actionFilters, GetComponentTemplateAction.Request::new, - indexNameExpressionResolver + indexNameExpressionResolver, + true ); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java index b1ef32db7274f..84fbb59481c10 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java @@ -76,7 +76,8 @@ public TransportGetComposableIndexTemplateAction( threadPool, actionFilters, GetComposableIndexTemplateAction.Request::new, - indexNameExpressionResolver + indexNameExpressionResolver, + true ); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java index 10b4975f7b9d0..522234dda509f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java @@ -76,7 +76,8 @@ public TransportGetIndexTemplatesAction( threadPool, actionFilters, GetIndexTemplatesRequest::new, - indexNameExpressionResolver + indexNameExpressionResolver, + true ); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java index c1a02d813ffb2..22f1831a54164 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java @@ -140,7 +140,8 @@ protected void 
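The repeated constructor change above threads a boolean through TransportClusterManagerNodeReadAction so individual read actions can opt in to being served from local cluster state instead of always forwarding to the elected cluster manager. A simplified sketch of that opt-in template pattern, with stand-in class names:

    // Stand-in types; the real dispatch also handles blocks, retries, and term checks.
    abstract class ReadAction<REQ> {
        protected final boolean localExecuteSupported;

        protected ReadAction(boolean localExecuteSupported) {
            this.localExecuteSupported = localExecuteSupported;
        }

        protected boolean localExecuteSupportedByAction() {
            return localExecuteSupported;
        }

        final void execute(REQ request) {
            if (localExecuteSupportedByAction()) {
                runLocally(request);    // serve from the node's current cluster state
            } else {
                forwardToClusterManager(request);
            }
        }

        abstract void runLocally(REQ request);
        abstract void forwardToClusterManager(REQ request);
    }

    // A stale-tolerant read opts in; anything needing the authoritative state does not.
    class GetAliasesLikeAction extends ReadAction<String> {
        GetAliasesLikeAction() { super(true); }
        void runLocally(String r) { System.out.println("served locally: " + r); }
        void forwardToClusterManager(String r) { System.out.println("forwarded: " + r); }
    }

    public class LocalExecuteDemo {
        public static void main(String[] args) {
            new GetAliasesLikeAction().execute("aliases-for:logs-*");
        }
    }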
clusterManagerOperation( MetadataIndexTemplateService.validateV2TemplateRequest( state.metadata(), simulateTemplateToAdd, - request.getIndexTemplateRequest().indexTemplate() + request.getIndexTemplateRequest().indexTemplate(), + clusterService.getClusterSettings() ); stateWithTemplate = indexTemplateService.addIndexTemplateV2( state, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java index 6565896fd3db2..03190445647ad 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java @@ -134,7 +134,8 @@ protected void clusterManagerOperation( MetadataIndexTemplateService.validateV2TemplateRequest( state.metadata(), simulateTemplateToAdd, - request.getIndexTemplateRequest().indexTemplate() + request.getIndexTemplateRequest().indexTemplate(), + clusterService.getClusterSettings() ); stateWithTemplate = indexTemplateService.addIndexTemplateV2( state, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/tiering/HotToWarmTieringAction.java b/server/src/main/java/org/opensearch/action/admin/indices/tiering/HotToWarmTieringAction.java new file mode 100644 index 0000000000000..ae34a9a734221 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/tiering/HotToWarmTieringAction.java @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.tiering; + +import org.opensearch.action.ActionType; +import org.opensearch.common.annotation.ExperimentalApi; + +/** + * Tiering action to move indices from hot to warm + * + * @opensearch.experimental + */ +@ExperimentalApi +public class HotToWarmTieringAction extends ActionType { + + public static final HotToWarmTieringAction INSTANCE = new HotToWarmTieringAction(); + public static final String NAME = "indices:admin/tier/hot_to_warm"; + + private HotToWarmTieringAction() { + super(NAME, HotToWarmTieringResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/tiering/HotToWarmTieringResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/tiering/HotToWarmTieringResponse.java new file mode 100644 index 0000000000000..275decf7a8ea5 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/tiering/HotToWarmTieringResponse.java @@ -0,0 +1,157 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
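HotToWarmTieringAction above follows the usual ActionType singleton convention: a private constructor, one shared INSTANCE, and a namespaced NAME string under which the transport layer registers it. A self-contained sketch of that convention; the registry and types here are simplified stand-ins, not the OpenSearch classes:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    class ActionType<R> {
        final String name;
        ActionType(String name) { this.name = name; }
    }

    final class HotToWarmTiering extends ActionType<String> {
        static final HotToWarmTiering INSTANCE = new HotToWarmTiering();
        static final String NAME = "indices:admin/tier/hot_to_warm";
        private HotToWarmTiering() { super(NAME); }   // private: only INSTANCE exists
    }

    public class ActionRegistry {
        private final Map<String, ActionType<?>> actions = new ConcurrentHashMap<>();

        void register(ActionType<?> action) {
            if (actions.putIfAbsent(action.name, action) != null) {
                throw new IllegalStateException("action already registered: " + action.name);
            }
        }

        public static void main(String[] args) {
            ActionRegistry registry = new ActionRegistry();
            registry.register(HotToWarmTiering.INSTANCE);
            System.out.println("registered " + HotToWarmTiering.NAME);
        }
    }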
+ */ + +package org.opensearch.action.admin.indices.tiering; + +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +/** + * Response object for an {@link TieringIndexRequest} which is sent to client after the initial verification of the request + * by the backend service. The format of the response object will be as below: + * + * { + * "acknowledged": true/false, + * "failed_indices": [ + * { + * "index": "index1", + * "error": "Low disk threshold watermark breached" + * }, + * { + * "index": "index2", + * "error": "Index is not a remote store backed index" + * } + * ] + * } + * + * @opensearch.experimental + */ +@ExperimentalApi +public class HotToWarmTieringResponse extends AcknowledgedResponse { + + private final List failedIndices; + + public HotToWarmTieringResponse(boolean acknowledged) { + super(acknowledged); + this.failedIndices = Collections.emptyList(); + } + + public HotToWarmTieringResponse(boolean acknowledged, List indicesResults) { + super(acknowledged); + this.failedIndices = (indicesResults == null) + ? Collections.emptyList() + : indicesResults.stream().sorted(Comparator.comparing(IndexResult::getIndex)).collect(Collectors.toList()); + } + + public HotToWarmTieringResponse(StreamInput in) throws IOException { + super(in); + failedIndices = Collections.unmodifiableList(in.readList(IndexResult::new)); + } + + public List getFailedIndices() { + return this.failedIndices; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeList(this.failedIndices); + } + + @Override + protected void addCustomFields(XContentBuilder builder, Params params) throws IOException { + super.addCustomFields(builder, params); + builder.startArray("failed_indices"); + + for (IndexResult failedIndex : failedIndices) { + failedIndex.toXContent(builder, params); + } + builder.endArray(); + } + + @Override + public String toString() { + return Strings.toString(MediaTypeRegistry.JSON, this); + } + + /** + * Inner class to represent the result of a failed index for tiering. 
+ * @opensearch.experimental + */ + @ExperimentalApi + public static class IndexResult implements Writeable, ToXContentFragment { + private final String index; + private final String failureReason; + + public IndexResult(String index, String failureReason) { + this.index = index; + this.failureReason = failureReason; + } + + IndexResult(StreamInput in) throws IOException { + this.index = in.readString(); + this.failureReason = in.readString(); + } + + public String getIndex() { + return index; + } + + public String getFailureReason() { + return failureReason; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(index); + out.writeString(failureReason); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("index", index); + builder.field("error", failureReason); + return builder.endObject(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + IndexResult that = (IndexResult) o; + return Objects.equals(index, that.index) && Objects.equals(failureReason, that.failureReason); + } + + @Override + public int hashCode() { + int result = Objects.hashCode(index); + result = 31 * result + Objects.hashCode(failureReason); + return result; + } + + @Override + public String toString() { + return Strings.toString(MediaTypeRegistry.JSON, this); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/tiering/RestWarmTieringAction.java b/server/src/main/java/org/opensearch/action/admin/indices/tiering/RestWarmTieringAction.java new file mode 100644 index 0000000000000..6f2eceafa9e77 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/tiering/RestWarmTieringAction.java @@ -0,0 +1,61 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
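The response above reports partial failure as an acknowledged flag plus a failed_indices array sorted by index name. A small sketch that reproduces the JSON layout documented in the class Javadoc from a list of per-index failures (string concatenation is used purely for illustration; the real class renders through XContentBuilder):

    import java.util.*;
    import java.util.stream.Collectors;

    record IndexFailure(String index, String error) {}

    public class TieringResponseSketch {
        static String render(boolean acknowledged, List<IndexFailure> failures) {
            String failed = failures.stream()
                .sorted(Comparator.comparing(IndexFailure::index))   // deterministic order
                .map(f -> String.format("{\"index\":\"%s\",\"error\":\"%s\"}", f.index(), f.error()))
                .collect(Collectors.joining(","));
            return "{\"acknowledged\":" + acknowledged + ",\"failed_indices\":[" + failed + "]}";
        }

        public static void main(String[] args) {
            System.out.println(render(true, List.of(
                new IndexFailure("index2", "Index is not a remote store backed index"),
                new IndexFailure("index1", "Low disk threshold watermark breached")
            )));
        }
    }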
+ */ + +package org.opensearch.action.admin.indices.tiering; + +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; + +import java.util.List; + +import static java.util.Collections.singletonList; +import static org.opensearch.core.common.Strings.splitStringByCommaToArray; +import static org.opensearch.rest.RestRequest.Method.POST; + +/** + * Rest Tiering API action to move indices to warm tier + * + * @opensearch.experimental + */ +@ExperimentalApi +public class RestWarmTieringAction extends BaseRestHandler { + + private static final String TARGET_TIER = "warm"; + + @Override + public List routes() { + return singletonList(new RestHandler.Route(POST, "/{index}/_tier/" + TARGET_TIER)); + } + + @Override + public String getName() { + return "warm_tiering_action"; + } + + @Override + protected BaseRestHandler.RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { + final TieringIndexRequest tieringIndexRequest = new TieringIndexRequest( + TARGET_TIER, + splitStringByCommaToArray(request.param("index")) + ); + tieringIndexRequest.timeout(request.paramAsTime("timeout", tieringIndexRequest.timeout())); + tieringIndexRequest.clusterManagerNodeTimeout( + request.paramAsTime("cluster_manager_timeout", tieringIndexRequest.clusterManagerNodeTimeout()) + ); + tieringIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, tieringIndexRequest.indicesOptions())); + tieringIndexRequest.waitForCompletion(request.paramAsBoolean("wait_for_completion", tieringIndexRequest.waitForCompletion())); + return channel -> client.admin() + .cluster() + .execute(HotToWarmTieringAction.INSTANCE, tieringIndexRequest, new RestToXContentListener<>(channel)); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringIndexRequest.java new file mode 100644 index 0000000000000..ed458a47ddb7d --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringIndexRequest.java @@ -0,0 +1,195 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
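prepareRequest above pulls the index expressions and the optional wait_for_completion and timeout parameters off the REST request before building the transport request. A stand-alone sketch of that parameter handling; the Params type is a toy stand-in for RestRequest, and the example URL is hypothetical:

    import java.util.Map;

    public class WarmTieringRoute {
        record Params(Map<String, String> values) {
            String param(String key, String fallback) { return values.getOrDefault(key, fallback); }
        }

        public static void main(String[] args) {
            // e.g. POST /logs-2024,metrics-*/_tier/warm?wait_for_completion=true
            Params request = new Params(Map.of(
                "index", "logs-2024,metrics-*",
                "wait_for_completion", "true"
            ));
            String[] indices = request.param("index", "").split(",");   // comma-split, like splitStringByCommaToArray
            boolean waitForCompletion = Boolean.parseBoolean(request.param("wait_for_completion", "false"));
            System.out.println(indices.length + " index expressions, wait=" + waitForCompletion);
        }
    }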
+ */ + +package org.opensearch.action.admin.indices.tiering; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.IndicesRequest; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Locale; +import java.util.Objects; + +import static org.opensearch.action.ValidateActions.addValidationError; + +/** + * Represents the tiering request for indices to move to a different tier + * + * @opensearch.experimental + */ +@ExperimentalApi +public class TieringIndexRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable { + + private String[] indices; + private final Tier targetTier; + private IndicesOptions indicesOptions; + private boolean waitForCompletion; + + public TieringIndexRequest(String targetTier, String... indices) { + this.targetTier = Tier.fromString(targetTier); + this.indices = indices; + this.indicesOptions = IndicesOptions.fromOptions(false, false, true, false); + this.waitForCompletion = false; + } + + public TieringIndexRequest(StreamInput in) throws IOException { + super(in); + indices = in.readStringArray(); + targetTier = Tier.fromString(in.readString()); + indicesOptions = IndicesOptions.readIndicesOptions(in); + waitForCompletion = in.readBoolean(); + } + + // pkg private for testing + TieringIndexRequest(Tier targetTier, IndicesOptions indicesOptions, boolean waitForCompletion, String... indices) { + this.indices = indices; + this.targetTier = targetTier; + this.indicesOptions = indicesOptions; + this.waitForCompletion = waitForCompletion; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (indices == null) { + validationException = addValidationError("Mandatory parameter - indices is missing from the request", validationException); + } else { + for (String index : indices) { + if (index == null || index.length() == 0) { + validationException = addValidationError( + String.format(Locale.ROOT, "Specified index in the request [%s] is null or empty", index), + validationException + ); + } + } + } + if (!Tier.WARM.equals(targetTier)) { + validationException = addValidationError("The specified tier is not supported", validationException); + } + return validationException; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(indices); + out.writeString(targetTier.value()); + indicesOptions.writeIndicesOptions(out); + out.writeBoolean(waitForCompletion); + } + + @Override + public String[] indices() { + return indices; + } + + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + @Override + public boolean includeDataStreams() { + return true; + } + + @Override + public TieringIndexRequest indices(String... indices) { + this.indices = indices; + return this; + } + + public TieringIndexRequest indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = indicesOptions; + return this; + } + + /** + * If this parameter is set to true the operation will wait for completion of tiering process before returning. 
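validate() above accumulates every problem into one validation exception instead of failing on the first, so a caller sees all errors in a single round trip. A plain-Java sketch of the same accumulate-then-report style, with a string list standing in for ActionRequestValidationException:

    import java.util.ArrayList;
    import java.util.List;

    public class ValidationSketch {
        static List<String> validate(String[] indices, String targetTier) {
            List<String> errors = new ArrayList<>();
            if (indices == null) {
                errors.add("Mandatory parameter - indices is missing from the request");
            } else {
                for (String index : indices) {
                    if (index == null || index.isEmpty()) {
                        errors.add("Specified index in the request [" + index + "] is null or empty");
                    }
                }
            }
            if (!"warm".equalsIgnoreCase(targetTier)) {
                errors.add("The specified tier is not supported");
            }
            return errors;   // empty list means the request is valid
        }

        public static void main(String[] args) {
            System.out.println(validate(new String[] { "logs", "" }, "cold"));
        }
    }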
+ * + * @param waitForCompletion if true the operation will wait for completion + * @return this request + */ + public TieringIndexRequest waitForCompletion(boolean waitForCompletion) { + this.waitForCompletion = waitForCompletion; + return this; + } + + /** + * Returns wait for completion setting + * + * @return true if the operation will wait for completion + */ + public boolean waitForCompletion() { + return waitForCompletion; + } + + public Tier tier() { + return targetTier; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + TieringIndexRequest that = (TieringIndexRequest) o; + return clusterManagerNodeTimeout.equals(that.clusterManagerNodeTimeout) + && timeout.equals(that.timeout) + && Objects.equals(indicesOptions, that.indicesOptions) + && Arrays.equals(indices, that.indices) + && targetTier.equals(that.targetTier) + && waitForCompletion == that.waitForCompletion; + } + + @Override + public int hashCode() { + return Objects.hash(clusterManagerNodeTimeout, timeout, indicesOptions, waitForCompletion, Arrays.hashCode(indices)); + } + + /** + * Represents the supported tiers for an index + * + * @opensearch.experimental + */ + @ExperimentalApi + public enum Tier { + HOT, + WARM; + + public static Tier fromString(String name) { + if (name == null) { + throw new IllegalArgumentException("Tiering type cannot be null"); + } + String upperCase = name.trim().toUpperCase(Locale.ROOT); + switch (upperCase) { + case "HOT": + return HOT; + case "WARM": + return WARM; + default: + throw new IllegalArgumentException( + "Tiering type [" + name + "] is not supported. Supported types are " + HOT + " and " + WARM + ); + } + } + + public String value() { + return name().toLowerCase(Locale.ROOT); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringValidationResult.java b/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringValidationResult.java new file mode 100644 index 0000000000000..ccd60daf027ce --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringValidationResult.java @@ -0,0 +1,83 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
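New wire fields such as waitForCompletion are usually exercised with a write-then-read round trip. A self-contained sketch of that test shape, with JDK data streams standing in for StreamInput/StreamOutput:

    import java.io.*;

    public class RoundTrip {
        record Req(String[] indices, boolean waitForCompletion) {
            void writeTo(DataOutputStream out) throws IOException {
                out.writeInt(indices.length);
                for (String s : indices) out.writeUTF(s);
                out.writeBoolean(waitForCompletion);
            }
            static Req readFrom(DataInputStream in) throws IOException {
                String[] idx = new String[in.readInt()];
                for (int i = 0; i < idx.length; i++) idx[i] = in.readUTF();
                return new Req(idx, in.readBoolean());
            }
        }

        public static void main(String[] args) throws IOException {
            Req original = new Req(new String[] { "logs" }, true);
            ByteArrayOutputStream buffer = new ByteArrayOutputStream();
            original.writeTo(new DataOutputStream(buffer));
            Req copy = Req.readFrom(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
            System.out.println(copy.waitForCompletion() == original.waitForCompletion());  // true
        }
    }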
+ */
+
+package org.opensearch.action.admin.indices.tiering;
+
+import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.core.index.Index;
+
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * Validation result for tiering
+ *
+ * @opensearch.experimental
+ */
+
+@ExperimentalApi
+public class TieringValidationResult {
+    private final Set<Index> acceptedIndices;
+    private final Map<Index, String> rejectedIndices;
+
+    public TieringValidationResult(Set<Index> concreteIndices) {
+        // by default all the indices are added to the accepted set
+        this.acceptedIndices = ConcurrentHashMap.newKeySet();
+        acceptedIndices.addAll(concreteIndices);
+        this.rejectedIndices = new HashMap<>();
+    }
+
+    public Set<Index> getAcceptedIndices() {
+        return acceptedIndices;
+    }
+
+    public Map<Index, String> getRejectedIndices() {
+        return rejectedIndices;
+    }
+
+    public void addToRejected(Index index, String reason) {
+        acceptedIndices.remove(index);
+        rejectedIndices.put(index, reason);
+    }
+
+    public HotToWarmTieringResponse constructResponse() {
+        final List<HotToWarmTieringResponse.IndexResult> indicesResult = new LinkedList<>();
+        for (Map.Entry<Index, String> rejectedIndex : rejectedIndices.entrySet()) {
+            indicesResult.add(new HotToWarmTieringResponse.IndexResult(rejectedIndex.getKey().getName(), rejectedIndex.getValue()));
+        }
+        return new HotToWarmTieringResponse(acceptedIndices.size() > 0, indicesResult);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+
+        TieringValidationResult that = (TieringValidationResult) o;
+
+        if (!Objects.equals(acceptedIndices, that.acceptedIndices)) return false;
+        return Objects.equals(rejectedIndices, that.rejectedIndices);
+    }
+
+    @Override
+    public int hashCode() {
+        int result = acceptedIndices != null ? acceptedIndices.hashCode() : 0;
+        result = 31 * result + (rejectedIndices != null ? rejectedIndices.hashCode() : 0);
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "TieringValidationResult{" + "acceptedIndices=" + acceptedIndices + ", rejectedIndices=" + rejectedIndices + '}';
+    }
+}
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/tiering/TransportHotToWarmTieringAction.java b/server/src/main/java/org/opensearch/action/admin/indices/tiering/TransportHotToWarmTieringAction.java
new file mode 100644
index 0000000000000..8d1ab0bb37cdd
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/admin/indices/tiering/TransportHotToWarmTieringAction.java
@@ -0,0 +1,110 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
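Before the transport action continues: TieringValidationResult starts with every index accepted and demotes indices into the rejected map with a reason as checks fail; the eventual response is acknowledged as long as any index remains accepted. A usage sketch of that bookkeeping, with plain strings standing in for Index:

    import java.util.*;
    import java.util.concurrent.ConcurrentHashMap;

    public class ValidationResultSketch {
        public static void main(String[] args) {
            Set<String> accepted = ConcurrentHashMap.newKeySet();
            accepted.addAll(List.of("logs", "metrics", "traces"));
            Map<String, String> rejected = new HashMap<>();

            // A failed check demotes the index from accepted to rejected.
            accepted.remove("metrics");
            rejected.put("metrics", "Low disk threshold watermark breached");

            boolean acknowledged = !accepted.isEmpty();   // mirrors constructResponse()
            System.out.println(acknowledged + " " + rejected);
        }
    }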
+ */ + +package org.opensearch.action.admin.indices.tiering; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.ClusterInfoService; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.routing.allocation.DiskThresholdSettings; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.index.Index; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.Set; + +import static org.opensearch.indices.tiering.TieringRequestValidator.validateHotToWarm; + +/** + * Transport Tiering action to move indices from hot to warm + * + * @opensearch.experimental + */ +@ExperimentalApi +public class TransportHotToWarmTieringAction extends TransportClusterManagerNodeAction { + + private static final Logger logger = LogManager.getLogger(TransportHotToWarmTieringAction.class); + private final ClusterInfoService clusterInfoService; + private final DiskThresholdSettings diskThresholdSettings; + + @Inject + public TransportHotToWarmTieringAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + ClusterInfoService clusterInfoService, + Settings settings + ) { + super( + HotToWarmTieringAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + TieringIndexRequest::new, + indexNameExpressionResolver + ); + this.clusterInfoService = clusterInfoService; + this.diskThresholdSettings = new DiskThresholdSettings(settings, clusterService.getClusterSettings()); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected HotToWarmTieringResponse read(StreamInput in) throws IOException { + return new HotToWarmTieringResponse(in); + } + + @Override + protected ClusterBlockException checkBlock(TieringIndexRequest request, ClusterState state) { + return state.blocks() + .indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndexNames(state, request)); + } + + @Override + protected void clusterManagerOperation( + TieringIndexRequest request, + ClusterState state, + ActionListener listener + ) throws Exception { + Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); + if (concreteIndices == null || concreteIndices.length == 0) { + listener.onResponse(new HotToWarmTieringResponse(true)); + return; + } + final TieringValidationResult tieringValidationResult = validateHotToWarm( + state, + Set.of(concreteIndices), + clusterInfoService.getClusterInfo(), + diskThresholdSettings + ); + + if (tieringValidationResult.getAcceptedIndices().isEmpty()) { + listener.onResponse(tieringValidationResult.constructResponse()); + return; + } + } +} diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/tiering/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/tiering/package-info.java new file mode 100644 index 0000000000000..878e3575a3934 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/tiering/package-info.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/** + * Actions that OpenSearch can take to tier the indices + */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.action.admin.indices.tiering; diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java b/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java index 7614206cd226f..e686585095962 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java @@ -96,7 +96,7 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques private String globalRouting; private String globalIndex; private Boolean globalRequireAlias; - private int batchSize = 1; + private int batchSize = Integer.MAX_VALUE; private long sizeInBytes = 0; diff --git a/server/src/main/java/org/opensearch/action/ingest/GetPipelineTransportAction.java b/server/src/main/java/org/opensearch/action/ingest/GetPipelineTransportAction.java index 80333c7346f92..7bc0380bccbc0 100644 --- a/server/src/main/java/org/opensearch/action/ingest/GetPipelineTransportAction.java +++ b/server/src/main/java/org/opensearch/action/ingest/GetPipelineTransportAction.java @@ -70,7 +70,8 @@ public GetPipelineTransportAction( threadPool, actionFilters, GetPipelineRequest::new, - indexNameExpressionResolver + indexNameExpressionResolver, + true ); } diff --git a/server/src/main/java/org/opensearch/action/search/GetSearchPipelineTransportAction.java b/server/src/main/java/org/opensearch/action/search/GetSearchPipelineTransportAction.java index a7fcb8f1cfbae..215b7ae1a610c 100644 --- a/server/src/main/java/org/opensearch/action/search/GetSearchPipelineTransportAction.java +++ b/server/src/main/java/org/opensearch/action/search/GetSearchPipelineTransportAction.java @@ -48,7 +48,8 @@ public GetSearchPipelineTransportAction( threadPool, actionFilters, GetSearchPipelineRequest::new, - indexNameExpressionResolver + indexNameExpressionResolver, + true ); } diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryAggregationCategorizer.java 
b/server/src/main/java/org/opensearch/action/search/SearchQueryAggregationCategorizer.java deleted file mode 100644 index 607ccf182851b..0000000000000 --- a/server/src/main/java/org/opensearch/action/search/SearchQueryAggregationCategorizer.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.action.search; - -import org.opensearch.search.aggregations.AggregationBuilder; -import org.opensearch.search.aggregations.PipelineAggregationBuilder; -import org.opensearch.telemetry.metrics.tags.Tags; - -import java.util.Collection; - -/** - * Increments the counters related to Aggregation Search Queries. - */ -public class SearchQueryAggregationCategorizer { - - private static final String TYPE_TAG = "type"; - private final SearchQueryCounters searchQueryCounters; - - public SearchQueryAggregationCategorizer(SearchQueryCounters searchQueryCounters) { - this.searchQueryCounters = searchQueryCounters; - } - - public void incrementSearchQueryAggregationCounters(Collection aggregatorFactories) { - for (AggregationBuilder aggregationBuilder : aggregatorFactories) { - incrementCountersRecursively(aggregationBuilder); - } - } - - private void incrementCountersRecursively(AggregationBuilder aggregationBuilder) { - // Increment counters for the current aggregation - String aggregationType = aggregationBuilder.getType(); - searchQueryCounters.aggCounter.add(1, Tags.create().addTag(TYPE_TAG, aggregationType)); - - // Recursively process sub-aggregations if any - Collection subAggregations = aggregationBuilder.getSubAggregations(); - if (subAggregations != null && !subAggregations.isEmpty()) { - for (AggregationBuilder subAggregation : subAggregations) { - incrementCountersRecursively(subAggregation); - } - } - - // Process pipeline aggregations - Collection pipelineAggregations = aggregationBuilder.getPipelineAggregations(); - for (PipelineAggregationBuilder pipelineAggregation : pipelineAggregations) { - String pipelineAggregationType = pipelineAggregation.getType(); - searchQueryCounters.aggCounter.add(1, Tags.create().addTag(TYPE_TAG, pipelineAggregationType)); - } - } -} diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java deleted file mode 100644 index ffaae5b08772f..0000000000000 --- a/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.action.search; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.index.query.QueryBuilder; -import org.opensearch.index.query.QueryBuilderVisitor; -import org.opensearch.index.query.QueryShapeVisitor; -import org.opensearch.search.aggregations.AggregatorFactories; -import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.search.sort.SortBuilder; -import org.opensearch.telemetry.metrics.MetricsRegistry; -import org.opensearch.telemetry.metrics.tags.Tags; - -import java.util.List; -import java.util.ListIterator; - -/** - * Class to categorize the search queries based on the type and increment the relevant counters. - * Class also logs the query shape. - */ -final class SearchQueryCategorizer { - - private static final Logger log = LogManager.getLogger(SearchQueryCategorizer.class); - - final SearchQueryCounters searchQueryCounters; - - final SearchQueryAggregationCategorizer searchQueryAggregationCategorizer; - - public SearchQueryCategorizer(MetricsRegistry metricsRegistry) { - searchQueryCounters = new SearchQueryCounters(metricsRegistry); - searchQueryAggregationCategorizer = new SearchQueryAggregationCategorizer(searchQueryCounters); - } - - public void categorize(SearchSourceBuilder source) { - QueryBuilder topLevelQueryBuilder = source.query(); - logQueryShape(topLevelQueryBuilder); - incrementQueryTypeCounters(topLevelQueryBuilder); - incrementQueryAggregationCounters(source.aggregations()); - incrementQuerySortCounters(source.sorts()); - } - - private void incrementQuerySortCounters(List> sorts) { - if (sorts != null && sorts.size() > 0) { - for (ListIterator> it = sorts.listIterator(); it.hasNext();) { - SortBuilder sortBuilder = it.next(); - String sortOrder = sortBuilder.order().toString(); - searchQueryCounters.sortCounter.add(1, Tags.create().addTag("sort_order", sortOrder)); - } - } - } - - private void incrementQueryAggregationCounters(AggregatorFactories.Builder aggregations) { - if (aggregations == null) { - return; - } - - searchQueryAggregationCategorizer.incrementSearchQueryAggregationCounters(aggregations.getAggregatorFactories()); - } - - private void incrementQueryTypeCounters(QueryBuilder topLevelQueryBuilder) { - if (topLevelQueryBuilder == null) { - return; - } - QueryBuilderVisitor searchQueryVisitor = new SearchQueryCategorizingVisitor(searchQueryCounters); - topLevelQueryBuilder.visit(searchQueryVisitor); - } - - private void logQueryShape(QueryBuilder topLevelQueryBuilder) { - if (topLevelQueryBuilder == null) { - return; - } - QueryShapeVisitor shapeVisitor = new QueryShapeVisitor(); - topLevelQueryBuilder.visit(shapeVisitor); - log.trace("Query shape : {}", shapeVisitor.prettyPrintTree(" ")); - } - -} diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizingVisitor.java b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizingVisitor.java deleted file mode 100644 index 31f83dbef9dc9..0000000000000 --- a/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizingVisitor.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.action.search; - -import org.apache.lucene.search.BooleanClause; -import org.opensearch.index.query.QueryBuilder; -import org.opensearch.index.query.QueryBuilderVisitor; - -/** - * Class to visit the query builder tree and also track the level information. - * Increments the counters related to Search Query type. - */ -final class SearchQueryCategorizingVisitor implements QueryBuilderVisitor { - private final int level; - private final SearchQueryCounters searchQueryCounters; - - public SearchQueryCategorizingVisitor(SearchQueryCounters searchQueryCounters) { - this(searchQueryCounters, 0); - } - - private SearchQueryCategorizingVisitor(SearchQueryCounters counters, int level) { - this.searchQueryCounters = counters; - this.level = level; - } - - public void accept(QueryBuilder qb) { - searchQueryCounters.incrementCounter(qb, level); - } - - public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) { - return new SearchQueryCategorizingVisitor(searchQueryCounters, level + 1); - } -} diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java b/server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java deleted file mode 100644 index a8a7e352b89dc..0000000000000 --- a/server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.action.search; - -import org.opensearch.index.query.QueryBuilder; -import org.opensearch.telemetry.metrics.Counter; -import org.opensearch.telemetry.metrics.MetricsRegistry; -import org.opensearch.telemetry.metrics.tags.Tags; - -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -/** - * Class contains all the Counters related to search query types. 
- */ -final class SearchQueryCounters { - private static final String LEVEL_TAG = "level"; - private static final String UNIT = "1"; - private final MetricsRegistry metricsRegistry; - public final Counter aggCounter; - public final Counter otherQueryCounter; - public final Counter sortCounter; - private final Map, Counter> queryHandlers; - public final ConcurrentHashMap nameToQueryTypeCounters; - - public SearchQueryCounters(MetricsRegistry metricsRegistry) { - this.metricsRegistry = metricsRegistry; - this.nameToQueryTypeCounters = new ConcurrentHashMap<>(); - this.aggCounter = metricsRegistry.createCounter( - "search.query.type.agg.count", - "Counter for the number of top level agg search queries", - UNIT - ); - this.otherQueryCounter = metricsRegistry.createCounter( - "search.query.type.other.count", - "Counter for the number of top level and nested search queries that do not match any other categories", - UNIT - ); - this.sortCounter = metricsRegistry.createCounter( - "search.query.type.sort.count", - "Counter for the number of top level sort search queries", - UNIT - ); - this.queryHandlers = new HashMap<>(); - - } - - public void incrementCounter(QueryBuilder queryBuilder, int level) { - String uniqueQueryCounterName = queryBuilder.getName(); - - Counter counter = nameToQueryTypeCounters.computeIfAbsent(uniqueQueryCounterName, k -> createQueryCounter(k)); - counter.add(1, Tags.create().addTag(LEVEL_TAG, level)); - } - - private Counter createQueryCounter(String counterName) { - Counter counter = metricsRegistry.createCounter( - "search.query.type." + counterName + ".count", - "Counter for the number of top level and nested " + counterName + " search queries", - UNIT - ); - return counter; - } -} diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java index 97ef94055faf7..d1d5f568fc09d 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java @@ -27,6 +27,7 @@ @PublicApi(since = "2.11.0") public final class SearchRequestStats extends SearchRequestOperationsListener { Map phaseStatsMap = new EnumMap<>(SearchPhaseName.class); + StatsHolder tookStatsHolder; public static final String SEARCH_REQUEST_STATS_ENABLED_KEY = "search.request_stats_enabled"; public static final Setting SEARCH_REQUEST_STATS_ENABLED = Setting.boolSetting( @@ -40,6 +41,7 @@ public final class SearchRequestStats extends SearchRequestOperationsListener { public SearchRequestStats(ClusterSettings clusterSettings) { this.setEnabled(clusterSettings.get(SEARCH_REQUEST_STATS_ENABLED)); clusterSettings.addSettingsUpdateConsumer(SEARCH_REQUEST_STATS_ENABLED, this::setEnabled); + tookStatsHolder = new StatsHolder(); for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { phaseStatsMap.put(searchPhaseName, new StatsHolder()); } @@ -57,6 +59,18 @@ public long getPhaseMetric(SearchPhaseName searchPhaseName) { return phaseStatsMap.get(searchPhaseName).timing.sum(); } + public long getTookCurrent() { + return tookStatsHolder.current.count(); + } + + public long getTookTotal() { + return tookStatsHolder.total.count(); + } + + public long getTookMetric() { + return tookStatsHolder.timing.sum(); + } + @Override protected void onPhaseStart(SearchPhaseContext context) { phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.inc(); @@ -75,6 +89,23 @@ protected void 
onPhaseFailure(SearchPhaseContext context, Throwable cause) { phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec(); } + @Override + protected void onRequestStart(SearchRequestContext searchRequestContext) { + tookStatsHolder.current.inc(); + } + + @Override + protected void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + tookStatsHolder.current.dec(); + tookStatsHolder.total.inc(); + tookStatsHolder.timing.inc(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - searchRequestContext.getAbsoluteStartNanos())); + } + + @Override + protected void onRequestFailure(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + tookStatsHolder.current.dec(); + } + /** * Holder of statistics values * diff --git a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java index dfecf4f462c4d..ed2943db94420 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java @@ -37,8 +37,8 @@ import org.opensearch.core.tasks.TaskId; import org.opensearch.search.fetch.ShardFetchSearchRequest; import org.opensearch.search.internal.ShardSearchRequest; -import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.SearchBackpressureTask; +import org.opensearch.wlm.QueryGroupTask; import java.util.Map; import java.util.function.Supplier; @@ -50,7 +50,7 @@ * @opensearch.api */ @PublicApi(since = "1.0.0") -public class SearchShardTask extends CancellableTask implements SearchBackpressureTask { +public class SearchShardTask extends QueryGroupTask implements SearchBackpressureTask { // generating metadata in a lazy way since source can be quite big private final MemoizedSupplier metadataSupplier; diff --git a/server/src/main/java/org/opensearch/action/search/SearchTask.java b/server/src/main/java/org/opensearch/action/search/SearchTask.java index d3c1043c50cce..2a1a961e7607b 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTask.java @@ -35,8 +35,8 @@ import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.tasks.TaskId; -import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.SearchBackpressureTask; +import org.opensearch.wlm.QueryGroupTask; import java.util.Map; import java.util.function.Supplier; @@ -49,7 +49,7 @@ * @opensearch.api */ @PublicApi(since = "1.0.0") -public class SearchTask extends CancellableTask implements SearchBackpressureTask { +public class SearchTask extends QueryGroupTask implements SearchBackpressureTask { // generating description in a lazy way since source can be quite big private final Supplier descriptionSupplier; private SearchProgressListener progressListener = SearchProgressListener.NOOP; diff --git a/server/src/main/java/org/opensearch/action/search/SearchTaskRequestOperationsListener.java b/server/src/main/java/org/opensearch/action/search/SearchTaskRequestOperationsListener.java new file mode 100644 index 0000000000000..4434d71793b23 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchTaskRequestOperationsListener.java @@ -0,0 +1,30 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible 
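The new took-time tracking above keeps three numbers per request stream: an in-flight gauge, a completed total, and summed wall-clock time; failures only decrement the gauge so they never inflate the took metric. A self-contained sketch of that bookkeeping:

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicLong;
    import java.util.concurrent.atomic.LongAdder;

    public class TookStats {
        final AtomicLong current = new AtomicLong();   // requests in flight
        final LongAdder total = new LongAdder();       // completed requests
        final LongAdder tookMillis = new LongAdder();  // summed wall-clock time

        void onRequestStart() { current.incrementAndGet(); }

        void onRequestEnd(long absoluteStartNanos) {
            current.decrementAndGet();
            total.increment();
            tookMillis.add(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - absoluteStartNanos));
        }

        void onRequestFailure() { current.decrementAndGet(); }   // failures do not count toward took

        public static void main(String[] args) throws InterruptedException {
            TookStats stats = new TookStats();
            long start = System.nanoTime();
            stats.onRequestStart();
            Thread.sleep(5);
            stats.onRequestEnd(start);
            System.out.println(stats.total.sum() + " request(s), ~" + stats.tookMillis.sum() + " ms");
        }
    }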
open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.tasks.TaskResourceTrackingService; + +/** + * SearchTaskRequestOperationsListener subscriber for operations on search tasks resource usages. + * Listener ensures to refreshResourceStats on request end capturing the search task resource usage + * upon request completion. + * + */ +public final class SearchTaskRequestOperationsListener extends SearchRequestOperationsListener { + private final TaskResourceTrackingService taskResourceTrackingService; + + public SearchTaskRequestOperationsListener(TaskResourceTrackingService taskResourceTrackingService) { + this.taskResourceTrackingService = taskResourceTrackingService; + } + + @Override + public void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + taskResourceTrackingService.refreshResourceStats(context.getTask()); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index 6e380775355a2..88bf7ebea8e52 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -101,6 +101,7 @@ import org.opensearch.transport.RemoteTransportException; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportService; +import org.opensearch.wlm.QueryGroupTask; import java.util.ArrayList; import java.util.Arrays; @@ -143,13 +144,6 @@ public class TransportSearchAction extends HandledTransportAction SEARCH_QUERY_METRICS_ENABLED_SETTING = Setting.boolSetting( - "search.query.metrics.enabled", - false, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ); - // cluster level setting for timeout based search cancellation. 
If search request level parameter is present then that will take // precedence over the cluster setting value public static final String SEARCH_CANCEL_AFTER_TIME_INTERVAL_SETTING_KEY = "search.cancel_after_time_interval"; @@ -182,11 +176,8 @@ public class TransportSearchAction extends HandledTransportAction buildPerIndexAliasFilter( SearchRequest request, ClusterState clusterState, @@ -462,6 +443,12 @@ private void executeRequest( ); searchRequestContext.getSearchRequestOperationsListener().onRequestStart(searchRequestContext); + // At this point either the QUERY_GROUP_ID header will be present in ThreadContext either via ActionFilter + // or HTTP header (HTTP header will be deprecated once ActionFilter is implemented) + if (task instanceof QueryGroupTask) { + ((QueryGroupTask) task).setQueryGroupId(threadPool.getThreadContext()); + } + PipelinedRequest searchRequest; ActionListener listener; try { @@ -473,13 +460,6 @@ private void executeRequest( } ActionListener requestTransformListener = ActionListener.wrap(sr -> { - if (searchQueryMetricsEnabled) { - try { - searchQueryCategorizer.categorize(sr.source()); - } catch (Exception e) { - logger.error("Error while trying to categorize the query.", e); - } - } ActionListener rewriteListener = buildRewriteListener( sr, diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java index 080b0d607e991..4e869f29878cd 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java @@ -267,24 +267,7 @@ protected void doStart(ClusterState clusterState) { final DiscoveryNodes nodes = clusterState.nodes(); if (nodes.isLocalNodeElectedClusterManager() || localExecute(request)) { // check for block, if blocked, retry, else, execute locally - final ClusterBlockException blockException = checkBlock(request, clusterState); - if (blockException != null) { - if (!blockException.retryable()) { - listener.onFailure(blockException); - } else { - logger.debug("can't execute due to a cluster block, retrying", blockException); - retry(clusterState, blockException, newState -> { - try { - ClusterBlockException newException = checkBlock(request, newState); - return (newException == null || !newException.retryable()); - } catch (Exception e) { - // accept state as block will be rechecked by doStart() and listener.onFailure() then called - logger.trace("exception occurred during cluster block checking, accepting state", e); - return true; - } - }); - } - } else { + if (!checkForBlock(request, clusterState)) { threadPool.executor(executor) .execute( ActionRunnable.wrap( @@ -422,12 +405,43 @@ public GetTermVersionResponse read(StreamInput in) throws IOException { }; } + private boolean checkForBlock(Request request, ClusterState localClusterState) { + final ClusterBlockException blockException = checkBlock(request, localClusterState); + if (blockException != null) { + if (!blockException.retryable()) { + listener.onFailure(blockException); + } else { + logger.debug("can't execute due to a cluster block, retrying", blockException); + retry(localClusterState, blockException, newState -> { + try { + ClusterBlockException newException = checkBlock(request, newState); + return (newException == null || !newException.retryable()); + } catch (Exception e) 
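The executeRequest change above stamps the query group id from the thread context onto any QueryGroupTask before the search runs. A sketch of that hand-off, with a ThreadLocal standing in for OpenSearch's ThreadContext; the default-group fallback shown here is an assumption for illustration:

    public class QueryGroupHandOff {
        static final ThreadLocal<String> QUERY_GROUP_ID = new ThreadLocal<>();

        static class QueryGroupTask {
            private String queryGroupId;

            void setQueryGroupId() {
                // Falls back to a default when no header was set upstream (assumed behavior).
                String id = QUERY_GROUP_ID.get();
                this.queryGroupId = (id != null) ? id : "DEFAULT_QUERY_GROUP";
            }
        }

        public static void main(String[] args) {
            QUERY_GROUP_ID.set("analytics-team");   // set by an ActionFilter or HTTP header
            QueryGroupTask task = new QueryGroupTask();
            task.setQueryGroupId();
            System.out.println("task runs under: " + task.queryGroupId);
        }
    }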
{ + // accept state as block will be rechecked by doStart() and listener.onFailure() then called + logger.trace("exception occurred during cluster block checking, accepting state", e); + return true; + } + }); + } + return true; + } else { + return false; + } + } + private void executeOnLocalNode(ClusterState localClusterState) { - Runnable runTask = ActionRunnable.wrap( - getDelegateForLocalExecute(localClusterState), - l -> clusterManagerOperation(task, request, localClusterState, l) - ); - threadPool.executor(executor).execute(runTask); + try { + // check for block, if blocked, retry, else, execute locally + if (!checkForBlock(request, localClusterState)) { + Runnable runTask = ActionRunnable.wrap( + getDelegateForLocalExecute(localClusterState), + l -> clusterManagerOperation(task, request, localClusterState, l) + ); + threadPool.executor(executor).execute(runTask); + } + } catch (Exception e) { + listener.onFailure(e); + } } private void executeOnClusterManager(DiscoveryNode clusterManagerNode, ClusterState clusterState) { diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeReadAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeReadAction.java index d58487a475bcf..88cb2ed6a9bf0 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeReadAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeReadAction.java @@ -51,6 +51,8 @@ public abstract class TransportClusterManagerNodeReadAction< Request extends ClusterManagerNodeReadRequest, Response extends ActionResponse> extends TransportClusterManagerNodeAction { + protected boolean localExecuteSupported = false; + protected TransportClusterManagerNodeReadAction( String actionName, TransportService transportService, @@ -58,7 +60,8 @@ protected TransportClusterManagerNodeReadAction( ThreadPool threadPool, ActionFilters actionFilters, Writeable.Reader request, - IndexNameExpressionResolver indexNameExpressionResolver + IndexNameExpressionResolver indexNameExpressionResolver, + boolean localExecuteSupported ) { this( actionName, @@ -71,6 +74,19 @@ protected TransportClusterManagerNodeReadAction( request, indexNameExpressionResolver ); + this.localExecuteSupported = localExecuteSupported; + } + + protected TransportClusterManagerNodeReadAction( + String actionName, + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + Writeable.Reader request, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + this(actionName, transportService, clusterService, threadPool, actionFilters, request, indexNameExpressionResolver, false); } protected TransportClusterManagerNodeReadAction( @@ -124,4 +140,9 @@ protected TransportClusterManagerNodeReadAction( protected final boolean localExecute(Request request) { return request.local(); } + + protected boolean localExecuteSupportedByAction() { + return localExecuteSupported; + } + } diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java index 65f00a4731ab5..8a0082ad05f66 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java +++ 
b/server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java @@ -62,6 +62,7 @@ public TransportClusterInfoAction( IndexNameExpressionResolver indexNameExpressionResolver ) { super(actionName, transportService, clusterService, threadPool, actionFilters, request, indexNameExpressionResolver); + this.localExecuteSupported = true; } @Override diff --git a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodesRequest.java b/server/src/main/java/org/opensearch/action/support/nodes/BaseNodesRequest.java index 4d54ce51c923c..a4f6d8afeaf38 100644 --- a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodesRequest.java +++ b/server/src/main/java/org/opensearch/action/support/nodes/BaseNodesRequest.java @@ -65,6 +65,14 @@ public abstract class BaseNodesRequest * will be ignored and this will be used. * */ private DiscoveryNode[] concreteNodes; + + /** + * Since we do not use the discovery nodes coming from the request in all code paths following a request extended off from + * BaseNodesRequest, we do not require it to be sent around across all nodes. + * + * The default behavior is `true`, but it can be explicitly changed in requests that do not require it. + */ + private boolean includeDiscoveryNodes = true; private final TimeValue DEFAULT_TIMEOUT_SECS = TimeValue.timeValueSeconds(30); private TimeValue timeout; @@ -119,6 +127,14 @@ public void setConcreteNodes(DiscoveryNode[] concreteNodes) { this.concreteNodes = concreteNodes; } + public void setIncludeDiscoveryNodes(boolean value) { + includeDiscoveryNodes = value; + } + + public boolean getIncludeDiscoveryNodes() { + return includeDiscoveryNodes; + } + @Override public ActionRequestValidationException validate() { return null; diff --git a/server/src/main/java/org/opensearch/action/support/nodes/TransportNodesAction.java b/server/src/main/java/org/opensearch/action/support/nodes/TransportNodesAction.java index 9a1a28dd70636..3acd12f632e0f 100644 --- a/server/src/main/java/org/opensearch/action/support/nodes/TransportNodesAction.java +++ b/server/src/main/java/org/opensearch/action/support/nodes/TransportNodesAction.java @@ -226,6 +226,7 @@ class AsyncAction { private final NodesRequest request; private final ActionListener listener; private final AtomicReferenceArray responses; + private final DiscoveryNode[] concreteNodes; private final AtomicInteger counter = new AtomicInteger(); private final Task task; @@ -238,10 +239,18 @@ class AsyncAction { assert request.concreteNodes() != null; } this.responses = new AtomicReferenceArray<>(request.concreteNodes().length); + this.concreteNodes = request.concreteNodes(); + + if (request.getIncludeDiscoveryNodes() == false) { + // As we transfer ownership of the discovery nodes used to route the request into the AsyncAction class, we + // remove the list of DiscoveryNodes from the request. This reduces the request payload and avoids keeping + // a redundant copy of the concrete nodes in memory.
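// A minimal caller-side sketch of opting out of carrying the resolved DiscoveryNode list over the
// wire (illustrative only, not part of this hunk; NodesStatsRequest stands in for any concrete
// subclass of BaseNodesRequest):
//
//     NodesStatsRequest nodesRequest = new NodesStatsRequest();
//     nodesRequest.setIncludeDiscoveryNodes(false); // AsyncAction keeps its own copy for routing
//     client.admin().cluster().nodesStats(nodesRequest, ActionListener.wrap(r -> {}, e -> {}));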
+ request.setConcreteNodes(null); + } } void start() { - final DiscoveryNode[] nodes = request.concreteNodes(); + final DiscoveryNode[] nodes = this.concreteNodes; if (nodes.length == 0) { // nothing to notify threadPool.generic().execute(() -> listener.onResponse(newResponse(request, responses))); @@ -260,7 +269,6 @@ void start() { if (task != null) { nodeRequest.setParentTask(clusterService.localNode().getId(), task.getId()); } - transportService.sendRequest( node, getTransportNodeAction(node), diff --git a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java index 9654bd1c114ba..6cb5e049e0f1e 100644 --- a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java +++ b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java @@ -717,7 +717,7 @@ public IndexRequest doc() { private IndexRequest safeDoc() { if (doc == null) { - doc = new IndexRequest(); + doc = new IndexRequest(index); } return doc; } @@ -803,7 +803,7 @@ public IndexRequest upsertRequest() { private IndexRequest safeUpsertRequest() { if (upsertRequest == null) { - upsertRequest = new IndexRequest(); + upsertRequest = new IndexRequest(index); } return upsertRequest; } diff --git a/server/src/main/java/org/opensearch/client/OriginSettingClient.java b/server/src/main/java/org/opensearch/client/OriginSettingClient.java index 1b0e08cc489c4..27d87227df7bc 100644 --- a/server/src/main/java/org/opensearch/client/OriginSettingClient.java +++ b/server/src/main/java/org/opensearch/client/OriginSettingClient.java @@ -36,6 +36,7 @@ import org.opensearch.action.ActionType; import org.opensearch.action.support.ContextPreservingActionListener; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.action.ActionListener; import org.opensearch.core.action.ActionResponse; @@ -65,7 +66,11 @@ protected void ActionListener listener ) { final Supplier supplier = in().threadPool().getThreadContext().newRestorableContext(false); - try (ThreadContext.StoredContext ignore = in().threadPool().getThreadContext().stashWithOrigin(origin)) { + try ( + ThreadContext.StoredContext ignore = ThreadContextAccess.doPrivileged( + () -> in().threadPool().getThreadContext().stashWithOrigin(origin) + ) + ) { super.doExecute(action, request, new ContextPreservingActionListener<>(supplier, listener)); } } diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index 6c6049f04231b..509cd732357d6 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -416,6 +416,7 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.action.ActionListener; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.bytes.BytesReference; @@ -2148,7 +2149,9 @@ protected void ActionListener listener ) { ThreadContext threadContext = threadPool().getThreadContext(); - try (ThreadContext.StoredContext ctx = threadContext.stashAndMergeHeaders(headers)) { + try ( + ThreadContext.StoredContext ctx = ThreadContextAccess.doPrivileged(() -> 
threadContext.stashAndMergeHeaders(headers)) + ) { super.doExecute(action, request, listener); } } diff --git a/server/src/main/java/org/opensearch/cluster/ClusterInfo.java b/server/src/main/java/org/opensearch/cluster/ClusterInfo.java index 4c38d6fd99f5d..7216c447acc3e 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterInfo.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterInfo.java @@ -33,6 +33,7 @@ package org.opensearch.cluster; import org.opensearch.Version; +import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; @@ -68,6 +69,8 @@ public class ClusterInfo implements ToXContentFragment, Writeable { final Map routingToDataPath; final Map reservedSpace; final Map nodeFileCacheStats; + private long avgTotalBytes; + private long avgFreeByte; protected ClusterInfo() { this(Map.of(), Map.of(), Map.of(), Map.of(), Map.of(), Map.of()); @@ -97,6 +100,7 @@ public ClusterInfo( this.routingToDataPath = routingToDataPath; this.reservedSpace = reservedSpace; this.nodeFileCacheStats = nodeFileCacheStats; + calculateAvgFreeAndTotalBytes(mostAvailableSpaceUsage); } public ClusterInfo(StreamInput in) throws IOException { @@ -117,6 +121,39 @@ public ClusterInfo(StreamInput in) throws IOException { } else { this.nodeFileCacheStats = Map.of(); } + + calculateAvgFreeAndTotalBytes(mostAvailableSpaceUsage); + } + + /** + * Computes the average total and free bytes across the nodes in the disk usage map; these averages + * can serve as an estimated {@link DiskUsage} for a {@link RoutingNode} whose own usage is not known. + * @param usages Map of nodeId to DiskUsage for all known nodes + */ + private void calculateAvgFreeAndTotalBytes(final Map usages) { + if (usages == null || usages.isEmpty()) { + this.avgTotalBytes = 0; + this.avgFreeByte = 0; + return; + } + + long totalBytes = 0; + long freeBytes = 0; + for (DiskUsage du : usages.values()) { + totalBytes += du.getTotalBytes(); + freeBytes += du.getFreeBytes(); + } + + this.avgTotalBytes = totalBytes / usages.size(); + this.avgFreeByte = freeBytes / usages.size(); + } + + public long getAvgFreeByte() { + return avgFreeByte; + } + + public long getAvgTotalBytes() { + return avgTotalBytes; } @Override diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java index c7fd263bda56a..bb51c42252448 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterModule.java @@ -48,6 +48,7 @@ import org.opensearch.cluster.metadata.MetadataIndexTemplateService; import org.opensearch.cluster.metadata.MetadataMappingService; import org.opensearch.cluster.metadata.MetadataUpdateSettingsService; +import org.opensearch.cluster.metadata.QueryGroupMetadata; import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.metadata.ViewMetadata; import org.opensearch.cluster.metadata.WeightedRoutingMetadata; @@ -214,6 +215,8 @@ public static List getNamedWriteables() { DecommissionAttributeMetadata::new, DecommissionAttributeMetadata::readDiffFrom ); + + registerMetadataCustom(entries, QueryGroupMetadata.TYPE, QueryGroupMetadata::new, QueryGroupMetadata::readDiffFrom); // Task Status (not Diffable) entries.add(new Entry(Task.Status.class, PersistentTasksNodeService.Status.NAME, PersistentTasksNodeService.Status::new)); return entries; } diff --git
a/server/src/main/java/org/opensearch/cluster/applicationtemplates/ClusterStateSystemTemplateLoader.java b/server/src/main/java/org/opensearch/cluster/applicationtemplates/ClusterStateSystemTemplateLoader.java new file mode 100644 index 0000000000000..332960ef49064 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/applicationtemplates/ClusterStateSystemTemplateLoader.java @@ -0,0 +1,108 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.applicationtemplates; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.OpenSearchCorruptionException; +import org.opensearch.action.admin.indices.template.put.PutComponentTemplateAction; +import org.opensearch.client.Client; +import org.opensearch.client.OriginSettingClient; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.ComponentTemplate; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.core.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; +import java.util.function.Supplier; + +/** + * Class responsible for loading the component templates provided by a repository into the cluster state. + */ +@ExperimentalApi +public class ClusterStateSystemTemplateLoader implements SystemTemplateLoader { + + private final Client client; + + private final Supplier clusterStateSupplier; + + private static final Logger logger = LogManager.getLogger(SystemTemplateLoader.class); + + public static final String TEMPLATE_LOADER_IDENTIFIER = "system_template_loader"; + public static final String TEMPLATE_TYPE_KEY = "_type"; + + public ClusterStateSystemTemplateLoader(Client client, Supplier clusterStateSupplier) { + this.client = new OriginSettingClient(client, TEMPLATE_LOADER_IDENTIFIER); + this.clusterStateSupplier = clusterStateSupplier; + } + + @Override + public boolean loadTemplate(SystemTemplate template) throws IOException { + final ComponentTemplate existingTemplate = clusterStateSupplier.get() + .metadata() + .componentTemplates() + .get(template.templateMetadata().fullyQualifiedName()); + + if (existingTemplate != null + && !SystemTemplateMetadata.COMPONENT_TEMPLATE_TYPE.equals( + Objects.toString(existingTemplate.metadata().get(TEMPLATE_TYPE_KEY)) + )) { + throw new OpenSearchCorruptionException( + "Attempting to create " + template.templateMetadata().name() + " which has already been created through some other source." 
+ ); + } + + if (existingTemplate != null && existingTemplate.version() >= template.templateMetadata().version()) { + logger.debug( + "Skipping putting template {} as its existing version [{}] is >= fetched version [{}]", + template.templateMetadata().fullyQualifiedName(), + existingTemplate.version(), + template.templateMetadata().version() + ); + return false; + } + + ComponentTemplate newTemplate = null; + try ( + XContentParser contentParser = JsonXContent.jsonXContent.createParser( + NamedXContentRegistry.EMPTY, + DeprecationHandler.IGNORE_DEPRECATIONS, + template.templateContent().utf8ToString() + ) + ) { + newTemplate = ComponentTemplate.parse(contentParser); + } + + if (!Objects.equals(newTemplate.version(), template.templateMetadata().version())) { + throw new OpenSearchCorruptionException( + "Template version mismatch for " + + template.templateMetadata().name() + + ". Version in metadata: " + + template.templateMetadata().version() + + " , Version in content: " + + newTemplate.version() + ); + } + + final PutComponentTemplateAction.Request request = new PutComponentTemplateAction.Request( + template.templateMetadata().fullyQualifiedName() + ).componentTemplate(newTemplate); + + return client.admin() + .indices() + .execute(PutComponentTemplateAction.INSTANCE, request) + .actionGet(TimeValue.timeValueMillis(30000)) + .isAcknowledged(); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplate.java b/server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplate.java new file mode 100644 index 0000000000000..e11ded7ef5546 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplate.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.applicationtemplates; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.bytes.BytesReference; + +/** + * Encapsulates the information and content about a system template available within a repository. 
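 * <p>A minimal construction sketch (illustrative only; the repository id and template body below are
 * assumptions, not values taken from a real repository):
 * <pre>{@code
 * SystemTemplateMetadata meta = SystemTemplateMetadata.fromComponentTemplateInfo("logs", 1L);
 * TemplateRepositoryMetadata repo = new TemplateRepositoryMetadata("demo-repo", 1L);
 * SystemTemplate template = new SystemTemplate(new BytesArray("{\"template\":{}}"), meta, repo);
 * }</pre>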
+ */ +@ExperimentalApi +public class SystemTemplate { + + private final BytesReference templateContent; + + private final SystemTemplateMetadata templateMetadata; + + private final TemplateRepositoryMetadata repositoryMetadata; + + public SystemTemplate(BytesReference templateContent, SystemTemplateMetadata templateInfo, TemplateRepositoryMetadata repositoryInfo) { + this.templateContent = templateContent; + this.templateMetadata = templateInfo; + this.repositoryMetadata = repositoryInfo; + } + + public BytesReference templateContent() { + return templateContent; + } + + public SystemTemplateMetadata templateMetadata() { + return templateMetadata; + } + + public TemplateRepositoryMetadata repositoryMetadata() { + return repositoryMetadata; + } +} diff --git a/server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplateLoader.java b/server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplateLoader.java new file mode 100644 index 0000000000000..077580aed5a64 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplateLoader.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.applicationtemplates; + +import org.opensearch.common.annotation.ExperimentalApi; + +import java.io.IOException; + +/** + * Interface to load a template into the OpenSearch runtime. + */ +@ExperimentalApi +public interface SystemTemplateLoader { + + /** + * @param template Template to be loaded + * @return True if the template was loaded, false if loading was skipped + * @throws IOException If an exceptional situation is encountered while parsing/loading the template + */ + boolean loadTemplate(SystemTemplate template) throws IOException; +} diff --git a/server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplateMetadata.java b/server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplateMetadata.java new file mode 100644 index 0000000000000..227b70ffa2ef5 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplateMetadata.java @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.applicationtemplates; + +import org.opensearch.common.annotation.ExperimentalApi; + +import java.util.Objects; + +/** + * Metadata information about a template available in a template repository. + */ +@ExperimentalApi +public class SystemTemplateMetadata { + + private final long version; + private final String type; + private final String name; + + private static final String DELIMITER = "@"; + + public static final String COMPONENT_TEMPLATE_TYPE = "@abc_template"; + + public SystemTemplateMetadata(long version, String type, String name) { + this.version = version; + this.type = type; + this.name = name; + } + + public String type() { + return type; + } + + public String name() { + return name; + } + + public long version() { + return version; + } + + /** + * Gets the metadata using the fully qualified name of the template + * @param fullyQualifiedName (e.g.
@abc_template@logs@1) + * @return Metadata object based on name + */ + public static SystemTemplateMetadata fromComponentTemplate(String fullyQualifiedName) { + assert fullyQualifiedName.length() > DELIMITER.length() * 3 + 2 + COMPONENT_TEMPLATE_TYPE.length() + : "System template name must have all defined components"; + assert (DELIMITER + fullyQualifiedName.substring(1, fullyQualifiedName.indexOf(DELIMITER, 1))).equals(COMPONENT_TEMPLATE_TYPE); + + return new SystemTemplateMetadata( + Long.parseLong(fullyQualifiedName.substring(fullyQualifiedName.lastIndexOf(DELIMITER) + 1)), + COMPONENT_TEMPLATE_TYPE, + fullyQualifiedName.substring(fullyQualifiedName.indexOf(DELIMITER, 2) + 1, fullyQualifiedName.lastIndexOf(DELIMITER)) + ); + } + + public static SystemTemplateMetadata fromComponentTemplateInfo(String name, long version) { + return new SystemTemplateMetadata(version, COMPONENT_TEMPLATE_TYPE, name); + } + + public final String fullyQualifiedName() { + return type + DELIMITER + name + DELIMITER + version; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SystemTemplateMetadata that = (SystemTemplateMetadata) o; + return version == that.version && Objects.equals(type, that.type) && Objects.equals(name, that.name); + } + + @Override + public int hashCode() { + return Objects.hash(version, type, name); + } + + @Override + public String toString() { + return "SystemTemplateMetadata{" + "version=" + version + ", type='" + type + '\'' + ", name='" + name + '\'' + '}'; + } +} diff --git a/server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplateRepository.java b/server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplateRepository.java new file mode 100644 index 0000000000000..9cf302b8874f2 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplateRepository.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.applicationtemplates; + +import org.opensearch.common.annotation.ExperimentalApi; + +import java.io.IOException; + +/** + * Repository interface around the templates provided by a store (e.g. code repo, remote file store, etc) + */ +@ExperimentalApi +public interface SystemTemplateRepository extends AutoCloseable { + + /** + * @return Metadata about the repository + */ + TemplateRepositoryMetadata metadata(); + + /** + * @return Metadata for all available templates + */ + Iterable listTemplates() throws IOException; + + /** + * + * @param template metadata about template to be fetched + * @return The actual template content + */ + SystemTemplate getTemplate(SystemTemplateMetadata template) throws IOException; +} diff --git a/server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplatesPlugin.java b/server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplatesPlugin.java new file mode 100644 index 0000000000000..54871e6db7010 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplatesPlugin.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.applicationtemplates; + +import org.opensearch.common.annotation.ExperimentalApi; + +import java.io.IOException; + +/** + * Plugin interface to expose the template maintaining logic. + */ +@ExperimentalApi +public interface SystemTemplatesPlugin { + + /** + * @return repository implementation from which templates are to be fetched. + */ + SystemTemplateRepository loadRepository() throws IOException; + + /** + * @param templateInfo Metadata about the template to load + * @return Implementation of TemplateLoader which determines how to make the template available at runtime. + */ + SystemTemplateLoader loaderFor(SystemTemplateMetadata templateInfo); +} diff --git a/server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplatesService.java b/server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplatesService.java new file mode 100644 index 0000000000000..90652192e5c28 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplatesService.java @@ -0,0 +1,183 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.applicationtemplates; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.cluster.LocalNodeClusterManagerListener; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * Service class to orchestrate execution around available templates' management. 
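 * <p>A rough lifecycle sketch (assuming at least one {@link SystemTemplatesPlugin} is installed and
 * the cluster.application_templates.enabled setting is on):
 * <pre>{@code
 * service.onClusterManager();   // on election: schedules refreshTemplates(false) on the generic pool
 * service.verifyRepositories(); // runs refreshTemplates(true); throws IllegalStateException on failures
 * SystemTemplatesService.Stats stats = service.stats();
 * }</pre>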
+ */ +@ExperimentalApi +public class SystemTemplatesService implements LocalNodeClusterManagerListener { + + public static final Setting SETTING_APPLICATION_BASED_CONFIGURATION_TEMPLATES_ENABLED = Setting.boolSetting( + "cluster.application_templates.enabled", + false, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + private final List systemTemplatesPluginList; + private final ThreadPool threadPool; + + private final AtomicBoolean loaded = new AtomicBoolean(false); + + private volatile boolean enabledTemplates; + + private volatile Stats latestStats; + + private static final Logger logger = LogManager.getLogger(SystemTemplatesService.class); + + public SystemTemplatesService( + List systemTemplatesPluginList, + ThreadPool threadPool, + ClusterSettings clusterSettings, + Settings settings + ) { + this.systemTemplatesPluginList = systemTemplatesPluginList; + this.threadPool = threadPool; + if (settings.getAsBoolean(SETTING_APPLICATION_BASED_CONFIGURATION_TEMPLATES_ENABLED.getKey(), false)) { + setEnabledTemplates(settings.getAsBoolean(SETTING_APPLICATION_BASED_CONFIGURATION_TEMPLATES_ENABLED.getKey(), false)); + } + clusterSettings.addSettingsUpdateConsumer(SETTING_APPLICATION_BASED_CONFIGURATION_TEMPLATES_ENABLED, this::setEnabledTemplates); + } + + @Override + public void onClusterManager() { + threadPool.generic().execute(() -> refreshTemplates(false)); + } + + @Override + public void offClusterManager() { + // do nothing + } + + public void verifyRepositories() { + refreshTemplates(true); + } + + public Stats stats() { + return latestStats; + } + + void refreshTemplates(boolean verification) { + int templatesLoaded = 0; + int failedLoadingTemplates = 0; + int failedLoadingRepositories = 0; + List exceptions = new ArrayList<>(); + + if ((verification || loaded.compareAndSet(false, true)) && enabledTemplates) { + for (SystemTemplatesPlugin plugin : systemTemplatesPluginList) { + try (SystemTemplateRepository repository = plugin.loadRepository()) { + + final TemplateRepositoryMetadata repositoryMetadata = repository.metadata(); + logger.debug( + "Loading templates from repository: {} at version {}", + repositoryMetadata.id(), + repositoryMetadata.version() + ); + + for (SystemTemplateMetadata templateMetadata : repository.listTemplates()) { + try { + final SystemTemplate template = repository.getTemplate(templateMetadata); + + // Load plugin if not in verification phase. 
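// Note: a loader may return false to signal a no-op (e.g. an equal-or-newer version of the
// template already exists in the cluster state), so skipped templates are not counted as loaded.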
+ if (!verification && plugin.loaderFor(templateMetadata).loadTemplate(template)) { + templatesLoaded++; + } + + } catch (Exception ex) { + exceptions.add(ex); + logger.error( + new ParameterizedMessage( + "Failed loading template {} from repository: {}", + templateMetadata.fullyQualifiedName(), + repositoryMetadata.id() + ), + ex + ); + failedLoadingTemplates++; + } + } + } catch (Exception ex) { + exceptions.add(ex); + failedLoadingRepositories++; + logger.error(new ParameterizedMessage("Failed loading repository from plugin: {}", plugin.getClass().getName()), ex); + } + } + + logger.debug( + "Stats: Total Loaded Templates: [{}], Failed Loading Templates: [{}], Failed Loading Repositories: [{}]", + templatesLoaded, + failedLoadingTemplates, + failedLoadingRepositories + ); + + // End exceptionally if invoked in verification context + if (verification && (failedLoadingRepositories > 0 || failedLoadingTemplates > 0)) { + latestStats = new Stats(templatesLoaded, failedLoadingTemplates, failedLoadingRepositories); + throw new IllegalStateException("Some of the repositories could not be loaded or are corrupted: " + exceptions); + } + } + + latestStats = new Stats(templatesLoaded, failedLoadingTemplates, failedLoadingRepositories); + } + + private void setEnabledTemplates(boolean enabled) { + if (!FeatureFlags.isEnabled(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES_SETTING)) { + throw new IllegalArgumentException( + "Application Based Configuration Templates is under an experimental feature and can be activated only by enabling " + + FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES_SETTING.getKey() + + " feature flag." + ); + } + enabledTemplates = enabled; + } + + /** + * Class to record stats for templates loaded through the listener in a single iteration. + */ + @ExperimentalApi + public static class Stats { + private final long templatesLoaded; + private final long failedLoadingTemplates; + private final long failedLoadingRepositories; + + public Stats(long templatesLoaded, long failedLoadingTemplates, long failedLoadingRepositories) { + this.templatesLoaded = templatesLoaded; + this.failedLoadingTemplates = failedLoadingTemplates; + this.failedLoadingRepositories = failedLoadingRepositories; + } + + public long getTemplatesLoaded() { + return templatesLoaded; + } + + public long getFailedLoadingTemplates() { + return failedLoadingTemplates; + } + + public long getFailedLoadingRepositories() { + return failedLoadingRepositories; + } + } +} diff --git a/server/src/main/java/org/opensearch/cluster/applicationtemplates/TemplateRepositoryMetadata.java b/server/src/main/java/org/opensearch/cluster/applicationtemplates/TemplateRepositoryMetadata.java new file mode 100644 index 0000000000000..1fa79d291480b --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/applicationtemplates/TemplateRepositoryMetadata.java @@ -0,0 +1,54 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.applicationtemplates; + +import org.opensearch.common.annotation.ExperimentalApi; + +import java.util.Objects; + +/** + * The information to uniquely identify a template repository. 
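 * <p>For example, a repository publishing the second revision of its catalog might report
 * {@code new TemplateRepositoryMetadata("my-repo", 2L)} (the id shown is illustrative).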
+ */ +@ExperimentalApi +public class TemplateRepositoryMetadata { + + private final String id; + private final long version; + + public TemplateRepositoryMetadata(String id, long version) { + this.id = id; + this.version = version; + } + + public String id() { + return id; + } + + public long version() { + return version; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + TemplateRepositoryMetadata that = (TemplateRepositoryMetadata) o; + return version == that.version && Objects.equals(id, that.id); + } + + @Override + public int hashCode() { + return Objects.hash(id, version); + } + + @Override + public String toString() { + return "TemplateRepositoryMetadata{" + "id='" + id + '\'' + ", version=" + version + '}'; + } +} diff --git a/server/src/main/java/org/opensearch/cluster/applicationtemplates/package-info.java b/server/src/main/java/org/opensearch/cluster/applicationtemplates/package-info.java new file mode 100644 index 0000000000000..3fef2aab07d43 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/applicationtemplates/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Core classes responsible for handling all application based configuration templates related operations. */ +package org.opensearch.cluster.applicationtemplates; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java index 7fa63ae8abc62..c7820c2c9a365 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java @@ -105,7 +105,8 @@ public CoordinationState( .getLastAcceptedConfiguration(); this.publishVotes = new VoteCollection(); this.isRemoteStateEnabled = isRemoteStoreClusterStateEnabled(settings); - this.isRemotePublicationEnabled = FeatureFlags.isEnabled(REMOTE_PUBLICATION_EXPERIMENTAL) + this.isRemotePublicationEnabled = isRemoteStateEnabled + && FeatureFlags.isEnabled(REMOTE_PUBLICATION_EXPERIMENTAL) && localNode.isRemoteStatePublicationEnabled(); } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/OpenSearchNodeCommand.java b/server/src/main/java/org/opensearch/cluster/coordination/OpenSearchNodeCommand.java index 259d8961a3e78..896fe6fc8024b 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/OpenSearchNodeCommand.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/OpenSearchNodeCommand.java @@ -47,6 +47,7 @@ import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.Diff; +import org.opensearch.cluster.metadata.ComponentTemplateMetadata; import org.opensearch.cluster.metadata.DataStreamMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.collect.Tuple; @@ -94,9 +95,10 @@ public abstract class OpenSearchNodeCommand extends EnvironmentAwareCommand { public T parseNamedObject(Class categoryClass, String name, XContentParser parser, C context) throws IOException { // Currently, two unknown top-level objects are present if (Metadata.Custom.class.isAssignableFrom(categoryClass)) { - if (DataStreamMetadata.TYPE.equals(name)) { + if 
(DataStreamMetadata.TYPE.equals(name) || ComponentTemplateMetadata.TYPE.equals(name)) { // DataStreamMetadata is used inside Metadata class for validation purposes and building the indicesLookup, - // therefor even es node commands need to be able to parse it. + // ComponentTemplateMetadata is used inside Metadata class for building the systemTemplatesLookup, + // therefore even OpenSearch node commands need to be able to parse it. return super.parseNamedObject(categoryClass, name, parser, context); // TODO: Try to parse other named objects (e.g. stored scripts, ingest pipelines) that are part of core es as well? // Note that supporting PersistentTasksCustomMetadata is trickier, because PersistentTaskParams is a named object too. diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java b/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java index 0b7ed4fee5775..023c2db1a574a 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java @@ -117,6 +117,10 @@ protected void addToExtendedFields(String extendedField, AtomicLong extendedFiel this.extendedFields.put(extendedField, extendedFieldValue); } + public Map getExtendedFields() { + return extendedFields; + } + public String getStatsName() { return statsName; } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java index 36eabd51ffda1..62885a12222be 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java @@ -284,7 +284,6 @@ PublishWithJoinResponse handleIncomingRemotePublishRequest(RemotePublishRequest ) ); ClusterState clusterState = remoteClusterStateService.getClusterStateUsingDiff( - request.getClusterName(), manifest, lastSeen, transportService.getLocalNode().getId() diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java index e7f1b97f28842..63bbe4144c4fb 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java @@ -32,6 +32,7 @@ package org.opensearch.cluster.metadata; +import org.opensearch.Version; import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; import org.opensearch.cluster.metadata.DataStream.TimestampField; @@ -75,6 +76,7 @@ public class ComposableIndexTemplate extends AbstractDiffable PARSER = new ConstructingObjectParser<>( @@ -87,7 +89,8 @@ public class ComposableIndexTemplate extends AbstractDiffable) a[5], - (DataStreamTemplate) a[6] + (DataStreamTemplate) a[6], + (Context) a[7] ) ); @@ -99,6 +102,7 @@ public class ComposableIndexTemplate extends AbstractDiffable p.map(), METADATA); PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), DataStreamTemplate.PARSER, DATA_STREAM); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), Context.PARSER, CONTEXT); } private final List indexPatterns; @@ -114,6 +118,8 @@ public class ComposableIndexTemplate extends AbstractDiffable metadata; @Nullable private final DataStreamTemplate dataStreamTemplate; +
@Nullable + private final Context context; static Diff readITV2DiffFrom(StreamInput in) throws IOException { return AbstractDiffable.readDiffFrom(ComposableIndexTemplate::new, in); @@ -131,7 +137,7 @@ public ComposableIndexTemplate( @Nullable Long version, @Nullable Map metadata ) { - this(indexPatterns, template, componentTemplates, priority, version, metadata, null); + this(indexPatterns, template, componentTemplates, priority, version, metadata, null, null); } public ComposableIndexTemplate( @@ -142,6 +148,19 @@ public ComposableIndexTemplate( @Nullable Long version, @Nullable Map metadata, @Nullable DataStreamTemplate dataStreamTemplate + ) { + this(indexPatterns, template, componentTemplates, priority, version, metadata, dataStreamTemplate, null); + } + + public ComposableIndexTemplate( + List indexPatterns, + @Nullable Template template, + @Nullable List componentTemplates, + @Nullable Long priority, + @Nullable Long version, + @Nullable Map metadata, + @Nullable DataStreamTemplate dataStreamTemplate, + @Nullable Context context ) { this.indexPatterns = indexPatterns; this.template = template; @@ -150,6 +169,7 @@ public ComposableIndexTemplate( this.version = version; this.metadata = metadata; this.dataStreamTemplate = dataStreamTemplate; + this.context = context; } public ComposableIndexTemplate(StreamInput in) throws IOException { @@ -164,6 +184,11 @@ public ComposableIndexTemplate(StreamInput in) throws IOException { this.version = in.readOptionalVLong(); this.metadata = in.readMap(); this.dataStreamTemplate = in.readOptionalWriteable(DataStreamTemplate::new); + if (in.getVersion().onOrAfter(Version.V_2_16_0)) { + this.context = in.readOptionalWriteable(Context::new); + } else { + this.context = null; + } } public List indexPatterns() { @@ -205,6 +230,10 @@ public DataStreamTemplate getDataStreamTemplate() { return dataStreamTemplate; } + public Context context() { + return context; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeStringCollection(this.indexPatterns); @@ -219,6 +248,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalVLong(this.version); out.writeMap(this.metadata); out.writeOptionalWriteable(dataStreamTemplate); + if (out.getVersion().onOrAfter(Version.V_2_16_0)) { + out.writeOptionalWriteable(context); + } } @Override @@ -243,6 +275,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (this.dataStreamTemplate != null) { builder.field(DATA_STREAM.getPreferredName(), dataStreamTemplate); } + if (this.context != null) { + builder.field(CONTEXT.getPreferredName(), context); + } builder.endObject(); return builder; } @@ -256,7 +291,8 @@ public int hashCode() { this.priority, this.version, this.metadata, - this.dataStreamTemplate + this.dataStreamTemplate, + this.context ); } @@ -275,7 +311,8 @@ public boolean equals(Object obj) { && Objects.equals(this.priority, other.priority) && Objects.equals(this.version, other.version) && Objects.equals(this.metadata, other.metadata) - && Objects.equals(this.dataStreamTemplate, other.dataStreamTemplate); + && Objects.equals(this.dataStreamTemplate, other.dataStreamTemplate) + && Objects.equals(this.context, other.context); } @Override diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Context.java b/server/src/main/java/org/opensearch/cluster/metadata/Context.java new file mode 100644 index 0000000000000..4bd6134e8a318 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/metadata/Context.java @@ -0,0 
+1,130 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.cluster.AbstractDiffable; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +/** + * Class encapsulating the context metadata associated with an index template/index. + */ +@ExperimentalApi +public class Context extends AbstractDiffable implements ToXContentObject { + + private static final ParseField NAME = new ParseField("name"); + private static final ParseField VERSION = new ParseField("version"); + private static final ParseField PARAMS = new ParseField("params"); + + public static final String LATEST_VERSION = "_latest"; + + private String name; + private String version = LATEST_VERSION; + private Map params; + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "index_template", + false, + a -> new Context((String) a[0], (String) a[1], (Map) a[2]) + ); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), NAME); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), VERSION); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> p.map(), PARAMS); + } + + public Context(String name) { + this(name, LATEST_VERSION, Map.of()); + } + + public Context(String name, String version, Map params) { + this.name = name; + if (version != null) { + this.version = version; + } + this.params = params; + } + + public Context(StreamInput in) throws IOException { + this.name = in.readString(); + this.version = in.readOptionalString(); + this.params = in.readMap(); + } + + public String name() { + return name; + } + + public void name(String name) { + this.name = name; + } + + public String version() { + return version; + } + + public void version(String version) { + this.version = version; + } + + public Map params() { + return params; + } + + public void params(Map params) { + this.params = params; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + out.writeOptionalString(version); + out.writeMap(params); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(NAME.getPreferredName(), this.name); + builder.field("version", this.version); + if (this.params != null) { // guard the field; the ToXContent.Params argument shadows it + builder.field("params", this.params); + } + builder.endObject(); + return builder; + } + + public static Context fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Context context = (Context) o; + return Objects.equals(name, context.name) && Objects.equals(version, context.version) && Objects.equals(params, context.params); + } + + @Override + public int hashCode()
{ + return Objects.hash(name, version, params); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 9e7fe23f29872..df0d2609ad83d 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -43,6 +43,7 @@ import org.opensearch.cluster.block.ClusterBlockLevel; import org.opensearch.cluster.node.DiscoveryNodeFilters; import org.opensearch.cluster.routing.allocation.IndexMetadataUpdater; +import org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.opensearch.common.Nullable; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.MapBuilder; @@ -686,6 +687,8 @@ public static APIBlock readFrom(StreamInput input) throws IOException { private final boolean isSystem; private final boolean isRemoteSnapshot; + private final int indexTotalShardsPerNodeLimit; + private IndexMetadata( final Index index, final long version, @@ -711,7 +714,8 @@ private IndexMetadata( final int routingPartitionSize, final ActiveShardCount waitForActiveShards, final Map rolloverInfos, - final boolean isSystem + final boolean isSystem, + final int indexTotalShardsPerNodeLimit ) { this.index = index; @@ -746,6 +750,7 @@ private IndexMetadata( this.rolloverInfos = Collections.unmodifiableMap(rolloverInfos); this.isSystem = isSystem; this.isRemoteSnapshot = IndexModule.Type.REMOTE_SNAPSHOT.match(this.settings); + this.indexTotalShardsPerNodeLimit = indexTotalShardsPerNodeLimit; assert numberOfShards * routingFactor == routingNumShards : routingNumShards + " must be a multiple of " + numberOfShards; } @@ -899,6 +904,10 @@ public Set inSyncAllocationIds(int shardId) { return inSyncAllocationIds.get(shardId); } + public int getIndexTotalShardsPerNodeLimit() { + return this.indexTotalShardsPerNodeLimit; + } + @Nullable public DiscoveryNodeFilters requireFilters() { return requireFilters; @@ -1583,6 +1592,8 @@ public IndexMetadata build() { ); } + final int indexTotalShardsPerNodeLimit = ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(settings); + final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE); return new IndexMetadata( @@ -1610,7 +1621,8 @@ public IndexMetadata build() { routingPartitionSize, waitForActiveShards, rolloverInfos, - isSystem + isSystem, + indexTotalShardsPerNodeLimit ); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index a0ef8de07fbf2..6163fd624c838 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -43,6 +43,7 @@ import org.opensearch.cluster.DiffableUtils; import org.opensearch.cluster.NamedDiffable; import org.opensearch.cluster.NamedDiffableValueSerializer; +import org.opensearch.cluster.applicationtemplates.SystemTemplateMetadata; import org.opensearch.cluster.block.ClusterBlock; import org.opensearch.cluster.block.ClusterBlockLevel; import org.opensearch.cluster.coordination.CoordinationMetadata; @@ -280,6 +281,8 @@ static Custom fromXContent(XContentParser parser, String name) throws IOExceptio private final SortedMap indicesLookup; + private final Map> systemTemplatesLookup; + Metadata( String clusterUUID, boolean clusterUUIDCommitted, @@ -297,7 +300,8 @@ static 
Custom fromXContent(XContentParser parser, String name) throws IOExceptio String[] visibleOpenIndices, String[] allClosedIndices, String[] visibleClosedIndices, - SortedMap indicesLookup + SortedMap indicesLookup, + Map> systemTemplatesLookup ) { this.clusterUUID = clusterUUID; this.clusterUUIDCommitted = clusterUUIDCommitted; @@ -328,6 +332,7 @@ static Custom fromXContent(XContentParser parser, String name) throws IOExceptio this.allClosedIndices = allClosedIndices; this.visibleClosedIndices = visibleClosedIndices; this.indicesLookup = indicesLookup; + this.systemTemplatesLookup = systemTemplatesLookup; } public long version() { @@ -828,6 +833,10 @@ public Map componentTemplates() { .orElse(Collections.emptyMap()); } + public Map> systemTemplatesLookup() { + return systemTemplatesLookup; + } + public Map templatesV2() { return Optional.ofNullable((ComposableIndexTemplateMetadata) this.custom(ComposableIndexTemplateMetadata.TYPE)) .map(ComposableIndexTemplateMetadata::indexTemplates) @@ -844,6 +853,12 @@ public Map views() { return Optional.ofNullable((ViewMetadata) this.custom(ViewMetadata.TYPE)).map(ViewMetadata::views).orElse(Collections.emptyMap()); } + public Map queryGroups() { + return Optional.ofNullable((QueryGroupMetadata) this.custom(QueryGroupMetadata.TYPE)) + .map(QueryGroupMetadata::queryGroups) + .orElse(Collections.emptyMap()); + } + public DecommissionAttributeMetadata decommissionAttributeMetadata() { return custom(DecommissionAttributeMetadata.TYPE); } @@ -1189,6 +1204,8 @@ public static class Builder { private final Map customs; private final Metadata previousMetadata; + private Map> systemTemplatesLookup; + public Builder() { clusterUUID = UNKNOWN_CLUSTER_UUID; indices = new HashMap<>(); @@ -1287,6 +1304,7 @@ public Builder templates(Map templates) { } public Builder templates(TemplatesMetadata templatesMetadata) { + this.templates.clear(); this.templates.putAll(templatesMetadata.getTemplates()); return this; } @@ -1367,6 +1385,32 @@ public Builder removeDataStream(String name) { return this; } + public Builder queryGroups(final Map queryGroups) { + this.customs.put(QueryGroupMetadata.TYPE, new QueryGroupMetadata(queryGroups)); + return this; + } + + public Builder put(final QueryGroup queryGroup) { + Objects.requireNonNull(queryGroup, "queryGroup should not be null"); + Map existing = new HashMap<>(getQueryGroups()); + existing.put(queryGroup.get_id(), queryGroup); + return queryGroups(existing); + } + + public Builder remove(final QueryGroup queryGroup) { + Objects.requireNonNull(queryGroup, "queryGroup should not be null"); + Map existing = new HashMap<>(getQueryGroups()); + existing.remove(queryGroup.get_id()); + return queryGroups(existing); + } + + private Map getQueryGroups() { + return Optional.ofNullable(this.customs.get(QueryGroupMetadata.TYPE)) + .map(o -> (QueryGroupMetadata) o) + .map(QueryGroupMetadata::queryGroups) + .orElse(Collections.emptyMap()); + } + private Map getViews() { return Optional.ofNullable(customs.get(ViewMetadata.TYPE)) .map(o -> (ViewMetadata) o) @@ -1534,6 +1578,8 @@ public Metadata build() { ? 
(DataStreamMetadata) this.previousMetadata.customs.get(DataStreamMetadata.TYPE) : null; + buildSystemTemplatesLookup(); + boolean recomputeRequiredforIndicesLookups = (previousMetadata == null) || (indices.equals(previousMetadata.indices) == false) || (previousDataStreamMetadata != null && previousDataStreamMetadata.equals(dataStreamMetadata) == false) @@ -1544,6 +1590,33 @@ public Metadata build() { : buildMetadataWithRecomputedIndicesLookups(); } + private void buildSystemTemplatesLookup() { + if (previousMetadata != null + && Objects.equals( + previousMetadata.customs.get(ComponentTemplateMetadata.TYPE), + this.customs.get(ComponentTemplateMetadata.TYPE) + )) { + systemTemplatesLookup = Collections.unmodifiableMap(previousMetadata.systemTemplatesLookup); + } else { + systemTemplatesLookup = new HashMap<>(); + Optional.ofNullable((ComponentTemplateMetadata) this.customs.get(ComponentTemplateMetadata.TYPE)) + .map(ComponentTemplateMetadata::componentTemplates) + .orElseGet(Collections::emptyMap) + .forEach((k, v) -> { + if (MetadataIndexTemplateService.isSystemTemplate(v)) { + SystemTemplateMetadata templateMetadata = SystemTemplateMetadata.fromComponentTemplate(k); + systemTemplatesLookup.compute(templateMetadata.name(), (ik, iv) -> { + if (iv == null) { + iv = new TreeMap<>(); + } + iv.put(templateMetadata.version(), k); + return iv; + }); + } + }); + } + } + protected Metadata buildMetadataWithPreviousIndicesLookups() { return new Metadata( clusterUUID, @@ -1562,7 +1635,8 @@ protected Metadata buildMetadataWithPreviousIndicesLookups() { Arrays.copyOf(previousMetadata.visibleOpenIndices, previousMetadata.visibleOpenIndices.length), Arrays.copyOf(previousMetadata.allClosedIndices, previousMetadata.allClosedIndices.length), Arrays.copyOf(previousMetadata.visibleClosedIndices, previousMetadata.visibleClosedIndices.length), - Collections.unmodifiableSortedMap(previousMetadata.indicesLookup) + Collections.unmodifiableSortedMap(previousMetadata.indicesLookup), + systemTemplatesLookup ); } @@ -1685,7 +1759,8 @@ protected Metadata buildMetadataWithRecomputedIndicesLookups() { visibleOpenIndicesArray, allClosedIndicesArray, visibleClosedIndicesArray, - indicesLookup + indicesLookup, + systemTemplatesLookup ); } @@ -1762,9 +1837,7 @@ static void validateDataStreams(SortedMap indicesLooku if (dsMetadata != null) { for (DataStream ds : dsMetadata.dataStreams().values()) { String prefix = DataStream.BACKING_INDEX_PREFIX + ds.getName() + "-"; - Set conflicts = indicesLookup.subMap(prefix, DataStream.BACKING_INDEX_PREFIX + ds.getName() + ".") // '.' 
is the - // char after - // '-' + Set conflicts = indicesLookup.subMap(prefix, DataStream.BACKING_INDEX_PREFIX + ds.getName() + ".") .keySet() .stream() .filter(s -> NUMBER_PATTERN.matcher(s.substring(prefix.length())).matches()) diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 16edec112f123..50d25b11ef810 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -85,6 +85,7 @@ import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; +import org.opensearch.index.compositeindex.CompositeIndexValidator; import org.opensearch.index.mapper.DocumentMapper; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.MapperService.MergeReason; @@ -945,7 +946,8 @@ static Settings aggregateIndexSettings( if (INDEX_NUMBER_OF_SHARDS_SETTING.exists(indexSettingsBuilder) == false) { indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, INDEX_NUMBER_OF_SHARDS_SETTING.get(settings)); } - if (INDEX_NUMBER_OF_REPLICAS_SETTING.exists(indexSettingsBuilder) == false) { + if (INDEX_NUMBER_OF_REPLICAS_SETTING.exists(indexSettingsBuilder) == false + || indexSettingsBuilder.get(SETTING_NUMBER_OF_REPLICAS) == null) { indexSettingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, DEFAULT_REPLICA_COUNT_SETTING.get(currentState.metadata().settings())); } if (settings.get(SETTING_AUTO_EXPAND_REPLICAS) != null && indexSettingsBuilder.get(SETTING_AUTO_EXPAND_REPLICAS) == null) { @@ -1318,6 +1320,10 @@ private static void updateIndexMappingsAndBuildSortOrder( } } + if (mapperService.isCompositeIndexPresent()) { + CompositeIndexValidator.validate(mapperService, indexService.getCompositeIndexSettings(), indexService.getIndexSettings()); + } + if (sourceMetadata == null) { // now that the mapping is merged we can validate the index sort. 
// we cannot validate for index shrinking since the mapping is empty diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java index 5b03d3f7b19ce..6b638c9920c27 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java @@ -42,6 +42,9 @@ import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; +import org.opensearch.cluster.applicationtemplates.ClusterStateSystemTemplateLoader; +import org.opensearch.cluster.applicationtemplates.SystemTemplateMetadata; +import org.opensearch.cluster.applicationtemplates.SystemTemplatesService; import org.opensearch.cluster.service.ClusterManagerTaskKeys; import org.opensearch.cluster.service.ClusterManagerTaskThrottler; import org.opensearch.cluster.service.ClusterService; @@ -53,9 +56,11 @@ import org.opensearch.common.inject.Inject; import org.opensearch.common.logging.HeaderWarning; import org.opensearch.common.regex.Regex; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.set.Sets; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.action.ActionListener; @@ -72,6 +77,7 @@ import org.opensearch.indices.IndexTemplateMissingException; import org.opensearch.indices.IndicesService; import org.opensearch.indices.InvalidIndexTemplateException; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.io.UncheckedIOException; @@ -94,6 +100,7 @@ import static org.opensearch.cluster.metadata.MetadataCreateDataStreamService.validateTimestampFieldMapping; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateRefreshIntervalSettings; +import static org.opensearch.common.util.concurrent.ThreadContext.ACTION_ORIGIN_TRANSIENT_NAME; import static org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.NO_LONGER_ASSIGNED; /** @@ -116,6 +123,7 @@ public class MetadataIndexTemplateService { private final ClusterManagerTaskThrottler.ThrottlingKey removeIndexTemplateV2TaskKey; private final ClusterManagerTaskThrottler.ThrottlingKey createComponentTemplateTaskKey; private final ClusterManagerTaskThrottler.ThrottlingKey removeComponentTemplateTaskKey; + private final ThreadPool threadPool; @Inject public MetadataIndexTemplateService( @@ -124,7 +132,8 @@ public MetadataIndexTemplateService( AliasValidator aliasValidator, IndicesService indicesService, IndexScopedSettings indexScopedSettings, - NamedXContentRegistry xContentRegistry + NamedXContentRegistry xContentRegistry, + ThreadPool threadPool ) { this.clusterService = clusterService; this.aliasValidator = aliasValidator; @@ -132,6 +141,7 @@ public MetadataIndexTemplateService( this.metadataCreateIndexService = metadataCreateIndexService; this.indexScopedSettings = indexScopedSettings; this.xContentRegistry = xContentRegistry; + this.threadPool = threadPool; // Task is onboarded for throttling, it will get retried from associated TransportClusterManagerNodeAction. 
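
The systemTemplatesLookup that Metadata.Builder populates earlier in this patch is keyed by logical template name, then by version; findContextTemplate further down in this file reads it at either a pinned version or the latest one. A minimal, self-contained sketch of that resolution follows; the template names, component-template keys, and the "_latest" sentinel (standing in for Context.LATEST_VERSION) are all hypothetical stand-ins:

    import java.util.Map;
    import java.util.Optional;
    import java.util.TreeMap;

    // Sketch of the lookup shape built by Metadata.Builder#buildSystemTemplatesLookup:
    // logical template name -> sorted (version -> component template key).
    public class SystemTemplateLookupSketch {
        public static void main(String[] args) {
            Map<String, TreeMap<Long, String>> lookup = Map.of(
                "logs-settings", new TreeMap<>(Map.of(1L, "logs-settings@1", 2L, "logs-settings@2"))
            );
            System.out.println(resolve(lookup, "logs-settings", "1"));        // logs-settings@1
            System.out.println(resolve(lookup, "logs-settings", "_latest"));  // logs-settings@2
        }

        // findContextTemplate-style resolution: a pinned version is a point lookup,
        // anything else falls back to the highest version via TreeMap#lastKey.
        static String resolve(Map<String, TreeMap<Long, String>> lookup, String name, String version) {
            boolean specific = !"_latest".equals(version);
            return Optional.ofNullable(lookup.get(name))
                .map(v -> v.get(specific ? Long.parseLong(version) : v.lastKey()))
                .orElse(null);
        }
    }

A TreeMap keeps versions ordered, so "latest" is simply lastKey(); that is the property the builder's compute() call relies on when it accumulates versions per template name.
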
createIndexTemplateTaskKey = clusterService.registerClusterManagerTask(ClusterManagerTaskKeys.CREATE_INDEX_TEMPLATE_KEY, true); @@ -209,6 +219,7 @@ public void putComponentTemplate( final ComponentTemplate template, final ActionListener listener ) { + validateComponentTemplateRequest(template); clusterService.submitStateUpdateTask( "create-component-template [" + name + "], cause [" + cause + "]", new ClusterStateUpdateTask(Priority.URGENT) { @@ -378,6 +389,7 @@ public void removeComponentTemplate( final ActionListener listener ) { validateNotInUse(clusterService.state().metadata(), name); + validateComponentTemplateRequest(clusterService.state().metadata().componentTemplates().get(name)); clusterService.submitStateUpdateTask("remove-component-template [" + name + "]", new ClusterStateUpdateTask(Priority.URGENT) { @Override @@ -439,7 +451,12 @@ static void validateNotInUse(Metadata metadata, String templateNameOrWildcard) { .collect(Collectors.toSet()); final Set componentsBeingUsed = new HashSet<>(); final List templatesStillUsing = metadata.templatesV2().entrySet().stream().filter(e -> { - Set intersecting = Sets.intersection(new HashSet<>(e.getValue().composedOf()), matchingComponentTemplates); + Set referredComponentTemplates = new HashSet<>(e.getValue().composedOf()); + String systemTemplateUsed = findContextTemplate(metadata, e.getValue().context()); + if (systemTemplateUsed != null) { + referredComponentTemplates.add(systemTemplateUsed); + } + Set intersecting = Sets.intersection(referredComponentTemplates, matchingComponentTemplates); if (intersecting.size() > 0) { componentsBeingUsed.addAll(intersecting); return true; @@ -469,7 +486,7 @@ public void putIndexTemplateV2( final ComposableIndexTemplate template, final ActionListener listener ) { - validateV2TemplateRequest(clusterService.state().metadata(), name, template); + validateV2TemplateRequest(clusterService.state().metadata(), name, template, clusterService.getClusterSettings()); clusterService.submitStateUpdateTask( "create-index-template-v2 [" + name + "], cause [" + cause + "]", new ClusterStateUpdateTask(Priority.URGENT) { @@ -502,7 +519,12 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS ); } - public static void validateV2TemplateRequest(Metadata metadata, String name, ComposableIndexTemplate template) { + public static void validateV2TemplateRequest( + Metadata metadata, + String name, + ComposableIndexTemplate template, + ClusterSettings settings + ) { if (template.indexPatterns().stream().anyMatch(Regex::isMatchAllPattern)) { Settings mergedSettings = resolveSettings(metadata, template); if (IndexMetadata.INDEX_HIDDEN_SETTING.exists(mergedSettings)) { @@ -514,6 +536,8 @@ public static void validateV2TemplateRequest(Metadata metadata, String name, Com } final Map componentTemplates = metadata.componentTemplates(); + final boolean isContextAllowed = FeatureFlags.isEnabled(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES); + final List missingComponentTemplates = template.composedOf() .stream() .filter(componentTemplate -> componentTemplates.containsKey(componentTemplate) == false) @@ -525,6 +549,59 @@ public static void validateV2TemplateRequest(Metadata metadata, String name, Com "index template [" + name + "] specifies component templates " + missingComponentTemplates + " that do not exist" ); } + + if (template.context() != null && !isContextAllowed) { + throw new InvalidIndexTemplateException( + name, + "index template [" + + name + + "] specifies a context which cannot be 
used without enabling: " + + SystemTemplatesService.SETTING_APPLICATION_BASED_CONFIGURATION_TEMPLATES_ENABLED.getKey() + ); + } + + if (isContextAllowed + && template.composedOf().stream().anyMatch(componentTemplate -> isSystemTemplate(componentTemplates.get(componentTemplate)))) { + throw new InvalidIndexTemplateException( + name, + "index template [" + name + "] specifies a component templates which can only be used in context." + ); + } + + if (template.context() != null && findContextTemplate(metadata, template.context()) == null) { + throw new InvalidIndexTemplateException( + name, + "index template [" + name + "] specifies a context which is not loaded on the cluster." + ); + } + } + + private void validateComponentTemplateRequest(ComponentTemplate componentTemplate) { + if (isSystemTemplate(componentTemplate) + && !ClusterStateSystemTemplateLoader.TEMPLATE_LOADER_IDENTIFIER.equals( + threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME) + )) { + throw new IllegalArgumentException("A system template can only be created/updated/deleted with a repository"); + } + } + + private static String findContextTemplate(Metadata metadata, Context context) { + if (context == null) { + return null; + } + final boolean searchSpecificVersion = !Context.LATEST_VERSION.equals(context.version()); + return Optional.ofNullable(metadata.systemTemplatesLookup()) + .map(coll -> coll.get(context.name())) + .map(coll -> coll.get(searchSpecificVersion ? Long.parseLong(context.version()) : coll.lastKey())) + .orElse(null); + } + + public static boolean isSystemTemplate(ComponentTemplate componentTemplate) { + return Optional.ofNullable(componentTemplate) + .map(ComponentTemplate::metadata) + .map(md -> md.get(ClusterStateSystemTemplateLoader.TEMPLATE_TYPE_KEY)) + .filter(ob -> SystemTemplateMetadata.COMPONENT_TEMPLATE_TYPE.equals(ob.toString())) + .isPresent(); } public ClusterState addIndexTemplateV2( @@ -613,7 +690,8 @@ public ClusterState addIndexTemplateV2( template.priority(), template.version(), template.metadata(), - template.getDataStreamTemplate() + template.getDataStreamTemplate(), + template.context() ); } @@ -866,7 +944,7 @@ static ClusterState innerRemoveIndexTemplateV2(ClusterState currentState, String static Set dataStreamsUsingTemplate(final ClusterState state, final String templateName) { final ComposableIndexTemplate template = state.metadata().templatesV2().get(templateName); - if (template == null) { + if (template == null || template.getDataStreamTemplate() == null) { return Collections.emptySet(); } final Set dataStreams = state.metadata().dataStreams().keySet(); @@ -1140,7 +1218,7 @@ public static List collectMappings(final ClusterState state, .map(Template::mappings) .filter(Objects::nonNull) .collect(Collectors.toCollection(LinkedList::new)); - // Add the actual index template's mappings, since it takes the highest precedence + // Add the actual index template's mappings, since it takes the next precedence Optional.ofNullable(template.template()).map(Template::mappings).ifPresent(mappings::add); if (template.getDataStreamTemplate() != null && indexName.startsWith(DataStream.BACKING_INDEX_PREFIX)) { // add a default mapping for the timestamp field, at the lowest precedence, to make bootstrapping data streams more @@ -1165,6 +1243,15 @@ public static List collectMappings(final ClusterState state, }) .ifPresent(mappings::add); } + + // Now use context mappings which take the highest precedence + Optional.ofNullable(template.context()) + .map(ctx -> 
findContextTemplate(state.metadata(), ctx)) + .map(name -> state.metadata().componentTemplates().get(name)) + .map(ComponentTemplate::template) + .map(Template::mappings) + .ifPresent(mappings::add); + return Collections.unmodifiableList(mappings); } @@ -1226,8 +1313,14 @@ private static Settings resolveSettings(Metadata metadata, ComposableIndexTempla Settings.Builder templateSettings = Settings.builder(); componentSettings.forEach(templateSettings::put); - // Add the actual index template's settings to the end, since it takes the highest precedence. + // Add the actual index template's settings now, since it takes the next precedence. Optional.ofNullable(template.template()).map(Template::settings).ifPresent(templateSettings::put); + + // Add the template referred by context since it will take the highest precedence. + final String systemTemplate = findContextTemplate(metadata, template.context()); + final ComponentTemplate componentTemplate = metadata.componentTemplates().get(systemTemplate); + Optional.ofNullable(componentTemplate).map(ComponentTemplate::template).map(Template::settings).ifPresent(templateSettings::put); + return templateSettings.build(); } @@ -1269,8 +1362,16 @@ public static List> resolveAliases(final Metadata met .filter(Objects::nonNull) .collect(Collectors.toList()); - // Add the actual index template's aliases to the end if they exist + // Add the actual index template's aliases now if they exist Optional.ofNullable(template.template()).map(Template::aliases).ifPresent(aliases::add); + + // Now use context referenced template's aliases which take the highest precedence + if (template.context() != null) { + final String systemTemplate = findContextTemplate(metadata, template.context()); + final ComponentTemplate componentTemplate = metadata.componentTemplates().get(systemTemplate); + Optional.ofNullable(componentTemplate.template()).map(Template::aliases).ifPresent(aliases::add); + } + // Aliases are applied in order, but subsequent alias configuration from the same name is // ignored, so in order for the order to be correct, alias configuration should be in order // of precedence (with the index template first) diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataMappingService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataMappingService.java index 1406287149e8d..43894db86c512 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataMappingService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataMappingService.java @@ -55,6 +55,7 @@ import org.opensearch.core.common.Strings; import org.opensearch.core.index.Index; import org.opensearch.index.IndexService; +import org.opensearch.index.compositeindex.CompositeIndexValidator; import org.opensearch.index.mapper.DocumentMapper; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.MapperService.MergeReason; @@ -282,6 +283,7 @@ private ClusterState applyRequest( // first, simulate: just call merge and ignore the result existingMapper.merge(newMapper.mapping(), MergeReason.MAPPING_UPDATE); } + } Metadata.Builder builder = Metadata.builder(metadata); boolean updated = false; @@ -291,7 +293,7 @@ private ClusterState applyRequest( // we use the exact same indexService and metadata we used to validate above here to actually apply the update final Index index = indexMetadata.getIndex(); final MapperService mapperService = indexMapperServices.get(index); - + boolean isCompositeFieldPresent = 
!mapperService.getCompositeFieldTypes().isEmpty(); CompressedXContent existingSource = null; DocumentMapper existingMapper = mapperService.documentMapper(); if (existingMapper != null) { @@ -302,6 +304,14 @@ private ClusterState applyRequest( mappingUpdateSource, MergeReason.MAPPING_UPDATE ); + + CompositeIndexValidator.validate( + mapperService, + indicesService.getCompositeIndexSettings(), + mapperService.getIndexSettings(), + isCompositeFieldPresent + ); + CompressedXContent updatedSource = mergedMapper.mappingSource(); if (existingSource != null) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/QueryGroup.java b/server/src/main/java/org/opensearch/cluster/metadata/QueryGroup.java new file mode 100644 index 0000000000000..9b5c6bc2369a6 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/metadata/QueryGroup.java @@ -0,0 +1,327 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.cluster.AbstractDiffable; +import org.opensearch.cluster.Diff; +import org.opensearch.common.UUIDs; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.search.ResourceType; +import org.joda.time.Instant; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** + * Class to define the QueryGroup schema + * { + * "_id": "fafjafjkaf9ag8a9ga9g7ag0aagaga", + * "resource_limits": { + * "memory": 0.4 + * }, + * "resiliency_mode": "enforced", + * "name": "analytics", + * "updated_at": 4513232415 + * } + */ +@ExperimentalApi +public class QueryGroup extends AbstractDiffable implements ToXContentObject { + + public static final String _ID_STRING = "_id"; + public static final String NAME_STRING = "name"; + public static final String RESILIENCY_MODE_STRING = "resiliency_mode"; + public static final String UPDATED_AT_STRING = "updated_at"; + public static final String RESOURCE_LIMITS_STRING = "resource_limits"; + private static final int MAX_CHARS_ALLOWED_IN_NAME = 50; + private final String name; + private final String _id; + private final ResiliencyMode resiliencyMode; + // It is an epoch in millis + private final long updatedAtInMillis; + private final Map resourceLimits; + + public QueryGroup(String name, ResiliencyMode resiliencyMode, Map resourceLimits) { + this(name, UUIDs.randomBase64UUID(), resiliencyMode, resourceLimits, Instant.now().getMillis()); + } + + public QueryGroup(String name, String _id, ResiliencyMode resiliencyMode, Map resourceLimits, long updatedAt) { + Objects.requireNonNull(name, "QueryGroup.name can't be null"); + Objects.requireNonNull(resourceLimits, "QueryGroup.resourceLimits can't be null"); + Objects.requireNonNull(resiliencyMode, "QueryGroup.resiliencyMode can't be null"); + Objects.requireNonNull(_id, "QueryGroup._id can't be null"); + validateName(name); + + if (resourceLimits.isEmpty()) { + throw new IllegalArgumentException("QueryGroup.resourceLimits should at least have 1 resource limit"); + } + validateResourceLimits(resourceLimits); + if (!isValid(updatedAt)) { + throw 
new IllegalArgumentException("QueryGroup.updatedAtInMillis is not a valid epoch"); + } + + this.name = name; + this._id = _id; + this.resiliencyMode = resiliencyMode; + this.resourceLimits = resourceLimits; + this.updatedAtInMillis = updatedAt; + } + + private static boolean isValid(long updatedAt) { + long minValidTimestamp = Instant.ofEpochMilli(0L).getMillis(); + + // Use Instant.now() to get the current time in seconds since epoch + long currentSeconds = Instant.now().getMillis(); + + // Check if the timestamp is within a reasonable range + return minValidTimestamp <= updatedAt && updatedAt <= currentSeconds; + } + + public QueryGroup(StreamInput in) throws IOException { + this( + in.readString(), + in.readString(), + ResiliencyMode.fromName(in.readString()), + in.readMap((i) -> ResourceType.fromName(i.readString()), StreamInput::readDouble), + in.readLong() + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + out.writeString(_id); + out.writeString(resiliencyMode.getName()); + out.writeMap(resourceLimits, ResourceType::writeTo, StreamOutput::writeDouble); + out.writeLong(updatedAtInMillis); + } + + public static void validateName(String name) { + if (name == null || name.isEmpty() || name.length() > MAX_CHARS_ALLOWED_IN_NAME) { + throw new IllegalArgumentException("QueryGroup.name shouldn't be null, empty or more than 50 chars long"); + } + } + + private void validateResourceLimits(Map resourceLimits) { + for (Map.Entry resource : resourceLimits.entrySet()) { + Double threshold = resource.getValue(); + Objects.requireNonNull(resource.getKey(), "resourceName can't be null"); + Objects.requireNonNull(threshold, "resource limit threshold for" + resource.getKey().getName() + " : can't be null"); + + if (Double.compare(threshold, 0.0) <= 0 || Double.compare(threshold, 1.0) > 0) { + throw new IllegalArgumentException("resource value should be greater than 0 and less or equal to 1.0"); + } + } + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + builder.field(_ID_STRING, _id); + builder.field(NAME_STRING, name); + builder.field(RESILIENCY_MODE_STRING, resiliencyMode.getName()); + builder.field(UPDATED_AT_STRING, updatedAtInMillis); + // write resource limits + builder.startObject(RESOURCE_LIMITS_STRING); + for (ResourceType resourceType : ResourceType.values()) { + if (resourceLimits.containsKey(resourceType)) { + builder.field(resourceType.getName(), resourceLimits.get(resourceType)); + } + } + builder.endObject(); + + builder.endObject(); + return builder; + } + + public static QueryGroup fromXContent(final XContentParser parser) throws IOException { + return Builder.fromXContent(parser).build(); + } + + public static Diff readDiff(final StreamInput in) throws IOException { + return readDiffFrom(QueryGroup::new, in); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + QueryGroup that = (QueryGroup) o; + return Objects.equals(name, that.name) + && Objects.equals(resiliencyMode, that.resiliencyMode) + && Objects.equals(resourceLimits, that.resourceLimits) + && Objects.equals(_id, that._id) + && updatedAtInMillis == that.updatedAtInMillis; + } + + @Override + public int hashCode() { + return Objects.hash(name, resourceLimits, updatedAtInMillis, _id); + } + + public String getName() { + return name; + } + + public ResiliencyMode 
getResiliencyMode() { + return resiliencyMode; + } + + public Map getResourceLimits() { + return resourceLimits; + } + + public String get_id() { + return _id; + } + + public long getUpdatedAtInMillis() { + return updatedAtInMillis; + } + + /** + * builder method for the {@link QueryGroup} + * @return Builder object + */ + public static Builder builder() { + return new Builder(); + } + + /** + * This enum models the different QueryGroup resiliency modes + * SOFT - means that this query group can consume more than query group resource limits if node is not in duress + * ENFORCED - means that it will never breach the assigned limits and will cancel as soon as the limits are breached + * MONITOR - it will not cause any cancellation but just log the eligible task cancellations + */ + @ExperimentalApi + public enum ResiliencyMode { + SOFT("soft"), + ENFORCED("enforced"), + MONITOR("monitor"); + + private final String name; + + ResiliencyMode(String mode) { + this.name = mode; + } + + public String getName() { + return name; + } + + public static ResiliencyMode fromName(String s) { + for (ResiliencyMode mode : values()) { + if (mode.getName().equalsIgnoreCase(s)) return mode; + + } + throw new IllegalArgumentException("Invalid value for QueryGroupMode: " + s); + } + } + + /** + * Builder class for {@link QueryGroup} + */ + @ExperimentalApi + public static class Builder { + private String name; + private String _id; + private ResiliencyMode resiliencyMode; + private long updatedAt; + private Map resourceLimits; + + private Builder() {} + + public static Builder fromXContent(XContentParser parser) throws IOException { + if (parser.currentToken() == null) { // fresh parser? move to the first token + parser.nextToken(); + } + + Builder builder = builder(); + + XContentParser.Token token = parser.currentToken(); + + if (token != XContentParser.Token.START_OBJECT) { + throw new IllegalArgumentException("Expected START_OBJECT token but found [" + parser.currentName() + "]"); + } + + String fieldName = ""; + // Map to hold resources + final Map resourceLimits = new HashMap<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + fieldName = parser.currentName(); + } else if (token.isValue()) { + if (fieldName.equals(_ID_STRING)) { + builder._id(parser.text()); + } else if (fieldName.equals(NAME_STRING)) { + builder.name(parser.text()); + } else if (fieldName.equals(RESILIENCY_MODE_STRING)) { + builder.mode(parser.text()); + } else if (fieldName.equals(UPDATED_AT_STRING)) { + builder.updatedAt(parser.longValue()); + } else { + throw new IllegalArgumentException(fieldName + " is not a valid field in QueryGroup"); + } + } else if (token == XContentParser.Token.START_OBJECT) { + + if (!fieldName.equals(RESOURCE_LIMITS_STRING)) { + throw new IllegalArgumentException( + "QueryGroup.resourceLimits is an object and expected token was { " + " but found " + token + ); + } + + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + fieldName = parser.currentName(); + } else { + resourceLimits.put(ResourceType.fromName(fieldName), parser.doubleValue()); + } + } + + } + } + return builder.resourceLimits(resourceLimits); + } + + public Builder name(String name) { + this.name = name; + return this; + } + + public Builder _id(String _id) { + this._id = _id; + return this; + } + + public Builder mode(String mode) { + this.resiliencyMode = ResiliencyMode.fromName(mode); + 
return this; + } + + public Builder updatedAt(long updatedAt) { + this.updatedAt = updatedAt; + return this; + } + + public Builder resourceLimits(Map resourceLimits) { + this.resourceLimits = resourceLimits; + return this; + } + + public QueryGroup build() { + return new QueryGroup(name, _id, resiliencyMode, resourceLimits, updatedAt); + } + } +} diff --git a/server/src/main/java/org/opensearch/cluster/metadata/QueryGroupMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/QueryGroupMetadata.java new file mode 100644 index 0000000000000..79732bc505ee2 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/metadata/QueryGroupMetadata.java @@ -0,0 +1,185 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.Version; +import org.opensearch.cluster.Diff; +import org.opensearch.cluster.DiffableUtils; +import org.opensearch.cluster.NamedDiff; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +import static org.opensearch.cluster.metadata.Metadata.ALL_CONTEXTS; + +/** + * This class holds the QueryGroupMetadata + * sample schema + * { + * "queryGroups": { + * "_id": { + * {@link QueryGroup} + * }, + * ... 
+ * } + * } + */ +public class QueryGroupMetadata implements Metadata.Custom { + public static final String TYPE = "queryGroups"; + private static final ParseField QUERY_GROUP_FIELD = new ParseField("queryGroups"); + + private final Map queryGroups; + + public QueryGroupMetadata(Map queryGroups) { + this.queryGroups = queryGroups; + } + + public QueryGroupMetadata(StreamInput in) throws IOException { + this.queryGroups = in.readMap(StreamInput::readString, QueryGroup::new); + } + + public Map queryGroups() { + return this.queryGroups; + } + + /** + * Returns the name of the writeable object + */ + @Override + public String getWriteableName() { + return TYPE; + } + + /** + * The minimal version of the recipient this object can be sent to + */ + @Override + public Version getMinimalSupportedVersion() { + return Version.V_3_0_0; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(queryGroups, StreamOutput::writeString, (stream, val) -> val.writeTo(stream)); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + for (Map.Entry entry : queryGroups.entrySet()) { + builder.field(entry.getKey(), entry.getValue()); + } + return builder; + } + + public static QueryGroupMetadata fromXContent(XContentParser parser) throws IOException { + Map queryGroupMap = new HashMap<>(); + + if (parser.currentToken() == null) { + parser.nextToken(); + } + + if (parser.currentToken() != XContentParser.Token.START_OBJECT) { + throw new IllegalArgumentException( + "QueryGroupMetadata.fromXContent was expecting a { token but found : " + parser.currentToken() + ); + } + XContentParser.Token token = parser.currentToken(); + String fieldName = parser.currentName(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + fieldName = parser.currentName(); + } else { + QueryGroup queryGroup = QueryGroup.fromXContent(parser); + queryGroupMap.put(fieldName, queryGroup); + } + } + + return new QueryGroupMetadata(queryGroupMap); + } + + @Override + public Diff diff(final Metadata.Custom previousState) { + return new QueryGroupMetadataDiff((QueryGroupMetadata) previousState, this); + } + + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + return new QueryGroupMetadataDiff(in); + } + + @Override + public EnumSet context() { + return ALL_CONTEXTS; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + QueryGroupMetadata that = (QueryGroupMetadata) o; + return Objects.equals(queryGroups, that.queryGroups); + } + + @Override + public int hashCode() { + return Objects.hash(queryGroups); + } + + @Override + public String toString() { + return Strings.toString(MediaTypeRegistry.JSON, this); + } + + /** + * QueryGroupMetadataDiff + */ + static class QueryGroupMetadataDiff implements NamedDiff { + final Diff> dataStreamDiff; + + QueryGroupMetadataDiff(final QueryGroupMetadata before, final QueryGroupMetadata after) { + dataStreamDiff = DiffableUtils.diff(before.queryGroups, after.queryGroups, DiffableUtils.getStringKeySerializer()); + } + + QueryGroupMetadataDiff(final StreamInput in) throws IOException { + this.dataStreamDiff = DiffableUtils.readJdkMapDiff( + in, + DiffableUtils.getStringKeySerializer(), + QueryGroup::new, + QueryGroup::readDiff + ); + } + + /** + * Returns the name of the writeable object + */ + @Override + public String 
getWriteableName() { + return TYPE; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + dataStreamDiff.writeTo(out); + } + + @Override + public Metadata.Custom apply(Metadata.Custom part) { + return new QueryGroupMetadata(new HashMap<>(dataStreamDiff.apply(((QueryGroupMetadata) part).queryGroups))); + } + } +} diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java index 479143fa9a2f0..41ed7631b0167 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java @@ -34,6 +34,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.AbstractDiffable; +import org.opensearch.cluster.Diff; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.Nullable; @@ -75,7 +77,7 @@ * @opensearch.api */ @PublicApi(since = "1.0.0") -public class IndexShardRoutingTable implements Iterable { +public class IndexShardRoutingTable extends AbstractDiffable implements Iterable { final ShardShuffler shuffler; // Shuffler for weighted round-robin shard routing. This uses rotation to permute shards. @@ -527,6 +529,12 @@ private static List rankShardsAndUpdateStats( return sortedShards; } + @Override + public void writeTo(StreamOutput out) throws IOException { + this.shardId().getIndex().writeTo(out); + Builder.writeToThin(this, out); + } + private static class NodeRankComparator implements Comparator { private final Map nodeRanks; @@ -1049,6 +1057,14 @@ private void populateInitializingShardWeightsMap(WeightedRouting weightedRouting } } + public static IndexShardRoutingTable readFrom(StreamInput in) throws IOException { + return IndexShardRoutingTable.Builder.readFrom(in); + } + + public static Diff readDiffFrom(StreamInput in) throws IOException { + return readDiffFrom(IndexShardRoutingTable::readFrom, in); + } + /** * Builder of an index shard routing table. 
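
Stepping back to the QueryGroup class added above, here is a hedged usage sketch of its builder. It would compile only against the server module; the field values are hypothetical (the _id is the sample string from the class javadoc), and "memory" assumes ResourceType.fromName accepts the name shown in the schema comment:

    import java.util.Map;

    import org.opensearch.cluster.metadata.QueryGroup;
    import org.opensearch.search.ResourceType;

    // Sketch only: exercises the builder and the validation shown in the patch
    // (non-null name/_id/mode/limits, at least one limit in (0.0, 1.0],
    // and updatedAt between the epoch and "now").
    public class QueryGroupSketch {
        public static void main(String[] args) {
            QueryGroup group = QueryGroup.builder()
                .name("analytics")
                ._id("fafjafjkaf9ag8a9ga9g7ag0aagaga")
                .mode("enforced")
                .updatedAt(System.currentTimeMillis())
                .resourceLimits(Map.of(ResourceType.fromName("memory"), 0.4))
                .build();
            System.out.println(group.getName() + " -> " + group.getResourceLimits());
        }
    }

Note that QueryGroupMetadata above is keyed by this _id, which is why the builder requires it rather than always generating one.
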
* diff --git a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java index 6158461c7d4e9..6242247f34a93 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java @@ -42,8 +42,10 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.Strings; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.IndexModule; import org.opensearch.index.IndexNotFoundException; import org.opensearch.node.ResponseCollectorService; @@ -245,6 +247,13 @@ public GroupShardsIterator searchShards( preference = Preference.PRIMARY.type(); } + if (FeatureFlags.isEnabled(FeatureFlags.TIERED_REMOTE_INDEX) + && IndexModule.DataLocalityType.PARTIAL.name() + .equals(indexMetadataForShard.getSettings().get(IndexModule.INDEX_STORE_LOCALITY_SETTING.getKey())) + && (preference == null || preference.isEmpty())) { + preference = Preference.PRIMARY_FIRST.type(); + } + ShardIterator iterator = preferenceActiveShardIterator( shard, clusterState.nodes().getLocalNodeId(), diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java index 6c7b94f316da2..3c6f89ae8c497 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java @@ -378,6 +378,10 @@ public Diff diff(RoutingTable previousState) { return new RoutingTableDiff(previousState, this); } + public Diff incrementalDiff(RoutingTable previousState) { + return new RoutingTableIncrementalDiff(previousState, this); + } + public static Diff readDiffFrom(StreamInput in) throws IOException { return new RoutingTableDiff(in); } @@ -403,7 +407,7 @@ public void writeTo(StreamOutput out) throws IOException { } } - private static class RoutingTableDiff implements Diff { + private static class RoutingTableDiff implements Diff, StringKeyDiffProvider { private final long version; @@ -432,6 +436,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(version); indicesRouting.writeTo(out); } + + @Override + public DiffableUtils.MapDiff> provideDiff() { + return (DiffableUtils.MapDiff>) indicesRouting; + } } public static Builder builder() { diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingTableIncrementalDiff.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingTableIncrementalDiff.java new file mode 100644 index 0000000000000..13501a431d9f9 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingTableIncrementalDiff.java @@ -0,0 +1,130 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.routing; + +import org.opensearch.cluster.Diff; +import org.opensearch.cluster.DiffableUtils; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.index.Index; + +import java.io.IOException; +import java.util.Map; + +import static org.opensearch.cluster.DiffableUtils.MapDiff; + +/** + * Represents a difference between {@link RoutingTable} objects that can be serialized and deserialized. + */ +public class RoutingTableIncrementalDiff implements Diff, StringKeyDiffProvider { + + private final Diff> indicesRouting; + + private final long version; + + private static final DiffableUtils.DiffableValueSerializer CUSTOM_ROUTING_TABLE_DIFFABLE_VALUE_SERIALIZER = + new DiffableUtils.DiffableValueSerializer<>() { + + @Override + public IndexRoutingTable read(StreamInput in, String key) throws IOException { + return IndexRoutingTable.readFrom(in); + } + + @Override + public Diff readDiff(StreamInput in, String key) throws IOException { + return new RoutingTableIncrementalDiff.IndexRoutingTableIncrementalDiff(in); + } + + @Override + public Diff diff(IndexRoutingTable currentState, IndexRoutingTable previousState) { + return new RoutingTableIncrementalDiff.IndexRoutingTableIncrementalDiff( + currentState.getIndex(), + previousState, + currentState + ); + } + }; + + public RoutingTableIncrementalDiff(RoutingTable before, RoutingTable after) { + version = after.version(); + indicesRouting = DiffableUtils.diff( + before.getIndicesRouting(), + after.getIndicesRouting(), + DiffableUtils.getStringKeySerializer(), + CUSTOM_ROUTING_TABLE_DIFFABLE_VALUE_SERIALIZER + ); + } + + public RoutingTableIncrementalDiff(StreamInput in) throws IOException { + version = in.readLong(); + indicesRouting = DiffableUtils.readJdkMapDiff( + in, + DiffableUtils.getStringKeySerializer(), + CUSTOM_ROUTING_TABLE_DIFFABLE_VALUE_SERIALIZER + ); + } + + public static RoutingTableIncrementalDiff readFrom(StreamInput in) throws IOException { + return new RoutingTableIncrementalDiff(in); + } + + @Override + public RoutingTable apply(RoutingTable part) { + return new RoutingTable(version, indicesRouting.apply(part.getIndicesRouting())); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(version); + indicesRouting.writeTo(out); + } + + @Override + public MapDiff> provideDiff() { + return (MapDiff>) indicesRouting; + } + + /** + * Represents a difference between {@link IndexShardRoutingTable} objects that can be serialized and deserialized. 
+ */ + public static class IndexRoutingTableIncrementalDiff implements Diff { + + private final Diff> indexShardRoutingTables; + + private final Index index; + + public IndexRoutingTableIncrementalDiff(Index index, IndexRoutingTable before, IndexRoutingTable after) { + this.index = index; + this.indexShardRoutingTables = DiffableUtils.diff(before.getShards(), after.getShards(), DiffableUtils.getIntKeySerializer()); + } + + private static final DiffableUtils.DiffableValueReader DIFF_VALUE_READER = + new DiffableUtils.DiffableValueReader<>(IndexShardRoutingTable::readFrom, IndexShardRoutingTable::readDiffFrom); + + public IndexRoutingTableIncrementalDiff(StreamInput in) throws IOException { + this.index = new Index(in); + this.indexShardRoutingTables = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getIntKeySerializer(), DIFF_VALUE_READER); + } + + @Override + public IndexRoutingTable apply(IndexRoutingTable part) { + return new IndexRoutingTable(index, indexShardRoutingTables.apply(part.getShards())); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + index.writeTo(out); + indexShardRoutingTables.writeTo(out); + } + + public static IndexRoutingTableIncrementalDiff readFrom(StreamInput in) throws IOException { + return new IndexRoutingTableIncrementalDiff(in); + } + } +} diff --git a/server/src/main/java/org/opensearch/cluster/routing/StringKeyDiffProvider.java b/server/src/main/java/org/opensearch/cluster/routing/StringKeyDiffProvider.java new file mode 100644 index 0000000000000..5d36a238e03ff --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/StringKeyDiffProvider.java @@ -0,0 +1,33 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.routing; + +import org.opensearch.cluster.DiffableUtils; + +import java.util.Map; + +/** + * Interface for providing a difference (diff) between two maps with {@code String} keys and values of type {@code V}. + * This interface is used to compute and obtain the difference between two versions of a map, typically used + * in cluster state updates or other scenarios where changes need to be tracked and propagated efficiently. + * + * @param the type of the values in the map + */ +public interface StringKeyDiffProvider { + + /** + * Provides the difference between two versions of a map with {@code String} keys and values of type {@code V}. + * The difference is represented as a {@link DiffableUtils.MapDiff} object, which can be used to apply the + * changes to another map or to serialize the diff. 
+ * + * @return a {@link DiffableUtils.MapDiff} object representing the difference between the maps + */ + DiffableUtils.MapDiff> provideDiff(); + +} diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java index 5ad3a2fd47ce3..e29a81a2c131f 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java @@ -72,6 +72,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; @@ -617,10 +618,10 @@ private void allocateExistingUnassignedShards(RoutingAllocation allocation) { private void allocateAllUnassignedShards(RoutingAllocation allocation) { ExistingShardsAllocator allocator = existingShardsAllocators.get(ShardsBatchGatewayAllocator.ALLOCATOR_NAME); - allocator.allocateAllUnassignedShards(allocation, true); + Optional.ofNullable(allocator.allocateAllUnassignedShards(allocation, true)).ifPresent(Runnable::run); allocator.afterPrimariesBeforeReplicas(allocation); // Replicas Assignment - allocator.allocateAllUnassignedShards(allocation, false); + Optional.ofNullable(allocator.allocateAllUnassignedShards(allocation, false)).ifPresent(Runnable::run); } private void disassociateDeadNodes(RoutingAllocation allocation) { diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java index 08fe8f92d1f80..28ad199218884 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java @@ -70,7 +70,7 @@ public static Predicate isPerIndexPrimaryShardsPerN return (params) -> { int perIndexPrimaryShardCount = params.getNode().numPrimaryShards(params.getIndex()); int perIndexAllowedPrimaryShardCount = (int) Math.ceil(params.getBalancer().avgPrimaryShardsPerNode(params.getIndex())); - return perIndexPrimaryShardCount > perIndexAllowedPrimaryShardCount; + return perIndexPrimaryShardCount >= perIndexAllowedPrimaryShardCount; }; } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/ExistingShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/ExistingShardsAllocator.java index fb2a37237f8b6..eb7a1e7209c37 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/ExistingShardsAllocator.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/ExistingShardsAllocator.java @@ -41,6 +41,7 @@ import org.opensearch.gateway.GatewayAllocator; import org.opensearch.gateway.ShardsBatchGatewayAllocator; +import java.util.ArrayList; import java.util.List; /** @@ -108,14 +109,16 @@ void allocateUnassigned( * * Allocation service will currently run the default implementation of it implemented by {@link ShardsBatchGatewayAllocator} */ - default void allocateAllUnassignedShards(RoutingAllocation allocation, boolean primary) { + default Runnable allocateAllUnassignedShards(RoutingAllocation allocation, boolean primary) { RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); + List runnables = new ArrayList<>(); while (iterator.hasNext()) { 
ShardRouting shardRouting = iterator.next(); if (shardRouting.primary() == primary) { - allocateUnassigned(shardRouting, allocation, iterator); + runnables.add(() -> allocateUnassigned(shardRouting, allocation, iterator)); } } + return () -> runnables.forEach(Runnable::run); } /** diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index b2443490dd973..212583d1fb14f 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -154,6 +154,13 @@ public class BalancedShardsAllocator implements ShardsAllocator { Property.NodeScope ); + public static final Setting IGNORE_THROTTLE_FOR_REMOTE_RESTORE = Setting.boolSetting( + "cluster.routing.allocation.remote_primary.ignore_throttle", + true, + Property.Dynamic, + Property.NodeScope + ); + public static final Setting PRIMARY_SHARD_REBALANCE_BUFFER = Setting.floatSetting( "cluster.routing.allocation.rebalance.primary.buffer", 0.10f, @@ -173,6 +180,8 @@ public class BalancedShardsAllocator implements ShardsAllocator { private volatile WeightFunction weightFunction; private volatile float threshold; + private volatile boolean ignoreThrottleInRestore; + public BalancedShardsAllocator(Settings settings) { this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); } @@ -182,6 +191,7 @@ public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSetting setShardBalanceFactor(SHARD_BALANCE_FACTOR_SETTING.get(settings)); setIndexBalanceFactor(INDEX_BALANCE_FACTOR_SETTING.get(settings)); setPreferPrimaryShardRebalanceBuffer(PRIMARY_SHARD_REBALANCE_BUFFER.get(settings)); + setIgnoreThrottleInRestore(IGNORE_THROTTLE_FOR_REMOTE_RESTORE.get(settings)); updateWeightFunction(); setThreshold(THRESHOLD_SETTING.get(settings)); setPreferPrimaryShardBalance(PREFER_PRIMARY_SHARD_BALANCE.get(settings)); @@ -195,6 +205,7 @@ public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSetting clusterSettings.addSettingsUpdateConsumer(PRIMARY_SHARD_REBALANCE_BUFFER, this::updatePreferPrimaryShardBalanceBuffer); clusterSettings.addSettingsUpdateConsumer(PREFER_PRIMARY_SHARD_REBALANCE, this::setPreferPrimaryShardRebalance); clusterSettings.addSettingsUpdateConsumer(THRESHOLD_SETTING, this::setThreshold); + clusterSettings.addSettingsUpdateConsumer(IGNORE_THROTTLE_FOR_REMOTE_RESTORE, this::setIgnoreThrottleInRestore); } /** @@ -205,6 +216,10 @@ private void setMovePrimaryFirst(boolean movePrimaryFirst) { setShardMovementStrategy(this.shardMovementStrategy); } + private void setIgnoreThrottleInRestore(boolean ignoreThrottleInRestore) { + this.ignoreThrottleInRestore = ignoreThrottleInRestore; + } + /** * Sets the correct Shard movement strategy to use. * If users are still using deprecated setting `move_primary_first`, we want behavior to remain unchanged. 
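
Returning to the ExistingShardsAllocator hunk above: allocateAllUnassignedShards now returns a Runnable that batches the per-shard allocateUnassigned calls instead of executing them while iterating, and AllocationService runs the batch only if one was returned. A self-contained sketch of that collect-then-run contract, with simplified types and hypothetical shard ids:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Optional;

    public class DeferredAllocationSketch {
        interface Allocator {
            Runnable allocateAll(List<String> unassigned, boolean primary);
        }

        public static void main(String[] args) {
            Allocator allocator = (unassigned, primary) -> {
                List<Runnable> runnables = new ArrayList<>();
                for (String shard : unassigned) {
                    // decide while iterating, execute later
                    runnables.add(() -> System.out.println("allocating " + shard + " primary=" + primary));
                }
                return () -> runnables.forEach(Runnable::run);
            };

            // Null-tolerant invocation, mirroring AllocationService#allocateAllUnassignedShards.
            Optional.ofNullable(allocator.allocateAll(List.of("[idx][0]", "[idx][1]"), true))
                .ifPresent(Runnable::run);
        }
    }

Separating the decision pass from the mutation pass is what lets an implementation defer or skip execution without changing its callers.
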
@@ -282,7 +297,8 @@ public void allocate(RoutingAllocation allocation) { weightFunction, threshold, preferPrimaryShardBalance, - preferPrimaryShardRebalance + preferPrimaryShardRebalance, + ignoreThrottleInRestore ); localShardsBalancer.allocateUnassigned(); localShardsBalancer.moveShards(); @@ -304,7 +320,8 @@ public ShardAllocationDecision decideShardAllocation(final ShardRouting shard, f weightFunction, threshold, preferPrimaryShardBalance, - preferPrimaryShardRebalance + preferPrimaryShardRebalance, + ignoreThrottleInRestore ); AllocateUnassignedDecision allocateUnassignedDecision = AllocateUnassignedDecision.NOT_TAKEN; MoveDecision moveDecision = MoveDecision.NOT_TAKEN; @@ -459,6 +476,7 @@ void updateRebalanceConstraint(String constraint, boolean add) { public static class ModelNode implements Iterable { private final Map indices = new HashMap<>(); private int numShards = 0; + private int numPrimaryShards = 0; private final RoutingNode routingNode; ModelNode(RoutingNode routingNode) { @@ -492,7 +510,7 @@ public int numPrimaryShards(String idx) { } public int numPrimaryShards() { - return indices.values().stream().mapToInt(index -> index.numPrimaryShards()).sum(); + return numPrimaryShards; } public int highestPrimary(String index) { @@ -510,6 +528,10 @@ public void addShard(ShardRouting shard) { indices.put(index.getIndexId(), index); } index.addShard(shard); + if (shard.primary()) { + numPrimaryShards++; + } + numShards++; } @@ -521,6 +543,11 @@ public void removeShard(ShardRouting shard) { indices.remove(shard.getIndexName()); } } + + if (shard.primary()) { + numPrimaryShards--; + } + numShards--; } @@ -558,7 +585,7 @@ public Balancer( float threshold, boolean preferPrimaryBalance ) { - super(logger, allocation, shardMovementStrategy, weight, threshold, preferPrimaryBalance, false); + super(logger, allocation, shardMovementStrategy, weight, threshold, preferPrimaryBalance, false, false); } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java index ec25d041bda43..7e4ae58548c55 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java @@ -13,6 +13,7 @@ import org.apache.lucene.util.IntroSorter; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.RoutingNodes; import org.opensearch.cluster.routing.RoutingPool; @@ -32,7 +33,6 @@ import org.opensearch.gateway.PriorityComparator; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; @@ -41,7 +41,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.stream.Collectors; import java.util.stream.Stream; import java.util.stream.StreamSupport; @@ -62,6 +61,8 @@ public class LocalShardsBalancer extends ShardsBalancer { private final boolean preferPrimaryBalance; private final boolean preferPrimaryRebalance; + + private final boolean ignoreThrottleInRestore; private final BalancedShardsAllocator.WeightFunction weight; private final float threshold; @@ -70,6 +71,7 @@ public class LocalShardsBalancer extends ShardsBalancer { 
private final float avgPrimaryShardsPerNode; private final BalancedShardsAllocator.NodeSorter sorter; private final Set inEligibleTargetNode; + private int totalShardCount = 0; public LocalShardsBalancer( Logger logger, @@ -78,7 +80,8 @@ public LocalShardsBalancer( BalancedShardsAllocator.WeightFunction weight, float threshold, boolean preferPrimaryBalance, - boolean preferPrimaryRebalance + boolean preferPrimaryRebalance, + boolean ignoreThrottleInRestore ) { this.logger = logger; this.allocation = allocation; @@ -95,6 +98,7 @@ public LocalShardsBalancer( this.preferPrimaryBalance = preferPrimaryBalance; this.preferPrimaryRebalance = preferPrimaryRebalance; this.shardMovementStrategy = shardMovementStrategy; + this.ignoreThrottleInRestore = ignoreThrottleInRestore; } /** @@ -127,8 +131,7 @@ public float avgPrimaryShardsPerNode() { */ @Override public float avgShardsPerNode() { - float totalShards = nodes.values().stream().map(BalancedShardsAllocator.ModelNode::numShards).reduce(0, Integer::sum); - return totalShards / nodes.size(); + return totalShardCount / nodes.size(); } /** @@ -600,6 +603,7 @@ void moveShards() { final BalancedShardsAllocator.ModelNode sourceNode = nodes.get(shardRouting.currentNodeId()); final BalancedShardsAllocator.ModelNode targetNode = nodes.get(moveDecision.getTargetNode().getId()); sourceNode.removeShard(shardRouting); + --totalShardCount; Tuple relocatingShards = routingNodes.relocateShard( shardRouting, targetNode.getNodeId(), @@ -607,6 +611,7 @@ void moveShards() { allocation.changes() ); targetNode.addShard(relocatingShards.v2()); + ++totalShardCount; if (logger.isTraceEnabled()) { logger.trace("Moved shard [{}] to node [{}]", shardRouting, targetNode.getRoutingNode()); } @@ -726,6 +731,7 @@ private Map buildModelFromAssigned() /* we skip relocating shards here since we expect an initializing shard with the same id coming in */ if (RoutingPool.LOCAL_ONLY.equals(RoutingPool.getShardPool(shard, allocation)) && shard.state() != RELOCATING) { node.addShard(shard); + ++totalShardCount; if (logger.isTraceEnabled()) { logger.trace("Assigned shard [{}] to node [{}]", shard, node.getNodeId()); } @@ -779,15 +785,16 @@ void allocateUnassigned() { * if we allocate for instance (0, R, IDX1) we move the second replica to the secondary array and proceed with * the next replica. If we could not find a node to allocate (0,R,IDX1) we move all it's replicas to ignoreUnassigned. 
*/ - ShardRouting[] unassignedShards = unassigned.drain(); - List allUnassignedShards = Arrays.stream(unassignedShards).collect(Collectors.toList()); - List localUnassignedShards = allUnassignedShards.stream() - .filter(shard -> RoutingPool.LOCAL_ONLY.equals(RoutingPool.getShardPool(shard, allocation))) - .collect(Collectors.toList()); - allUnassignedShards.removeAll(localUnassignedShards); - allUnassignedShards.forEach(shard -> routingNodes.unassigned().add(shard)); - unassignedShards = localUnassignedShards.toArray(new ShardRouting[0]); - ShardRouting[] primary = unassignedShards; + List primaryList = new ArrayList<>(); + for (ShardRouting shard : unassigned.drain()) { + if (RoutingPool.LOCAL_ONLY.equals(RoutingPool.getShardPool(shard, allocation))) { + primaryList.add(shard); + } else { + routingNodes.unassigned().add(shard); + } + } + + ShardRouting[] primary = primaryList.toArray(new ShardRouting[0]); ShardRouting[] secondary = new ShardRouting[primary.length]; int secondaryLength = 0; int primaryLength = primary.length; @@ -816,6 +823,7 @@ void allocateUnassigned() { ); shard = routingNodes.initializeShard(shard, minNode.getNodeId(), null, shardSize, allocation.changes()); minNode.addShard(shard); + ++totalShardCount; if (!shard.primary()) { // copy over the same replica shards to the secondary array so they will get allocated // in a subsequent iteration, allowing replicas of other shards to be allocated first @@ -845,6 +853,7 @@ void allocateUnassigned() { allocation.routingTable() ); minNode.addShard(shard.initialize(minNode.getNodeId(), null, shardSize)); + ++totalShardCount; } else { if (logger.isTraceEnabled()) { logger.trace("No Node found to assign shard [{}]", shard); @@ -914,7 +923,15 @@ AllocateUnassignedDecision decideAllocateUnassigned(final ShardRouting shard) { nodeExplanationMap.put(node.getNodeId(), new NodeAllocationResult(node.getRoutingNode().node(), currentDecision, 0)); nodeWeights.add(Tuple.tuple(node.getNodeId(), currentWeight)); } - if (currentDecision.type() == Decision.Type.YES || currentDecision.type() == Decision.Type.THROTTLE) { + + // For REMOTE_STORE recoveries, THROTTLE is as good as NO as we want faster recoveries + // The side effect of this are increased relocations post these allocations. 
+ boolean considerThrottleAsNo = ignoreThrottleInRestore + && shard.recoverySource().getType() == RecoverySource.Type.REMOTE_STORE + && shard.primary(); + + if (currentDecision.type() == Decision.Type.YES + || (currentDecision.type() == Decision.Type.THROTTLE && considerThrottleAsNo == false)) { final boolean updateMinNode; if (currentWeight == minWeight) { /* we have an equal weight tie breaking: @@ -1012,18 +1029,21 @@ private boolean tryRelocateShard(BalancedShardsAllocator.ModelNode minNode, Bala } final Decision decision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision); maxNode.removeShard(shard); + --totalShardCount; long shardSize = allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); if (decision.type() == Decision.Type.YES) { /* only allocate on the cluster if we are not throttled */ logger.debug("Relocate [{}] from [{}] to [{}]", shard, maxNode.getNodeId(), minNode.getNodeId()); minNode.addShard(routingNodes.relocateShard(shard, minNode.getNodeId(), shardSize, allocation.changes()).v1()); + ++totalShardCount; return true; } else { /* allocate on the model even if throttled */ logger.debug("Simulate relocation of [{}] from [{}] to [{}]", shard, maxNode.getNodeId(), minNode.getNodeId()); assert decision.type() == Decision.Type.THROTTLE; minNode.addShard(shard.relocate(minNode.getNodeId(), shardSize)); + ++totalShardCount; return false; } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java index 5344d95b217a7..16c94acfbb553 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java @@ -111,7 +111,6 @@ public class AwarenessAllocationDecider extends AllocationDecider { ); private volatile List awarenessAttributes; - private volatile Map> forcedAwarenessAttributes; public AwarenessAllocationDecider(Settings settings, ClusterSettings clusterSettings) { @@ -163,8 +162,8 @@ private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, Rout IndexMetadata indexMetadata = allocation.metadata().getIndexSafe(shardRouting.index()); int shardCount = indexMetadata.getNumberOfReplicas() + 1; // 1 for primary for (String awarenessAttribute : awarenessAttributes) { - // the node the shard exists on must be associated with an awareness attribute - if (node.node().getAttributes().containsKey(awarenessAttribute) == false) { + // the node the shard exists on must be associated with an awareness attribute. 
+            if (isAwarenessAttributeAssociatedWithNode(node, awarenessAttribute) == false) {
                 return allocation.decision(
                     Decision.NO,
                     NAME,
@@ -175,36 +174,10 @@ private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, Rout
                 );
             }
 
+            int currentNodeCount = getCurrentNodeCountForAttribute(shardRouting, node, allocation, moveToNode, awarenessAttribute);
+
             // build attr_value -> nodes map
             Set<String> nodesPerAttribute = allocation.routingNodes().nodesPerAttributesCounts(awarenessAttribute);
-
-            // build the count of shards per attribute value
-            Map<String, Integer> shardPerAttribute = new HashMap<>();
-            for (ShardRouting assignedShard : allocation.routingNodes().assignedShards(shardRouting.shardId())) {
-                if (assignedShard.started() || assignedShard.initializing()) {
-                    // Note: this also counts relocation targets as that will be the new location of the shard.
-                    // Relocation sources should not be counted as the shard is moving away
-                    RoutingNode routingNode = allocation.routingNodes().node(assignedShard.currentNodeId());
-                    shardPerAttribute.merge(routingNode.node().getAttributes().get(awarenessAttribute), 1, Integer::sum);
-                }
-            }
-
-            if (moveToNode) {
-                if (shardRouting.assignedToNode()) {
-                    String nodeId = shardRouting.relocating() ? shardRouting.relocatingNodeId() : shardRouting.currentNodeId();
-                    if (node.nodeId().equals(nodeId) == false) {
-                        // we work on different nodes, move counts around
-                        shardPerAttribute.compute(
-                            allocation.routingNodes().node(nodeId).node().getAttributes().get(awarenessAttribute),
-                            (k, v) -> (v == null) ? 0 : v - 1
-                        );
-                        shardPerAttribute.merge(node.node().getAttributes().get(awarenessAttribute), 1, Integer::sum);
-                    }
-                } else {
-                    shardPerAttribute.merge(node.node().getAttributes().get(awarenessAttribute), 1, Integer::sum);
-                }
-            }
-
             int numberOfAttributes = nodesPerAttribute.size();
             List<String> fullValues = forcedAwarenessAttributes.get(awarenessAttribute);
@@ -216,9 +189,8 @@ private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, Rout
             }
             numberOfAttributes = attributesSet.size();
         }
-        // TODO should we remove ones that are not part of full list?
-        final int currentNodeCount = shardPerAttribute.get(node.node().getAttributes().get(awarenessAttribute));
+        // TODO should we remove ones that are not part of full list?
         final int maximumNodeCount = (shardCount + numberOfAttributes - 1) / numberOfAttributes; // ceil(shardCount/numberOfAttributes)
         if (currentNodeCount > maximumNodeCount) {
             return allocation.decision(
@@ -238,4 +210,57 @@ private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, Rout
 
         return allocation.decision(Decision.YES, NAME, "node meets all awareness attribute requirements");
     }
+
+    private int getCurrentNodeCountForAttribute(
+        ShardRouting shardRouting,
+        RoutingNode node,
+        RoutingAllocation allocation,
+        boolean moveToNode,
+        String awarenessAttribute
+    ) {
+        // build the count of shards per attribute value
+        final String shardAttributeForNode = getAttributeValueForNode(node, awarenessAttribute);
+        int currentNodeCount = 0;
+        final List<ShardRouting> assignedShards = allocation.routingNodes().assignedShards(shardRouting.shardId());
+
+        for (ShardRouting assignedShard : assignedShards) {
+            if (assignedShard.started() || assignedShard.initializing()) {
+                // Note: this also counts relocation targets as that will be the new location of the shard.
+                // Relocation sources should not be counted as the shard is moving away
+                RoutingNode routingNode = allocation.routingNodes().node(assignedShard.currentNodeId());
+                // Increase node count when the assigned shard's node carries the same attribute value as the target node
+                if (getAttributeValueForNode(routingNode, awarenessAttribute).equals(shardAttributeForNode)) {
+                    ++currentNodeCount;
+                }
+            }
+        }
+
+        if (moveToNode) {
+            if (shardRouting.assignedToNode()) {
+                String nodeId = shardRouting.relocating() ? shardRouting.relocatingNodeId() : shardRouting.currentNodeId();
+                if (node.nodeId().equals(nodeId) == false) {
+                    // we work on different nodes, move counts around
+                    if (getAttributeValueForNode(allocation.routingNodes().node(nodeId), awarenessAttribute).equals(shardAttributeForNode)
+                        && currentNodeCount > 0) {
+                        --currentNodeCount;
+                    }
+
+                    ++currentNodeCount;
+                }
+            } else {
+                ++currentNodeCount;
+            }
+        }
+
+        return currentNodeCount;
+    }
+
+    private boolean isAwarenessAttributeAssociatedWithNode(RoutingNode node, String awarenessAttribute) {
+        return node.node().getAttributes().containsKey(awarenessAttribute);
+    }
+
+    private String getAttributeValueForNode(final RoutingNode node, final String awarenessAttribute) {
+        return node.node().getAttributes().get(awarenessAttribute);
+    }
+
 }
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
index efa5115939d3c..5fc3f282f33f7 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
@@ -140,9 +140,8 @@ public static long sizeOfRelocatingShards(
         // Where reserved space is unavailable (e.g. stats are out-of-sync) compute a conservative estimate for initialising shards
         final List<ShardRouting> initializingShards = node.shardsWithState(ShardRoutingState.INITIALIZING);
-        initializingShards.removeIf(shardRouting -> reservedSpace.containsShardId(shardRouting.shardId()));
         for (ShardRouting routing : initializingShards) {
-            if (routing.relocatingNodeId() == null) {
+            if (routing.relocatingNodeId() == null || reservedSpace.containsShardId(routing.shardId())) {
                 // in practice the only initializing-but-not-relocating shards with a nonzero expected shard size will be ones created
                 // by a resize (shrink/split/clone) operation which we expect to happen using hard links, so they shouldn't be taking
                 // any additional space and can be ignored here
@@ -230,7 +229,14 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing
         // subtractLeavingShards is passed as false here, because they still use disk space, and therefore we should be extra careful
         // and take the size into account
-        final DiskUsageWithRelocations usage = getDiskUsage(node, allocation, usages, false);
+        final DiskUsageWithRelocations usage = getDiskUsage(
+            node,
+            allocation,
+            usages,
+            clusterInfo.getAvgFreeByte(),
+            clusterInfo.getAvgTotalBytes(),
+            false
+        );
         // First, check that the node currently over the low watermark
         double freeDiskPercentage = usage.getFreeDiskAsPercentage();
         // Cache the used disk percentage for displaying disk percentages consistent with documentation
@@ -492,7 +498,14 @@ public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAl
         // subtractLeavingShards is passed as true here, since this is only for shards remaining, we will *eventually* have enough disk
        // since shards are moving away. No new shards will be incoming since in canAllocate we pass false for this check.
-        final DiskUsageWithRelocations usage = getDiskUsage(node, allocation, usages, true);
+        final DiskUsageWithRelocations usage = getDiskUsage(
+            node,
+            allocation,
+            usages,
+            clusterInfo.getAvgFreeByte(),
+            clusterInfo.getAvgTotalBytes(),
+            true
+        );
         final String dataPath = clusterInfo.getDataPath(shardRouting);
         // If this node is already above the high threshold, the shard cannot remain (get it off!)
         final double freeDiskPercentage = usage.getFreeDiskAsPercentage();
@@ -581,13 +594,15 @@ private DiskUsageWithRelocations getDiskUsage(
         RoutingNode node,
         RoutingAllocation allocation,
         final Map<String, DiskUsage> usages,
+        final long avgFreeBytes,
+        final long avgTotalBytes,
         boolean subtractLeavingShards
     ) {
         DiskUsage usage = usages.get(node.nodeId());
         if (usage == null) {
             // If there is no usage, and we have other nodes in the cluster,
             // use the average usage for all nodes as the usage for this node
-            usage = averageUsage(node, usages);
+            usage = new DiskUsage(node.nodeId(), node.node().getName(), "_na_", avgTotalBytes, avgFreeBytes);
             if (logger.isDebugEnabled()) {
                 logger.debug(
                     "unable to determine disk usage for {}, defaulting to average across nodes [{} total] [{} free] [{}% free]",
@@ -619,26 +634,6 @@ private DiskUsageWithRelocations getDiskUsage(
         return diskUsageWithRelocations;
     }
 
-    /**
-     * Returns a {@link DiskUsage} for the {@link RoutingNode} using the
-     * average usage of other nodes in the disk usage map.
-     * @param node Node to return an averaged DiskUsage object for
-     * @param usages Map of nodeId to DiskUsage for all known nodes
-     * @return DiskUsage representing given node using the average disk usage
-     */
-    DiskUsage averageUsage(RoutingNode node, final Map<String, DiskUsage> usages) {
-        if (usages.size() == 0) {
-            return new DiskUsage(node.nodeId(), node.node().getName(), "_na_", 0, 0);
-        }
-        long totalBytes = 0;
-        long freeBytes = 0;
-        for (DiskUsage du : usages.values()) {
-            totalBytes += du.getTotalBytes();
-            freeBytes += du.getFreeBytes();
-        }
-        return new DiskUsage(node.nodeId(), node.node().getName(), "_na_", totalBytes / usages.size(), freeBytes / usages.size());
-    }
-
     /**
      * Given the DiskUsage for a node and the size of the shard, return the
     * percentage of free disk if the shard were to be allocated to the node.
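The two getDiskUsage call sites above stop averaging on demand: the removed averageUsage re-scanned the whole usages map for every node missing stats (O(nodes) per lookup), whereas the new code reads averages precomputed once on ClusterInfo via getAvgFreeByte() and getAvgTotalBytes(). A minimal sketch of that precomputation, assuming a simple per-node stats holder; only the two accessor names come from the diff, NodeDiskUsage and computeAverages are illustrative:

import java.util.List;

// Illustrative sketch: precompute cluster-wide average disk stats once,
// instead of re-averaging every node's usage each time a node lacks stats.
public final class AvgDiskUsageDemo {

    // Stand-in for the per-node disk stats held by ClusterInfo (illustrative).
    record NodeDiskUsage(String nodeId, long totalBytes, long freeBytes) {}

    static long[] computeAverages(List<NodeDiskUsage> usages) {
        long total = 0;
        long free = 0;
        for (NodeDiskUsage u : usages) {
            total += u.totalBytes();
            free += u.freeBytes();
        }
        int n = Math.max(1, usages.size());
        // { avgTotalBytes, avgFreeBytes }, computed once per stats refresh;
        // a decider can then fall back to these in O(1) per node
        return new long[] { total / n, free / n };
    }

    public static void main(String[] args) {
        List<NodeDiskUsage> usages = List.of(
            new NodeDiskUsage("n1", 100, 40),
            new NodeDiskUsage("n2", 100, 60)
        );
        long[] avg = computeAverages(usages);
        System.out.println("avgTotal=" + avg[0] + " avgFree=" + avg[1]);
    }
}

Computed once per stats refresh, the fallback cost drops from O(nodes) per missing node to O(1), which matters when the deciders run for every shard on every reroute.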
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java index 4fc5fff805663..67fe4ea1dcb1b 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java @@ -44,8 +44,6 @@ import org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode; import org.opensearch.node.remotestore.RemoteStoreNodeService.Direction; -import java.util.Locale; - /** * A new allocation decider for migration of document replication clusters to remote store backed clusters: * - For STRICT compatibility mode, the decision is always YES @@ -101,7 +99,7 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing if (migrationDirection.equals(Direction.NONE)) { // remote backed indices on docrep nodes and non remote backed indices on remote nodes are not allowed boolean isNoDecision = remoteSettingsBackedIndex ^ targetNode.isRemoteStoreNode(); - String reason = String.format(Locale.ROOT, " for %sremote store backed index", remoteSettingsBackedIndex ? "" : "non "); + String reason = " for " + (remoteSettingsBackedIndex ? "" : "non ") + "remote store backed index"; return allocation.decision( isNoDecision ? Decision.NO : Decision.YES, NAME, @@ -114,11 +112,9 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing // check for remote store backed indices if (remoteSettingsBackedIndex && targetNode.isRemoteStoreNode() == false) { // allocations and relocations must be to a remote node - String reason = String.format( - Locale.ROOT, - " because a remote store backed index's shard copy can only be %s to a remote node", - ((shardRouting.assignedToNode() == false) ? "allocated" : "relocated") - ); + String reason = new StringBuilder(" because a remote store backed index's shard copy can only be ").append( + (shardRouting.assignedToNode() == false) ? "allocated" : "relocated" + ).append(" to a remote node").toString(); return allocation.decision(Decision.NO, NAME, getDecisionDetails(false, shardRouting, targetNode, reason)); } @@ -168,16 +164,18 @@ private Decision replicaShardDecision(ShardRouting replicaShardRouting, Discover // get detailed reason for the decision private String getDecisionDetails(boolean isYes, ShardRouting shardRouting, DiscoveryNode targetNode, String reason) { - return String.format( - Locale.ROOT, - "[%s migration_direction]: %s shard copy %s be %s to a %s node%s", - migrationDirection.direction, - (shardRouting.primary() ? "primary" : "replica"), - (isYes ? "can" : "can not"), - ((shardRouting.assignedToNode() == false) ? "allocated" : "relocated"), - (targetNode.isRemoteStoreNode() ? "remote" : "non-remote"), - reason - ); + return new StringBuilder("[").append(migrationDirection.direction) + .append(" migration_direction]: ") + .append(shardRouting.primary() ? "primary" : "replica") + .append(" shard copy ") + .append(isYes ? "can" : "can not") + .append(" be ") + .append((shardRouting.assignedToNode() == false) ? "allocated" : "relocated") + .append(" to a ") + .append(targetNode.isRemoteStoreNode() ? 
"remote" : "non-remote") + .append(" node") + .append(reason) + .toString(); } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java index c008102554e8c..6f211f370de95 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.routing.allocation.decider; -import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; @@ -125,8 +124,7 @@ private Decision doDecide( RoutingAllocation allocation, BiPredicate decider ) { - IndexMetadata indexMd = allocation.metadata().getIndexSafe(shardRouting.index()); - final int indexShardLimit = INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(indexMd.getSettings(), settings); + final int indexShardLimit = allocation.metadata().getIndexSafe(shardRouting.index()).getIndexTotalShardsPerNodeLimit(); // Capture the limit here in case it changes during this method's // execution final int clusterShardLimit = this.clusterShardLimit; diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java b/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java index cc1b0713393f3..220093b428989 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java @@ -11,35 +11,28 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.lucene.store.IndexInput; import org.opensearch.action.LatchedActionListener; -import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.DiffableUtils; +import org.opensearch.cluster.Diff; import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.RoutingTable; -import org.opensearch.common.CheckedRunnable; -import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; -import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.cluster.routing.RoutingTableIncrementalDiff; +import org.opensearch.cluster.routing.StringKeyDiffProvider; import org.opensearch.common.blobstore.BlobPath; -import org.opensearch.common.blobstore.stream.write.WritePriority; -import org.opensearch.common.blobstore.transfer.RemoteTransferContainer; -import org.opensearch.common.blobstore.transfer.stream.OffsetRangeIndexInputStream; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; -import org.opensearch.common.lucene.store.ByteArrayIndexInput; +import org.opensearch.common.remote.RemoteWritableEntityStore; +import org.opensearch.common.remote.RemoteWriteableEntityBlobStore; import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.action.ActionListener; -import 
org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.index.Index; +import org.opensearch.core.compress.Compressor; import org.opensearch.gateway.remote.ClusterMetadataManifest; +import org.opensearch.gateway.remote.RemoteClusterStateUtils; import org.opensearch.gateway.remote.RemoteStateTransferException; +import org.opensearch.gateway.remote.model.RemoteRoutingTableBlobStore; import org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable; -import org.opensearch.index.remote.RemoteStoreEnums; -import org.opensearch.index.remote.RemoteStorePathStrategy; -import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.gateway.remote.routingtable.RemoteRoutingTableDiff; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; import org.opensearch.node.Node; import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.repositories.RepositoriesService; @@ -52,12 +45,10 @@ import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.concurrent.ExecutorService; import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Collectors; -import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteRoutingTableEnabled; /** @@ -67,64 +58,30 @@ */ public class InternalRemoteRoutingTableService extends AbstractLifecycleComponent implements RemoteRoutingTableService { - /** - * This setting is used to set the remote routing table store blob store path type strategy. - */ - public static final Setting REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING = new Setting<>( - "cluster.remote_store.routing_table.path_type", - RemoteStoreEnums.PathType.HASHED_PREFIX.toString(), - RemoteStoreEnums.PathType::parseString, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ); - - /** - * This setting is used to set the remote routing table store blob store path hash algorithm strategy. - * This setting will come to effect if the {@link #REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING} - * is either {@code HASHED_PREFIX} or {@code HASHED_INFIX}. 
- */ - public static final Setting REMOTE_ROUTING_TABLE_PATH_HASH_ALGO_SETTING = new Setting<>( - "cluster.remote_store.routing_table.path_hash_algo", - RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64.toString(), - RemoteStoreEnums.PathHashAlgorithm::parseString, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ); - - public static final String INDEX_ROUTING_PATH_TOKEN = "index-routing"; - public static final String INDEX_ROUTING_FILE_PREFIX = "index_routing"; - public static final String INDEX_ROUTING_METADATA_PREFIX = "indexRouting--"; - private static final Logger logger = LogManager.getLogger(InternalRemoteRoutingTableService.class); private final Settings settings; private final Supplier repositoriesService; + private Compressor compressor; + private RemoteWritableEntityStore remoteIndexRoutingTableStore; + private RemoteWritableEntityStore, RemoteRoutingTableDiff> remoteRoutingTableDiffStore; + private final ClusterSettings clusterSettings; private BlobStoreRepository blobStoreRepository; - private RemoteStoreEnums.PathType pathType; - private RemoteStoreEnums.PathHashAlgorithm pathHashAlgo; - private ThreadPool threadPool; + private final ThreadPool threadPool; + private final String clusterName; public InternalRemoteRoutingTableService( Supplier repositoriesService, Settings settings, ClusterSettings clusterSettings, - ThreadPool threadpool + ThreadPool threadpool, + String clusterName ) { assert isRemoteRoutingTableEnabled(settings) : "Remote routing table is not enabled"; this.repositoriesService = repositoriesService; this.settings = settings; - this.pathType = clusterSettings.get(REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING); - this.pathHashAlgo = clusterSettings.get(REMOTE_ROUTING_TABLE_PATH_HASH_ALGO_SETTING); - clusterSettings.addSettingsUpdateConsumer(REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING, this::setPathTypeSetting); - clusterSettings.addSettingsUpdateConsumer(REMOTE_ROUTING_TABLE_PATH_HASH_ALGO_SETTING, this::setPathHashAlgoSetting); this.threadPool = threadpool; - } - - private void setPathTypeSetting(RemoteStoreEnums.PathType pathType) { - this.pathType = pathType; - } - - private void setPathHashAlgoSetting(RemoteStoreEnums.PathHashAlgorithm pathHashAlgo) { - this.pathHashAlgo = pathHashAlgo; + this.clusterName = clusterName; + this.clusterSettings = clusterSettings; } public List getIndicesRouting(RoutingTable routingTable) { @@ -133,61 +90,69 @@ public List getIndicesRouting(RoutingTable routingTable) { /** * Returns diff between the two routing tables, which includes upserts and deletes. 
+ * * @param before previous routing table - * @param after current routing table - * @return diff of the previous and current routing table + * @param after current routing table + * @return incremental diff of the previous and current routing table */ - public DiffableUtils.MapDiff> getIndicesRoutingMapDiff( - RoutingTable before, - RoutingTable after - ) { - return DiffableUtils.diff( - before.getIndicesRouting(), - after.getIndicesRouting(), - DiffableUtils.getStringKeySerializer(), - CUSTOM_ROUTING_TABLE_VALUE_SERIALIZER - ); + @Override + public StringKeyDiffProvider getIndicesRoutingMapDiff(RoutingTable before, RoutingTable after) { + return new RoutingTableIncrementalDiff(before, after); } /** - * Create async action for writing one {@code IndexRoutingTable} to remote store - * @param clusterState current cluster state + * Async action for writing one {@code IndexRoutingTable} to remote store + * + * @param term current term + * @param version current version + * @param clusterUUID current cluster UUID * @param indexRouting indexRoutingTable to write to remote store * @param latchedActionListener listener for handling async action response - * @param clusterBasePath base path for remote file - * @return returns runnable async action */ - public CheckedRunnable getIndexRoutingAsyncAction( - ClusterState clusterState, + @Override + public void getAsyncIndexRoutingWriteAction( + String clusterUUID, + long term, + long version, IndexRoutingTable indexRouting, - LatchedActionListener latchedActionListener, - BlobPath clusterBasePath + LatchedActionListener latchedActionListener ) { - BlobPath indexRoutingPath = clusterBasePath.add(INDEX_ROUTING_PATH_TOKEN); - BlobPath path = pathType.path( - RemoteStorePathStrategy.PathInput.builder().basePath(indexRoutingPath).indexUUID(indexRouting.getIndex().getUUID()).build(), - pathHashAlgo + RemoteIndexRoutingTable remoteIndexRoutingTable = new RemoteIndexRoutingTable(indexRouting, clusterUUID, compressor, term, version); + + ActionListener completionListener = ActionListener.wrap( + resp -> latchedActionListener.onResponse(remoteIndexRoutingTable.getUploadedMetadata()), + ex -> latchedActionListener.onFailure( + new RemoteStateTransferException("Exception in writing index to remote store: " + indexRouting.getIndex().toString(), ex) + ) ); - final BlobContainer blobContainer = blobStoreRepository.blobStore().blobContainer(path); - final String fileName = getIndexRoutingFileName(clusterState.term(), clusterState.version()); + remoteIndexRoutingTableStore.writeAsync(remoteIndexRoutingTable, completionListener); + } + @Override + public void getAsyncIndexRoutingDiffWriteAction( + String clusterUUID, + long term, + long version, + StringKeyDiffProvider routingTableDiff, + LatchedActionListener latchedActionListener + ) { + RemoteRoutingTableDiff remoteRoutingTableDiff = new RemoteRoutingTableDiff( + (RoutingTableIncrementalDiff) routingTableDiff, + clusterUUID, + compressor, + term, + version + ); ActionListener completionListener = ActionListener.wrap( - resp -> latchedActionListener.onResponse( - new ClusterMetadataManifest.UploadedIndexMetadata( - indexRouting.getIndex().getName(), - indexRouting.getIndex().getUUID(), - path.buildAsString() + fileName, - INDEX_ROUTING_METADATA_PREFIX - ) - ), + resp -> latchedActionListener.onResponse(remoteRoutingTableDiff.getUploadedMetadata()), ex -> latchedActionListener.onFailure( - new RemoteStateTransferException("Exception in writing index to remote store: " + indexRouting.getIndex().toString(), ex) + new 
RemoteStateTransferException("Exception in writing index routing diff to remote store", ex) ) ); - return () -> uploadIndex(indexRouting, fileName, blobContainer, completionListener); + remoteRoutingTableDiffStore.writeAsync(remoteRoutingTableDiff, completionListener); } /** @@ -214,111 +179,36 @@ public List getAllUploadedIndices return new ArrayList<>(allUploadedIndicesRouting.values()); } - private void uploadIndex( - IndexRoutingTable indexRouting, - String fileName, - BlobContainer blobContainer, - ActionListener completionListener + @Override + public void getAsyncIndexRoutingReadAction( + String clusterUUID, + String uploadedFilename, + LatchedActionListener latchedActionListener ) { - RemoteIndexRoutingTable indexRoutingInput = new RemoteIndexRoutingTable(indexRouting); - BytesReference bytesInput = null; - try (BytesStreamOutput streamOutput = new BytesStreamOutput()) { - indexRoutingInput.writeTo(streamOutput); - bytesInput = streamOutput.bytes(); - } catch (IOException e) { - logger.error("Failed to serialize IndexRoutingTable for [{}]: [{}]", indexRouting, e); - completionListener.onFailure(e); - return; - } - if (blobContainer instanceof AsyncMultiStreamBlobContainer == false) { - try { - blobContainer.writeBlob(fileName, bytesInput.streamInput(), bytesInput.length(), true); - completionListener.onResponse(null); - } catch (IOException e) { - logger.error("Failed to write IndexRoutingTable to remote store for indexRouting [{}]: [{}]", indexRouting, e); - completionListener.onFailure(e); - } - return; - } + ActionListener actionListener = ActionListener.wrap( + latchedActionListener::onResponse, + latchedActionListener::onFailure + ); - try (IndexInput input = new ByteArrayIndexInput("indexrouting", BytesReference.toBytes(bytesInput))) { - try ( - RemoteTransferContainer remoteTransferContainer = new RemoteTransferContainer( - fileName, - fileName, - input.length(), - true, - WritePriority.URGENT, - (size, position) -> new OffsetRangeIndexInputStream(input, size, position), - null, - false - ) - ) { - ((AsyncMultiStreamBlobContainer) blobContainer).asyncBlobUpload( - remoteTransferContainer.createWriteContext(), - completionListener - ); - } catch (IOException e) { - logger.error("Failed to write IndexRoutingTable to remote store for indexRouting [{}]: [{}]", indexRouting, e); - completionListener.onFailure(e); - } - } catch (IOException e) { - logger.error( - "Failed to create transfer object for IndexRoutingTable for remote store upload for indexRouting [{}]: [{}]", - indexRouting, - e - ); - completionListener.onFailure(e); - } + RemoteIndexRoutingTable remoteIndexRoutingTable = new RemoteIndexRoutingTable(uploadedFilename, clusterUUID, compressor); + + remoteIndexRoutingTableStore.readAsync(remoteIndexRoutingTable, actionListener); } @Override - public CheckedRunnable getAsyncIndexRoutingReadAction( + public void getAsyncIndexRoutingTableDiffReadAction( + String clusterUUID, String uploadedFilename, - Index index, - LatchedActionListener latchedActionListener + LatchedActionListener> latchedActionListener ) { - int idx = uploadedFilename.lastIndexOf("/"); - String blobFileName = uploadedFilename.substring(idx + 1); - BlobContainer blobContainer = blobStoreRepository.blobStore() - .blobContainer(BlobPath.cleanPath().add(uploadedFilename.substring(0, idx))); - - return () -> readAsync( - blobContainer, - blobFileName, - index, - threadPool.executor(ThreadPool.Names.REMOTE_STATE_READ), - ActionListener.wrap( - response -> 
latchedActionListener.onResponse(response.getIndexRoutingTable()), - latchedActionListener::onFailure - ) + ActionListener> actionListener = ActionListener.wrap( + latchedActionListener::onResponse, + latchedActionListener::onFailure ); - } - private void readAsync( - BlobContainer blobContainer, - String name, - Index index, - ExecutorService executorService, - ActionListener listener - ) { - executorService.execute(() -> { - try { - listener.onResponse(read(blobContainer, name, index)); - } catch (Exception e) { - listener.onFailure(e); - } - }); - } - - private RemoteIndexRoutingTable read(BlobContainer blobContainer, String path, Index index) { - try { - return new RemoteIndexRoutingTable(blobContainer.readBlob(path), index); - } catch (IOException | AssertionError e) { - logger.error(() -> new ParameterizedMessage("RoutingTable read failed for path {}", path), e); - throw new RemoteStateTransferException("Failed to read RemoteRoutingTable from Manifest with error ", e); - } + RemoteRoutingTableDiff remoteRoutingTableDiff = new RemoteRoutingTableDiff(uploadedFilename, clusterUUID, compressor); + remoteRoutingTableDiffStore.readAsync(remoteRoutingTableDiff, actionListener); } @Override @@ -335,16 +225,6 @@ public List getUpdatedIndexRoutin }).collect(Collectors.toList()); } - private String getIndexRoutingFileName(long term, long version) { - return String.join( - DELIMITER, - INDEX_ROUTING_FILE_PREFIX, - RemoteStoreUtils.invertLong(term), - RemoteStoreUtils.invertLong(version), - RemoteStoreUtils.invertLong(System.currentTimeMillis()) - ); - } - @Override protected void doClose() throws IOException { if (blobStoreRepository != null) { @@ -362,6 +242,25 @@ protected void doStart() { final Repository repository = repositoriesService.get().repository(remoteStoreRepo); assert repository instanceof BlobStoreRepository : "Repository should be instance of BlobStoreRepository"; blobStoreRepository = (BlobStoreRepository) repository; + compressor = blobStoreRepository.getCompressor(); + + this.remoteIndexRoutingTableStore = new RemoteRoutingTableBlobStore<>( + new BlobStoreTransferService(blobStoreRepository.blobStore(), threadPool), + blobStoreRepository, + clusterName, + threadPool, + ThreadPool.Names.REMOTE_STATE_READ, + clusterSettings + ); + + this.remoteRoutingTableDiffStore = new RemoteWriteableEntityBlobStore<>( + new BlobStoreTransferService(blobStoreRepository.blobStore(), threadPool), + blobStoreRepository, + clusterName, + threadPool, + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN + ); } @Override @@ -378,4 +277,13 @@ public void deleteStaleIndexRoutingPaths(List stalePaths) throws IOExcep } } + public void deleteStaleIndexRoutingDiffPaths(List stalePaths) throws IOException { + try { + logger.debug(() -> "Deleting stale index routing diff files from remote - " + stalePaths); + blobStoreRepository.blobStore().blobContainer(BlobPath.cleanPath()).deleteBlobsIgnoringIfNotExists(stalePaths); + } catch (IOException e) { + logger.error(() -> new ParameterizedMessage("Failed to delete some stale index routing diff paths from {}", stalePaths), e); + throw e; + } + } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/NoopRemoteRoutingTableService.java b/server/src/main/java/org/opensearch/cluster/routing/remote/NoopRemoteRoutingTableService.java index 6236d107d0220..17687199c39d6 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/remote/NoopRemoteRoutingTableService.java +++ 
b/server/src/main/java/org/opensearch/cluster/routing/remote/NoopRemoteRoutingTableService.java @@ -9,19 +9,16 @@ package org.opensearch.cluster.routing.remote; import org.opensearch.action.LatchedActionListener; -import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.DiffableUtils; +import org.opensearch.cluster.Diff; import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.RoutingTable; -import org.opensearch.common.CheckedRunnable; -import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.cluster.routing.RoutingTableIncrementalDiff; +import org.opensearch.cluster.routing.StringKeyDiffProvider; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; -import org.opensearch.core.index.Index; import org.opensearch.gateway.remote.ClusterMetadataManifest; import java.io.IOException; import java.util.List; -import java.util.Map; /** * Noop impl for RemoteRoutingTableService. @@ -34,22 +31,30 @@ public List getIndicesRouting(RoutingTable routingTable) { } @Override - public DiffableUtils.MapDiff> getIndicesRoutingMapDiff( - RoutingTable before, - RoutingTable after - ) { - return DiffableUtils.diff(Map.of(), Map.of(), DiffableUtils.getStringKeySerializer(), CUSTOM_ROUTING_TABLE_VALUE_SERIALIZER); + public StringKeyDiffProvider getIndicesRoutingMapDiff(RoutingTable before, RoutingTable after) { + return new RoutingTableIncrementalDiff(RoutingTable.builder().build(), RoutingTable.builder().build()); } @Override - public CheckedRunnable getIndexRoutingAsyncAction( - ClusterState clusterState, + public void getAsyncIndexRoutingWriteAction( + String clusterUUID, + long term, + long version, IndexRoutingTable indexRouting, - LatchedActionListener latchedActionListener, - BlobPath clusterBasePath + LatchedActionListener latchedActionListener + ) { + // noop + } + + @Override + public void getAsyncIndexRoutingDiffWriteAction( + String clusterUUID, + long term, + long version, + StringKeyDiffProvider routingTableDiff, + LatchedActionListener latchedActionListener ) { // noop - return () -> {}; } @Override @@ -63,13 +68,21 @@ public List getAllUploadedIndices } @Override - public CheckedRunnable getAsyncIndexRoutingReadAction( + public void getAsyncIndexRoutingReadAction( + String clusterUUID, String uploadedFilename, - Index index, LatchedActionListener latchedActionListener ) { // noop - return () -> {}; + } + + @Override + public void getAsyncIndexRoutingTableDiffReadAction( + String clusterUUID, + String uploadedFilename, + LatchedActionListener> latchedActionListener + ) { + // noop } @Override @@ -100,4 +113,8 @@ protected void doClose() throws IOException { public void deleteStaleIndexRoutingPaths(List stalePaths) throws IOException { // noop } + + public void deleteStaleIndexRoutingDiffPaths(List stalePaths) throws IOException { + // noop + } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java b/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java index d455dfb58eabc..d7ef3a29aa21f 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java @@ -9,21 +9,15 @@ package org.opensearch.cluster.routing.remote; import org.opensearch.action.LatchedActionListener; -import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.DiffableUtils; +import org.opensearch.cluster.Diff; import 
org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.RoutingTable; -import org.opensearch.common.CheckedRunnable; -import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.cluster.routing.StringKeyDiffProvider; import org.opensearch.common.lifecycle.LifecycleComponent; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.index.Index; import org.opensearch.gateway.remote.ClusterMetadataManifest; import java.io.IOException; import java.util.List; -import java.util.Map; /** * A Service which provides APIs to upload and download routing table from remote store. @@ -31,42 +25,42 @@ * @opensearch.internal */ public interface RemoteRoutingTableService extends LifecycleComponent { - public static final DiffableUtils.NonDiffableValueSerializer CUSTOM_ROUTING_TABLE_VALUE_SERIALIZER = - new DiffableUtils.NonDiffableValueSerializer() { - @Override - public void write(IndexRoutingTable value, StreamOutput out) throws IOException { - value.writeTo(out); - } - - @Override - public IndexRoutingTable read(StreamInput in, String key) throws IOException { - return IndexRoutingTable.readFrom(in); - } - }; List getIndicesRouting(RoutingTable routingTable); - CheckedRunnable getAsyncIndexRoutingReadAction( + void getAsyncIndexRoutingReadAction( + String clusterUUID, String uploadedFilename, - Index index, LatchedActionListener latchedActionListener ); + void getAsyncIndexRoutingTableDiffReadAction( + String clusterUUID, + String uploadedFilename, + LatchedActionListener> latchedActionListener + ); + List getUpdatedIndexRoutingTableMetadata( List updatedIndicesRouting, List allIndicesRouting ); - DiffableUtils.MapDiff> getIndicesRoutingMapDiff( - RoutingTable before, - RoutingTable after - ); + StringKeyDiffProvider getIndicesRoutingMapDiff(RoutingTable before, RoutingTable after); - CheckedRunnable getIndexRoutingAsyncAction( - ClusterState clusterState, + void getAsyncIndexRoutingWriteAction( + String clusterUUID, + long term, + long version, IndexRoutingTable indexRouting, - LatchedActionListener latchedActionListener, - BlobPath clusterBasePath + LatchedActionListener latchedActionListener + ); + + void getAsyncIndexRoutingDiffWriteAction( + String clusterUUID, + long term, + long version, + StringKeyDiffProvider routingTableDiff, + LatchedActionListener latchedActionListener ); List getAllUploadedIndicesRouting( @@ -75,6 +69,8 @@ List getAllUploadedIndicesRouting List indicesRoutingToDelete ); - public void deleteStaleIndexRoutingPaths(List stalePaths) throws IOException; + void deleteStaleIndexRoutingPaths(List stalePaths) throws IOException; + + void deleteStaleIndexRoutingDiffPaths(List stalePaths) throws IOException; } diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactory.java b/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactory.java index 82837191a30b7..56dfa03215a64 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactory.java +++ b/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactory.java @@ -34,10 +34,11 @@ public static RemoteRoutingTableService getService( Supplier repositoriesService, Settings settings, ClusterSettings clusterSettings, - ThreadPool threadPool + ThreadPool threadPool, + String clusterName ) { if (isRemoteRoutingTableEnabled(settings)) { - return new 
InternalRemoteRoutingTableService(repositoriesService, settings, clusterSettings, threadPool); + return new InternalRemoteRoutingTableService(repositoriesService, settings, clusterSettings, threadPool, clusterName); } return new NoopRemoteRoutingTableService(); } diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java index 6234427445754..b2548a8976c73 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java @@ -61,6 +61,7 @@ import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.telemetry.metrics.tags.Tags; @@ -396,7 +397,7 @@ private void submitStateUpdateTask( final ThreadContext threadContext = threadPool.getThreadContext(); final Supplier supplier = threadContext.newRestorableContext(true); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); final UpdateTask updateTask = new UpdateTask( config.priority(), source, diff --git a/server/src/main/java/org/opensearch/cluster/service/MasterService.java b/server/src/main/java/org/opensearch/cluster/service/MasterService.java index 686e9793a8fd3..713de8cdd0fda 100644 --- a/server/src/main/java/org/opensearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/MasterService.java @@ -66,6 +66,7 @@ import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.Assertions; import org.opensearch.core.common.text.Text; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; @@ -84,6 +85,7 @@ import java.util.Objects; import java.util.Optional; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Collectors; @@ -221,10 +223,10 @@ protected void onTimeout(List tasks, TimeValue timeout) { } @Override - protected void run(Object batchingKey, List tasks, String tasksSummary) { + protected void run(Object batchingKey, List tasks, Function taskSummaryGenerator) { ClusterStateTaskExecutor taskExecutor = (ClusterStateTaskExecutor) batchingKey; List updateTasks = (List) tasks; - runTasks(new TaskInputs(taskExecutor, updateTasks, tasksSummary)); + runTasks(new TaskInputs(taskExecutor, updateTasks, taskSummaryGenerator)); } class UpdateTask extends BatchedTask { @@ -297,26 +299,33 @@ public static boolean assertNotMasterUpdateThread(String reason) { } private void runTasks(TaskInputs taskInputs) { - final String summary = taskInputs.summary; + final String longSummary = logger.isTraceEnabled() ? 
taskInputs.taskSummaryGenerator.apply(true) : ""; + final String shortSummary = taskInputs.taskSummaryGenerator.apply(false); + if (!lifecycle.started()) { - logger.debug("processing [{}]: ignoring, cluster-manager service not started", summary); + logger.debug("processing [{}]: ignoring, cluster-manager service not started", shortSummary); return; } - logger.debug("executing cluster state update for [{}]", summary); + if (logger.isTraceEnabled()) { + logger.trace("executing cluster state update for [{}]", longSummary); + } else { + logger.debug("executing cluster state update for [{}]", shortSummary); + } + final ClusterState previousClusterState = state(); if (!previousClusterState.nodes().isLocalNodeElectedClusterManager() && taskInputs.runOnlyWhenClusterManager()) { - logger.debug("failing [{}]: local node is no longer cluster-manager", summary); + logger.debug("failing [{}]: local node is no longer cluster-manager", shortSummary); taskInputs.onNoLongerClusterManager(); return; } final long computationStartTime = threadPool.preciseRelativeTimeInNanos(); - final TaskOutputs taskOutputs = calculateTaskOutputs(taskInputs, previousClusterState); + final TaskOutputs taskOutputs = calculateTaskOutputs(taskInputs, previousClusterState, shortSummary); taskOutputs.notifyFailedTasks(); final TimeValue computationTime = getTimeSince(computationStartTime); - logExecutionTime(computationTime, "compute cluster state update", summary); + logExecutionTime(computationTime, "compute cluster state update", shortSummary); clusterManagerMetrics.recordLatency( clusterManagerMetrics.clusterStateComputeHistogram, @@ -328,17 +337,17 @@ private void runTasks(TaskInputs taskInputs) { final long notificationStartTime = threadPool.preciseRelativeTimeInNanos(); taskOutputs.notifySuccessfulTasksOnUnchangedClusterState(); final TimeValue executionTime = getTimeSince(notificationStartTime); - logExecutionTime(executionTime, "notify listeners on unchanged cluster state", summary); + logExecutionTime(executionTime, "notify listeners on unchanged cluster state", shortSummary); } else { final ClusterState newClusterState = taskOutputs.newClusterState; if (logger.isTraceEnabled()) { - logger.trace("cluster state updated, source [{}]\n{}", summary, newClusterState); + logger.trace("cluster state updated, source [{}]\n{}", longSummary, newClusterState); } else { - logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), summary); + logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), shortSummary); } final long publicationStartTime = threadPool.preciseRelativeTimeInNanos(); try { - ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(summary, newClusterState, previousClusterState); + ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(shortSummary, newClusterState, previousClusterState); // new cluster state, notify all listeners final DiscoveryNodes.Delta nodesDelta = clusterChangedEvent.nodesDelta(); if (nodesDelta.hasChanges() && logger.isInfoEnabled()) { @@ -346,7 +355,7 @@ private void runTasks(TaskInputs taskInputs) { if (nodesDeltaSummary.length() > 0) { logger.info( "{}, term: {}, version: {}, delta: {}", - summary, + shortSummary, newClusterState.term(), newClusterState.version(), nodesDeltaSummary @@ -357,7 +366,7 @@ private void runTasks(TaskInputs taskInputs) { logger.debug("publishing cluster state version [{}]", newClusterState.version()); publish(clusterChangedEvent, taskOutputs, publicationStartTime); } catch 
(Exception e) { - handleException(summary, publicationStartTime, newClusterState, e); + handleException(shortSummary, publicationStartTime, newClusterState, e); } } } @@ -452,8 +461,8 @@ private void handleException(String summary, long startTimeMillis, ClusterState // TODO: do we want to call updateTask.onFailure here? } - private TaskOutputs calculateTaskOutputs(TaskInputs taskInputs, ClusterState previousClusterState) { - ClusterTasksResult clusterTasksResult = executeTasks(taskInputs, previousClusterState); + private TaskOutputs calculateTaskOutputs(TaskInputs taskInputs, ClusterState previousClusterState, String taskSummary) { + ClusterTasksResult clusterTasksResult = executeTasks(taskInputs, previousClusterState, taskSummary); ClusterState newClusterState = patchVersions(previousClusterState, clusterTasksResult); return new TaskOutputs( taskInputs, @@ -897,7 +906,7 @@ public void onTimeout() { } } - private ClusterTasksResult executeTasks(TaskInputs taskInputs, ClusterState previousClusterState) { + private ClusterTasksResult executeTasks(TaskInputs taskInputs, ClusterState previousClusterState, String taskSummary) { ClusterTasksResult clusterTasksResult; try { List inputs = taskInputs.updateTasks.stream().map(tUpdateTask -> tUpdateTask.task).collect(Collectors.toList()); @@ -913,7 +922,7 @@ private ClusterTasksResult executeTasks(TaskInputs taskInputs, ClusterSt "failed to execute cluster state update (on version: [{}], uuid: [{}]) for [{}]\n{}{}{}", previousClusterState.version(), previousClusterState.stateUUID(), - taskInputs.summary, + taskSummary, previousClusterState.nodes(), previousClusterState.routingTable(), previousClusterState.getRoutingNodes() @@ -955,14 +964,19 @@ private List getNonFailedTasks(TaskInputs taskInputs, Cluste * Represents a set of tasks to be processed together with their executor */ private class TaskInputs { - final String summary; + final List updateTasks; final ClusterStateTaskExecutor executor; + final Function taskSummaryGenerator; - TaskInputs(ClusterStateTaskExecutor executor, List updateTasks, String summary) { - this.summary = summary; + TaskInputs( + ClusterStateTaskExecutor executor, + List updateTasks, + final Function taskSummaryGenerator + ) { this.executor = executor; this.updateTasks = updateTasks; + this.taskSummaryGenerator = taskSummaryGenerator; } boolean runOnlyWhenClusterManager() { @@ -1009,7 +1023,7 @@ public void submitStateUpdateTasks( final ThreadContext threadContext = threadPool.getThreadContext(); final Supplier supplier = threadContext.newRestorableContext(true); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); List safeTasks = tasks.entrySet() .stream() diff --git a/server/src/main/java/org/opensearch/cluster/service/TaskBatcher.java b/server/src/main/java/org/opensearch/cluster/service/TaskBatcher.java index 5e58f495a16fb..3513bfffb7157 100644 --- a/server/src/main/java/org/opensearch/cluster/service/TaskBatcher.java +++ b/server/src/main/java/org/opensearch/cluster/service/TaskBatcher.java @@ -177,7 +177,6 @@ void runIfNotProcessed(BatchedTask updateTask) { // to give other tasks with different batching key a chance to execute. if (updateTask.processed.get() == false) { final List toExecute = new ArrayList<>(); - final Map> processTasksBySource = new HashMap<>(); // While removing task, need to remove task first from taskMap and then remove identity from identityMap. 
        // Changing this order might lead to duplicate tasks during submission.
        LinkedHashSet<BatchedTask> pending = tasksPerBatchingKey.remove(updateTask.batchingKey);
@@ -187,7 +186,6 @@ void runIfNotProcessed(BatchedTask updateTask) {
                     if (task.processed.getAndSet(true) == false) {
                         logger.trace("will process {}", task);
                         toExecute.add(task);
-                        processTasksBySource.computeIfAbsent(task.source, s -> new ArrayList<>()).add(task);
                     } else {
                         logger.trace("skipping {}, already processed", task);
                     }
@@ -195,22 +193,34 @@ void runIfNotProcessed(BatchedTask updateTask) {
             }
 
             if (toExecute.isEmpty() == false) {
-                final String tasksSummary = processTasksBySource.entrySet().stream().map(entry -> {
-                    String tasks = updateTask.describeTasks(entry.getValue());
-                    return tasks.isEmpty() ? entry.getKey() : entry.getKey() + "[" + tasks + "]";
-                }).reduce((s1, s2) -> s1 + ", " + s2).orElse("");
-
+                Function<Boolean, String> taskSummaryGenerator = (longSummaryRequired) -> {
+                    if (longSummaryRequired == null || !longSummaryRequired) {
+                        return buildShortSummary(updateTask.batchingKey, toExecute.size());
+                    }
+                    final Map<String, List<BatchedTask>> processTasksBySource = new HashMap<>();
+                    for (final BatchedTask task : toExecute) {
+                        processTasksBySource.computeIfAbsent(task.source, s -> new ArrayList<>()).add(task);
+                    }
+                    return processTasksBySource.entrySet().stream().map(entry -> {
+                        String tasks = updateTask.describeTasks(entry.getValue());
+                        return tasks.isEmpty() ? entry.getKey() : entry.getKey() + "[" + tasks + "]";
+                    }).reduce((s1, s2) -> s1 + ", " + s2).orElse("");
+                };
                 taskBatcherListener.onBeginProcessing(toExecute);
-                run(updateTask.batchingKey, toExecute, tasksSummary);
+                run(updateTask.batchingKey, toExecute, taskSummaryGenerator);
             }
         }
     }
 
+    private String buildShortSummary(final Object batchingKey, final int taskCount) {
+        return "Tasks batched with key: " + batchingKey.toString().split("\\$")[0] + " and count: " + taskCount;
+    }
+
     /**
      * Action to be implemented by the specific batching implementation
      * All tasks have the given batching key.
      */
-    protected abstract void run(Object batchingKey, List<? extends BatchedTask> tasks, String tasksSummary);
+    protected abstract void run(Object batchingKey, List<? extends BatchedTask> tasks, Function<Boolean, String> taskSummaryGenerator);
 
     /**
      * Represents a runnable task that supports batching.
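The TaskBatcher change above swaps an eagerly concatenated task summary for a Function<Boolean, String>, so the expensive long form is built only when a caller actually asks for it (for example, when TRACE logging is enabled in MasterService.runTasks). A self-contained sketch of the same lazy-summary pattern, using illustrative names rather than the real OpenSearch classes:

import java.util.function.Function;

// Illustrative sketch of the lazy-summary pattern: the detailed description
// is only built when explicitly requested; the default path stays O(1).
public final class LazySummaryDemo {
    static Function<Boolean, String> summaryGenerator(String batchingKey, int taskCount) {
        return longSummaryRequired -> {
            if (longSummaryRequired == null || !longSummaryRequired) {
                // Cheap short form: no per-task string concatenation on the hot path
                return "Tasks batched with key: " + batchingKey + " and count: " + taskCount;
            }
            // Potentially O(taskCount) work, paid only when requested
            StringBuilder sb = new StringBuilder(batchingKey).append('[');
            for (int i = 0; i < taskCount; i++) {
                sb.append("task-").append(i);
                if (i < taskCount - 1) sb.append(", ");
            }
            return sb.append(']').toString();
        };
    }

    public static void main(String[] args) {
        Function<Boolean, String> gen = summaryGenerator("shard-started", 3);
        System.out.println(gen.apply(false)); // short form, constant cost
        System.out.println(gen.apply(true));  // detailed form, built on demand
    }
}

Since a summary is produced for every cluster state update, keeping the default path constant-cost avoids building large strings for batches with thousands of tasks.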
diff --git a/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java b/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java index ed324e4e62d8f..ee21c343e2ea1 100644 --- a/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java +++ b/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java @@ -38,7 +38,7 @@ import org.apache.logging.log4j.core.pattern.ExtendedThrowablePatternConverter; import org.apache.logging.log4j.core.pattern.PatternConverter; import org.apache.logging.log4j.core.pattern.ThrowablePatternConverter; -import org.apache.logging.log4j.util.Strings; +import org.opensearch.core.common.Strings; import java.nio.charset.Charset; import java.util.StringJoiner; @@ -84,7 +84,7 @@ public static JsonThrowablePatternConverter newInstance(final Configuration conf @Override public void format(final LogEvent event, final StringBuilder toAppendTo) { String consoleStacktrace = formatStacktrace(event); - if (Strings.isNotEmpty(consoleStacktrace)) { + if (!Strings.isNullOrEmpty(consoleStacktrace)) { String jsonStacktrace = formatJson(consoleStacktrace); toAppendTo.append(", "); diff --git a/server/src/main/java/org/opensearch/common/remote/AbstractClusterMetadataWriteableBlobEntity.java b/server/src/main/java/org/opensearch/common/remote/AbstractClusterMetadataWriteableBlobEntity.java new file mode 100644 index 0000000000000..b9492da04680d --- /dev/null +++ b/server/src/main/java/org/opensearch/common/remote/AbstractClusterMetadataWriteableBlobEntity.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.remote; + +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; + +/** + * An extension of {@link RemoteWriteableEntity} class which caters to the use case of writing to and reading from a blob storage + * + * @param The class type which can be uploaded to or downloaded from a blob storage. 
+ */ +public abstract class AbstractClusterMetadataWriteableBlobEntity extends RemoteWriteableBlobEntity { + + protected final NamedXContentRegistry namedXContentRegistry; + + public AbstractClusterMetadataWriteableBlobEntity( + final String clusterUUID, + final Compressor compressor, + final NamedXContentRegistry namedXContentRegistry + ) { + super(clusterUUID, compressor); + this.namedXContentRegistry = namedXContentRegistry; + } + + public AbstractClusterMetadataWriteableBlobEntity(final String clusterUUID, final Compressor compressor) { + super(clusterUUID, compressor); + this.namedXContentRegistry = null; + } + + public abstract UploadedMetadata getUploadedMetadata(); + + public NamedXContentRegistry getNamedXContentRegistry() { + return namedXContentRegistry; + } +} diff --git a/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableEntityManager.java b/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableEntityManager.java new file mode 100644 index 0000000000000..8e2de1580a49f --- /dev/null +++ b/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableEntityManager.java @@ -0,0 +1,84 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.remote; + +import org.opensearch.core.action.ActionListener; +import org.opensearch.gateway.remote.ClusterMetadataManifest; +import org.opensearch.gateway.remote.model.RemoteReadResult; + +import java.util.HashMap; +import java.util.Map; + +/** + * An abstract class that provides a base implementation for managing remote entities in the remote store. + */ +public abstract class AbstractRemoteWritableEntityManager implements RemoteWritableEntityManager { + /** + * A map that stores the remote writable entity stores, keyed by the entity type. + */ + protected final Map remoteWritableEntityStores = new HashMap<>(); + + /** + * Retrieves the remote writable entity store for the given entity. + * + * @param entity the entity for which the store is requested + * @return the remote writable entity store for the given entity + * @throws IllegalArgumentException if the entity type is unknown + */ + protected RemoteWritableEntityStore getStore(AbstractClusterMetadataWriteableBlobEntity entity) { + RemoteWritableEntityStore remoteStore = remoteWritableEntityStores.get(entity.getType()); + if (remoteStore == null) { + throw new IllegalArgumentException("Unknown entity type [" + entity.getType() + "]"); + } + return remoteStore; + } + + /** + * Returns an ActionListener for handling the write operation for the specified component, remote object, and latched action listener. + * + * @param component the component for which the write operation is performed + * @param remoteEntity the remote object to be written + * @param listener the listener to be notified when the write operation completes + * @return an ActionListener for handling the write operation + */ + protected abstract ActionListener getWrappedWriteListener( + String component, + AbstractClusterMetadataWriteableBlobEntity remoteEntity, + ActionListener listener + ); + + /** + * Returns an ActionListener for handling the read operation for the specified component, + * remote object, and latched action listener. 
+ * + * @param component the component for which the read operation is performed + * @param remoteEntity the remote object to be read + * @param listener the listener to be notified when the read operation completes + * @return an ActionListener for handling the read operation + */ + protected abstract ActionListener getWrappedReadListener( + String component, + AbstractClusterMetadataWriteableBlobEntity remoteEntity, + ActionListener listener + ); + + @Override + public void writeAsync( + String component, + AbstractClusterMetadataWriteableBlobEntity entity, + ActionListener listener + ) { + getStore(entity).writeAsync(entity, getWrappedWriteListener(component, entity, listener)); + } + + @Override + public void readAsync(String component, AbstractClusterMetadataWriteableBlobEntity entity, ActionListener listener) { + getStore(entity).readAsync(entity, getWrappedReadListener(component, entity, listener)); + } +} diff --git a/server/src/main/java/org/opensearch/common/remote/RemoteWritableEntityManager.java b/server/src/main/java/org/opensearch/common/remote/RemoteWritableEntityManager.java new file mode 100644 index 0000000000000..c27598e368e4d --- /dev/null +++ b/server/src/main/java/org/opensearch/common/remote/RemoteWritableEntityManager.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.remote; + +import org.opensearch.core.action.ActionListener; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; +import org.opensearch.gateway.remote.model.RemoteReadResult; + +/** + * The RemoteWritableEntityManager interface provides async read and write methods for managing remote entities in the remote store + */ +public interface RemoteWritableEntityManager { + + /** + * Performs an asynchronous read operation for the specified component and entity. + * + * @param component the component for which the read operation is performed + * @param entity the entity to be read + * @param listener the listener to be notified when the read operation completes. + * The listener's {@link ActionListener#onResponse(Object)} method + * is called with a {@link RemoteReadResult} object containing the + * read data on successful read. The + * {@link ActionListener#onFailure(Exception)} method is called with + * an exception if the read operation fails. + */ + void readAsync(String component, AbstractClusterMetadataWriteableBlobEntity entity, ActionListener listener); + + /** + * Performs an asynchronous write operation for the specified component and entity. + * + * @param component the component for which the write operation is performed + * @param entity the entity to be written + * @param listener the listener to be notified when the write operation completes. + * The listener's {@link ActionListener#onResponse(Object)} method + * is called with a {@link UploadedMetadata} object containing the + * uploaded metadata on successful write. The + * {@link ActionListener#onFailure(Exception)} method is called with + * an exception if the write operation fails. 
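To make the manager/store dispatch above concrete, here is a minimal self-contained sketch (hypothetical names; a plain Consumer stands in for ActionListener): stores are registered per entity type, and an unknown type fails fast exactly like getStore():

import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;

public class EntityManagerSketch {
    // One async store per entity type, mirroring remoteWritableEntityStores.
    interface Store {
        void writeAsync(String payload, Consumer<String> onDone);
    }

    private final Map<String, Store> stores = new HashMap<>();

    void register(String type, Store store) {
        stores.put(type, store);
    }

    void writeAsync(String type, String payload, Consumer<String> onDone) {
        Store store = stores.get(type);
        if (store == null) {
            throw new IllegalArgumentException("Unknown entity type [" + type + "]");
        }
        store.writeAsync(payload, onDone);
    }

    public static void main(String[] args) {
        EntityManagerSketch manager = new EntityManagerSketch();
        manager.register("index-metadata", (payload, onDone) -> onDone.accept("uploaded: " + payload));
        manager.writeAsync("index-metadata", "mappings-v1", System.out::println);
    }
}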
+ */ + void writeAsync(String component, AbstractClusterMetadataWriteableBlobEntity entity, ActionListener listener); +} diff --git a/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java b/server/src/main/java/org/opensearch/common/remote/RemoteWriteableBlobEntity.java similarity index 66% rename from server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java rename to server/src/main/java/org/opensearch/common/remote/RemoteWriteableBlobEntity.java index 23fc9d3ad77cb..f034ce2d1adf1 100644 --- a/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java +++ b/server/src/main/java/org/opensearch/common/remote/RemoteWriteableBlobEntity.java @@ -10,34 +10,25 @@ import org.opensearch.common.blobstore.BlobPath; import org.opensearch.core.compress.Compressor; -import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.PATH_DELIMITER; /** - * An extension of {@link RemoteWriteableEntity} class which caters to the use case of writing to and reading from a blob storage - * - * @param The class type which can be uploaded to or downloaded from a blob storage. + * The abstract class which represents a {@link RemoteWriteableEntity} that can be written to a store + * @param the entity to be written */ -public abstract class AbstractRemoteWritableBlobEntity implements RemoteWriteableEntity { +public abstract class RemoteWriteableBlobEntity implements RemoteWriteableEntity { protected String blobFileName; protected String blobName; private final String clusterUUID; private final Compressor compressor; - private final NamedXContentRegistry namedXContentRegistry; private String[] pathTokens; - public AbstractRemoteWritableBlobEntity( - final String clusterUUID, - final Compressor compressor, - final NamedXContentRegistry namedXContentRegistry - ) { + public RemoteWriteableBlobEntity(final String clusterUUID, final Compressor compressor) { this.clusterUUID = clusterUUID; this.compressor = compressor; - this.namedXContentRegistry = namedXContentRegistry; } public abstract BlobPathParameters getBlobPathParameters(); @@ -76,16 +67,10 @@ public String clusterUUID() { return clusterUUID; } - public abstract UploadedMetadata getUploadedMetadata(); - public void setFullBlobName(BlobPath blobPath) { this.blobName = blobPath.buildAsString() + blobFileName; } - public NamedXContentRegistry getNamedXContentRegistry() { - return namedXContentRegistry; - } - protected Compressor getCompressor() { return compressor; } diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateBlobStore.java b/server/src/main/java/org/opensearch/common/remote/RemoteWriteableEntityBlobStore.java similarity index 76% rename from server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateBlobStore.java rename to server/src/main/java/org/opensearch/common/remote/RemoteWriteableEntityBlobStore.java index 1dd23443f1252..baa44a7c9bde9 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateBlobStore.java +++ b/server/src/main/java/org/opensearch/common/remote/RemoteWriteableEntityBlobStore.java @@ -6,21 +6,19 @@ * compatible open source license. 
*/ -package org.opensearch.gateway.remote.model; +package org.opensearch.common.remote; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.stream.write.WritePriority; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; -import org.opensearch.common.remote.RemoteWritableEntityStore; -import org.opensearch.common.remote.RemoteWriteableEntity; import org.opensearch.core.action.ActionListener; -import org.opensearch.gateway.remote.RemoteClusterStateUtils; import org.opensearch.index.translog.transfer.BlobStoreTransferService; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.util.Base64; import java.util.concurrent.ExecutorService; /** @@ -29,24 +27,27 @@ * @param The entity which can be uploaded to / downloaded from blob store * @param The concrete class implementing {@link RemoteWriteableEntity} which is used as a wrapper for T entity. */ -public class RemoteClusterStateBlobStore> implements RemoteWritableEntityStore { +public class RemoteWriteableEntityBlobStore> implements RemoteWritableEntityStore { private final BlobStoreTransferService transferService; private final BlobStoreRepository blobStoreRepository; private final String clusterName; private final ExecutorService executorService; + private final String pathToken; - public RemoteClusterStateBlobStore( + public RemoteWriteableEntityBlobStore( final BlobStoreTransferService blobStoreTransferService, final BlobStoreRepository blobStoreRepository, final String clusterName, final ThreadPool threadPool, - final String executor + final String executor, + final String pathToken ) { this.transferService = blobStoreTransferService; this.blobStoreRepository = blobStoreRepository; this.clusterName = clusterName; this.executorService = threadPool.executor(executor); + this.pathToken = pathToken; } @Override @@ -88,18 +89,23 @@ public void readAsync(final U entity, final ActionListener listener) { }); } - private BlobPath getBlobPathForUpload(final AbstractRemoteWritableBlobEntity obj) { - BlobPath blobPath = blobStoreRepository.basePath() - .add(RemoteClusterStateUtils.encodeString(clusterName)) - .add("cluster-state") - .add(obj.clusterUUID()); + public String getClusterName() { + return clusterName; + } + + public BlobPath getBlobPathPrefix(String clusterUUID) { + return blobStoreRepository.basePath().add(encodeString(getClusterName())).add(pathToken).add(clusterUUID); + } + + public BlobPath getBlobPathForUpload(final RemoteWriteableBlobEntity obj) { + BlobPath blobPath = getBlobPathPrefix(obj.clusterUUID()); for (String token : obj.getBlobPathParameters().getPathTokens()) { blobPath = blobPath.add(token); } return blobPath; } - private BlobPath getBlobPathForDownload(final AbstractRemoteWritableBlobEntity obj) { + public BlobPath getBlobPathForDownload(final RemoteWriteableBlobEntity obj) { String[] pathTokens = obj.getBlobPathTokens(); BlobPath blobPath = new BlobPath(); if (pathTokens == null || pathTokens.length < 1) { @@ -112,4 +118,8 @@ private BlobPath getBlobPathForDownload(final AbstractRemoteWritableBlobEntity { Map groups = s.getAsGroups(); @@ -263,7 +273,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { */ public static final Map> FEATURE_FLAGGED_INDEX_SETTINGS = Map.of( FeatureFlags.TIERED_REMOTE_INDEX, - List.of(IndexModule.INDEX_STORE_LOCALITY_SETTING) + 
List.of(IndexModule.INDEX_STORE_LOCALITY_SETTING, IndexModule.INDEX_TIERING_STATE) ); public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, BUILT_IN_INDEX_SETTINGS); diff --git a/server/src/main/java/org/opensearch/common/util/BatchRunnableExecutor.java b/server/src/main/java/org/opensearch/common/util/BatchRunnableExecutor.java new file mode 100644 index 0000000000000..cfe2bbb85bda4 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/util/BatchRunnableExecutor.java @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.Randomness; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.TimeoutAwareRunnable; + +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; + +/** + * A {@link Runnable} that iteratively executes a batch of {@link TimeoutAwareRunnable}s. If the elapsed time exceeds the timeout defined by {@link TimeValue} timeout, then all subsequent {@link TimeoutAwareRunnable}s will have their {@link TimeoutAwareRunnable#onTimeout} method invoked and will not be run. + * + * @opensearch.internal + */ +public class BatchRunnableExecutor implements Runnable { + + private final Supplier timeoutSupplier; + + private final List timeoutAwareRunnables; + + private static final Logger logger = LogManager.getLogger(BatchRunnableExecutor.class); + + public BatchRunnableExecutor(List timeoutAwareRunnables, Supplier timeoutSupplier) { + this.timeoutSupplier = timeoutSupplier; + this.timeoutAwareRunnables = timeoutAwareRunnables; + } + + // for tests + public List getTimeoutAwareRunnables() { + return this.timeoutAwareRunnables; + } + + @Override + public void run() { + logger.debug("Starting execution of runnable of size [{}]", timeoutAwareRunnables.size()); + long startTime = System.nanoTime(); + if (timeoutAwareRunnables.isEmpty()) { + return; + } + Randomness.shuffle(timeoutAwareRunnables); + for (TimeoutAwareRunnable runnable : timeoutAwareRunnables) { + if (timeoutSupplier.get().nanos() < 0 || System.nanoTime() - startTime < timeoutSupplier.get().nanos()) { + runnable.run(); + } else { + logger.debug("Executing timeout for runnable of size [{}]", timeoutAwareRunnables.size()); + runnable.onTimeout(); + } + } + logger.debug( + "Time taken to execute timed runnables in this cycle:[{}ms]", + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime) + ); + onComplete(); + } + + /** + * Callback method that is invoked after all {@link TimeoutAwareRunnable} instances in the batch have been processed. + * By default, this method does nothing, but it can be overridden by subclasses or modified in the implementation if + * there is a need to perform additional actions once the batch execution is completed. 
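A compact standalone model of the batch loop above (hypothetical names; the real executor also shuffles the batch via Randomness.shuffle before iterating): each runnable either runs within the time budget or has onTimeout() invoked instead:

import java.util.List;
import java.util.concurrent.TimeUnit;

public class BatchTimeoutSketch {
    interface TimeoutAware extends Runnable {
        void onTimeout();
    }

    static void runBatch(List<TimeoutAware> batch, long budgetNanos) {
        long start = System.nanoTime();
        for (TimeoutAware r : batch) {
            // A negative budget disables the timeout, like TimeValue.MINUS_ONE above.
            if (budgetNanos < 0 || System.nanoTime() - start < budgetNanos) {
                r.run();
            } else {
                r.onTimeout();
            }
        }
    }

    public static void main(String[] args) {
        TimeoutAware task = new TimeoutAware() {
            public void run() { System.out.println("ran"); }
            public void onTimeout() { System.out.println("timed out"); }
        };
        runBatch(List.of(task, task), TimeUnit.MILLISECONDS.toNanos(100));
    }
}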
+     */
+    public void onComplete() {}
+}
diff --git a/server/src/main/java/org/opensearch/common/util/ByteArrayBackedBitset.java b/server/src/main/java/org/opensearch/common/util/ByteArrayBackedBitset.java
new file mode 100644
index 0000000000000..2d7948d414937
--- /dev/null
+++ b/server/src/main/java/org/opensearch/common/util/ByteArrayBackedBitset.java
@@ -0,0 +1,86 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.util;
+
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.RandomAccessInput;
+
+import java.io.IOException;
+
+/**
+ * A bitset backed by a byte array. This will initialize and set bits in the byte array based on the index.
+ */
+public class ByteArrayBackedBitset {
+    private final byte[] byteArray;
+
+    /**
+     * Constructor which allocates an on-heap byte array. This should be used during construction of the bitset.
+     */
+    public ByteArrayBackedBitset(int capacity) {
+        byteArray = new byte[capacity];
+    }
+
+    /**
+     * Constructor which uses Lucene's RandomAccessInput to read the bitset into a read-only buffer.
+     */
+    public ByteArrayBackedBitset(RandomAccessInput in, long offset, int length) throws IOException {
+        byteArray = new byte[length];
+        int i = 0;
+        while (i < length) {
+            byteArray[i] = in.readByte(offset + i);
+            i++;
+        }
+    }
+
+    /**
+     * Constructor which uses Lucene's IndexInput to read the bitset into a read-only buffer.
+     */
+    public ByteArrayBackedBitset(IndexInput in, int length) throws IOException {
+        byteArray = new byte[length];
+        int i = 0;
+        while (i < length) {
+            byteArray[i] = in.readByte();
+            i++;
+        }
+    }
+
+    /**
+     * Sets the bit at the given index to 1.
+     * Each byte can indicate 8 bits, so the index is divided by 8 to get the byte array index.
+     * @param index the index to set the bit
+     */
+    public void set(int index) {
+        int byteArrIndex = index >> 3;
+        byteArray[byteArrIndex] |= (byte) (1 << (index & 7));
+    }
+
+    public int write(IndexOutput output) throws IOException {
+        int numBytes = 0;
+        for (Byte bitSet : byteArray) {
+            output.writeByte(bitSet);
+            numBytes += Byte.BYTES;
+        }
+        return numBytes;
+    }
+
+    /**
+     * Retrieves whether the bit is set or not at the given index.
+     * @param index the index to look up for the bit
+     * @return true if bit is set, false otherwise
+     */
+    public boolean get(int index) throws IOException {
+        int byteArrIndex = index >> 3;
+        return (byteArray[byteArrIndex] & (1 << (index & 7))) != 0;
+    }
+
+    public int getCurrBytesRead() {
+        return byteArray.length;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java
index 6c6e2f2d600f0..e2554d61116ad 100644
--- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java
+++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java
@@ -72,6 +72,11 @@ public class FeatureFlags {
      */
     public static final String REMOTE_PUBLICATION_EXPERIMENTAL = "opensearch.experimental.feature.remote_store.publication.enabled";
 
+    /**
+     * Gates the functionality of background task execution.
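The bit addressing used by ByteArrayBackedBitset above is easy to verify in isolation: index >> 3 selects the backing byte and index & 7 selects the bit within it. A tiny standalone demo:

public class BitsetSketch {
    public static void main(String[] args) {
        byte[] bits = new byte[2];                                // room for 16 bits
        int index = 11;
        bits[index >> 3] |= (byte) (1 << (index & 7));            // set bit 11 (byte 1, bit 3)
        boolean isSet = (bits[index >> 3] & (1 << (index & 7))) != 0;
        System.out.println("bit 11 set? " + isSet);               // true
    }
}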
+ */ + public static final String BACKGROUND_TASK_EXECUTION_EXPERIMENTAL = "opensearch.experimental.feature.task.background.enabled"; + public static final Setting REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING = Setting.boolSetting( REMOTE_STORE_MIGRATION_EXPERIMENTAL, false, @@ -100,6 +105,23 @@ public class FeatureFlags { Property.NodeScope ); + /** + * Gates the functionality of star tree index, which improves the performance of search + * aggregations. + */ + public static final String STAR_TREE_INDEX = "opensearch.experimental.feature.composite_index.star_tree.enabled"; + public static final Setting STAR_TREE_INDEX_SETTING = Setting.boolSetting(STAR_TREE_INDEX, false, Property.NodeScope); + + /** + * Gates the functionality of application based configuration templates. + */ + public static final String APPLICATION_BASED_CONFIGURATION_TEMPLATES = "opensearch.experimental.feature.application_templates.enabled"; + public static final Setting APPLICATION_BASED_CONFIGURATION_TEMPLATES_SETTING = Setting.boolSetting( + APPLICATION_BASED_CONFIGURATION_TEMPLATES, + false, + Property.NodeScope + ); + private static final List> ALL_FEATURE_FLAG_SETTINGS = List.of( REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING, EXTENSIONS_SETTING, @@ -108,8 +130,11 @@ public class FeatureFlags { DATETIME_FORMATTER_CACHING_SETTING, TIERED_REMOTE_INDEX_SETTING, PLUGGABLE_CACHE_SETTING, - REMOTE_PUBLICATION_EXPERIMENTAL_SETTING + REMOTE_PUBLICATION_EXPERIMENTAL_SETTING, + STAR_TREE_INDEX_SETTING, + APPLICATION_BASED_CONFIGURATION_TEMPLATES_SETTING ); + /** * Should store the settings from opensearch.yml. */ diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java index 906a27e9f398c..070e18481f2a3 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java @@ -45,11 +45,13 @@ import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.http.HttpTransportSettings; +import org.opensearch.secure_sm.ThreadContextPermission; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskThreadContextStatePropagator; import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.security.Permission; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -111,6 +113,12 @@ public final class ThreadContext implements Writeable { */ public static final String ACTION_ORIGIN_TRANSIENT_NAME = "action.origin"; + // thread context permissions + + private static final Permission ACCESS_SYSTEM_THREAD_CONTEXT_PERMISSION = new ThreadContextPermission("markAsSystemContext"); + private static final Permission STASH_AND_MERGE_THREAD_CONTEXT_PERMISSION = new ThreadContextPermission("stashAndMergeHeaders"); + private static final Permission STASH_WITH_ORIGIN_THREAD_CONTEXT_PERMISSION = new ThreadContextPermission("stashWithOrigin"); + private static final Logger logger = LogManager.getLogger(ThreadContext.class); private static final ThreadContextStruct DEFAULT_CONTEXT = new ThreadContextStruct(); private final Map defaultHeader; @@ -205,8 +213,19 @@ public Writeable captureAsWriteable() { * For example, a user might not have permission to GET from the tasks index * but the tasks API will perform a get on their behalf using this method * if it can't find the task in memory. 
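A hedged illustration of the experimental-flag pattern above, with a system-property lookup standing in for the real Settings plumbing: a boolean node-scope property that defaults to false gates an optional code path:

public class FeatureFlagSketch {
    static final String STAR_TREE_INDEX = "opensearch.experimental.feature.composite_index.star_tree.enabled";

    public static void main(String[] args) {
        // On a real node this value comes from opensearch.yml / JVM system properties.
        boolean enabled = Boolean.parseBoolean(System.getProperty(STAR_TREE_INDEX, "false"));
        System.out.println(enabled ? "star tree aggregation path" : "default aggregation path");
    }
}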
+ * + * Usage of stashWithOrigin is guarded by a ThreadContextPermission. In order to use + * stashWithOrigin, the codebase needs to explicitly be granted permission in the JSM policy file. + * + * Add an entry in the grant portion of the policy file like this: + * + * permission org.opensearch.secure_sm.ThreadContextPermission "stashWithOrigin"; */ public StoredContext stashWithOrigin(String origin) { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(STASH_WITH_ORIGIN_THREAD_CONTEXT_PERMISSION); + } final ThreadContext.StoredContext storedContext = stashContext(); putTransient(ACTION_ORIGIN_TRANSIENT_NAME, origin); return storedContext; @@ -216,8 +235,19 @@ public StoredContext stashWithOrigin(String origin) { * Removes the current context and resets a new context that contains a merge of the current headers and the given headers. * The removed context can be restored when closing the returned {@link StoredContext}. The merge strategy is that headers * that are already existing are preserved unless they are defaults. + * + * Usage of stashAndMergeHeaders is guarded by a ThreadContextPermission. In order to use + * stashAndMergeHeaders, the codebase needs to explicitly be granted permission in the JSM policy file. + * + * Add an entry in the grant portion of the policy file like this: + * + * permission org.opensearch.secure_sm.ThreadContextPermission "stashAndMergeHeaders"; */ public StoredContext stashAndMergeHeaders(Map headers) { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(STASH_AND_MERGE_THREAD_CONTEXT_PERMISSION); + } final ThreadContextStruct context = threadLocal.get(); Map newHeader = new HashMap<>(headers); newHeader.putAll(context.requestHeaders); @@ -554,8 +584,19 @@ boolean isDefaultContext() { /** * Marks this thread context as an internal system context. This signals that actions in this context are issued * by the system itself rather than by a user action. + * + * Usage of markAsSystemContext is guarded by a ThreadContextPermission. In order to use + * markAsSystemContext, the codebase needs to explicitly be granted permission in the JSM policy file. + * + * Add an entry in the grant portion of the policy file like this: + * + * permission org.opensearch.secure_sm.ThreadContextPermission "markAsSystemContext"; */ public void markAsSystemContext() { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(ACCESS_SYSTEM_THREAD_CONTEXT_PERMISSION); + } threadLocal.set(threadLocal.get().setSystemContext(propagators)); } diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextAccess.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextAccess.java new file mode 100644 index 0000000000000..14f8b8d79bf4d --- /dev/null +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextAccess.java @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
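The checks above follow the standard SecurityManager guard idiom. A self-contained sketch (hypothetical DemoPermission; the real code uses org.opensearch.secure_sm.ThreadContextPermission) showing that the check is simply skipped when no SecurityManager is installed:

import java.security.BasicPermission;

public class PermissionGuardSketch {
    static final class DemoPermission extends BasicPermission {
        DemoPermission(String name) {
            super(name);
        }
    }

    @SuppressWarnings("removal") // SecurityManager is deprecated for removal in recent JDKs
    static void markAsSystemContext() {
        SecurityManager sm = System.getSecurityManager();
        if (sm != null) {
            sm.checkPermission(new DemoPermission("markAsSystemContext"));
        }
        // ... the privileged state change would happen here ...
    }

    public static void main(String[] args) {
        markAsSystemContext(); // no SecurityManager installed, so the check is skipped
        System.out.println("ok");
    }
}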
+ */ + +package org.opensearch.common.util.concurrent; + +import org.opensearch.SpecialPermission; +import org.opensearch.common.annotation.InternalApi; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +/** + * This class wraps the {@link ThreadContext} operations requiring access in + * {@link AccessController#doPrivileged(PrivilegedAction)} blocks. + * + * @opensearch.internal + */ +@SuppressWarnings("removal") +@InternalApi +public final class ThreadContextAccess { + + private ThreadContextAccess() {} + + public static T doPrivileged(PrivilegedAction operation) { + SpecialPermission.check(); + return AccessController.doPrivileged(operation); + } + + public static void doPrivilegedVoid(Runnable action) { + SpecialPermission.check(); + AccessController.doPrivileged((PrivilegedAction) () -> { + action.run(); + return null; + }); + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/package-info.java b/server/src/main/java/org/opensearch/common/util/concurrent/TimeoutAwareRunnable.java similarity index 52% rename from plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/package-info.java rename to server/src/main/java/org/opensearch/common/util/concurrent/TimeoutAwareRunnable.java index 3cc7900e5ce7d..8d3357ad93095 100644 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/package-info.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/TimeoutAwareRunnable.java @@ -6,7 +6,14 @@ * compatible open source license. */ +package org.opensearch.common.util.concurrent; + /** - * Transport Actions, Requests and Responses for Top N Queries + * Runnable that is aware of a timeout + * + * @opensearch.internal */ -package org.opensearch.plugin.insights.rules.action.top_queries; +public interface TimeoutAwareRunnable extends Runnable { + + void onTimeout(); +} diff --git a/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java b/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java index 998122d9e5c43..d24571fc5778d 100644 --- a/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java +++ b/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java @@ -9,6 +9,7 @@ package org.opensearch.common.xcontent; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.AbstractXContentParser; import org.opensearch.core.xcontent.DeprecationHandler; @@ -23,6 +24,9 @@ import java.math.BigInteger; import java.nio.CharBuffer; import java.util.ArrayList; +import java.util.Collections; +import java.util.Deque; +import java.util.LinkedList; /** * JsonToStringParser is the main parser class to transform JSON into stringFields in a XContentParser @@ -32,21 +36,20 @@ */ public class JsonToStringXContentParser extends AbstractXContentParser { private final String fieldTypeName; - private XContentParser parser; + private final XContentParser parser; - private ArrayList valueList = new ArrayList<>(); - private ArrayList valueAndPathList = new ArrayList<>(); - private ArrayList keyList = new ArrayList<>(); + private final ArrayList valueList = new ArrayList<>(); + private final ArrayList valueAndPathList = new ArrayList<>(); + private final ArrayList keyList = new ArrayList<>(); - private 
XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent); + private final XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent); - private NamedXContentRegistry xContentRegistry; + private final NamedXContentRegistry xContentRegistry; - private DeprecationHandler deprecationHandler; + private final DeprecationHandler deprecationHandler; private static final String VALUE_AND_PATH_SUFFIX = "._valueAndPath"; private static final String VALUE_SUFFIX = "._value"; - private static final String DOT_SYMBOL = "."; private static final String EQUAL_SYMBOL = "="; public JsonToStringXContentParser( @@ -63,9 +66,14 @@ public JsonToStringXContentParser( } public XContentParser parseObject() throws IOException { + assert currentToken() == Token.START_OBJECT; + parser.nextToken(); // Skip the outer START_OBJECT. Need to return on END_OBJECT. + builder.startObject(); - StringBuilder path = new StringBuilder(fieldTypeName); - parseToken(path, null); + LinkedList path = new LinkedList<>(Collections.singleton(fieldTypeName)); + while (currentToken() != Token.END_OBJECT) { + parseToken(path); + } builder.field(this.fieldTypeName, keyList); builder.field(this.fieldTypeName + VALUE_SUFFIX, valueList); builder.field(this.fieldTypeName + VALUE_AND_PATH_SUFFIX, valueAndPathList); @@ -74,75 +82,55 @@ public XContentParser parseObject() throws IOException { return JsonXContent.jsonXContent.createParser(this.xContentRegistry, this.deprecationHandler, String.valueOf(jString)); } - private void parseToken(StringBuilder path, String currentFieldName) throws IOException { - - while (this.parser.nextToken() != Token.END_OBJECT) { - if (this.parser.currentName() != null) { - currentFieldName = this.parser.currentName(); + private void parseToken(Deque path) throws IOException { + if (this.parser.currentToken() == Token.FIELD_NAME) { + String fieldName = this.parser.currentName(); + path.addLast(fieldName); // Pushing onto the stack *must* be matched by pop + String parts = fieldName; + while (parts.contains(".")) { // Extract the intermediate keys maybe present in fieldName + int dotPos = parts.indexOf('.'); + String part = parts.substring(0, dotPos); + this.keyList.add(part); + parts = parts.substring(dotPos + 1); } - StringBuilder parsedFields = new StringBuilder(); - - if (this.parser.currentToken() == Token.FIELD_NAME) { - path.append(DOT_SYMBOL).append(currentFieldName); - int dotIndex = currentFieldName.indexOf(DOT_SYMBOL); - String fieldNameSuffix = currentFieldName; - // The field name may be of the form foo.bar.baz - // If that's the case, each "part" is a key. 
- while (dotIndex >= 0) { - String fieldNamePrefix = fieldNameSuffix.substring(0, dotIndex); - if (!fieldNamePrefix.isEmpty()) { - this.keyList.add(fieldNamePrefix); - } - fieldNameSuffix = fieldNameSuffix.substring(dotIndex + 1); - dotIndex = fieldNameSuffix.indexOf(DOT_SYMBOL); - } - if (!fieldNameSuffix.isEmpty()) { - this.keyList.add(fieldNameSuffix); - } - } else if (this.parser.currentToken() == Token.START_ARRAY) { - parseToken(path, currentFieldName); - break; - } else if (this.parser.currentToken() == Token.END_ARRAY) { - // skip - } else if (this.parser.currentToken() == Token.START_OBJECT) { - parseToken(path, currentFieldName); - int dotIndex = path.lastIndexOf(DOT_SYMBOL, path.length()); - - if (dotIndex != -1 && path.length() > currentFieldName.length()) { - path.setLength(path.length() - currentFieldName.length() - 1); - } - } else { - if (!path.toString().contains(currentFieldName)) { - path.append(DOT_SYMBOL).append(currentFieldName); - } - parseValue(parsedFields); - this.valueList.add(parsedFields.toString()); - this.valueAndPathList.add(path + EQUAL_SYMBOL + parsedFields); - int dotIndex = path.lastIndexOf(DOT_SYMBOL, path.length()); - if (dotIndex != -1 && path.length() > currentFieldName.length()) { - path.setLength(path.length() - currentFieldName.length() - 1); - } + this.keyList.add(parts); // parts has no dot, so either it's the original fieldName or it's the last part + this.parser.nextToken(); // advance to the value of fieldName + parseToken(path); // parse the value for fieldName (which will be an array, an object, or a primitive value) + path.removeLast(); // Here is where we pop fieldName from the stack (since we're done with the value of fieldName) + // Note that whichever other branch we just passed through has already ended with nextToken(), so we + // don't need to call it. 
+ } else if (this.parser.currentToken() == Token.START_ARRAY) { + parser.nextToken(); + while (this.parser.currentToken() != Token.END_ARRAY) { + parseToken(path); } - + this.parser.nextToken(); + } else if (this.parser.currentToken() == Token.START_OBJECT) { + parser.nextToken(); + while (this.parser.currentToken() != Token.END_OBJECT) { + parseToken(path); + } + this.parser.nextToken(); + } else if (this.parser.currentToken().isValue()) { + String parsedValue = parseValue(); + if (parsedValue != null) { + this.valueList.add(parsedValue); + this.valueAndPathList.add(Strings.collectionToDelimitedString(path, ".") + EQUAL_SYMBOL + parsedValue); + } + this.parser.nextToken(); } } - private void parseValue(StringBuilder parsedFields) throws IOException { + private String parseValue() throws IOException { switch (this.parser.currentToken()) { case VALUE_BOOLEAN: case VALUE_NUMBER: case VALUE_STRING: case VALUE_NULL: - parsedFields.append(this.parser.textOrNull()); - break; + return this.parser.textOrNull(); // Handle other token types as needed - case FIELD_NAME: - case VALUE_EMBEDDED_OBJECT: - case END_ARRAY: - case START_ARRAY: - break; default: - throw new IOException("Unsupported token type [" + parser.currentToken() + "]"); + throw new IOException("Unsupported value token type [" + parser.currentToken() + "]"); } } diff --git a/server/src/main/java/org/opensearch/env/NodeEnvironment.java b/server/src/main/java/org/opensearch/env/NodeEnvironment.java index 2748938d8b761..709c0eba4f57f 100644 --- a/server/src/main/java/org/opensearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/opensearch/env/NodeEnvironment.java @@ -35,7 +35,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Strings; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.Directory; @@ -61,6 +60,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; @@ -1300,7 +1300,7 @@ public static List collectFileCacheDataPath(NodePath fileCacheNodePath) th * Resolve the custom path for a index's shard. 
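The recursive walk in the rewritten JsonToStringXContentParser above can be mimicked on plain maps to see what ends up in each list. A standalone sketch (hypothetical driver, no XContent types): dotted field names contribute every part to keyList, leaves go to valueList, and valueAndPathList records path=value pairs joined with dots:

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;
import java.util.Map;

public class FlattenSketch {
    static final List<String> keys = new ArrayList<>();
    static final List<String> values = new ArrayList<>();
    static final List<String> valueAndPath = new ArrayList<>();

    @SuppressWarnings("unchecked")
    static void walk(Deque<String> path, Map<String, Object> obj) {
        for (Map.Entry<String, Object> e : obj.entrySet()) {
            path.addLast(e.getKey());
            for (String part : e.getKey().split("\\.")) {
                keys.add(part); // "a.b" contributes both "a" and "b"
            }
            if (e.getValue() instanceof Map) {
                walk(path, (Map<String, Object>) e.getValue());
            } else {
                values.add(String.valueOf(e.getValue()));
                valueAndPath.add(String.join(".", path) + "=" + e.getValue());
            }
            path.removeLast();
        }
    }

    public static void main(String[] args) {
        Deque<String> path = new ArrayDeque<>(List.of("field"));
        Map<String, Object> root = Map.of("a.b", Map.of("c", 1));
        walk(path, root);
        System.out.println(keys + " " + values + " " + valueAndPath);
        // [a, b, c] [1] [field.a.b.c=1]
    }
}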
*/ public static Path resolveBaseCustomLocation(String customDataPath, Path sharedDataPath, int nodeLockId) { - if (Strings.isNotEmpty(customDataPath)) { + if (!Strings.isNullOrEmpty(customDataPath)) { // This assert is because this should be caught by MetadataCreateIndexService assert sharedDataPath != null; return sharedDataPath.resolve(customDataPath).resolve(Integer.toString(nodeLockId)); diff --git a/server/src/main/java/org/opensearch/gateway/AsyncShardBatchFetch.java b/server/src/main/java/org/opensearch/gateway/AsyncShardBatchFetch.java index 4f39a39cea678..df642a9f5a743 100644 --- a/server/src/main/java/org/opensearch/gateway/AsyncShardBatchFetch.java +++ b/server/src/main/java/org/opensearch/gateway/AsyncShardBatchFetch.java @@ -80,6 +80,14 @@ public synchronized void clearShard(ShardId shardId) { this.cache.deleteShard(shardId); } + public boolean hasEmptyCache() { + return this.cache.getCache().isEmpty(); + } + + public AsyncShardFetchCache getCache() { + return this.cache; + } + /** * Cache implementation of transport actions returning batch of shards related data in the response. * Store node level responses of transport actions like {@link TransportNodesListGatewayStartedShardsBatch} or diff --git a/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java b/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java index 58982e869794f..2b6c5e3f5ae53 100644 --- a/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java @@ -36,6 +36,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingNode; +import org.opensearch.cluster.routing.RoutingNodes; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.allocation.AllocateUnassignedDecision; import org.opensearch.cluster.routing.allocation.AllocationDecision; @@ -43,9 +44,11 @@ import org.opensearch.cluster.routing.allocation.NodeAllocationResult; import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.cluster.routing.allocation.decider.Decision; +import org.opensearch.core.index.shard.ShardId; import java.util.ArrayList; import java.util.List; +import java.util.Set; /** * An abstract class that implements basic functionality for allocating @@ -78,6 +81,29 @@ public void allocateUnassigned( executeDecision(shardRouting, allocateUnassignedDecision, allocation, unassignedAllocationHandler); } + protected void allocateUnassignedBatchOnTimeout(Set shardIds, RoutingAllocation allocation, boolean primary) { + if (shardIds.isEmpty()) { + return; + } + RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); + while (iterator.hasNext()) { + ShardRouting unassignedShard = iterator.next(); + AllocateUnassignedDecision allocationDecision; + if (unassignedShard.primary() == primary && shardIds.contains(unassignedShard.shardId())) { + if (isResponsibleFor(unassignedShard) == false) { + continue; + } + allocationDecision = AllocateUnassignedDecision.throttle(null); + executeDecision(unassignedShard, allocationDecision, allocation, iterator); + } + } + } + + /** + * Is the allocator responsible for allocating the given {@link ShardRouting}? 
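A toy model (hypothetical types, not the OpenSearch routing classes) of allocateUnassignedBatchOnTimeout above: unassigned shards whose ids were collected on timeout receive a THROTTLE decision so they are retried in a later allocation round:

import java.util.List;
import java.util.Set;

public class TimeoutThrottleSketch {
    record Shard(int id, boolean primary) {}

    static void onTimeout(List<Shard> unassigned, Set<Integer> timedOutIds, boolean primary) {
        for (Shard s : unassigned) {
            if (s.primary() == primary && timedOutIds.contains(s.id())) {
                System.out.println("shard " + s.id() + " -> THROTTLE");
            }
        }
    }

    public static void main(String[] args) {
        onTimeout(List.of(new Shard(1, true), new Shard(2, false)), Set.of(1, 2), true);
        // only shard 1 is throttled: shard 2 is a replica and this pass handles primaries
    }
}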
+ */ + protected abstract boolean isResponsibleFor(ShardRouting shardRouting); + protected void executeDecision( ShardRouting shardRouting, AllocateUnassignedDecision allocateUnassignedDecision, diff --git a/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java b/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java index f41545cbdf9bf..dea7ca9a08edd 100644 --- a/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java @@ -82,7 +82,7 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator { /** * Is the allocator responsible for allocating the given {@link ShardRouting}? */ - protected static boolean isResponsibleFor(final ShardRouting shard) { + protected boolean isResponsibleFor(final ShardRouting shard) { return shard.primary() // must be primary && shard.unassigned() // must be unassigned // only handle either an existing store or a snapshot recovery diff --git a/server/src/main/java/org/opensearch/gateway/PrimaryShardBatchAllocator.java b/server/src/main/java/org/opensearch/gateway/PrimaryShardBatchAllocator.java index 27f9bedc4e495..c493bf717c97f 100644 --- a/server/src/main/java/org/opensearch/gateway/PrimaryShardBatchAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/PrimaryShardBatchAllocator.java @@ -23,8 +23,10 @@ import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; /** * PrimaryShardBatchAllocator is similar to {@link org.opensearch.gateway.PrimaryShardAllocator} only difference is @@ -82,6 +84,7 @@ public AllocateUnassignedDecision makeAllocationDecision(ShardRouting unassigned * @param allocation the allocation state container object */ public void allocateUnassignedBatch(List shardRoutings, RoutingAllocation allocation) { + logger.trace("Starting shard allocation execution for unassigned primary shards: {}", shardRoutings.size()); HashMap ineligibleShardAllocationDecisions = new HashMap<>(); List eligibleShards = new ArrayList<>(); List inEligibleShards = new ArrayList<>(); @@ -99,13 +102,13 @@ public void allocateUnassignedBatch(List shardRoutings, RoutingAll // only fetch data for eligible shards final FetchResult shardsState = fetchData(eligibleShards, inEligibleShards, allocation); + Set batchShardRoutingSet = new HashSet<>(shardRoutings); RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); while (iterator.hasNext()) { ShardRouting unassignedShard = iterator.next(); AllocateUnassignedDecision allocationDecision; - if (shardRoutings.contains(unassignedShard)) { - assert unassignedShard.primary(); + if (unassignedShard.primary() && batchShardRoutingSet.contains(unassignedShard)) { if (ineligibleShardAllocationDecisions.containsKey(unassignedShard.shardId())) { allocationDecision = ineligibleShardAllocationDecisions.get(unassignedShard.shardId()); } else { @@ -115,6 +118,7 @@ public void allocateUnassignedBatch(List shardRoutings, RoutingAll executeDecision(unassignedShard, allocationDecision, allocation, iterator); } } + logger.trace("Finished shard allocation execution for unassigned primary shards: {}", shardRoutings.size()); } /** diff --git a/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java b/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java index d9474b32bdbf6..c30ee8479ac97 100644 
--- a/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java @@ -100,7 +100,10 @@ protected Runnable cancelExistingRecoveryForBetterMatch( Metadata metadata = allocation.metadata(); RoutingNodes routingNodes = allocation.routingNodes(); ShardRouting primaryShard = allocation.routingNodes().activePrimary(shard.shardId()); - assert primaryShard != null : "the replica shard can be allocated on at least one node, so there must be an active primary"; + if (primaryShard == null) { + logger.trace("{}: no active primary shard found or allocated, letting actual allocation figure it out", shard); + return null; + } assert primaryShard.currentNodeId() != null; final DiscoveryNode primaryNode = allocation.nodes().get(primaryShard.currentNodeId()); @@ -188,7 +191,7 @@ public void processExistingRecoveries(RoutingAllocation allocation) { /** * Is the allocator responsible for allocating the given {@link ShardRouting}? */ - protected static boolean isResponsibleFor(final ShardRouting shard) { + protected boolean isResponsibleFor(final ShardRouting shard) { return shard.primary() == false // must be a replica && shard.unassigned() // must be unassigned // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one... diff --git a/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java b/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java index f2cb3d053440d..020a543ac5fc5 100644 --- a/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java @@ -28,10 +28,11 @@ import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.function.Supplier; -import java.util.stream.Collectors; /** * Allocates replica shards in a batch mode @@ -117,6 +118,7 @@ public AllocateUnassignedDecision makeAllocationDecision(ShardRouting unassigned * @param allocation the allocation state container object */ public void allocateUnassignedBatch(List shardRoutings, RoutingAllocation allocation) { + logger.trace("Starting shard allocation execution for unassigned replica shards: {}", shardRoutings.size()); List eligibleShards = new ArrayList<>(); List ineligibleShards = new ArrayList<>(); Map ineligibleShardAllocationDecisions = new HashMap<>(); @@ -135,7 +137,11 @@ public void allocateUnassignedBatch(List shardRoutings, RoutingAll // only fetch data for eligible shards final FetchResult shardsState = fetchData(eligibleShards, ineligibleShards, allocation); - List shardIdsFromBatch = shardRoutings.stream().map(shardRouting -> shardRouting.shardId()).collect(Collectors.toList()); + Set shardIdsFromBatch = new HashSet<>(); + for (ShardRouting shardRouting : shardRoutings) { + ShardId shardId = shardRouting.shardId(); + shardIdsFromBatch.add(shardId); + } RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); while (iterator.hasNext()) { ShardRouting unassignedShard = iterator.next(); @@ -159,6 +165,7 @@ public void allocateUnassignedBatch(List shardRoutings, RoutingAll executeDecision(unassignedShard, allocateUnassignedDecision, allocation, iterator); } } + logger.trace("Finished shard allocation execution for unassigned replica shards: {}", shardRoutings.size()); } private 
AllocateUnassignedDecision getUnassignedShardAllocationDecision( @@ -166,7 +173,7 @@ private AllocateUnassignedDecision getUnassignedShardAllocationDecision( RoutingAllocation allocation, Supplier> nodeStoreFileMetaDataMapSupplier ) { - if (!isResponsibleFor(shardRouting)) { + if (isResponsibleFor(shardRouting) == false) { return AllocateUnassignedDecision.NOT_TAKEN; } Tuple> result = canBeAllocatedToAtLeastOneNode(shardRouting, allocation); @@ -176,7 +183,7 @@ private AllocateUnassignedDecision getUnassignedShardAllocationDecision( if (allocationDecision.type() != Decision.Type.YES && (!explain || !hasInitiatedFetching(shardRouting))) { // only return early if we are not in explain mode, or we are in explain mode but we have not // yet attempted to fetch any shard data - logger.trace("{}: ignoring allocation, can't be allocated on any node", shardRouting); + logger.trace("{}: ignoring allocation, can't be allocated on any node. Decision: {}", shardRouting, allocationDecision.type()); return AllocateUnassignedDecision.no( UnassignedInfo.AllocationStatus.fromDecision(allocationDecision.type()), result.v2() != null ? new ArrayList<>(result.v2().values()) : null diff --git a/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java b/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java index 3c0797cd450d2..d18304ea73ed0 100644 --- a/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java @@ -27,9 +27,13 @@ import org.opensearch.common.UUIDs; import org.opensearch.common.inject.Inject; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.BatchRunnableExecutor; import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.common.util.concurrent.TimeoutAwareRunnable; import org.opensearch.common.util.set.Sets; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; @@ -41,6 +45,7 @@ import org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper; import org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper.StoreFilesMetadata; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -68,6 +73,15 @@ public class ShardsBatchGatewayAllocator implements ExistingShardsAllocator { private final long maxBatchSize; private static final short DEFAULT_SHARD_BATCH_SIZE = 2000; + public static final String PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY = + "cluster.routing.allocation.shards_batch_gateway_allocator.primary_allocator_timeout"; + public static final String REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY = + "cluster.routing.allocation.shards_batch_gateway_allocator.replica_allocator_timeout"; + + private TimeValue primaryShardsBatchGatewayAllocatorTimeout; + private TimeValue replicaShardsBatchGatewayAllocatorTimeout; + public static final TimeValue MIN_ALLOCATOR_TIMEOUT = TimeValue.timeValueSeconds(20); + /** * Number of shards we send in one batch to data nodes for fetching metadata */ @@ -79,6 +93,54 @@ public class ShardsBatchGatewayAllocator implements ExistingShardsAllocator { Setting.Property.NodeScope ); + /** + * Timeout for existing primary shards batch allocator. 
+ * Timeout value must be greater than or equal to 20s or -1ms to effectively disable timeout + */ + public static final Setting PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING = Setting.timeSetting( + PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, + TimeValue.MINUS_ONE, + TimeValue.MINUS_ONE, + new Setting.Validator<>() { + @Override + public void validate(TimeValue timeValue) { + if (timeValue.compareTo(MIN_ALLOCATOR_TIMEOUT) < 0 && timeValue.compareTo(TimeValue.MINUS_ONE) != 0) { + throw new IllegalArgumentException( + "Setting [" + + PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey() + + "] should be more than 20s or -1ms to disable timeout" + ); + } + } + }, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + /** + * Timeout for existing replica shards batch allocator. + * Timeout value must be greater than or equal to 20s or -1ms to effectively disable timeout + */ + public static final Setting REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING = Setting.timeSetting( + REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, + TimeValue.MINUS_ONE, + TimeValue.MINUS_ONE, + new Setting.Validator<>() { + @Override + public void validate(TimeValue timeValue) { + if (timeValue.compareTo(MIN_ALLOCATOR_TIMEOUT) < 0 && timeValue.compareTo(TimeValue.MINUS_ONE) != 0) { + throw new IllegalArgumentException( + "Setting [" + + REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey() + + "] should be more than 20s or -1ms to disable timeout" + ); + } + } + }, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + private final RerouteService rerouteService; private final PrimaryShardBatchAllocator primaryShardBatchAllocator; private final ReplicaShardBatchAllocator replicaShardBatchAllocator; @@ -97,7 +159,8 @@ public ShardsBatchGatewayAllocator( RerouteService rerouteService, TransportNodesListGatewayStartedShardsBatch batchStartedAction, TransportNodesListShardStoreMetadataBatch batchStoreAction, - Settings settings + Settings settings, + ClusterSettings clusterSettings ) { this.rerouteService = rerouteService; this.primaryShardBatchAllocator = new InternalPrimaryBatchShardAllocator(); @@ -105,6 +168,10 @@ public ShardsBatchGatewayAllocator( this.batchStartedAction = batchStartedAction; this.batchStoreAction = batchStoreAction; this.maxBatchSize = GATEWAY_ALLOCATOR_BATCH_SIZE.get(settings); + this.primaryShardsBatchGatewayAllocatorTimeout = PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING, this::setPrimaryBatchAllocatorTimeout); + this.replicaShardsBatchGatewayAllocatorTimeout = REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING, this::setReplicaBatchAllocatorTimeout); } @Override @@ -127,7 +194,10 @@ protected ShardsBatchGatewayAllocator(long batchSize) { this.batchStoreAction = null; this.replicaShardBatchAllocator = null; this.maxBatchSize = batchSize; + this.primaryShardsBatchGatewayAllocatorTimeout = null; + this.replicaShardsBatchGatewayAllocatorTimeout = null; } + // for tests @Override @@ -187,14 +257,14 @@ public void allocateUnassigned( } @Override - public void allocateAllUnassignedShards(final RoutingAllocation allocation, boolean primary) { + public BatchRunnableExecutor allocateAllUnassignedShards(final RoutingAllocation allocation, boolean primary) { assert primaryShardBatchAllocator != null; assert replicaShardBatchAllocator != null; - innerAllocateUnassignedBatch(allocation, primaryShardBatchAllocator, 
replicaShardBatchAllocator, primary); + return innerAllocateUnassignedBatch(allocation, primaryShardBatchAllocator, replicaShardBatchAllocator, primary); } - protected void innerAllocateUnassignedBatch( + protected BatchRunnableExecutor innerAllocateUnassignedBatch( RoutingAllocation allocation, PrimaryShardBatchAllocator primaryBatchShardAllocator, ReplicaShardBatchAllocator replicaBatchShardAllocator, @@ -203,20 +273,55 @@ protected void innerAllocateUnassignedBatch( // create batches for unassigned shards Set batchesToAssign = createAndUpdateBatches(allocation, primary); if (batchesToAssign.isEmpty()) { - return; + return null; } + List runnables = new ArrayList<>(); if (primary) { + Set timedOutPrimaryShardIds = new HashSet<>(); batchIdToStartedShardBatch.values() .stream() .filter(batch -> batchesToAssign.contains(batch.batchId)) - .forEach( - shardsBatch -> primaryBatchShardAllocator.allocateUnassignedBatch(shardsBatch.getBatchedShardRoutings(), allocation) - ); + .forEach(shardsBatch -> runnables.add(new TimeoutAwareRunnable() { + @Override + public void onTimeout() { + timedOutPrimaryShardIds.addAll(shardsBatch.getBatchedShards()); + } + + @Override + public void run() { + primaryBatchShardAllocator.allocateUnassignedBatch(shardsBatch.getBatchedShardRoutings(), allocation); + } + })); + return new BatchRunnableExecutor(runnables, () -> primaryShardsBatchGatewayAllocatorTimeout) { + @Override + public void onComplete() { + logger.trace("Triggering oncomplete after timeout for [{}] primary shards", timedOutPrimaryShardIds.size()); + primaryBatchShardAllocator.allocateUnassignedBatchOnTimeout(timedOutPrimaryShardIds, allocation, true); + } + }; } else { + Set timedOutReplicaShardIds = new HashSet<>(); batchIdToStoreShardBatch.values() .stream() .filter(batch -> batchesToAssign.contains(batch.batchId)) - .forEach(batch -> replicaBatchShardAllocator.allocateUnassignedBatch(batch.getBatchedShardRoutings(), allocation)); + .forEach(batch -> runnables.add(new TimeoutAwareRunnable() { + @Override + public void onTimeout() { + timedOutReplicaShardIds.addAll(batch.getBatchedShards()); + } + + @Override + public void run() { + replicaBatchShardAllocator.allocateUnassignedBatch(batch.getBatchedShardRoutings(), allocation); + } + })); + return new BatchRunnableExecutor(runnables, () -> replicaShardsBatchGatewayAllocatorTimeout) { + @Override + public void onComplete() { + logger.trace("Triggering oncomplete after timeout for [{}] replica shards", timedOutReplicaShardIds.size()); + replicaBatchShardAllocator.allocateUnassignedBatchOnTimeout(timedOutReplicaShardIds, allocation, false); + } + }; } } @@ -516,8 +621,37 @@ protected AsyncShardFetch.FetchResult manifestV2Builder(fields).build() ); - private static final ConstructingObjectParser CURRENT_PARSER = PARSER_V2; + private static final ConstructingObjectParser PARSER_V3 = new ConstructingObjectParser<>( + "cluster_metadata_manifest", + fields -> manifestV3Builder(fields).build() + ); + + private static final ConstructingObjectParser CURRENT_PARSER = PARSER_V3; static { declareParser(PARSER_V0, CODEC_V0); declareParser(PARSER_V1, CODEC_V1); declareParser(PARSER_V2, CODEC_V2); + declareParser(PARSER_V3, CODEC_V3); } private static void declareParser(ConstructingObjectParser parser, long codec_version) { @@ -243,7 +255,7 @@ private static void declareParser(ConstructingObjectParser UploadedIndexMetadata.fromXContent(p), + (p, c) -> UploadedIndexMetadata.fromXContent(p, codec_version), INDICES_FIELD ); 
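The timeout settings registered above accept -1ms as "disabled" and otherwise require at least 20s. A standalone mirror of that validator, using java.time.Duration in place of TimeValue:

import java.time.Duration;

public class TimeoutValidatorSketch {
    static void validate(Duration value) {
        Duration min = Duration.ofSeconds(20);
        Duration disabled = Duration.ofMillis(-1);
        if (value.compareTo(min) < 0 && value.compareTo(disabled) != 0) {
            throw new IllegalArgumentException("should be more than 20s or -1ms to disable timeout");
        }
    }

    public static void main(String[] args) {
        validate(Duration.ofSeconds(30)); // ok
        validate(Duration.ofMillis(-1));  // ok, disables the timeout
        try {
            validate(Duration.ofSeconds(5));
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}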
parser.declareString(ConstructingObjectParser.constructorArg(), PREVIOUS_CLUSTER_UUID); @@ -277,7 +289,7 @@ private static void declareParser(ConstructingObjectParser UploadedIndexMetadata.fromXContent(p), + (p, c) -> UploadedIndexMetadata.fromXContent(p, codec_version), INDICES_ROUTING_FIELD ); parser.declareNamedObject( @@ -308,7 +320,7 @@ private static void declareParser(ConstructingObjectParser ClusterStateDiffManifest.fromXContent(p), + (p, c) -> ClusterStateDiffManifest.fromXContent(p, codec_version), DIFF_MANIFEST ); } @@ -1112,16 +1124,30 @@ private static String componentPrefix(Object[] fields) { return (String) fields[3]; } - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + private static final ConstructingObjectParser PARSER_V0 = new ConstructingObjectParser<>( + "uploaded_index_metadata", + fields -> new UploadedIndexMetadata(indexName(fields), indexUUID(fields), uploadedFilename(fields)) + ); + + private static final ConstructingObjectParser PARSER_V2 = new ConstructingObjectParser<>( "uploaded_index_metadata", fields -> new UploadedIndexMetadata(indexName(fields), indexUUID(fields), uploadedFilename(fields), componentPrefix(fields)) ); + private static final ConstructingObjectParser CURRENT_PARSER = PARSER_V2; + static { - PARSER.declareString(ConstructingObjectParser.constructorArg(), INDEX_NAME_FIELD); - PARSER.declareString(ConstructingObjectParser.constructorArg(), INDEX_UUID_FIELD); - PARSER.declareString(ConstructingObjectParser.constructorArg(), UPLOADED_FILENAME_FIELD); - PARSER.declareString(ConstructingObjectParser.constructorArg(), COMPONENT_PREFIX_FIELD); + declareParser(PARSER_V0, CODEC_V0); + declareParser(PARSER_V2, CODEC_V2); + } + + private static void declareParser(ConstructingObjectParser parser, long codec_version) { + parser.declareString(ConstructingObjectParser.constructorArg(), INDEX_NAME_FIELD); + parser.declareString(ConstructingObjectParser.constructorArg(), INDEX_UUID_FIELD); + parser.declareString(ConstructingObjectParser.constructorArg(), UPLOADED_FILENAME_FIELD); + if (codec_version >= CODEC_V2) { + parser.declareString(ConstructingObjectParser.constructorArg(), COMPONENT_PREFIX_FIELD); + } } static final String COMPONENT_PREFIX = "index--"; @@ -1130,15 +1156,32 @@ private static String componentPrefix(Object[] fields) { private final String indexUUID; private final String uploadedFilename; + private long codecVersion = CODEC_V2; + public UploadedIndexMetadata(String indexName, String indexUUID, String uploadedFileName) { - this(indexName, indexUUID, uploadedFileName, COMPONENT_PREFIX); + this(indexName, indexUUID, uploadedFileName, CODEC_V2); + } + + public UploadedIndexMetadata(String indexName, String indexUUID, String uploadedFileName, long codecVersion) { + this(indexName, indexUUID, uploadedFileName, COMPONENT_PREFIX, codecVersion); } public UploadedIndexMetadata(String indexName, String indexUUID, String uploadedFileName, String componentPrefix) { + this(indexName, indexUUID, uploadedFileName, componentPrefix, CODEC_V2); + } + + public UploadedIndexMetadata( + String indexName, + String indexUUID, + String uploadedFileName, + String componentPrefix, + long codecVersion + ) { this.componentPrefix = componentPrefix; this.indexName = indexName; this.indexUUID = indexUUID; this.uploadedFilename = uploadedFileName; + this.codecVersion = codecVersion; } public UploadedIndexMetadata(StreamInput in) throws IOException { @@ -1175,10 +1218,13 @@ public String getComponentPrefix() { @Override public 
XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder.field(INDEX_NAME_FIELD.getPreferredName(), getIndexName()) + builder.field(INDEX_NAME_FIELD.getPreferredName(), getIndexName()) .field(INDEX_UUID_FIELD.getPreferredName(), getIndexUUID()) - .field(UPLOADED_FILENAME_FIELD.getPreferredName(), getUploadedFilePath()) - .field(COMPONENT_PREFIX_FIELD.getPreferredName(), getComponentPrefix()); + .field(UPLOADED_FILENAME_FIELD.getPreferredName(), getUploadedFilePath()); + if (codecVersion >= CODEC_V2) { + builder.field(COMPONENT_PREFIX_FIELD.getPreferredName(), getComponentPrefix()); + } + return builder; } @Override @@ -1214,9 +1260,13 @@ public String toString() { return Strings.toString(MediaTypeRegistry.JSON, this); } - public static UploadedIndexMetadata fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); + public static UploadedIndexMetadata fromXContent(XContentParser parser, long codecVersion) throws IOException { + if (codecVersion >= CODEC_V2) { + return CURRENT_PARSER.parse(parser, null); + } + return PARSER_V0.parse(parser, null); } + } /** diff --git a/server/src/main/java/org/opensearch/gateway/remote/ClusterStateDiffManifest.java b/server/src/main/java/org/opensearch/gateway/remote/ClusterStateDiffManifest.java index 65ae2675a95da..5d378aa0983e3 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/ClusterStateDiffManifest.java +++ b/server/src/main/java/org/opensearch/gateway/remote/ClusterStateDiffManifest.java @@ -12,7 +12,6 @@ import org.opensearch.cluster.DiffableUtils; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; -import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -25,14 +24,15 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; import static org.opensearch.cluster.DiffableUtils.NonDiffableValueSerializer.getAbstractInstance; import static org.opensearch.cluster.DiffableUtils.getStringKeySerializer; -import static org.opensearch.cluster.routing.remote.RemoteRoutingTableService.CUSTOM_ROUTING_TABLE_VALUE_SERIALIZER; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.opensearch.gateway.remote.ClusterMetadataManifest.CODEC_V3; /** * Manifest of diff between two cluster states @@ -52,6 +52,7 @@ public class ClusterStateDiffManifest implements ToXContentFragment, Writeable { private static final String METADATA_CUSTOM_DIFF_FIELD = "metadata_custom_diff"; private static final String UPSERTS_FIELD = "upserts"; private static final String DELETES_FIELD = "deletes"; + private static final String DIFF_FIELD = "diff"; private static final String CLUSTER_BLOCKS_UPDATED_FIELD = "cluster_blocks_diff"; private static final String DISCOVERY_NODES_UPDATED_FIELD = "discovery_nodes_diff"; private static final String ROUTING_TABLE_DIFF = "routing_table_diff"; @@ -69,13 +70,12 @@ public class ClusterStateDiffManifest implements ToXContentFragment, Writeable { private final List customMetadataDeleted; private final boolean clusterBlocksUpdated; private final boolean discoveryNodesUpdated; - private final List indicesRoutingUpdated; - private final List indicesRoutingDeleted; + private String indicesRoutingDiffPath; 
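A compact sketch of the codec-gating convention the UploadedIndexMetadata hunks above adopt: a field added in a later codec is registered, written, and parsed only when the manifest's codec version is new enough. The toy map model below stands in for ConstructingObjectParser and XContentBuilder; the field names mirror the diff, everything else is illustrative:

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Toy model of codec-gated fields: V0 manifests lack component_prefix, V2+ carry it.
class UploadedIndexMetadataCodec {
    static final long CODEC_V0 = 0L;
    static final long CODEC_V2 = 2L;

    // Mirrors declareParser(parser, codecVersion): fields registered per codec version.
    static List<String> declaredFields(long codecVersion) {
        List<String> fields = new ArrayList<>(List.of("index_name", "index_uuid", "uploaded_filename"));
        if (codecVersion >= CODEC_V2) {
            fields.add("component_prefix"); // only present from CODEC_V2 onwards
        }
        return fields;
    }

    // Mirrors the gated toXContent: older codecs must not emit the newer field.
    static Map<String, String> toMap(String name, String uuid, String file, String prefix, long codecVersion) {
        Map<String, String> out = new LinkedHashMap<>();
        out.put("index_name", name);
        out.put("index_uuid", uuid);
        out.put("uploaded_filename", file);
        if (codecVersion >= CODEC_V2) {
            out.put("component_prefix", prefix);
        }
        return out;
    }
}

The same dispatch appears in fromXContent(parser, codecVersion): manifests at CODEC_V2 or later go through CURRENT_PARSER, while older ones fall back to PARSER_V0, which never declared component_prefix.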
private final boolean hashesOfConsistentSettingsUpdated; private final List clusterStateCustomUpdated; private final List clusterStateCustomDeleted; - public ClusterStateDiffManifest(ClusterState state, ClusterState previousState) { + public ClusterStateDiffManifest(ClusterState state, ClusterState previousState, String indicesRoutingDiffPath) { fromStateUUID = previousState.stateUUID(); toStateUUID = state.stateUUID(); coordinationMetadataUpdated = !Metadata.isCoordinationMetadataEqual(state.metadata(), previousState.metadata()); @@ -101,18 +101,7 @@ public ClusterStateDiffManifest(ClusterState state, ClusterState previousState) customMetadataUpdated = new ArrayList<>(customDiff.getDiffs().keySet()); customMetadataUpdated.addAll(customDiff.getUpserts().keySet()); customMetadataDeleted = customDiff.getDeletes(); - - DiffableUtils.MapDiff> routingTableDiff = DiffableUtils.diff( - previousState.getRoutingTable().getIndicesRouting(), - state.getRoutingTable().getIndicesRouting(), - DiffableUtils.getStringKeySerializer(), - CUSTOM_ROUTING_TABLE_VALUE_SERIALIZER - ); - - indicesRoutingUpdated = new ArrayList<>(); - routingTableDiff.getUpserts().forEach((k, v) -> indicesRoutingUpdated.add(k)); - - indicesRoutingDeleted = routingTableDiff.getDeletes(); + this.indicesRoutingDiffPath = indicesRoutingDiffPath; hashesOfConsistentSettingsUpdated = !state.metadata() .hashesOfConsistentSettings() .equals(previousState.metadata().hashesOfConsistentSettings()); @@ -140,8 +129,7 @@ public ClusterStateDiffManifest( List indicesDeleted, boolean clusterBlocksUpdated, boolean discoveryNodesUpdated, - List indicesRoutingUpdated, - List indicesRoutingDeleted, + String indicesRoutingDiffPath, boolean hashesOfConsistentSettingsUpdated, List clusterStateCustomUpdated, List clusterStateCustomDeleted @@ -152,17 +140,16 @@ public ClusterStateDiffManifest( this.settingsMetadataUpdated = settingsMetadataUpdated; this.transientSettingsMetadataUpdated = transientSettingsMetadataUpdate; this.templatesMetadataUpdated = templatesMetadataUpdated; - this.customMetadataUpdated = customMetadataUpdated; - this.customMetadataDeleted = customMetadataDeleted; - this.indicesUpdated = indicesUpdated; - this.indicesDeleted = indicesDeleted; + this.customMetadataUpdated = Collections.unmodifiableList(customMetadataUpdated); + this.customMetadataDeleted = Collections.unmodifiableList(customMetadataDeleted); + this.indicesUpdated = Collections.unmodifiableList(indicesUpdated); + this.indicesDeleted = Collections.unmodifiableList(indicesDeleted); this.clusterBlocksUpdated = clusterBlocksUpdated; this.discoveryNodesUpdated = discoveryNodesUpdated; - this.indicesRoutingUpdated = indicesRoutingUpdated; - this.indicesRoutingDeleted = indicesRoutingDeleted; this.hashesOfConsistentSettingsUpdated = hashesOfConsistentSettingsUpdated; - this.clusterStateCustomUpdated = clusterStateCustomUpdated; - this.clusterStateCustomDeleted = clusterStateCustomDeleted; + this.clusterStateCustomUpdated = Collections.unmodifiableList(clusterStateCustomUpdated); + this.clusterStateCustomDeleted = Collections.unmodifiableList(clusterStateCustomDeleted); + this.indicesRoutingDiffPath = indicesRoutingDiffPath; } public ClusterStateDiffManifest(StreamInput in) throws IOException { @@ -178,11 +165,10 @@ public ClusterStateDiffManifest(StreamInput in) throws IOException { this.customMetadataDeleted = in.readStringList(); this.clusterBlocksUpdated = in.readBoolean(); this.discoveryNodesUpdated = in.readBoolean(); - this.indicesRoutingUpdated = in.readStringList(); - 
this.indicesRoutingDeleted = in.readStringList(); this.hashesOfConsistentSettingsUpdated = in.readBoolean(); this.clusterStateCustomUpdated = in.readStringList(); this.clusterStateCustomDeleted = in.readStringList(); + this.indicesRoutingDiffPath = in.readOptionalString(); } @Override @@ -226,16 +212,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(DISCOVERY_NODES_UPDATED_FIELD, discoveryNodesUpdated); builder.startObject(ROUTING_TABLE_DIFF); - builder.startArray(UPSERTS_FIELD); - for (String index : indicesRoutingUpdated) { - builder.value(index); + if (indicesRoutingDiffPath != null) { + builder.field(DIFF_FIELD, indicesRoutingDiffPath); } - builder.endArray(); - builder.startArray(DELETES_FIELD); - for (String index : indicesRoutingDeleted) { - builder.value(index); - } - builder.endArray(); builder.endObject(); builder.startObject(CLUSTER_STATE_CUSTOM_DIFF_FIELD); builder.startArray(UPSERTS_FIELD); @@ -252,7 +231,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - public static ClusterStateDiffManifest fromXContent(XContentParser parser) throws IOException { + public static ClusterStateDiffManifest fromXContent(XContentParser parser, long codec_version) throws IOException { Builder builder = new Builder(); if (parser.currentToken() == null) { // fresh parser? move to next token parser.nextToken(); @@ -334,11 +313,10 @@ public static ClusterStateDiffManifest fromXContent(XContentParser parser) throw currentFieldName = parser.currentName(); parser.nextToken(); switch (currentFieldName) { - case UPSERTS_FIELD: - builder.indicesRoutingUpdated(convertListToString(parser.listOrderedMap())); - break; - case DELETES_FIELD: - builder.indicesRoutingDeleted(convertListToString(parser.listOrderedMap())); + case DIFF_FIELD: + if (codec_version >= CODEC_V3) { + builder.indicesRoutingDiffPath(parser.textOrNull()); + } break; default: throw new XContentParseException("Unexpected field [" + currentFieldName + "]"); @@ -451,12 +429,8 @@ public boolean isHashesOfConsistentSettingsUpdated() { return hashesOfConsistentSettingsUpdated; } - public List getIndicesRoutingUpdated() { - return indicesRoutingUpdated; - } - - public List getIndicesRoutingDeleted() { - return indicesRoutingDeleted; + public String getIndicesRoutingDiffPath() { + return indicesRoutingDiffPath; } public List getClusterStateCustomUpdated() { @@ -467,6 +441,10 @@ public List getClusterStateCustomDeleted() { return clusterStateCustomDeleted; } + public void setIndicesRoutingDiffPath(String indicesRoutingDiffPath) { + this.indicesRoutingDiffPath = indicesRoutingDiffPath; + } + @Override public boolean equals(Object o) { if (this == o) return true; @@ -485,10 +463,9 @@ public boolean equals(Object o) { && Objects.equals(customMetadataDeleted, that.customMetadataDeleted) && Objects.equals(indicesUpdated, that.indicesUpdated) && Objects.equals(indicesDeleted, that.indicesDeleted) - && Objects.equals(indicesRoutingUpdated, that.indicesRoutingUpdated) - && Objects.equals(indicesRoutingDeleted, that.indicesRoutingDeleted) && Objects.equals(clusterStateCustomUpdated, that.clusterStateCustomUpdated) - && Objects.equals(clusterStateCustomDeleted, that.clusterStateCustomDeleted); + && Objects.equals(clusterStateCustomDeleted, that.clusterStateCustomDeleted) + && Objects.equals(indicesRoutingDiffPath, that.indicesRoutingDiffPath); } @Override @@ -506,11 +483,10 @@ public int hashCode() { indicesDeleted, clusterBlocksUpdated, 
discoveryNodesUpdated, - indicesRoutingUpdated, - indicesRoutingDeleted, hashesOfConsistentSettingsUpdated, clusterStateCustomUpdated, - clusterStateCustomDeleted + clusterStateCustomDeleted, + indicesRoutingDiffPath ); } @@ -532,11 +508,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringCollection(customMetadataDeleted); out.writeBoolean(clusterBlocksUpdated); out.writeBoolean(discoveryNodesUpdated); - out.writeStringCollection(indicesRoutingUpdated); - out.writeStringCollection(indicesRoutingDeleted); out.writeBoolean(hashesOfConsistentSettingsUpdated); out.writeStringCollection(clusterStateCustomUpdated); out.writeStringCollection(clusterStateCustomDeleted); + out.writeOptionalString(indicesRoutingDiffPath); } /** @@ -557,13 +532,19 @@ public static class Builder { private List indicesDeleted; private boolean clusterBlocksUpdated; private boolean discoveryNodesUpdated; - private List indicesRoutingUpdated; - private List indicesRoutingDeleted; + private String indicesRoutingDiff; private boolean hashesOfConsistentSettingsUpdated; private List clusterStateCustomUpdated; private List clusterStateCustomDeleted; - public Builder() {} + public Builder() { + customMetadataUpdated = Collections.emptyList(); + customMetadataDeleted = Collections.emptyList(); + indicesUpdated = Collections.emptyList(); + indicesDeleted = Collections.emptyList(); + clusterStateCustomUpdated = Collections.emptyList(); + clusterStateCustomDeleted = Collections.emptyList(); + } public Builder fromStateUUID(String fromStateUUID) { this.fromStateUUID = fromStateUUID; @@ -630,13 +611,8 @@ public Builder discoveryNodesUpdated(boolean discoveryNodesUpdated) { return this; } - public Builder indicesRoutingUpdated(List indicesRoutingUpdated) { - this.indicesRoutingUpdated = indicesRoutingUpdated; - return this; - } - - public Builder indicesRoutingDeleted(List indicesRoutingDeleted) { - this.indicesRoutingDeleted = indicesRoutingDeleted; + public Builder indicesRoutingDiffPath(String indicesRoutingDiffPath) { + this.indicesRoutingDiff = indicesRoutingDiffPath; return this; } @@ -664,8 +640,7 @@ public ClusterStateDiffManifest build() { indicesDeleted, clusterBlocksUpdated, discoveryNodesUpdated, - indicesRoutingUpdated, - indicesRoutingDeleted, + indicesRoutingDiff, hashesOfConsistentSettingsUpdated, clusterStateCustomUpdated, clusterStateCustomDeleted diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManager.java index 8f986423587d7..877e2585cb1eb 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManager.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManager.java @@ -8,17 +8,15 @@ package org.opensearch.gateway.remote; -import org.opensearch.action.LatchedActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.DiffableUtils; import org.opensearch.cluster.DiffableUtils.NonDiffableValueSerializer; -import org.opensearch.common.CheckedRunnable; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; -import org.opensearch.common.remote.RemoteWritableEntityStore; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; +import org.opensearch.common.remote.AbstractRemoteWritableEntityManager; +import org.opensearch.common.remote.RemoteWriteableEntityBlobStore; import org.opensearch.core.action.ActionListener; 
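The next hunks fold RemoteClusterStateAttributesManager's hand-rolled store plumbing into the new AbstractRemoteWritableEntityManager base. A hedged sketch of the registry shape it converges on, with simplified stand-in types (only the map-plus-lookup idea is taken from the diff):

import java.util.HashMap;
import java.util.Map;

// Stand-in for a per-entity-type blob store; the real one exposes async read/write.
interface EntityStore {
    void writeAsync(Object entity);
    void readAsync(Object entity);
}

// Subclasses register one store per entity type in their constructor;
// the base class resolves stores by type and fails fast on unknown keys.
abstract class AbstractEntityManager {
    protected final Map<String, EntityStore> remoteWritableEntityStores = new HashMap<>();

    protected EntityStore getStore(String entityType) {
        EntityStore store = remoteWritableEntityStores.get(entityType);
        if (store == null) {
            throw new IllegalArgumentException("Unknown entity type [" + entityType + "]");
        }
        return store;
    }
}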
import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.gateway.remote.model.RemoteClusterBlocks; -import org.opensearch.gateway.remote.model.RemoteClusterStateBlobStore; import org.opensearch.gateway.remote.model.RemoteClusterStateCustoms; import org.opensearch.gateway.remote.model.RemoteDiscoveryNodes; import org.opensearch.gateway.remote.model.RemoteReadResult; @@ -26,23 +24,19 @@ import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.threadpool.ThreadPool; -import java.io.IOException; import java.util.Collections; -import java.util.HashMap; import java.util.Map; /** - * A Manager which provides APIs to upload and download attributes of ClusterState to the {@link RemoteClusterStateBlobStore} + * A Manager which provides APIs to upload and download attributes of ClusterState to the {@link RemoteWriteableEntityBlobStore} * * @opensearch.internal */ -public class RemoteClusterStateAttributesManager { +public class RemoteClusterStateAttributesManager extends AbstractRemoteWritableEntityManager { public static final String CLUSTER_STATE_ATTRIBUTE = "cluster_state_attribute"; public static final String DISCOVERY_NODES = "nodes"; public static final String CLUSTER_BLOCKS = "blocks"; public static final int CLUSTER_STATE_ATTRIBUTES_CURRENT_CODEC_VERSION = 1; - private final Map remoteWritableEntityStores; - private final NamedWriteableRegistry namedWriteableRegistry; RemoteClusterStateAttributesManager( String clusterName, @@ -51,80 +45,63 @@ public class RemoteClusterStateAttributesManager { NamedWriteableRegistry namedWriteableRegistry, ThreadPool threadpool ) { - this.namedWriteableRegistry = namedWriteableRegistry; - this.remoteWritableEntityStores = new HashMap<>(); this.remoteWritableEntityStores.put( RemoteDiscoveryNodes.DISCOVERY_NODES, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); this.remoteWritableEntityStores.put( RemoteClusterBlocks.CLUSTER_BLOCKS, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); this.remoteWritableEntityStores.put( RemoteClusterStateCustoms.CLUSTER_STATE_CUSTOM, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); } - /** - * Allows async upload of Cluster State Attribute components to remote - */ - CheckedRunnable getAsyncMetadataWriteAction( + @Override + protected ActionListener getWrappedWriteListener( String component, - AbstractRemoteWritableBlobEntity blobEntity, - LatchedActionListener latchedActionListener - ) { - return () -> getStore(blobEntity).writeAsync(blobEntity, getActionListener(component, blobEntity, latchedActionListener)); - } - - private ActionListener getActionListener( - String component, - AbstractRemoteWritableBlobEntity remoteObject, - LatchedActionListener latchedActionListener + AbstractClusterMetadataWriteableBlobEntity remoteEntity, + ActionListener listener ) { return ActionListener.wrap( 
- resp -> latchedActionListener.onResponse(remoteObject.getUploadedMetadata()), - ex -> latchedActionListener.onFailure(new RemoteStateTransferException(component, remoteObject, ex)) + resp -> listener.onResponse(remoteEntity.getUploadedMetadata()), + ex -> listener.onFailure(new RemoteStateTransferException("Upload failed for " + component, remoteEntity, ex)) ); } - private RemoteWritableEntityStore getStore(AbstractRemoteWritableBlobEntity entity) { - RemoteWritableEntityStore remoteStore = remoteWritableEntityStores.get(entity.getType()); - if (remoteStore == null) { - throw new IllegalArgumentException("Unknown entity type [" + entity.getType() + "]"); - } - return remoteStore; - } - - public CheckedRunnable getAsyncMetadataReadAction( + @Override + protected ActionListener getWrappedReadListener( String component, - AbstractRemoteWritableBlobEntity blobEntity, - LatchedActionListener listener + AbstractClusterMetadataWriteableBlobEntity remoteEntity, + ActionListener listener ) { - final ActionListener actionListener = ActionListener.wrap( + return ActionListener.wrap( response -> listener.onResponse(new RemoteReadResult(response, CLUSTER_STATE_ATTRIBUTE, component)), - listener::onFailure + ex -> listener.onFailure(new RemoteStateTransferException("Download failed for " + component, remoteEntity, ex)) ); - return () -> getStore(blobEntity).readAsync(blobEntity, actionListener); } public DiffableUtils.MapDiff> getUpdatedCustoms( @@ -158,4 +135,5 @@ public DiffableUtils.MapDiff SKIP_CLEANUP_STATE_CHANGES) { logger.info( "Cleaning up stale remote state files for cluster [{}] with uuid [{}]. Last clean was done before {} updates", @@ -179,6 +179,7 @@ void deleteClusterMetadata( Set staleGlobalMetadataPaths = new HashSet<>(); Set staleEphemeralAttributePaths = new HashSet<>(); Set staleIndexRoutingPaths = new HashSet<>(); + Set staleIndexRoutingDiffPaths = new HashSet<>(); activeManifestBlobMetadata.forEach(blobMetadata -> { ClusterMetadataManifest clusterMetadataManifest = remoteManifestManager.fetchRemoteClusterMetadataManifest( clusterName, @@ -222,6 +223,10 @@ void deleteClusterMetadata( clusterMetadataManifest.getIndicesRouting() .forEach(uploadedIndicesRouting -> filesToKeep.add(uploadedIndicesRouting.getUploadedFilename())); } + if (clusterMetadataManifest.getDiffManifest() != null + && clusterMetadataManifest.getDiffManifest().getIndicesRoutingDiffPath() != null) { + filesToKeep.add(clusterMetadataManifest.getDiffManifest().getIndicesRoutingDiffPath()); + } }); staleManifestBlobMetadata.forEach(blobMetadata -> { ClusterMetadataManifest clusterMetadataManifest = remoteManifestManager.fetchRemoteClusterMetadataManifest( @@ -264,6 +269,18 @@ void deleteClusterMetadata( } }); } + if (clusterMetadataManifest.getDiffManifest() != null + && clusterMetadataManifest.getDiffManifest().getIndicesRoutingDiffPath() != null) { + if (!filesToKeep.contains(clusterMetadataManifest.getDiffManifest().getIndicesRoutingDiffPath())) { + staleIndexRoutingDiffPaths.add(clusterMetadataManifest.getDiffManifest().getIndicesRoutingDiffPath()); + logger.debug( + () -> new ParameterizedMessage( + "Indices routing diff paths in stale manifest: {}", + clusterMetadataManifest.getDiffManifest().getIndicesRoutingDiffPath() + ) + ); + } + } clusterMetadataManifest.getIndices().forEach(uploadedIndexMetadata -> { String fileName = RemoteClusterStateUtils.getFormattedIndexFileName(uploadedIndexMetadata.getUploadedFilename()); @@ -316,6 +333,15 @@ void deleteClusterMetadata( ); 
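Both wrapped listeners above follow one translation pattern: on success, map the raw blob-store response into the manager's result type; on failure, re-label the exception with the component name so callers can tell which upload or download broke. A self-contained sketch with stand-in types (the real listener is org.opensearch.core.action.ActionListener, and RuntimeException stands in for RemoteStateTransferException):

import java.util.function.Consumer;
import java.util.function.Function;

// Minimal ActionListener stand-in with the same wrap() helper shape.
interface Listener<T> {
    void onResponse(T response);
    void onFailure(Exception e);

    static <T> Listener<T> wrap(Consumer<T> onResponse, Consumer<Exception> onFailure) {
        return new Listener<T>() {
            @Override public void onResponse(T response) { onResponse.accept(response); }
            @Override public void onFailure(Exception e) { onFailure.accept(e); }
        };
    }
}

class WrappedListeners {
    // Success maps the raw response to uploaded metadata; failure is re-labelled
    // with the component name before being handed back to the delegate.
    static <R, M> Listener<R> wrapped(String component, Function<R, M> toMetadata, Listener<M> delegate) {
        return Listener.wrap(
            resp -> delegate.onResponse(toMetadata.apply(resp)),
            ex -> delegate.onFailure(new RuntimeException("Upload failed for " + component, ex))
        );
    }
}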
remoteStateStats.indexRoutingFilesCleanupAttemptFailed(); } + try { + remoteRoutingTableService.deleteStaleIndexRoutingDiffPaths(new ArrayList<>(staleIndexRoutingDiffPaths)); + } catch (IOException e) { + logger.error( + () -> new ParameterizedMessage("Error while deleting stale index routing diff files {}", staleIndexRoutingDiffPaths), + e + ); + remoteStateStats.indicesRoutingDiffFileCleanupAttemptFailed(); + } } catch (IllegalStateException e) { logger.error("Error while fetching Remote Cluster Metadata manifests", e); } catch (IOException e) { diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index ef14adeb42b68..447a40ae2bf3d 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -14,6 +14,7 @@ import org.opensearch.action.LatchedActionListener; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.Diff; import org.opensearch.cluster.DiffableUtils; import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.coordination.CoordinationMetadata; @@ -26,11 +27,10 @@ import org.opensearch.cluster.node.DiscoveryNodes.Builder; import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.RoutingTable; -import org.opensearch.cluster.routing.remote.InternalRemoteRoutingTableService; +import org.opensearch.cluster.routing.StringKeyDiffProvider; import org.opensearch.cluster.routing.remote.RemoteRoutingTableService; import org.opensearch.cluster.routing.remote.RemoteRoutingTableServiceFactory; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.CheckedRunnable; import org.opensearch.common.Nullable; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobStore; @@ -43,7 +43,6 @@ import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadataAttribute; @@ -59,6 +58,7 @@ import org.opensearch.gateway.remote.model.RemoteReadResult; import org.opensearch.gateway.remote.model.RemoteTemplatesMetadata; import org.opensearch.gateway.remote.model.RemoteTransientSettingsMetadata; +import org.opensearch.gateway.remote.routingtable.RemoteRoutingTableDiff; import org.opensearch.index.translog.transfer.BlobStoreTransferService; import org.opensearch.node.Node; import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; @@ -98,7 +98,6 @@ import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.UploadedMetadataResults; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.clusterUUIDContainer; -import static org.opensearch.gateway.remote.RemoteClusterStateUtils.getClusterMetadataBasePath; import static org.opensearch.gateway.remote.model.RemoteClusterStateCustoms.CLUSTER_STATE_CUSTOM; import static org.opensearch.gateway.remote.model.RemoteCoordinationMetadata.COORDINATION_METADATA; import static 
org.opensearch.gateway.remote.model.RemoteCustomMetadata.CUSTOM_DELIMITER; @@ -107,6 +106,7 @@ import static org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata.SETTING_METADATA; import static org.opensearch.gateway.remote.model.RemoteTemplatesMetadata.TEMPLATES_METADATA; import static org.opensearch.gateway.remote.model.RemoteTransientSettingsMetadata.TRANSIENT_SETTING_METADATA; +import static org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable.INDEX_ROUTING_METADATA_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled; /** @@ -146,7 +146,7 @@ public class RemoteClusterStateService implements Closeable { private final List indexMetadataUploadListeners; private BlobStoreRepository blobStoreRepository; private BlobStoreTransferService blobStoreTransferService; - private final RemoteRoutingTableService remoteRoutingTableService; + private RemoteRoutingTableService remoteRoutingTableService; private volatile TimeValue slowWriteLoggingThreshold; private final RemotePersistenceStats remoteStateStats; @@ -197,16 +197,17 @@ public RemoteClusterStateService( this.remoteStateStats = new RemotePersistenceStats(); this.namedWriteableRegistry = namedWriteableRegistry; this.indexMetadataUploadListeners = indexMetadataUploadListeners; + this.isPublicationEnabled = FeatureFlags.isEnabled(REMOTE_PUBLICATION_EXPERIMENTAL) + && RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled(settings) + && RemoteStoreNodeAttribute.isRemoteRoutingTableEnabled(settings); this.remoteRoutingTableService = RemoteRoutingTableServiceFactory.getService( repositoriesService, settings, clusterSettings, - threadPool + threadpool, + ClusterName.CLUSTER_NAME_SETTING.get(settings).value() ); - this.remoteClusterStateCleanupManager = new RemoteClusterStateCleanupManager(this, clusterService, remoteRoutingTableService); - this.isPublicationEnabled = FeatureFlags.isEnabled(REMOTE_PUBLICATION_EXPERIMENTAL) - && RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled(settings) - && RemoteStoreNodeAttribute.isRemoteRoutingTableEnabled(settings); + remoteClusterStateCleanupManager = new RemoteClusterStateCleanupManager(this, clusterService, remoteRoutingTableService); } /** @@ -236,13 +237,16 @@ public RemoteClusterStateManifestInfo writeFullMetadata(ClusterState clusterStat isPublicationEnabled, isPublicationEnabled ? 
clusterState.customs() : Collections.emptyMap(), isPublicationEnabled, - remoteRoutingTableService.getIndicesRouting(clusterState.getRoutingTable()) + remoteRoutingTableService.getIndicesRouting(clusterState.getRoutingTable()), + null ); + + ClusterStateDiffManifest clusterStateDiffManifest = new ClusterStateDiffManifest(clusterState, ClusterState.EMPTY_STATE, null); final RemoteClusterStateManifestInfo manifestDetails = remoteManifestManager.uploadManifest( clusterState, uploadedMetadataResults, previousClusterUUID, - new ClusterStateDiffManifest(clusterState, ClusterState.EMPTY_STATE), + clusterStateDiffManifest, false ); @@ -259,7 +263,7 @@ public RemoteClusterStateManifestInfo writeFullMetadata(ClusterState clusterStat uploadedMetadataResults.uploadedIndicesRoutingMetadata.size() ); } else { - logger.info( + logger.debug( "writing cluster state took [{}ms]; " + "wrote full state with [{}] indices, [{}] indicesRouting and global metadata", durationMillis, uploadedMetadataResults.uploadedIndexMetadata.size(), @@ -332,10 +336,19 @@ public RemoteClusterStateManifestInfo writeIncrementalMetadata( indicesToBeDeletedFromRemote.remove(indexMetadata.getIndex().getName()); } - final DiffableUtils.MapDiff> routingTableDiff = remoteRoutingTableService - .getIndicesRoutingMapDiff(previousClusterState.getRoutingTable(), clusterState.getRoutingTable()); final List indicesRoutingToUpload = new ArrayList<>(); - routingTableDiff.getUpserts().forEach((k, v) -> indicesRoutingToUpload.add(v)); + final List deletedIndicesRouting = new ArrayList<>(); + final StringKeyDiffProvider routingTableDiff = remoteRoutingTableService.getIndicesRoutingMapDiff( + previousClusterState.getRoutingTable(), + clusterState.getRoutingTable() + ); + if (routingTableDiff != null && routingTableDiff.provideDiff() != null) { + routingTableDiff.provideDiff() + .getDiffs() + .forEach((k, v) -> indicesRoutingToUpload.add(clusterState.getRoutingTable().index(k))); + routingTableDiff.provideDiff().getUpserts().forEach((k, v) -> indicesRoutingToUpload.add(v)); + deletedIndicesRouting.addAll(routingTableDiff.provideDiff().getDeletes()); + } UploadedMetadataResults uploadedMetadataResults; // For migration case from codec V0 or V1 to V2, we have added null check on metadata attribute files, @@ -356,7 +369,7 @@ public RemoteClusterStateManifestInfo writeIncrementalMetadata( && clusterState.getNodes().delta(previousClusterState.getNodes()).hasChanges(); final boolean updateClusterBlocks = isPublicationEnabled && !clusterState.blocks().equals(previousClusterState.blocks()); final boolean updateHashesOfConsistentSettings = isPublicationEnabled - || Metadata.isHashesOfConsistentSettingsEqual(previousClusterState.metadata(), clusterState.metadata()) == false; + && Metadata.isHashesOfConsistentSettingsEqual(previousClusterState.metadata(), clusterState.metadata()) == false; uploadedMetadataResults = writeMetadataInParallel( clusterState, @@ -371,7 +384,8 @@ public RemoteClusterStateManifestInfo writeIncrementalMetadata( updateTransientSettingsMetadata, clusterStateCustomsDiff.getUpserts(), updateHashesOfConsistentSettings, - indicesRoutingToUpload + indicesRoutingToUpload, + routingTableDiff ); // update the map if the metadata was uploaded @@ -413,14 +427,22 @@ public RemoteClusterStateManifestInfo writeIncrementalMetadata( uploadedMetadataResults.uploadedIndicesRoutingMetadata = remoteRoutingTableService.getAllUploadedIndicesRouting( previousManifest, uploadedMetadataResults.uploadedIndicesRoutingMetadata, - routingTableDiff.getDeletes() + 
deletedIndicesRouting + ); + + ClusterStateDiffManifest clusterStateDiffManifest = new ClusterStateDiffManifest( + clusterState, + previousClusterState, + uploadedMetadataResults.uploadedIndicesRoutingDiffMetadata != null + ? uploadedMetadataResults.uploadedIndicesRoutingDiffMetadata.getUploadedFilename() + : null ); final RemoteClusterStateManifestInfo manifestDetails = remoteManifestManager.uploadManifest( clusterState, uploadedMetadataResults, previousManifest.getPreviousClusterUUID(), - new ClusterStateDiffManifest(clusterState, previousClusterState), + clusterStateDiffManifest, false ); @@ -458,8 +480,8 @@ public RemoteClusterStateManifestInfo writeIncrementalMetadata( customsDiff.getUpserts().size() ); } else { - logger.info("{}; {}", clusterStateUploadTimeMessage, metadataUpdateMessage); - logger.info( + logger.debug("{}; {}", clusterStateUploadTimeMessage, metadataUpdateMessage); + logger.debug( "writing cluster state for version [{}] took [{}ms]; " + "wrote metadata for [{}] indices and skipped [{}] unchanged indices, coordination metadata updated : [{}], " + "settings metadata updated : [{}], templates metadata updated : [{}], custom metadata updated : [{}]", @@ -476,7 +498,8 @@ public RemoteClusterStateManifestInfo writeIncrementalMetadata( return manifestDetails; } - private UploadedMetadataResults writeMetadataInParallel( + // package private for testing + UploadedMetadataResults writeMetadataInParallel( ClusterState clusterState, List indexToUpload, Map prevIndexMetadataByName, @@ -489,15 +512,21 @@ private UploadedMetadataResults writeMetadataInParallel( boolean uploadTransientSettingMetadata, Map clusterStateCustomToUpload, boolean uploadHashesOfConsistentSettings, - List indicesRoutingToUpload + List indicesRoutingToUpload, + StringKeyDiffProvider routingTableDiff ) throws IOException { assert Objects.nonNull(indexMetadataUploadListeners) : "indexMetadataUploadListeners can not be null"; int totalUploadTasks = indexToUpload.size() + indexMetadataUploadListeners.size() + customToUpload.size() + (uploadCoordinationMetadata ? 1 : 0) + (uploadSettingsMetadata ? 1 : 0) + (uploadTemplateMetadata ? 1 : 0) + (uploadDiscoveryNodes ? 1 : 0) + (uploadClusterBlock ? 1 : 0) + (uploadTransientSettingMetadata ? 1 : 0) - + clusterStateCustomToUpload.size() + (uploadHashesOfConsistentSettings ? 1 : 0) + indicesRoutingToUpload.size(); + + clusterStateCustomToUpload.size() + (uploadHashesOfConsistentSettings ? 1 : 0) + indicesRoutingToUpload.size() + + ((routingTableDiff != null + && routingTableDiff.provideDiff() != null + && (!routingTableDiff.provideDiff().getDiffs().isEmpty() + || !routingTableDiff.provideDiff().getDeletes().isEmpty() + || !routingTableDiff.provideDiff().getUpserts().isEmpty())) ? 
1 : 0); CountDownLatch latch = new CountDownLatch(totalUploadTasks); - Map> uploadTasks = new ConcurrentHashMap<>(totalUploadTasks); + List uploadTasks = Collections.synchronizedList(new ArrayList<>(totalUploadTasks)); Map results = new ConcurrentHashMap<>(totalUploadTasks); List exceptionList = Collections.synchronizedList(new ArrayList<>(totalUploadTasks)); @@ -516,169 +545,168 @@ private UploadedMetadataResults writeMetadataInParallel( ); if (uploadSettingsMetadata) { - uploadTasks.put( + uploadTasks.add(SETTING_METADATA); + remoteGlobalMetadataManager.writeAsync( SETTING_METADATA, - remoteGlobalMetadataManager.getAsyncMetadataWriteAction( - new RemotePersistentSettingsMetadata( - clusterState.metadata().persistentSettings(), - clusterState.metadata().version(), - clusterState.metadata().clusterUUID(), - blobStoreRepository.getCompressor(), - blobStoreRepository.getNamedXContentRegistry() - ), - listener - ) + new RemotePersistentSettingsMetadata( + clusterState.metadata().persistentSettings(), + clusterState.metadata().version(), + clusterState.metadata().clusterUUID(), + blobStoreRepository.getCompressor(), + blobStoreRepository.getNamedXContentRegistry() + ), + listener ); } if (uploadTransientSettingMetadata) { - uploadTasks.put( + uploadTasks.add(TRANSIENT_SETTING_METADATA); + remoteGlobalMetadataManager.writeAsync( TRANSIENT_SETTING_METADATA, - remoteGlobalMetadataManager.getAsyncMetadataWriteAction( - new RemoteTransientSettingsMetadata( - clusterState.metadata().transientSettings(), - clusterState.metadata().version(), - clusterState.metadata().clusterUUID(), - blobStoreRepository.getCompressor(), - blobStoreRepository.getNamedXContentRegistry() - ), - listener - ) + new RemoteTransientSettingsMetadata( + clusterState.metadata().transientSettings(), + clusterState.metadata().version(), + clusterState.metadata().clusterUUID(), + blobStoreRepository.getCompressor(), + blobStoreRepository.getNamedXContentRegistry() + ), + listener ); } if (uploadCoordinationMetadata) { - uploadTasks.put( + uploadTasks.add(COORDINATION_METADATA); + remoteGlobalMetadataManager.writeAsync( COORDINATION_METADATA, - remoteGlobalMetadataManager.getAsyncMetadataWriteAction( - new RemoteCoordinationMetadata( - clusterState.metadata().coordinationMetadata(), - clusterState.metadata().version(), - clusterState.metadata().clusterUUID(), - blobStoreRepository.getCompressor(), - blobStoreRepository.getNamedXContentRegistry() - ), - listener - ) + new RemoteCoordinationMetadata( + clusterState.metadata().coordinationMetadata(), + clusterState.metadata().version(), + clusterState.metadata().clusterUUID(), + blobStoreRepository.getCompressor(), + blobStoreRepository.getNamedXContentRegistry() + ), + listener ); } if (uploadTemplateMetadata) { - uploadTasks.put( + uploadTasks.add(TEMPLATES_METADATA); + remoteGlobalMetadataManager.writeAsync( TEMPLATES_METADATA, - remoteGlobalMetadataManager.getAsyncMetadataWriteAction( - new RemoteTemplatesMetadata( - clusterState.metadata().templatesMetadata(), - clusterState.metadata().version(), - clusterState.metadata().clusterUUID(), - blobStoreRepository.getCompressor(), - blobStoreRepository.getNamedXContentRegistry() - ), - listener - ) + new RemoteTemplatesMetadata( + clusterState.metadata().templatesMetadata(), + clusterState.metadata().version(), + clusterState.metadata().clusterUUID(), + blobStoreRepository.getCompressor(), + blobStoreRepository.getNamedXContentRegistry() + ), + listener ); } if (uploadDiscoveryNodes) { - uploadTasks.put( - DISCOVERY_NODES, - 
remoteClusterStateAttributesManager.getAsyncMetadataWriteAction( - RemoteDiscoveryNodes.DISCOVERY_NODES, - new RemoteDiscoveryNodes( - clusterState.nodes(), - clusterState.version(), - clusterState.stateUUID(), - blobStoreRepository.getCompressor() - ), - listener - ) + uploadTasks.add(DISCOVERY_NODES); + remoteClusterStateAttributesManager.writeAsync( + RemoteDiscoveryNodes.DISCOVERY_NODES, + new RemoteDiscoveryNodes( + clusterState.nodes(), + clusterState.version(), + clusterState.metadata().clusterUUID(), + blobStoreRepository.getCompressor() + ), + listener ); } if (uploadClusterBlock) { - uploadTasks.put( - CLUSTER_BLOCKS, - remoteClusterStateAttributesManager.getAsyncMetadataWriteAction( - RemoteClusterBlocks.CLUSTER_BLOCKS, - new RemoteClusterBlocks( - clusterState.blocks(), - clusterState.version(), - clusterState.metadata().clusterUUID(), - blobStoreRepository.getCompressor() - ), - listener - ) + uploadTasks.add(CLUSTER_BLOCKS); + remoteClusterStateAttributesManager.writeAsync( + RemoteClusterBlocks.CLUSTER_BLOCKS, + new RemoteClusterBlocks( + clusterState.blocks(), + clusterState.version(), + clusterState.metadata().clusterUUID(), + blobStoreRepository.getCompressor() + ), + listener ); } if (uploadHashesOfConsistentSettings) { - uploadTasks.put( + uploadTasks.add(HASHES_OF_CONSISTENT_SETTINGS); + remoteGlobalMetadataManager.writeAsync( HASHES_OF_CONSISTENT_SETTINGS, - remoteGlobalMetadataManager.getAsyncMetadataWriteAction( - new RemoteHashesOfConsistentSettings( - (DiffableStringMap) clusterState.metadata().hashesOfConsistentSettings(), - clusterState.metadata().version(), - clusterState.metadata().clusterUUID(), - blobStoreRepository.getCompressor() - ), - listener - ) + new RemoteHashesOfConsistentSettings( + (DiffableStringMap) clusterState.metadata().hashesOfConsistentSettings(), + clusterState.metadata().version(), + clusterState.metadata().clusterUUID(), + blobStoreRepository.getCompressor() + ), + listener ); } customToUpload.forEach((key, value) -> { String customComponent = String.join(CUSTOM_DELIMITER, CUSTOM_METADATA, key); - uploadTasks.put( + uploadTasks.add(customComponent); + remoteGlobalMetadataManager.writeAsync( customComponent, - remoteGlobalMetadataManager.getAsyncMetadataWriteAction( - new RemoteCustomMetadata( - value, - key, - clusterState.metadata().version(), - clusterState.metadata().clusterUUID(), - blobStoreRepository.getCompressor(), - namedWriteableRegistry - ), - listener - ) + new RemoteCustomMetadata( + value, + key, + clusterState.metadata().version(), + clusterState.metadata().clusterUUID(), + blobStoreRepository.getCompressor(), + namedWriteableRegistry + ), + listener ); }); indexToUpload.forEach(indexMetadata -> { - uploadTasks.put( + uploadTasks.add(indexMetadata.getIndex().getName()); + remoteIndexMetadataManager.writeAsync( indexMetadata.getIndex().getName(), - remoteIndexMetadataManager.getAsyncIndexMetadataWriteAction(indexMetadata, clusterState.metadata().clusterUUID(), listener) + new RemoteIndexMetadata( + indexMetadata, + clusterState.metadata().clusterUUID(), + blobStoreRepository.getCompressor(), + blobStoreRepository.getNamedXContentRegistry() + ), + listener ); }); clusterStateCustomToUpload.forEach((key, value) -> { - uploadTasks.put( - key, - remoteClusterStateAttributesManager.getAsyncMetadataWriteAction( - CLUSTER_STATE_CUSTOM, - new RemoteClusterStateCustoms( - value, - key, - clusterState.version(), - clusterState.metadata().clusterUUID(), - blobStoreRepository.getCompressor(), - namedWriteableRegistry - ), - listener - 
) + uploadTasks.add(key); + remoteClusterStateAttributesManager.writeAsync( + CLUSTER_STATE_CUSTOM, + new RemoteClusterStateCustoms( + value, + key, + clusterState.version(), + clusterState.metadata().clusterUUID(), + blobStoreRepository.getCompressor(), + namedWriteableRegistry + ), + listener ); }); indicesRoutingToUpload.forEach(indexRoutingTable -> { - uploadTasks.put( - InternalRemoteRoutingTableService.INDEX_ROUTING_METADATA_PREFIX + indexRoutingTable.getIndex().getName(), - remoteRoutingTableService.getIndexRoutingAsyncAction( - clusterState, - indexRoutingTable, - listener, - getClusterMetadataBasePath( - blobStoreRepository, - clusterState.getClusterName().value(), - clusterState.metadata().clusterUUID() - ) - ) + uploadTasks.add(INDEX_ROUTING_METADATA_PREFIX + indexRoutingTable.getIndex().getName()); + remoteRoutingTableService.getAsyncIndexRoutingWriteAction( + clusterState.metadata().clusterUUID(), + clusterState.term(), + clusterState.version(), + indexRoutingTable, + listener ); }); - - // start async upload of all required metadata files - for (CheckedRunnable uploadTask : uploadTasks.values()) { - uploadTask.run(); + if (routingTableDiff != null + && routingTableDiff.provideDiff() != null + && (!routingTableDiff.provideDiff().getDiffs().isEmpty() + || !routingTableDiff.provideDiff().getDeletes().isEmpty() + || !routingTableDiff.provideDiff().getUpserts().isEmpty())) { + uploadTasks.add(RemoteRoutingTableDiff.ROUTING_TABLE_DIFF_FILE); + remoteRoutingTableService.getAsyncIndexRoutingDiffWriteAction( + clusterState.metadata().clusterUUID(), + clusterState.term(), + clusterState.version(), + routingTableDiff, + listener + ); } invokeIndexMetadataUploadListeners(indexToUpload, prevIndexMetadataByName, latch, exceptionList); @@ -689,7 +717,7 @@ private UploadedMetadataResults writeMetadataInParallel( String.format( Locale.ROOT, "Timed out waiting for transfer of following metadata to complete - %s", - String.join(", ", uploadTasks.keySet()) + String.join(", ", uploadTasks) ) ); exceptionList.forEach(ex::addSuppressed); @@ -698,11 +726,7 @@ private UploadedMetadataResults writeMetadataInParallel( } catch (InterruptedException ex) { exceptionList.forEach(ex::addSuppressed); RemoteStateTransferException exception = new RemoteStateTransferException( - String.format( - Locale.ROOT, - "Timed out waiting for transfer of metadata to complete - %s", - String.join(", ", uploadTasks.keySet()) - ), + String.format(Locale.ROOT, "Timed out waiting for transfer of metadata to complete - %s", String.join(", ", uploadTasks)), ex ); Thread.currentThread().interrupt(); @@ -710,20 +734,28 @@ private UploadedMetadataResults writeMetadataInParallel( } if (!exceptionList.isEmpty()) { RemoteStateTransferException exception = new RemoteStateTransferException( + String.format(Locale.ROOT, "Exception during transfer of following metadata to Remote - %s", String.join(", ", uploadTasks)) + ); + exceptionList.forEach(exception::addSuppressed); + throw exception; + } + if (results.size() != uploadTasks.size()) { + throw new RemoteStateTransferException( String.format( Locale.ROOT, - "Exception during transfer of following metadata to Remote - %s", - String.join(", ", uploadTasks.keySet()) + "Some metadata components were not uploaded successfully. 
Objects to be uploaded: %s, uploaded objects: %s", + String.join(", ", uploadTasks), + String.join(", ", results.keySet()) ) ); - exceptionList.forEach(exception::addSuppressed); - throw exception; } UploadedMetadataResults response = new UploadedMetadataResults(); results.forEach((name, uploadedMetadata) -> { if (uploadedMetadata.getClass().equals(UploadedIndexMetadata.class) - && uploadedMetadata.getComponent().contains(InternalRemoteRoutingTableService.INDEX_ROUTING_METADATA_PREFIX)) { + && uploadedMetadata.getComponent().contains(INDEX_ROUTING_METADATA_PREFIX)) { response.uploadedIndicesRoutingMetadata.add((UploadedIndexMetadata) uploadedMetadata); + } else if (RemoteRoutingTableDiff.ROUTING_TABLE_DIFF_FILE.equals(name)) { + response.uploadedIndicesRoutingDiffMetadata = (UploadedMetadataAttribute) uploadedMetadata; } else if (name.startsWith(CUSTOM_METADATA)) { // component name for custom metadata will look like custom-- String custom = name.split(DELIMITER)[0].split(CUSTOM_DELIMITER)[1]; @@ -896,9 +928,8 @@ public void start() { final Repository repository = repositoriesService.get().repository(remoteStoreRepo); assert repository instanceof BlobStoreRepository : "Repository should be instance of BlobStoreRepository"; blobStoreRepository = (BlobStoreRepository) repository; - this.remoteRoutingTableService.start(); - blobStoreTransferService = new BlobStoreTransferService(getBlobStore(), threadpool); String clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings).value(); + blobStoreTransferService = new BlobStoreTransferService(getBlobStore(), threadpool); remoteGlobalMetadataManager = new RemoteGlobalMetadataManager( clusterSettings, @@ -930,6 +961,8 @@ public void start() { namedWriteableRegistry, threadpool ); + + remoteRoutingTableService.start(); remoteClusterStateCleanupManager.start(); } @@ -975,7 +1008,8 @@ public ClusterState getLatestClusterState(String clusterName, String clusterUUID return getClusterStateForManifest(clusterName, clusterMetadataManifest.get(), nodeId, includeEphemeral); } - private ClusterState readClusterStateInParallel( + // package private for testing + ClusterState readClusterStateInParallel( ClusterState previousState, ClusterMetadataManifest manifest, String clusterUUID, @@ -991,17 +1025,18 @@ private ClusterState readClusterStateInParallel( List indicesRoutingToRead, boolean readHashesOfConsistentSettings, Map clusterStateCustomToRead, + boolean readIndexRoutingTableDiff, boolean includeEphemeral - ) throws IOException { + ) { int totalReadTasks = indicesToRead.size() + customToRead.size() + (readCoordinationMetadata ? 1 : 0) + (readSettingsMetadata ? 1 : 0) + (readTemplatesMetadata ? 1 : 0) + (readDiscoveryNodes ? 1 : 0) + (readClusterBlocks ? 1 : 0) + (readTransientSettingsMetadata ? 1 : 0) + (readHashesOfConsistentSettings ? 1 : 0) + clusterStateCustomToRead.size() - + indicesRoutingToRead.size(); + + indicesRoutingToRead.size() + (readIndexRoutingTableDiff ? 
1 : 0); CountDownLatch latch = new CountDownLatch(totalReadTasks); - List> asyncMetadataReadActions = new ArrayList<>(); List readResults = Collections.synchronizedList(new ArrayList<>()); List readIndexRoutingTableResults = Collections.synchronizedList(new ArrayList<>()); + AtomicReference> readIndexRoutingTableDiffResults = new AtomicReference<>(); List exceptionList = Collections.synchronizedList(new ArrayList<>(totalReadTasks)); LatchedActionListener listener = new LatchedActionListener<>(ActionListener.wrap(response -> { @@ -1013,171 +1048,173 @@ private ClusterState readClusterStateInParallel( }), latch); for (UploadedIndexMetadata indexMetadata : indicesToRead) { - asyncMetadataReadActions.add( - remoteIndexMetadataManager.getAsyncIndexMetadataReadAction(clusterUUID, indexMetadata.getUploadedFilename(), listener) + remoteIndexMetadataManager.readAsync( + indexMetadata.getIndexName(), + new RemoteIndexMetadata( + RemoteClusterStateUtils.getFormattedIndexFileName(indexMetadata.getUploadedFilename()), + clusterUUID, + blobStoreRepository.getCompressor(), + blobStoreRepository.getNamedXContentRegistry() + ), + listener ); } LatchedActionListener routingTableLatchedActionListener = new LatchedActionListener<>( ActionListener.wrap(response -> { - logger.debug("Successfully read cluster state component from remote"); + logger.debug(() -> new ParameterizedMessage("Successfully read index-routing for index {}", response.getIndex().getName())); readIndexRoutingTableResults.add(response); }, ex -> { - logger.error("Failed to read cluster state from remote", ex); + logger.error(() -> new ParameterizedMessage("Failed to read index-routing from remote"), ex); exceptionList.add(ex); }), latch ); for (UploadedIndexMetadata indexRouting : indicesRoutingToRead) { - asyncMetadataReadActions.add( - remoteRoutingTableService.getAsyncIndexRoutingReadAction( - indexRouting.getUploadedFilename(), - new Index(indexRouting.getIndexName(), indexRouting.getIndexUUID()), - routingTableLatchedActionListener - ) + remoteRoutingTableService.getAsyncIndexRoutingReadAction( + clusterUUID, + indexRouting.getUploadedFilename(), + routingTableLatchedActionListener + ); + } + + LatchedActionListener> routingTableDiffLatchedActionListener = new LatchedActionListener<>( + ActionListener.wrap(response -> { + logger.debug("Successfully read routing table diff component from remote"); + readIndexRoutingTableDiffResults.set(response); + }, ex -> { + logger.error("Failed to read routing table diff from remote", ex); + exceptionList.add(ex); + }), + latch + ); + + if (readIndexRoutingTableDiff) { + remoteRoutingTableService.getAsyncIndexRoutingTableDiffReadAction( + clusterUUID, + manifest.getDiffManifest().getIndicesRoutingDiffPath(), + routingTableDiffLatchedActionListener ); } for (Map.Entry entry : customToRead.entrySet()) { - asyncMetadataReadActions.add( - remoteGlobalMetadataManager.getAsyncMetadataReadAction( - new RemoteCustomMetadata( - entry.getValue().getUploadedFilename(), - entry.getKey(), - clusterUUID, - blobStoreRepository.getCompressor(), - namedWriteableRegistry - ), - entry.getValue().getAttributeName(), - listener - ) + remoteGlobalMetadataManager.readAsync( + entry.getValue().getAttributeName(), + new RemoteCustomMetadata( + entry.getValue().getUploadedFilename(), + entry.getKey(), + clusterUUID, + blobStoreRepository.getCompressor(), + namedWriteableRegistry + ), + listener ); } if (readCoordinationMetadata) { - asyncMetadataReadActions.add( - remoteGlobalMetadataManager.getAsyncMetadataReadAction( - new 
RemoteCoordinationMetadata( - manifest.getCoordinationMetadata().getUploadedFilename(), - clusterUUID, - blobStoreRepository.getCompressor(), - blobStoreRepository.getNamedXContentRegistry() - ), - COORDINATION_METADATA, - listener - ) + remoteGlobalMetadataManager.readAsync( + COORDINATION_METADATA, + new RemoteCoordinationMetadata( + manifest.getCoordinationMetadata().getUploadedFilename(), + clusterUUID, + blobStoreRepository.getCompressor(), + blobStoreRepository.getNamedXContentRegistry() + ), + listener ); } if (readSettingsMetadata) { - asyncMetadataReadActions.add( - remoteGlobalMetadataManager.getAsyncMetadataReadAction( - new RemotePersistentSettingsMetadata( - manifest.getSettingsMetadata().getUploadedFilename(), - clusterUUID, - blobStoreRepository.getCompressor(), - blobStoreRepository.getNamedXContentRegistry() - ), - SETTING_METADATA, - listener - ) + remoteGlobalMetadataManager.readAsync( + SETTING_METADATA, + new RemotePersistentSettingsMetadata( + manifest.getSettingsMetadata().getUploadedFilename(), + clusterUUID, + blobStoreRepository.getCompressor(), + blobStoreRepository.getNamedXContentRegistry() + ), + listener ); } if (readTransientSettingsMetadata) { - asyncMetadataReadActions.add( - remoteGlobalMetadataManager.getAsyncMetadataReadAction( - new RemoteTransientSettingsMetadata( - manifest.getTransientSettingsMetadata().getUploadedFilename(), - clusterUUID, - blobStoreRepository.getCompressor(), - blobStoreRepository.getNamedXContentRegistry() - ), - TRANSIENT_SETTING_METADATA, - listener - ) + remoteGlobalMetadataManager.readAsync( + TRANSIENT_SETTING_METADATA, + new RemoteTransientSettingsMetadata( + manifest.getTransientSettingsMetadata().getUploadedFilename(), + clusterUUID, + blobStoreRepository.getCompressor(), + blobStoreRepository.getNamedXContentRegistry() + ), + listener ); } if (readTemplatesMetadata) { - asyncMetadataReadActions.add( - remoteGlobalMetadataManager.getAsyncMetadataReadAction( - new RemoteTemplatesMetadata( - manifest.getTemplatesMetadata().getUploadedFilename(), - clusterUUID, - blobStoreRepository.getCompressor(), - blobStoreRepository.getNamedXContentRegistry() - ), - TEMPLATES_METADATA, - listener - ) + remoteGlobalMetadataManager.readAsync( + TEMPLATES_METADATA, + new RemoteTemplatesMetadata( + manifest.getTemplatesMetadata().getUploadedFilename(), + clusterUUID, + blobStoreRepository.getCompressor(), + blobStoreRepository.getNamedXContentRegistry() + ), + listener ); } if (readDiscoveryNodes) { - asyncMetadataReadActions.add( - remoteClusterStateAttributesManager.getAsyncMetadataReadAction( - DISCOVERY_NODES, - new RemoteDiscoveryNodes( - manifest.getDiscoveryNodesMetadata().getUploadedFilename(), - clusterUUID, - blobStoreRepository.getCompressor() - ), - listener - ) + remoteClusterStateAttributesManager.readAsync( + DISCOVERY_NODES, + new RemoteDiscoveryNodes( + manifest.getDiscoveryNodesMetadata().getUploadedFilename(), + clusterUUID, + blobStoreRepository.getCompressor() + ), + listener ); } if (readClusterBlocks) { - asyncMetadataReadActions.add( - remoteClusterStateAttributesManager.getAsyncMetadataReadAction( - CLUSTER_BLOCKS, - new RemoteClusterBlocks( - manifest.getClusterBlocksMetadata().getUploadedFilename(), - clusterUUID, - blobStoreRepository.getCompressor() - ), - listener - ) + remoteClusterStateAttributesManager.readAsync( + CLUSTER_BLOCKS, + new RemoteClusterBlocks( + manifest.getClusterBlocksMetadata().getUploadedFilename(), + clusterUUID, + blobStoreRepository.getCompressor() + ), + listener ); } if 
(readHashesOfConsistentSettings) { - asyncMetadataReadActions.add( - remoteGlobalMetadataManager.getAsyncMetadataReadAction( - new RemoteHashesOfConsistentSettings( - manifest.getHashesOfConsistentSettings().getUploadedFilename(), - clusterUUID, - blobStoreRepository.getCompressor() - ), - HASHES_OF_CONSISTENT_SETTINGS, - listener - ) + remoteGlobalMetadataManager.readAsync( + HASHES_OF_CONSISTENT_SETTINGS, + new RemoteHashesOfConsistentSettings( + manifest.getHashesOfConsistentSettings().getUploadedFilename(), + clusterUUID, + blobStoreRepository.getCompressor() + ), + listener ); } for (Map.Entry entry : clusterStateCustomToRead.entrySet()) { - asyncMetadataReadActions.add( - remoteClusterStateAttributesManager.getAsyncMetadataReadAction( - // pass component name as cluster-state-custom--, so that we can interpret it later - String.join(CUSTOM_DELIMITER, CLUSTER_STATE_CUSTOM, entry.getKey()), - new RemoteClusterStateCustoms( - entry.getValue().getUploadedFilename(), - entry.getValue().getAttributeName(), - clusterUUID, - blobStoreRepository.getCompressor(), - namedWriteableRegistry - ), - listener - ) + remoteClusterStateAttributesManager.readAsync( + // pass component name as cluster-state-custom--, so that we can interpret it later + String.join(CUSTOM_DELIMITER, CLUSTER_STATE_CUSTOM, entry.getKey()), + new RemoteClusterStateCustoms( + entry.getValue().getUploadedFilename(), + entry.getValue().getAttributeName(), + clusterUUID, + blobStoreRepository.getCompressor(), + namedWriteableRegistry + ), + listener ); } - for (CheckedRunnable asyncMetadataReadAction : asyncMetadataReadActions) { - asyncMetadataReadAction.run(); - } - try { if (latch.await(this.remoteStateReadTimeout.getMillis(), TimeUnit.MILLISECONDS) == false) { RemoteStateTransferException exception = new RemoteStateTransferException( @@ -1263,7 +1300,12 @@ private ClusterState readClusterStateInParallel( readIndexRoutingTableResults.forEach( indexRoutingTable -> indicesRouting.put(indexRoutingTable.getIndex().getName(), indexRoutingTable) ); - clusterStateBuilder.routingTable(new RoutingTable(manifest.getRoutingTableVersion(), indicesRouting)); + Diff routingTableDiff = readIndexRoutingTableDiffResults.get(); + RoutingTable newRoutingTable = new RoutingTable(manifest.getRoutingTableVersion(), indicesRouting); + if (routingTableDiff != null) { + newRoutingTable = routingTableDiff.apply(previousState.getRoutingTable()); + } + clusterStateBuilder.routingTable(newRoutingTable); return clusterStateBuilder.build(); } @@ -1284,13 +1326,14 @@ public ClusterState getClusterStateForManifest( manifest.getCustomMetadataMap(), manifest.getCoordinationMetadata() != null, manifest.getSettingsMetadata() != null, - manifest.getTransientSettingsMetadata() != null, + includeEphemeral && manifest.getTransientSettingsMetadata() != null, manifest.getTemplatesMetadata() != null, includeEphemeral && manifest.getDiscoveryNodesMetadata() != null, includeEphemeral && manifest.getClusterBlocksMetadata() != null, includeEphemeral ? manifest.getIndicesRouting() : emptyList(), includeEphemeral && manifest.getHashesOfConsistentSettings() != null, includeEphemeral ? 
manifest.getClusterStateCustomMap() : emptyMap(), + false, includeEphemeral ); } else { @@ -1311,6 +1354,7 @@ public ClusterState getClusterStateForManifest( emptyList(), false, emptyMap(), + false, false ); Metadata.Builder mb = Metadata.builder(remoteGlobalMetadataManager.getGlobalMetadata(manifest.getClusterUUID(), manifest)); @@ -1320,13 +1364,8 @@ public ClusterState getClusterStateForManifest( } - public ClusterState getClusterStateUsingDiff( - String clusterName, - ClusterMetadataManifest manifest, - ClusterState previousState, - String localNodeId - ) throws IOException { - assert manifest.getDiffManifest() != null; + public ClusterState getClusterStateUsingDiff(ClusterMetadataManifest manifest, ClusterState previousState, String localNodeId) { + assert manifest.getDiffManifest() != null : "Diff manifest null which is required for downloading cluster state"; ClusterStateDiffManifest diff = manifest.getDiffManifest(); List updatedIndices = diff.getIndicesUpdated().stream().map(idx -> { Optional uploadedIndexMetadataOptional = manifest.getIndices() @@ -1351,9 +1390,6 @@ public ClusterState getClusterStateUsingDiff( } List updatedIndexRouting = new ArrayList<>(); - updatedIndexRouting.addAll( - remoteRoutingTableService.getUpdatedIndexRoutingTableMetadata(diff.getIndicesRoutingUpdated(), manifest.getIndicesRouting()) - ); ClusterState updatedClusterState = readClusterStateInParallel( previousState, @@ -1371,6 +1407,9 @@ public ClusterState getClusterStateUsingDiff( updatedIndexRouting, diff.isHashesOfConsistentSettingsUpdated(), updatedClusterStateCustom, + manifest.getDiffManifest() != null + && manifest.getDiffManifest().getIndicesRoutingDiffPath() != null + && !manifest.getDiffManifest().getIndicesRoutingDiffPath().isEmpty(), true ); ClusterState.Builder clusterStateBuilder = ClusterState.builder(updatedClusterState); @@ -1395,10 +1434,6 @@ public ClusterState getClusterStateUsingDiff( HashMap indexRoutingTables = new HashMap<>(updatedClusterState.getRoutingTable().getIndicesRouting()); - for (String indexName : diff.getIndicesRoutingDeleted()) { - indexRoutingTables.remove(indexName); - } - return clusterStateBuilder.stateUUID(manifest.getStateUUID()) .version(manifest.getStateVersion()) .metadata(metadataBuilder) @@ -1585,6 +1620,19 @@ private boolean isValidClusterUUID(ClusterMetadataManifest manifest) { return manifest.isClusterUUIDCommitted(); } + // package private setter which are required for injecting mock managers, these setters are not supposed to be used elsewhere + void setRemoteIndexMetadataManager(RemoteIndexMetadataManager remoteIndexMetadataManager) { + this.remoteIndexMetadataManager = remoteIndexMetadataManager; + } + + void setRemoteGlobalMetadataManager(RemoteGlobalMetadataManager remoteGlobalMetadataManager) { + this.remoteGlobalMetadataManager = remoteGlobalMetadataManager; + } + + void setRemoteClusterStateAttributesManager(RemoteClusterStateAttributesManager remoteClusterStateAttributeManager) { + this.remoteClusterStateAttributesManager = remoteClusterStateAttributeManager; + } + public void writeMetadataFailed() { getStats().stateFailed(); } diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateUtils.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateUtils.java index f2b93c3784407..74cb838286961 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateUtils.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateUtils.java @@ -88,6 +88,7 @@ public static 
class UploadedMetadataResults { ClusterMetadataManifest.UploadedMetadataAttribute uploadedClusterBlocks; List uploadedIndicesRoutingMetadata; ClusterMetadataManifest.UploadedMetadataAttribute uploadedHashesOfConsistentSettings; + ClusterMetadataManifest.UploadedMetadataAttribute uploadedIndicesRoutingDiffMetadata; public UploadedMetadataResults( List uploadedIndexMetadata, diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManager.java index 2c5aad99adc0c..763a8e3ff4951 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManager.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManager.java @@ -8,7 +8,6 @@ package org.opensearch.gateway.remote; -import org.opensearch.action.LatchedActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.DiffableUtils; import org.opensearch.cluster.DiffableUtils.NonDiffableValueSerializer; @@ -17,9 +16,9 @@ import org.opensearch.cluster.metadata.Metadata.Custom; import org.opensearch.cluster.metadata.Metadata.XContentContext; import org.opensearch.cluster.metadata.TemplatesMetadata; -import org.opensearch.common.CheckedRunnable; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; -import org.opensearch.common.remote.RemoteWritableEntityStore; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; +import org.opensearch.common.remote.AbstractRemoteWritableEntityManager; +import org.opensearch.common.remote.RemoteWriteableEntityBlobStore; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; @@ -28,7 +27,6 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.compress.Compressor; import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.gateway.remote.model.RemoteClusterStateBlobStore; import org.opensearch.gateway.remote.model.RemoteCoordinationMetadata; import org.opensearch.gateway.remote.model.RemoteCustomMetadata; import org.opensearch.gateway.remote.model.RemoteGlobalMetadata; @@ -43,7 +41,6 @@ import java.io.IOException; import java.util.Collections; -import java.util.HashMap; import java.util.Locale; import java.util.Map; import java.util.Map.Entry; @@ -56,7 +53,7 @@ * * @opensearch.internal */ -public class RemoteGlobalMetadataManager { +public class RemoteGlobalMetadataManager extends AbstractRemoteWritableEntityManager { public static final TimeValue GLOBAL_METADATA_UPLOAD_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000); @@ -70,7 +67,6 @@ public class RemoteGlobalMetadataManager { public static final int GLOBAL_METADATA_CURRENT_CODEC_VERSION = 1; private volatile TimeValue globalMetadataUploadTimeout; - private Map remoteWritableEntityStores; private final Compressor compressor; private final NamedXContentRegistry namedXContentRegistry; private final NamedWriteableRegistry namedWriteableRegistry; @@ -87,120 +83,108 @@ public class RemoteGlobalMetadataManager { this.compressor = blobStoreRepository.getCompressor(); this.namedXContentRegistry = blobStoreRepository.getNamedXContentRegistry(); this.namedWriteableRegistry = namedWriteableRegistry; - this.remoteWritableEntityStores = new HashMap<>(); this.remoteWritableEntityStores.put( RemoteGlobalMetadata.GLOBAL_METADATA, - new RemoteClusterStateBlobStore<>( + new 
RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); this.remoteWritableEntityStores.put( RemoteCoordinationMetadata.COORDINATION_METADATA, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); this.remoteWritableEntityStores.put( RemotePersistentSettingsMetadata.SETTING_METADATA, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); this.remoteWritableEntityStores.put( RemoteTransientSettingsMetadata.TRANSIENT_SETTING_METADATA, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); this.remoteWritableEntityStores.put( RemoteHashesOfConsistentSettings.HASHES_OF_CONSISTENT_SETTINGS, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); this.remoteWritableEntityStores.put( RemoteTemplatesMetadata.TEMPLATES_METADATA, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); this.remoteWritableEntityStores.put( RemoteCustomMetadata.CUSTOM_METADATA, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); clusterSettings.addSettingsUpdateConsumer(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, this::setGlobalMetadataUploadTimeout); } - /** - * Allows async upload of Metadata components to remote - */ - CheckedRunnable getAsyncMetadataWriteAction( - AbstractRemoteWritableBlobEntity writeEntity, - LatchedActionListener latchedActionListener - ) { - return (() -> getStore(writeEntity).writeAsync(writeEntity, getActionListener(writeEntity, latchedActionListener))); - } - - private RemoteWritableEntityStore getStore(AbstractRemoteWritableBlobEntity entity) { - RemoteWritableEntityStore remoteStore = remoteWritableEntityStores.get(entity.getType()); - if (remoteStore == null) { - throw new IllegalArgumentException("Unknown entity type [" + entity.getType() + "]"); - } - return remoteStore; - } - - private ActionListener getActionListener( - AbstractRemoteWritableBlobEntity remoteBlobStoreObject, - LatchedActionListener latchedActionListener + @Override + protected ActionListener getWrappedWriteListener( + String component, + AbstractClusterMetadataWriteableBlobEntity remoteEntity, + ActionListener listener 
) { return ActionListener.wrap( - resp -> latchedActionListener.onResponse(remoteBlobStoreObject.getUploadedMetadata()), - ex -> latchedActionListener.onFailure( - new RemoteStateTransferException("Upload failed for " + remoteBlobStoreObject.getType(), ex) - ) + resp -> listener.onResponse(remoteEntity.getUploadedMetadata()), + ex -> listener.onFailure(new RemoteStateTransferException("Upload failed for " + component, remoteEntity, ex)) ); } - CheckedRunnable getAsyncMetadataReadAction( - AbstractRemoteWritableBlobEntity readEntity, - String componentName, - LatchedActionListener listener + @Override + protected ActionListener getWrappedReadListener( + String component, + AbstractClusterMetadataWriteableBlobEntity remoteEntity, + ActionListener listener ) { - ActionListener actionListener = ActionListener.wrap( - response -> listener.onResponse(new RemoteReadResult(response, readEntity.getType(), componentName)), - listener::onFailure + return ActionListener.wrap( + response -> listener.onResponse(new RemoteReadResult(response, remoteEntity.getType(), component)), + ex -> listener.onFailure(new RemoteStateTransferException("Download failed for " + component, remoteEntity, ex)) ); - return () -> getStore(readEntity).readAsync(readEntity, actionListener); } Metadata getGlobalMetadata(String clusterUUID, ClusterMetadataManifest clusterMetadataManifest) { diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteIndexMetadataManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteIndexMetadataManager.java index a84161b202a22..d1f08a7c2a33d 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteIndexMetadataManager.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteIndexMetadataManager.java @@ -8,17 +8,16 @@ package org.opensearch.gateway.remote; -import org.opensearch.action.LatchedActionListener; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.CheckedRunnable; -import org.opensearch.common.remote.RemoteWritableEntityStore; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; +import org.opensearch.common.remote.AbstractRemoteWritableEntityManager; +import org.opensearch.common.remote.RemoteWriteableEntityBlobStore; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; import org.opensearch.core.compress.Compressor; import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.gateway.remote.model.RemoteClusterStateBlobStore; import org.opensearch.gateway.remote.model.RemoteIndexMetadata; import org.opensearch.gateway.remote.model.RemoteReadResult; import org.opensearch.index.translog.transfer.BlobStoreTransferService; @@ -26,17 +25,14 @@ import org.opensearch.threadpool.ThreadPool; import java.io.IOException; -import java.util.HashMap; import java.util.Locale; -import java.util.Map; -import java.util.Objects; /** * A Manager which provides APIs to write and read Index Metadata to remote store * * @opensearch.internal */ -public class RemoteIndexMetadataManager { +public class RemoteIndexMetadataManager extends AbstractRemoteWritableEntityManager { public static final TimeValue INDEX_METADATA_UPLOAD_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000); @@ -48,7 +44,6 @@ public class RemoteIndexMetadataManager { Setting.Property.Deprecated ); - private final RemoteWritableEntityStore indexMetadataBlobStore; private 
final Compressor compressor; private final NamedXContentRegistry namedXContentRegistry; @@ -61,12 +56,16 @@ public RemoteIndexMetadataManager( BlobStoreTransferService blobStoreTransferService, ThreadPool threadpool ) { - this.indexMetadataBlobStore = new RemoteClusterStateBlobStore<>( - blobStoreTransferService, - blobStoreRepository, - clusterName, - threadpool, - ThreadPool.Names.REMOTE_STATE_READ + this.remoteWritableEntityStores.put( + RemoteIndexMetadata.INDEX, + new RemoteWriteableEntityBlobStore<>( + blobStoreTransferService, + blobStoreRepository, + clusterName, + threadpool, + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN + ) ); this.namedXContentRegistry = blobStoreRepository.getNamedXContentRegistry(); this.compressor = blobStoreRepository.getCompressor(); @@ -74,45 +73,6 @@ public RemoteIndexMetadataManager( clusterSettings.addSettingsUpdateConsumer(INDEX_METADATA_UPLOAD_TIMEOUT_SETTING, this::setIndexMetadataUploadTimeout); } - /** - * Allows async Upload of IndexMetadata to remote - * - * @param indexMetadata {@link IndexMetadata} to upload - * @param latchedActionListener listener to respond back on after upload finishes - */ - CheckedRunnable getAsyncIndexMetadataWriteAction( - IndexMetadata indexMetadata, - String clusterUUID, - LatchedActionListener latchedActionListener - ) { - RemoteIndexMetadata remoteIndexMetadata = new RemoteIndexMetadata(indexMetadata, clusterUUID, compressor, namedXContentRegistry); - ActionListener completionListener = ActionListener.wrap( - resp -> latchedActionListener.onResponse(remoteIndexMetadata.getUploadedMetadata()), - ex -> latchedActionListener.onFailure(new RemoteStateTransferException(indexMetadata.getIndex().getName(), ex)) - ); - return () -> indexMetadataBlobStore.writeAsync(remoteIndexMetadata, completionListener); - } - - CheckedRunnable getAsyncIndexMetadataReadAction( - String clusterUUID, - String uploadedFilename, - LatchedActionListener latchedActionListener - ) { - RemoteIndexMetadata remoteIndexMetadata = new RemoteIndexMetadata( - RemoteClusterStateUtils.getFormattedIndexFileName(uploadedFilename), - clusterUUID, - compressor, - namedXContentRegistry - ); - ActionListener actionListener = ActionListener.wrap( - response -> latchedActionListener.onResponse( - new RemoteReadResult(response, RemoteIndexMetadata.INDEX, response.getIndex().getName()) - ), - latchedActionListener::onFailure - ); - return () -> indexMetadataBlobStore.readAsync(remoteIndexMetadata, actionListener); - } - /** * Fetch index metadata from remote cluster state * @@ -127,7 +87,7 @@ IndexMetadata getIndexMetadata(ClusterMetadataManifest.UploadedIndexMetadata upl namedXContentRegistry ); try { - return indexMetadataBlobStore.read(remoteIndexMetadata); + return (IndexMetadata) getStore(remoteIndexMetadata).read(remoteIndexMetadata); } catch (IOException e) { throw new IllegalStateException( String.format(Locale.ROOT, "Error while downloading IndexMetadata - %s", uploadedIndexMetadata.getUploadedFilename()), @@ -136,24 +96,6 @@ IndexMetadata getIndexMetadata(ClusterMetadataManifest.UploadedIndexMetadata upl } } - /** - * Fetch latest index metadata from remote cluster state - * - * @param clusterMetadataManifest manifest file of cluster - * @param clusterUUID uuid of cluster state to refer to in remote - * @return {@code Map} latest IndexUUID to IndexMetadata map - */ - Map getIndexMetadataMap(String clusterUUID, ClusterMetadataManifest clusterMetadataManifest) { - assert Objects.equals(clusterUUID, 
clusterMetadataManifest.getClusterUUID()) - : "Corrupt ClusterMetadataManifest found. Cluster UUID mismatch."; - Map remoteIndexMetadata = new HashMap<>(); - for (ClusterMetadataManifest.UploadedIndexMetadata uploadedIndexMetadata : clusterMetadataManifest.getIndices()) { - IndexMetadata indexMetadata = getIndexMetadata(uploadedIndexMetadata, clusterUUID); - remoteIndexMetadata.put(uploadedIndexMetadata.getIndexUUID(), indexMetadata); - } - return remoteIndexMetadata; - } - public TimeValue getIndexMetadataUploadTimeout() { return this.indexMetadataUploadTimeout; } @@ -162,4 +104,27 @@ private void setIndexMetadataUploadTimeout(TimeValue newIndexMetadataUploadTimeo this.indexMetadataUploadTimeout = newIndexMetadataUploadTimeout; } + @Override + protected ActionListener getWrappedWriteListener( + String component, + AbstractClusterMetadataWriteableBlobEntity remoteEntity, + ActionListener listener + ) { + return ActionListener.wrap( + resp -> listener.onResponse(remoteEntity.getUploadedMetadata()), + ex -> listener.onFailure(new RemoteStateTransferException("Upload failed for " + component, remoteEntity, ex)) + ); + } + + @Override + protected ActionListener getWrappedReadListener( + String component, + AbstractClusterMetadataWriteableBlobEntity remoteEntity, + ActionListener listener + ) { + return ActionListener.wrap( + response -> listener.onResponse(new RemoteReadResult(response, RemoteIndexMetadata.INDEX, component)), + ex -> listener.onFailure(new RemoteStateTransferException("Download failed for " + component, remoteEntity, ex)) + ); + } } diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteManifestManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteManifestManager.java index cb09de1a6ec44..0ccadd7dd18da 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteManifestManager.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteManifestManager.java @@ -16,6 +16,7 @@ import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.remote.RemoteWriteableEntityBlobStore; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.unit.TimeValue; @@ -23,7 +24,6 @@ import org.opensearch.core.compress.Compressor; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.gateway.remote.model.RemoteClusterMetadataManifest; -import org.opensearch.gateway.remote.model.RemoteClusterStateBlobStore; import org.opensearch.gateway.remote.model.RemoteClusterStateManifestInfo; import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.translog.transfer.BlobStoreTransferService; @@ -62,7 +62,7 @@ public class RemoteManifestManager { private volatile TimeValue metadataManifestUploadTimeout; private final String nodeId; - private final RemoteClusterStateBlobStore manifestBlobStore; + private final RemoteWriteableEntityBlobStore manifestBlobStore; private final Compressor compressor; private final NamedXContentRegistry namedXContentRegistry; // todo remove blobStorerepo from here @@ -78,12 +78,13 @@ public class RemoteManifestManager { ) { this.metadataManifestUploadTimeout = clusterSettings.get(METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING); this.nodeId = nodeId; - this.manifestBlobStore = new RemoteClusterStateBlobStore<>( + this.manifestBlobStore = new RemoteWriteableEntityBlobStore<>( 
blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ); ; clusterSettings.addSettingsUpdateConsumer(METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING, this::setMetadataManifestUploadTimeout); diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java b/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java index 36d107a99d258..1e7f8f278fb0f 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java @@ -20,15 +20,18 @@ public class RemotePersistenceStats extends PersistedStateStats { static final String CLEANUP_ATTEMPT_FAILED_COUNT = "cleanup_attempt_failed_count"; static final String INDEX_ROUTING_FILES_CLEANUP_ATTEMPT_FAILED_COUNT = "index_routing_files_cleanup_attempt_failed_count"; + static final String INDICES_ROUTING_DIFF_FILES_CLEANUP_ATTEMPT_FAILED_COUNT = "indices_routing_diff_files_cleanup_attempt_failed_count"; static final String REMOTE_UPLOAD = "remote_upload"; private AtomicLong cleanupAttemptFailedCount = new AtomicLong(0); private AtomicLong indexRoutingFilesCleanupAttemptFailedCount = new AtomicLong(0); + private AtomicLong indicesRoutingDiffFilesCleanupAttemptFailedCount = new AtomicLong(0); public RemotePersistenceStats() { super(REMOTE_UPLOAD); addToExtendedFields(CLEANUP_ATTEMPT_FAILED_COUNT, cleanupAttemptFailedCount); addToExtendedFields(INDEX_ROUTING_FILES_CLEANUP_ATTEMPT_FAILED_COUNT, indexRoutingFilesCleanupAttemptFailedCount); + addToExtendedFields(INDICES_ROUTING_DIFF_FILES_CLEANUP_ATTEMPT_FAILED_COUNT, indicesRoutingDiffFilesCleanupAttemptFailedCount); } public void cleanUpAttemptFailed() { @@ -46,4 +49,12 @@ public void indexRoutingFilesCleanupAttemptFailed() { public long getIndexRoutingFilesCleanupAttemptFailedCount() { return indexRoutingFilesCleanupAttemptFailedCount.get(); } + + public void indicesRoutingDiffFileCleanupAttemptFailed() { + indicesRoutingDiffFilesCleanupAttemptFailedCount.incrementAndGet(); + } + + public long getIndicesRoutingDiffFileCleanupAttemptFailedCount() { + return indicesRoutingDiffFilesCleanupAttemptFailedCount.get(); + } } diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterBlocks.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterBlocks.java index 9c5fbd5941640..101daaa143a66 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterBlocks.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterBlocks.java @@ -10,7 +10,7 @@ import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.compress.Compressor; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; @@ -29,7 +29,7 @@ /** * Wrapper class for uploading/downloading {@link ClusterBlocks} to/from remote blob store */ -public class RemoteClusterBlocks extends AbstractRemoteWritableBlobEntity { +public class RemoteClusterBlocks extends AbstractClusterMetadataWriteableBlobEntity { public static final String CLUSTER_BLOCKS = "blocks"; public static final ChecksumWritableBlobStoreFormat 
    CLUSTER_BLOCKS_FORMAT = new ChecksumWritableBlobStoreFormat<>(
diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifest.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifest.java
index 1dc56712d4ab5..5f79b690af574 100644
--- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifest.java
+++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifest.java
@@ -9,7 +9,7 @@
 package org.opensearch.gateway.remote.model;
 
 import org.opensearch.common.io.Streams;
-import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity;
+import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity;
 import org.opensearch.common.remote.BlobPathParameters;
 import org.opensearch.core.compress.Compressor;
 import org.opensearch.core.xcontent.NamedXContentRegistry;
@@ -29,13 +29,13 @@
 /**
  * Wrapper class for uploading/downloading {@link ClusterMetadataManifest} to/from remote blob store
  */
-public class RemoteClusterMetadataManifest extends AbstractRemoteWritableBlobEntity<ClusterMetadataManifest> {
+public class RemoteClusterMetadataManifest extends AbstractClusterMetadataWriteableBlobEntity<ClusterMetadataManifest> {
 
     public static final String MANIFEST = "manifest";
     public static final int SPLITTED_MANIFEST_FILE_LENGTH = 6;
 
     public static final String METADATA_MANIFEST_NAME_FORMAT = "%s";
-    public static final int MANIFEST_CURRENT_CODEC_VERSION = ClusterMetadataManifest.CODEC_V2;
+    public static final int MANIFEST_CURRENT_CODEC_VERSION = ClusterMetadataManifest.CODEC_V3;
     public static final String COMMITTED = "C";
     public static final String PUBLISHED = "P";
@@ -50,6 +50,9 @@ public class RemoteClusterMetadataManifest extends AbstractRemoteWritableBlobEnt
     public static final ChecksumBlobStoreFormat<ClusterMetadataManifest> CLUSTER_METADATA_MANIFEST_FORMAT_V1 =
         new ChecksumBlobStoreFormat<>("cluster-metadata-manifest", METADATA_MANIFEST_NAME_FORMAT, ClusterMetadataManifest::fromXContentV1);
 
+    public static final ChecksumBlobStoreFormat<ClusterMetadataManifest> CLUSTER_METADATA_MANIFEST_FORMAT_V2 =
+        new ChecksumBlobStoreFormat<>("cluster-metadata-manifest", METADATA_MANIFEST_NAME_FORMAT, ClusterMetadataManifest::fromXContentV2);
+
     /**
      * Manifest format compatible with codec v2, where we introduced codec versions/global metadata.
      */
@@ -149,6 +152,8 @@ private ChecksumBlobStoreFormat<ClusterMetadataManifest> getClusterMetadataManif
         long codecVersion = getManifestCodecVersion();
         if (codecVersion == MANIFEST_CURRENT_CODEC_VERSION) {
             return CLUSTER_METADATA_MANIFEST_FORMAT;
+        } else if (codecVersion == ClusterMetadataManifest.CODEC_V2) {
+            return CLUSTER_METADATA_MANIFEST_FORMAT_V2;
         } else if (codecVersion == ClusterMetadataManifest.CODEC_V1) {
             return CLUSTER_METADATA_MANIFEST_FORMAT_V1;
         } else if (codecVersion == ClusterMetadataManifest.CODEC_V0) {
diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateCustoms.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateCustoms.java
index f384908bc6b65..e5e44525520f4 100644
--- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateCustoms.java
+++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateCustoms.java
@@ -11,7 +11,7 @@
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.ClusterState.Custom;
 import org.opensearch.common.io.Streams;
-import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity;
+import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity;
 import org.opensearch.common.remote.BlobPathParameters;
 import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -32,14 +32,14 @@
 /**
  * Wrapper class for uploading/downloading {@link Custom} to/from remote blob store
 */
-public class RemoteClusterStateCustoms extends AbstractRemoteWritableBlobEntity<Custom> {
+public class RemoteClusterStateCustoms extends AbstractClusterMetadataWriteableBlobEntity<Custom> {
 
     public static final String CLUSTER_STATE_CUSTOM = "cluster-state-custom";
+    public final ChecksumWritableBlobStoreFormat<Custom> clusterStateCustomsFormat;
 
     private long stateVersion;
     private final String customType;
     private ClusterState.Custom custom;
     private final NamedWriteableRegistry namedWriteableRegistry;
-    private final ChecksumWritableBlobStoreFormat<Custom> clusterStateCustomsFormat;
 
     public RemoteClusterStateCustoms(
         final ClusterState.Custom custom,
diff --git
a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCustomMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCustomMetadata.java index 4c7069ee8be9e..8e850e903954a 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCustomMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCustomMetadata.java @@ -10,8 +10,9 @@ import org.opensearch.cluster.metadata.Metadata.Custom; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; +import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.compress.Compressor; @@ -31,7 +32,7 @@ /** * Wrapper class for uploading/downloading {@link Custom} to/from remote blob store */ -public class RemoteCustomMetadata extends AbstractRemoteWritableBlobEntity { +public class RemoteCustomMetadata extends AbstractClusterMetadataWriteableBlobEntity { public static final String CUSTOM_METADATA = "custom"; public static final String CUSTOM_DELIMITER = "--"; @@ -122,6 +123,8 @@ public UploadedMetadata getUploadedMetadata() { public static Custom readFrom(StreamInput streamInput, NamedWriteableRegistry namedWriteableRegistry, String customType) throws IOException { - return namedWriteableRegistry.getReader(Custom.class, customType).read(streamInput); + try (StreamInput in = new NamedWriteableAwareStreamInput(streamInput, namedWriteableRegistry)) { + return namedWriteableRegistry.getReader(Custom.class, customType).read(in); + } } } diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteDiscoveryNodes.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteDiscoveryNodes.java index fb399e2899cdd..446207a767009 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteDiscoveryNodes.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteDiscoveryNodes.java @@ -10,7 +10,7 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.compress.Compressor; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; @@ -29,7 +29,7 @@ /** * Wrapper class for uploading/downloading {@link DiscoveryNodes} to/from remote blob store */ -public class RemoteDiscoveryNodes extends AbstractRemoteWritableBlobEntity { +public class RemoteDiscoveryNodes extends AbstractClusterMetadataWriteableBlobEntity { public static final String DISCOVERY_NODES = "nodes"; public static final ChecksumWritableBlobStoreFormat DISCOVERY_NODES_FORMAT = new ChecksumWritableBlobStoreFormat<>( diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteGlobalMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteGlobalMetadata.java index 09f07de0d5c24..0082f873f8dba 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteGlobalMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteGlobalMetadata.java @@ -10,7 +10,7 @@ import 
org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.compress.Compressor; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -25,7 +25,7 @@ /** * Wrapper class for uploading/downloading global metadata ({@link Metadata}) to/from remote blob store */ -public class RemoteGlobalMetadata extends AbstractRemoteWritableBlobEntity { +public class RemoteGlobalMetadata extends AbstractClusterMetadataWriteableBlobEntity { public static final String GLOBAL_METADATA = "global_metadata"; public static final ChecksumBlobStoreFormat GLOBAL_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteHashesOfConsistentSettings.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteHashesOfConsistentSettings.java index 1debf75cdfec9..dee48237e5c4c 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteHashesOfConsistentSettings.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteHashesOfConsistentSettings.java @@ -10,7 +10,7 @@ import org.opensearch.cluster.metadata.DiffableStringMap; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.compress.Compressor; import org.opensearch.gateway.remote.ClusterMetadataManifest; @@ -28,7 +28,7 @@ /** * Wrapper class for uploading/downloading {@link DiffableStringMap} to/from remote blob store */ -public class RemoteHashesOfConsistentSettings extends AbstractRemoteWritableBlobEntity { +public class RemoteHashesOfConsistentSettings extends AbstractClusterMetadataWriteableBlobEntity { public static final String HASHES_OF_CONSISTENT_SETTINGS = "hashes-of-consistent-settings"; public static final ChecksumWritableBlobStoreFormat HASHES_OF_CONSISTENT_SETTINGS_FORMAT = new ChecksumWritableBlobStoreFormat<>("hashes-of-consistent-settings", DiffableStringMap::readFrom); diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteIndexMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteIndexMetadata.java index 830b09b92e2cb..5308f92c633b1 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteIndexMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteIndexMetadata.java @@ -10,7 +10,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.compress.Compressor; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -29,7 +29,7 @@ /** * Wrapper class for uploading/downloading {@link IndexMetadata} to/from remote blob store */ -public class RemoteIndexMetadata extends AbstractRemoteWritableBlobEntity { +public class RemoteIndexMetadata extends AbstractClusterMetadataWriteableBlobEntity { public static final int INDEX_METADATA_CURRENT_CODEC_VERSION = 2; diff --git 
a/server/src/main/java/org/opensearch/gateway/remote/model/RemotePersistentSettingsMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemotePersistentSettingsMetadata.java
index 9ee3db8f289e3..81042f289254c 100644
--- a/server/src/main/java/org/opensearch/gateway/remote/model/RemotePersistentSettingsMetadata.java
+++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemotePersistentSettingsMetadata.java
@@ -9,7 +9,7 @@
 package org.opensearch.gateway.remote.model;
 
 import org.opensearch.common.io.Streams;
-import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity;
+import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity;
 import org.opensearch.common.remote.BlobPathParameters;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.compress.Compressor;
@@ -31,7 +31,7 @@
 /**
  * Wrapper class for uploading/downloading persistent {@link Settings} to/from remote blob store
 */
-public class RemotePersistentSettingsMetadata extends AbstractRemoteWritableBlobEntity<Settings> {
+public class RemotePersistentSettingsMetadata extends AbstractClusterMetadataWriteableBlobEntity<Settings> {
 
     public static final String SETTING_METADATA = "settings";
 
diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemotePinnedTimestamps.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemotePinnedTimestamps.java
new file mode 100644
index 0000000000000..030491cf8b7b9
--- /dev/null
+++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemotePinnedTimestamps.java
@@ -0,0 +1,144 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.gateway.remote.model;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.common.io.Streams;
+import org.opensearch.common.remote.BlobPathParameters;
+import org.opensearch.common.remote.RemoteWriteableBlobEntity;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.core.compress.Compressor;
+import org.opensearch.index.remote.RemoteStoreUtils;
+import org.opensearch.repositories.blobstore.ChecksumWritableBlobStoreFormat;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER;
+
+/**
+ * Wrapper class for uploading/downloading {@link RemotePinnedTimestamps} to/from remote blob store
+ *
+ * @opensearch.internal
+ */
+public class RemotePinnedTimestamps extends RemoteWriteableBlobEntity<RemotePinnedTimestamps.PinnedTimestamps> {
+    private static final Logger logger = LogManager.getLogger(RemotePinnedTimestamps.class);
+
+    /**
+     * Represents a collection of pinned timestamps and their associated pinning entities.
+     * This class is thread-safe and implements the Writeable interface for serialization.
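+     *
+     * A minimal usage sketch (editorial illustration only; the timestamp and entity values are hypothetical):
+     *
+     *   PinnedTimestamps pinned = new PinnedTimestamps(new HashMap<>());
+     *   pinned.pin(1718870400000L, "snapshot-service");   // first pin for a timestamp creates its entity list
+     *   pinned.unpin(1718870400000L, "snapshot-service"); // unpinning the last entity drops the timestamp entirely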
+     */
+    public static class PinnedTimestamps implements Writeable {
+        private final Map<Long, List<String>> pinnedTimestampPinningEntityMap;
+
+        public PinnedTimestamps(Map<Long, List<String>> pinnedTimestampPinningEntityMap) {
+            this.pinnedTimestampPinningEntityMap = new ConcurrentHashMap<>(pinnedTimestampPinningEntityMap);
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeMap(pinnedTimestampPinningEntityMap, StreamOutput::writeLong, StreamOutput::writeStringCollection);
+        }
+
+        public static PinnedTimestamps readFrom(StreamInput in) throws IOException {
+            return new PinnedTimestamps(in.readMap(StreamInput::readLong, StreamInput::readStringList));
+        }
+
+        /**
+         * Pins a timestamp against a pinning entity.
+         *
+         * @param timestamp The timestamp to pin.
+         * @param pinningEntity The entity pinning the timestamp.
+         */
+        public void pin(Long timestamp, String pinningEntity) {
+            logger.debug("Pinning timestamp = {} against entity = {}", timestamp, pinningEntity);
+            pinnedTimestampPinningEntityMap.computeIfAbsent(timestamp, k -> new ArrayList<>()).add(pinningEntity);
+        }
+
+        /**
+         * Unpins a timestamp for a specific pinning entity.
+         *
+         * @param timestamp The timestamp to unpin.
+         * @param pinningEntity The entity unpinning the timestamp.
+         */
+        public void unpin(Long timestamp, String pinningEntity) {
+            logger.debug("Unpinning timestamp = {} against entity = {}", timestamp, pinningEntity);
+            if (pinnedTimestampPinningEntityMap.containsKey(timestamp) == false
+                || pinnedTimestampPinningEntityMap.get(timestamp).contains(pinningEntity) == false) {
+                logger.warn("Timestamp: {} is not pinned by entity: {}", timestamp, pinningEntity);
+            }
+            pinnedTimestampPinningEntityMap.compute(timestamp, (k, v) -> {
+                v.remove(pinningEntity);
+                return v.isEmpty() ? null : v;
+            });
+        }
+
+        public Map<Long, List<String>> getPinnedTimestampPinningEntityMap() {
+            return new HashMap<>(pinnedTimestampPinningEntityMap);
+        }
+    }
+
+    public static final String PINNED_TIMESTAMPS = "pinned_timestamps";
+    public static final ChecksumWritableBlobStoreFormat<PinnedTimestamps> PINNED_TIMESTAMPS_FORMAT = new ChecksumWritableBlobStoreFormat<>(
+        PINNED_TIMESTAMPS,
+        PinnedTimestamps::readFrom
+    );
+
+    private PinnedTimestamps pinnedTimestamps;
+
+    public RemotePinnedTimestamps(String clusterUUID, Compressor compressor) {
+        super(clusterUUID, compressor);
+        pinnedTimestamps = new PinnedTimestamps(new HashMap<>());
+    }
+
+    @Override
+    public BlobPathParameters getBlobPathParameters() {
+        return new BlobPathParameters(List.of(PINNED_TIMESTAMPS), PINNED_TIMESTAMPS);
+    }
+
+    @Override
+    public String getType() {
+        return PINNED_TIMESTAMPS;
+    }
+
+    @Override
+    public String generateBlobFileName() {
+        return this.blobFileName = String.join(DELIMITER, PINNED_TIMESTAMPS, RemoteStoreUtils.invertLong(System.currentTimeMillis()));
+    }
+
+    @Override
+    public InputStream serialize() throws IOException {
+        return PINNED_TIMESTAMPS_FORMAT.serialize(pinnedTimestamps, generateBlobFileName(), getCompressor()).streamInput();
+    }
+
+    @Override
+    public PinnedTimestamps deserialize(InputStream inputStream) throws IOException {
+        return PINNED_TIMESTAMPS_FORMAT.deserialize(blobName, Streams.readFully(inputStream));
+    }
+
+    public void setBlobFileName(String blobFileName) {
+        this.blobFileName = blobFileName;
+    }
+
+    public void setPinnedTimestamps(PinnedTimestamps pinnedTimestamps) {
+        this.pinnedTimestamps = pinnedTimestamps;
+    }
+
+    public PinnedTimestamps getPinnedTimestamps() {
+        return pinnedTimestamps;
+    }
+}
diff --git
a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteRoutingTableBlobStore.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteRoutingTableBlobStore.java
new file mode 100644
index 0000000000000..1a28c97dc8a77
--- /dev/null
+++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteRoutingTableBlobStore.java
@@ -0,0 +1,118 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.gateway.remote.model;
+
+import org.opensearch.common.blobstore.BlobPath;
+import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity;
+import org.opensearch.common.remote.RemoteWriteableBlobEntity;
+import org.opensearch.common.remote.RemoteWriteableEntity;
+import org.opensearch.common.remote.RemoteWriteableEntityBlobStore;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.gateway.remote.RemoteClusterStateUtils;
+import org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable;
+import org.opensearch.index.remote.RemoteStoreEnums;
+import org.opensearch.index.remote.RemoteStorePathStrategy;
+import org.opensearch.index.translog.transfer.BlobStoreTransferService;
+import org.opensearch.repositories.blobstore.BlobStoreRepository;
+import org.opensearch.threadpool.ThreadPool;
+
+import static org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable.INDEX_ROUTING_TABLE;
+
+/**
+ * Extends the RemoteClusterStateBlobStore to support {@link RemoteIndexRoutingTable}
+ *
+ * @param <IndexRoutingTable> which can be uploaded to / downloaded from blob store
+ * @param <U> The concrete class implementing {@link RemoteWriteableEntity} which is used as a wrapper for IndexRoutingTable entity.
+ */
+public class RemoteRoutingTableBlobStore<IndexRoutingTable, U extends AbstractClusterMetadataWriteableBlobEntity<IndexRoutingTable>> extends
+    RemoteWriteableEntityBlobStore<IndexRoutingTable, U> {
+
+    /**
+     * This setting is used to set the remote routing table store blob store path type strategy.
+     */
+    public static final Setting<RemoteStoreEnums.PathType> REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING = new Setting<>(
+        "cluster.remote_store.routing_table.path_type",
+        RemoteStoreEnums.PathType.HASHED_PREFIX.toString(),
+        RemoteStoreEnums.PathType::parseString,
+        Setting.Property.NodeScope,
+        Setting.Property.Dynamic
+    );
+
+    /**
+     * This setting is used to set the remote routing table store blob store path hash algorithm strategy.
+     * This setting will come to effect if the {@link #REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING}
+     * is either {@code HASHED_PREFIX} or {@code HASHED_INFIX}.
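+     *
+     * Editorial sketch only (the value shown is the documented default, not a recommendation): because both
+     * settings are dynamic, a runtime update flows through the consumers registered in the constructor below, e.g.
+     *
+     *   clusterSettings.applySettings(
+     *       Settings.builder().put("cluster.remote_store.routing_table.path_hash_algo", "FNV_1A_BASE64").build()
+     *   ); // triggers setPathHashAlgoSetting(...) on this store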
+     */
+    public static final Setting<RemoteStoreEnums.PathHashAlgorithm> REMOTE_ROUTING_TABLE_PATH_HASH_ALGO_SETTING = new Setting<>(
+        "cluster.remote_store.routing_table.path_hash_algo",
+        RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64.toString(),
+        RemoteStoreEnums.PathHashAlgorithm::parseString,
+        Setting.Property.NodeScope,
+        Setting.Property.Dynamic
+    );
+
+    private RemoteStoreEnums.PathType pathType;
+    private RemoteStoreEnums.PathHashAlgorithm pathHashAlgo;
+
+    public RemoteRoutingTableBlobStore(
+        BlobStoreTransferService blobStoreTransferService,
+        BlobStoreRepository blobStoreRepository,
+        String clusterName,
+        ThreadPool threadPool,
+        String executor,
+        ClusterSettings clusterSettings
+    ) {
+        super(
+            blobStoreTransferService,
+            blobStoreRepository,
+            clusterName,
+            threadPool,
+            executor,
+            RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN
+        );
+        this.pathType = clusterSettings.get(REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING);
+        this.pathHashAlgo = clusterSettings.get(REMOTE_ROUTING_TABLE_PATH_HASH_ALGO_SETTING);
+        clusterSettings.addSettingsUpdateConsumer(REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING, this::setPathTypeSetting);
+        clusterSettings.addSettingsUpdateConsumer(REMOTE_ROUTING_TABLE_PATH_HASH_ALGO_SETTING, this::setPathHashAlgoSetting);
+    }
+
+    @Override
+    public BlobPath getBlobPathForUpload(final RemoteWriteableBlobEntity<IndexRoutingTable> obj) {
+        assert obj.getBlobPathParameters().getPathTokens().size() == 1 : "Unexpected tokens in RemoteRoutingTableObject";
+        BlobPath indexRoutingPath = getBlobPathPrefix(obj.clusterUUID()).add(INDEX_ROUTING_TABLE);
+
+        BlobPath path = pathType.path(
+            RemoteStorePathStrategy.PathInput.builder()
+                .basePath(indexRoutingPath)
+                .indexUUID(String.join("", obj.getBlobPathParameters().getPathTokens()))
+                .build(),
+            pathHashAlgo
+        );
+        return path;
+    }
+
+    private void setPathTypeSetting(RemoteStoreEnums.PathType pathType) {
+        this.pathType = pathType;
+    }
+
+    private void setPathHashAlgoSetting(RemoteStoreEnums.PathHashAlgorithm pathHashAlgo) {
+        this.pathHashAlgo = pathHashAlgo;
+    }
+
+    // For testing only
+    protected RemoteStoreEnums.PathType getPathTypeSetting() {
+        return pathType;
+    }
+
+    // For testing only
+    protected RemoteStoreEnums.PathHashAlgorithm getPathHashAlgoSetting() {
+        return pathHashAlgo;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteStorePinnedTimestampsBlobStore.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteStorePinnedTimestampsBlobStore.java
new file mode 100644
index 0000000000000..2a65dd993d0af
--- /dev/null
+++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteStorePinnedTimestampsBlobStore.java
@@ -0,0 +1,43 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.gateway.remote.model; + +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.remote.RemoteWriteableBlobEntity; +import org.opensearch.common.remote.RemoteWriteableEntityBlobStore; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.threadpool.ThreadPool; + +/** + * Extends the RemoteClusterStateBlobStore to support {@link RemotePinnedTimestamps} + */ +public class RemoteStorePinnedTimestampsBlobStore extends RemoteWriteableEntityBlobStore< + RemotePinnedTimestamps.PinnedTimestamps, + RemotePinnedTimestamps> { + + public static final String PINNED_TIMESTAMPS_PATH_TOKEN = "pinned_timestamps"; + private final BlobStoreRepository blobStoreRepository; + + public RemoteStorePinnedTimestampsBlobStore( + BlobStoreTransferService blobStoreTransferService, + BlobStoreRepository blobStoreRepository, + String clusterName, + ThreadPool threadPool, + String executor + ) { + super(blobStoreTransferService, blobStoreRepository, clusterName, threadPool, executor, PINNED_TIMESTAMPS_PATH_TOKEN); + this.blobStoreRepository = blobStoreRepository; + } + + @Override + public BlobPath getBlobPathForUpload(final RemoteWriteableBlobEntity obj) { + return blobStoreRepository.basePath().add(PINNED_TIMESTAMPS_PATH_TOKEN); + } +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTemplatesMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTemplatesMetadata.java index 4513d35aef5e8..6ae8a533f9a21 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTemplatesMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTemplatesMetadata.java @@ -10,7 +10,7 @@ import org.opensearch.cluster.metadata.TemplatesMetadata; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.compress.Compressor; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -31,7 +31,7 @@ /** * Wrapper class for uploading/downloading {@link TemplatesMetadata} to/from remote blob store */ -public class RemoteTemplatesMetadata extends AbstractRemoteWritableBlobEntity { +public class RemoteTemplatesMetadata extends AbstractClusterMetadataWriteableBlobEntity { public static final String TEMPLATES_METADATA = "templates"; diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTransientSettingsMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTransientSettingsMetadata.java index fd0526f05d015..d4f3837f80084 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTransientSettingsMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTransientSettingsMetadata.java @@ -9,7 +9,7 @@ package org.opensearch.gateway.remote.model; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.common.settings.Settings; import org.opensearch.core.compress.Compressor; @@ -32,7 +32,7 @@ /** * Wrapper class for uploading/downloading transient {@link Settings} to/from remote blob store 
*/ -public class RemoteTransientSettingsMetadata extends AbstractRemoteWritableBlobEntity { +public class RemoteTransientSettingsMetadata extends AbstractClusterMetadataWriteableBlobEntity { public static final String TRANSIENT_SETTING_METADATA = "transient-settings"; diff --git a/server/src/main/java/org/opensearch/gateway/remote/routingtable/IndexRoutingTableHeader.java b/server/src/main/java/org/opensearch/gateway/remote/routingtable/IndexRoutingTableHeader.java deleted file mode 100644 index 5baea6adba0c7..0000000000000 --- a/server/src/main/java/org/opensearch/gateway/remote/routingtable/IndexRoutingTableHeader.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.gateway.remote.routingtable; - -import org.apache.lucene.codecs.CodecUtil; -import org.apache.lucene.index.CorruptIndexException; -import org.apache.lucene.index.IndexFormatTooNewException; -import org.apache.lucene.index.IndexFormatTooOldException; -import org.apache.lucene.store.InputStreamDataInput; -import org.apache.lucene.store.OutputStreamDataOutput; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; - -import java.io.EOFException; -import java.io.IOException; - -/** - * The stored header information for the individual index routing table - */ -public class IndexRoutingTableHeader implements Writeable { - - public static final String INDEX_ROUTING_HEADER_CODEC = "index_routing_header_codec"; - public static final int INITIAL_VERSION = 1; - public static final int CURRENT_VERSION = INITIAL_VERSION; - private final String indexName; - - public IndexRoutingTableHeader(String indexName) { - this.indexName = indexName; - } - - /** - * Reads the contents on the stream into the corresponding {@link IndexRoutingTableHeader} - * - * @param in streamInput - * @throws IOException exception thrown on failing to read from stream. - */ - public IndexRoutingTableHeader(StreamInput in) throws IOException { - try { - readHeaderVersion(in); - indexName = in.readString(); - } catch (EOFException e) { - throw new IOException("index routing header truncated", e); - } - } - - private void readHeaderVersion(final StreamInput in) throws IOException { - try { - CodecUtil.checkHeader(new InputStreamDataInput(in), INDEX_ROUTING_HEADER_CODEC, INITIAL_VERSION, CURRENT_VERSION); - } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException e) { - throw new IOException("index routing table header corrupted", e); - } - } - - /** - * Write the IndexRoutingTable to given stream. - * - * @param out stream to write - * @throws IOException exception thrown on failing to write to stream. 
- */ - public void writeTo(StreamOutput out) throws IOException { - try { - CodecUtil.writeHeader(new OutputStreamDataOutput(out), INDEX_ROUTING_HEADER_CODEC, CURRENT_VERSION); - out.writeString(indexName); - out.flush(); - } catch (IOException e) { - throw new IOException("Failed to write IndexRoutingTable header", e); - } - } - - public String getIndexName() { - return indexName; - } - -} diff --git a/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTable.java b/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTable.java index 17c55190da07f..46c5074c48eb8 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTable.java +++ b/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTable.java @@ -9,92 +9,106 @@ package org.opensearch.gateway.remote.routingtable; import org.opensearch.cluster.routing.IndexRoutingTable; -import org.opensearch.cluster.routing.IndexShardRoutingTable; -import org.opensearch.core.common.io.stream.BufferedChecksumStreamInput; -import org.opensearch.core.common.io.stream.BufferedChecksumStreamOutput; -import org.opensearch.core.common.io.stream.InputStreamStreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.common.io.Streams; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; +import org.opensearch.common.remote.BlobPathParameters; +import org.opensearch.core.compress.Compressor; import org.opensearch.core.index.Index; +import org.opensearch.gateway.remote.ClusterMetadataManifest; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.repositories.blobstore.ChecksumWritableBlobStoreFormat; -import java.io.EOFException; import java.io.IOException; import java.io.InputStream; +import java.util.List; + +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; /** * Remote store object for IndexRoutingTable */ -public class RemoteIndexRoutingTable implements Writeable { +public class RemoteIndexRoutingTable extends AbstractClusterMetadataWriteableBlobEntity { - private final IndexRoutingTable indexRoutingTable; + public static final String INDEX_ROUTING_TABLE = "index-routing"; + public static final String INDEX_ROUTING_METADATA_PREFIX = "indexRouting--"; + public static final String INDEX_ROUTING_FILE = "index_routing"; + private IndexRoutingTable indexRoutingTable; + private final Index index; + private long term; + private long version; + private BlobPathParameters blobPathParameters; + public static final ChecksumWritableBlobStoreFormat INDEX_ROUTING_TABLE_FORMAT = + new ChecksumWritableBlobStoreFormat<>("index-routing-table", IndexRoutingTable::readFrom); - public RemoteIndexRoutingTable(IndexRoutingTable indexRoutingTable) { + public RemoteIndexRoutingTable( + IndexRoutingTable indexRoutingTable, + String clusterUUID, + Compressor compressor, + long term, + long version + ) { + super(clusterUUID, compressor); + this.index = indexRoutingTable.getIndex(); this.indexRoutingTable = indexRoutingTable; + this.term = term; + this.version = version; } /** * Reads data from inputStream and creates RemoteIndexRoutingTable object with the {@link IndexRoutingTable} - * @param inputStream input stream with index routing data - * @param index index for the current routing data - * @throws IOException exception thrown on failing to read from stream. 
+ * @param blobName name of the blob, which contains the index routing data + * @param clusterUUID UUID of the cluster + * @param compressor Compressor object */ - public RemoteIndexRoutingTable(InputStream inputStream, Index index) throws IOException { - try { - try (BufferedChecksumStreamInput in = new BufferedChecksumStreamInput(new InputStreamStreamInput(inputStream), "assertion")) { - // Read the Table Header first and confirm the index - IndexRoutingTableHeader indexRoutingTableHeader = new IndexRoutingTableHeader(in); - assert indexRoutingTableHeader.getIndexName().equals(index.getName()); + public RemoteIndexRoutingTable(String blobName, String clusterUUID, Compressor compressor) { + super(clusterUUID, compressor); + this.index = null; + this.term = -1; + this.version = -1; + this.blobName = blobName; + } - int numberOfShardRouting = in.readVInt(); - IndexRoutingTable.Builder indicesRoutingTable = IndexRoutingTable.builder(index); - for (int idx = 0; idx < numberOfShardRouting; idx++) { - IndexShardRoutingTable indexShardRoutingTable = IndexShardRoutingTable.Builder.readFrom(in); - indicesRoutingTable.addIndexShard(indexShardRoutingTable); - } - verifyCheckSum(in); - indexRoutingTable = indicesRoutingTable.build(); - } - } catch (EOFException e) { - throw new IOException("Indices Routing table is corrupted", e); + @Override + public BlobPathParameters getBlobPathParameters() { + if (blobPathParameters == null) { + blobPathParameters = new BlobPathParameters(List.of(indexRoutingTable.getIndex().getUUID()), INDEX_ROUTING_FILE); } + return blobPathParameters; } - public IndexRoutingTable getIndexRoutingTable() { - return indexRoutingTable; + @Override + public String getType() { + return INDEX_ROUTING_TABLE; } - /** - * Writes {@link IndexRoutingTable} to the given stream - * @param streamOutput output stream to write - * @throws IOException exception thrown on failing to write to stream. 
- */ @Override - public void writeTo(StreamOutput streamOutput) throws IOException { - try { - BufferedChecksumStreamOutput out = new BufferedChecksumStreamOutput(streamOutput); - IndexRoutingTableHeader indexRoutingTableHeader = new IndexRoutingTableHeader(indexRoutingTable.getIndex().getName()); - indexRoutingTableHeader.writeTo(out); - out.writeVInt(indexRoutingTable.shards().size()); - for (IndexShardRoutingTable next : indexRoutingTable) { - IndexShardRoutingTable.Builder.writeTo(next, out); - } - out.writeLong(out.getChecksum()); - out.flush(); - } catch (IOException e) { - throw new IOException("Failed to write IndexRoutingTable to stream", e); + public String generateBlobFileName() { + if (blobFileName == null) { + blobFileName = String.join( + DELIMITER, + getBlobPathParameters().getFilePrefix(), + RemoteStoreUtils.invertLong(term), + RemoteStoreUtils.invertLong(version), + RemoteStoreUtils.invertLong(System.currentTimeMillis()) + ); } + return blobFileName; } - private void verifyCheckSum(BufferedChecksumStreamInput in) throws IOException { - long expectedChecksum = in.getChecksum(); - long readChecksum = in.readLong(); - if (readChecksum != expectedChecksum) { - throw new IOException( - "checksum verification failed - expected: 0x" - + Long.toHexString(expectedChecksum) - + ", got: 0x" - + Long.toHexString(readChecksum) - ); - } + @Override + public ClusterMetadataManifest.UploadedMetadata getUploadedMetadata() { + assert blobName != null; + assert index != null; + return new ClusterMetadataManifest.UploadedIndexMetadata(index.getName(), index.getUUID(), blobName, INDEX_ROUTING_METADATA_PREFIX); + } + + @Override + public InputStream serialize() throws IOException { + return INDEX_ROUTING_TABLE_FORMAT.serialize(indexRoutingTable, generateBlobFileName(), getCompressor()).streamInput(); + } + + @Override + public IndexRoutingTable deserialize(InputStream in) throws IOException { + return INDEX_ROUTING_TABLE_FORMAT.deserialize(blobName, Streams.readFully(in)); } } diff --git a/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteRoutingTableDiff.java b/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteRoutingTableDiff.java new file mode 100644 index 0000000000000..b3e0e9e5763b7 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteRoutingTableDiff.java @@ -0,0 +1,138 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.gateway.remote.routingtable;
+
+import org.opensearch.cluster.Diff;
+import org.opensearch.cluster.routing.IndexRoutingTable;
+import org.opensearch.cluster.routing.RoutingTable;
+import org.opensearch.cluster.routing.RoutingTableIncrementalDiff;
+import org.opensearch.common.io.Streams;
+import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity;
+import org.opensearch.common.remote.BlobPathParameters;
+import org.opensearch.core.compress.Compressor;
+import org.opensearch.gateway.remote.ClusterMetadataManifest;
+import org.opensearch.index.remote.RemoteStoreUtils;
+import org.opensearch.repositories.blobstore.ChecksumWritableBlobStoreFormat;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.List;
+
+import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER;
+
+/**
+ * Represents an incremental difference between {@link org.opensearch.cluster.routing.RoutingTable} objects that can be serialized and deserialized.
+ * This class is responsible for writing and reading the differences between RoutingTables to and from an input/output stream.
+ */
+public class RemoteRoutingTableDiff extends AbstractClusterMetadataWriteableBlobEntity<Diff<RoutingTable>> {
+
+    private final RoutingTableIncrementalDiff routingTableIncrementalDiff;
+
+    private long term;
+    private long version;
+
+    public static final String ROUTING_TABLE_DIFF = "routing-table-diff";
+
+    public static final String ROUTING_TABLE_DIFF_METADATA_PREFIX = "routingTableDiff--";
+
+    public static final String ROUTING_TABLE_DIFF_FILE = "routing_table_diff";
+    private static final String codec = "RemoteRoutingTableDiff";
+    public static final String ROUTING_TABLE_DIFF_PATH_TOKEN = "routing-table-diff";
+
+    public static final int VERSION = 1;
+
+    public static final ChecksumWritableBlobStoreFormat<RoutingTableIncrementalDiff> REMOTE_ROUTING_TABLE_DIFF_FORMAT =
+        new ChecksumWritableBlobStoreFormat<>(codec, RoutingTableIncrementalDiff::readFrom);
+
+    /**
+     * Constructs a new RemoteRoutingTableDiff with the given differences.
+     *
+     * @param routingTableIncrementalDiff a RoutingTableIncrementalDiff object containing the differences of {@link IndexRoutingTable}.
+     * @param clusterUUID the cluster UUID.
+     * @param compressor the compressor to be used.
+     * @param term the term of the routing table.
+     * @param version the version of the routing table.
+     */
+    public RemoteRoutingTableDiff(
+        RoutingTableIncrementalDiff routingTableIncrementalDiff,
+        String clusterUUID,
+        Compressor compressor,
+        long term,
+        long version
+    ) {
+        super(clusterUUID, compressor);
+        this.routingTableIncrementalDiff = routingTableIncrementalDiff;
+        this.term = term;
+        this.version = version;
+    }
+
+    /**
+     * Constructs a new RemoteRoutingTableDiff with the given blob name, cluster UUID, and compressor.
+     *
+     * @param blobName the name of the blob.
+     * @param clusterUUID the cluster UUID.
+     * @param compressor the compressor to be used.
+     */
+    public RemoteRoutingTableDiff(String blobName, String clusterUUID, Compressor compressor) {
+        super(clusterUUID, compressor);
+        this.routingTableIncrementalDiff = null;
+        this.blobName = blobName;
+    }
+
+    /**
+     * Gets the incremental diff of {@link IndexRoutingTable} entries.
+     *
+     * @return the differences wrapped as a {@link Diff} of the {@link RoutingTable}.
+     */
+    public Diff<RoutingTable> getDiffs() {
+        return routingTableIncrementalDiff;
+    }
+
+    @Override
+    public BlobPathParameters getBlobPathParameters() {
+        return new BlobPathParameters(List.of(ROUTING_TABLE_DIFF_PATH_TOKEN), ROUTING_TABLE_DIFF_METADATA_PREFIX);
+    }
+
+    @Override
+    public String getType() {
+        return ROUTING_TABLE_DIFF;
+    }
+
+    @Override
+    public String generateBlobFileName() {
+        if (blobFileName == null) {
+            blobFileName = String.join(
+                DELIMITER,
+                getBlobPathParameters().getFilePrefix(),
+                RemoteStoreUtils.invertLong(term),
+                RemoteStoreUtils.invertLong(version),
+                RemoteStoreUtils.invertLong(System.currentTimeMillis())
+            );
+        }
+        return blobFileName;
+    }
+
+    @Override
+    public ClusterMetadataManifest.UploadedMetadata getUploadedMetadata() {
+        assert blobName != null;
+        return new ClusterMetadataManifest.UploadedMetadataAttribute(ROUTING_TABLE_DIFF_FILE, blobName);
+    }
+
+    @Override
+    public InputStream serialize() throws IOException {
+        assert routingTableIncrementalDiff != null;
+        return REMOTE_ROUTING_TABLE_DIFF_FORMAT.serialize(routingTableIncrementalDiff, generateBlobFileName(), getCompressor())
+            .streamInput();
+    }
+
+    @Override
+    public Diff<RoutingTable> deserialize(InputStream in) throws IOException {
+        return REMOTE_ROUTING_TABLE_DIFF_FORMAT.deserialize(blobName, Streams.readFully(in));
+    }
+}
diff --git a/server/src/main/java/org/opensearch/http/HttpChannel.java b/server/src/main/java/org/opensearch/http/HttpChannel.java
index ed20ec89a9099..7048f08faff9f 100644
--- a/server/src/main/java/org/opensearch/http/HttpChannel.java
+++ b/server/src/main/java/org/opensearch/http/HttpChannel.java
@@ -77,7 +77,7 @@ default void handleException(Exception ex) {}
 
     /**
      * Returns the contextual property associated with this specific HTTP channel (the
-     * implementation of how such properties are managed depends on the the particular
+     * implementation of how such properties are managed depends on the particular
     * transport engine).
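Both generateBlobFileName() implementations above join an inverted term, version, and timestamp so that newer blobs sort lexicographically before older ones in a plain listing. A sketch of that ordering trick, assuming RemoteStoreUtils.invertLong zero-pads Long.MAX_VALUE minus the value to a fixed width and that DELIMITER is a double underscore (both are assumptions about helpers not shown in this hunk):

    import java.util.Locale;

    public final class BlobNameSketch {
        // Assumed behavior of RemoteStoreUtils.invertLong: larger inputs map to
        // lexicographically smaller, fixed-width strings.
        static String invertLong(long value) {
            if (value < 0) {
                throw new IllegalArgumentException("value must be non-negative");
            }
            return String.format(Locale.ROOT, "%019d", Long.MAX_VALUE - value);
        }

        public static void main(String[] args) {
            String delimiter = "__"; // assumed value of RemoteClusterStateUtils.DELIMITER
            String older = String.join(delimiter, "routingTableDiff--", invertLong(4), invertLong(10), invertLong(1_000L));
            String newer = String.join(delimiter, "routingTableDiff--", invertLong(5), invertLong(11), invertLong(2_000L));
            // The newer blob name compares lexicographically smaller, so it lists first in ascending order.
            System.out.println(newer.compareTo(older) < 0); // prints: true
        }
    }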
* * @param name the name of the property diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index 4c494a6b35153..dc1bf94662385 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -48,6 +48,7 @@ import org.opensearch.common.CheckedFunction; import org.opensearch.common.SetOnce; import org.opensearch.common.TriFunction; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Setting; @@ -66,6 +67,7 @@ import org.opensearch.index.cache.query.DisabledQueryCache; import org.opensearch.index.cache.query.IndexQueryCache; import org.opensearch.index.cache.query.QueryCache; +import org.opensearch.index.compositeindex.CompositeIndexSettings; import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.EngineConfigFactory; import org.opensearch.index.engine.EngineFactory; @@ -95,8 +97,8 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -107,8 +109,6 @@ import java.util.function.Function; import java.util.function.Supplier; -import static org.apache.logging.log4j.util.Strings.toRootUpperCase; - /** * IndexModule represents the central extension point for index level custom implementations like: *
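The IndexModule hunk above drops the static import of Log4j's toRootUpperCase in favor of java.util.Locale, suggesting casing now goes through String.toUpperCase(Locale.ROOT) directly. A small sketch of why root-locale casing matters for identifier-style strings (the Turkish-locale output is the classic failure mode, shown here for illustration only):

    import java.util.Locale;

    public final class RootLocaleCasing {
        public static void main(String[] args) {
            String storeType = "niofs";
            // Locale.ROOT gives stable, locale-independent results for ASCII identifiers.
            System.out.println(storeType.toUpperCase(Locale.ROOT)); // NIOFS
            // Under a Turkish locale, 'i' upper-cases to dotted capital I, breaking lookups.
            System.out.println(storeType.toUpperCase(Locale.forLanguageTag("tr"))); // NİOFS
        }
    }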