From eeff6184519e5479eebacabfaa23cb7bbc01dcdc Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 19 Feb 2024 21:50:16 +0100 Subject: [PATCH 01/49] Save allocations and copying in TimeSeriesIdFieldMapper#buildTsidHash (#105582) No point in copying the bytes multiple times here. Just presize the array correctly (at most wasting a single byte) and serialize into it. Saving a couple GB of allocations during the TSDB rally track indexing step. --- .../common/io/stream/StreamOutput.java | 2 +- .../index/mapper/TimeSeriesIdFieldMapper.java | 25 +++++++++++++------ 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index b67879510b108..69a5135215eba 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -216,7 +216,7 @@ public void writeVInt(int i) throws IOException { writeBytes(buffer, 0, index); } - private static int putVInt(byte[] buffer, int i, int off) { + public static int putVInt(byte[] buffer, int i, int off) { if (Integer.numberOfLeadingZeros(i) >= 25) { buffer[off] = (byte) i; return 1; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java index fb26debab2acc..1ee7caff497ad 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.hash.MurmurHash3; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.util.ByteUtils; import org.elasticsearch.core.Nullable; @@ -208,6 +209,12 @@ public BytesReference buildLegacyTsid() throws IOException { } } + private static final int MAX_HASH_LEN_BYTES = 2; + + static { + assert MAX_HASH_LEN_BYTES == StreamOutput.putVInt(new byte[2], tsidHashLen(MAX_DIMENSIONS), 0); + } + /** * Here we build the hash of the tsid using a similarity function so that we have a result * with the following pattern: @@ -219,11 +226,13 @@ public BytesReference buildLegacyTsid() throws IOException { * The idea is to be able to place 'similar' time series close to each other. Two time series * are considered 'similar' if they share the same dimensions (names and values). 
*/ - public BytesReference buildTsidHash() throws IOException { + public BytesReference buildTsidHash() { // NOTE: hash all dimension field names int numberOfDimensions = Math.min(MAX_DIMENSIONS, dimensions.size()); - int tsidHashIndex = 0; - byte[] tsidHash = new byte[16 + 16 + 4 * numberOfDimensions]; + int len = tsidHashLen(numberOfDimensions); + // either one or two bytes are occupied by the vint since we're bounded by #MAX_DIMENSIONS + byte[] tsidHash = new byte[MAX_HASH_LEN_BYTES + len]; + int tsidHashIndex = StreamOutput.putVInt(tsidHash, len, 0); tsidHasher.reset(); for (final Dimension dimension : dimensions) { @@ -258,11 +267,11 @@ public BytesReference buildTsidHash() throws IOException { } tsidHashIndex = writeHash128(tsidHasher.digestHash(), tsidHash, tsidHashIndex); - assert tsidHashIndex == tsidHash.length; - try (BytesStreamOutput out = new BytesStreamOutput(tsidHash.length)) { - out.writeBytesRef(new BytesRef(tsidHash, 0, tsidHash.length)); - return out.bytes(); - } + return new BytesArray(tsidHash, 0, tsidHashIndex); + } + + private static int tsidHashLen(int numberOfDimensions) { + return 16 + 16 + 4 * numberOfDimensions; } private int writeHash128(final MurmurHash3.Hash128 hash128, byte[] buffer, int tsidHashIndex) { From 67e36f176c7c1802be113b1287e81b9dfce63fef Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 19 Feb 2024 21:52:54 +0100 Subject: [PATCH 02/49] Speedup deserialization in InternalAggregations (#105596) We can use the faster list reader here because the category is a constant. --- .../elasticsearch/search/aggregations/InternalAggregations.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java index 9467c5b40e83e..b65f6b01de348 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java @@ -165,7 +165,7 @@ public static InternalAggregations from(List aggregations) } public static InternalAggregations readFrom(StreamInput in) throws IOException { - return from(in.readCollectionAsList(stream -> stream.readNamedWriteable(InternalAggregation.class))); + return from(in.readNamedWriteableCollectionAsList(InternalAggregation.class)); } @Override From 61264682f5c05bb97559c60d34ee45ff044637be Mon Sep 17 00:00:00 2001 From: Felix Barnsteiner Date: Tue, 20 Feb 2024 07:57:44 +0100 Subject: [PATCH 03/49] Fix parsing of flattened fields within subobjects: false (#105373) --- docs/changelog/105373.yaml | 5 ++ .../index/mapper/DocumentParser.java | 3 +- .../index/mapper/DocumentParserTests.java | 33 +++++++++++++ .../index/mapper/DynamicTemplatesTests.java | 47 +++++++++++++++++++ 4 files changed, 87 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/105373.yaml diff --git a/docs/changelog/105373.yaml b/docs/changelog/105373.yaml new file mode 100644 index 0000000000000..f9d3c718f7ae3 --- /dev/null +++ b/docs/changelog/105373.yaml @@ -0,0 +1,5 @@ +pr: 105373 +summary: "Fix parsing of flattened fields within subobjects: false" +area: Mapping +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 58ccd6025013f..9a0e391102708 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ 
b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -455,11 +455,12 @@ private static void parseObject(final DocumentParserContext context, String curr private static void doParseObject(DocumentParserContext context, String currentFieldName, Mapper objectMapper) throws IOException { context.path().add(currentFieldName); + boolean withinLeafObject = context.path().isWithinLeafObject(); if (objectMapper instanceof ObjectMapper objMapper && objMapper.subobjects() == false) { context.path().setWithinLeafObject(true); } parseObjectOrField(context, objectMapper); - context.path().setWithinLeafObject(false); + context.path().setWithinLeafObject(withinLeafObject); context.path().remove(); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java index ed2efb4728b8d..d3dd585788867 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java @@ -2273,6 +2273,39 @@ public void testSubobjectsFalseParentDynamicFalse() throws Exception { assertNull(doc.dynamicMappingsUpdate()); } + public void testSubobjectsFalseFlattened() throws Exception { + DocumentMapper mapper = createDocumentMapper(mapping(b -> { + b.startObject("attributes"); + { + b.field("dynamic", false); + b.field("subobjects", false); + b.startObject("properties"); + { + b.startObject("simple.attribute"); + b.field("type", "keyword"); + b.endObject(); + b.startObject("complex.attribute"); + b.field("type", "flattened"); + b.endObject(); + } + b.endObject(); + } + b.endObject(); + })); + ParsedDocument doc = mapper.parse(source(""" + { + "attributes": { + "complex.attribute": { + "foo" : "bar" + }, + "simple.attribute": "foo" + } + } + """)); + assertNotNull(doc.rootDoc().getField("attributes.complex.attribute")); + assertNotNull(doc.rootDoc().getField("attributes.simple.attribute")); + } + public void testWriteToFieldAlias() throws Exception { DocumentMapper mapper = createDocumentMapper(mapping(b -> { b.startObject("alias-field"); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java index 07bcc6d564bc7..38960597647e9 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java @@ -1807,6 +1807,53 @@ public void testSubobjectsFalseDocWithEmptyObject() throws IOException { assertFalse(leaf.subobjects()); } + public void testSubobjectsFalseFlattened() throws IOException { + String mapping = """ + { + "_doc": { + "properties": { + "attributes": { + "type": "object", + "subobjects": false + } + }, + "dynamic_templates": [ + { + "test": { + "path_match": "attributes.*", + "match_mapping_type": "object", + "mapping": { + "type": "flattened" + } + } + } + ] + } + } + """; + String docJson = """ + { + "attributes": { + "complex.attribute": { + "a": "b" + }, + "foo.bar": "baz" + } + } + """; + + MapperService mapperService = createMapperService(mapping); + ParsedDocument parsedDoc = mapperService.documentMapper().parse(source(docJson)); + merge(mapperService, dynamicMapping(parsedDoc.dynamicMappingsUpdate())); + + Mapper fooBarMapper = mapperService.documentMapper().mappers().getMapper("attributes.foo.bar"); + assertNotNull(fooBarMapper); + assertEquals("text", 
fooBarMapper.typeName()); + Mapper fooStructuredMapper = mapperService.documentMapper().mappers().getMapper("attributes.complex.attribute"); + assertNotNull(fooStructuredMapper); + assertEquals("flattened", fooStructuredMapper.typeName()); + } + public void testMatchWithArrayOfFieldNames() throws IOException { String mapping = """ { From 369096365cfebfc947874631fc269ea94c8af8cf Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 20 Feb 2024 08:43:18 +0000 Subject: [PATCH 04/49] Expand docs about max-shards-per-node (#105607) Adds a little more detail on what sorts of problems may occur if you exceed the default limits. --- .../how-to/size-your-shards.asciidoc | 9 +++ docs/reference/modules/cluster/misc.asciidoc | 64 +++++++++++-------- 2 files changed, 47 insertions(+), 26 deletions(-) diff --git a/docs/reference/how-to/size-your-shards.asciidoc b/docs/reference/how-to/size-your-shards.asciidoc index 8b631dbbaa5ce..4e2e9e0061b31 100644 --- a/docs/reference/how-to/size-your-shards.asciidoc +++ b/docs/reference/how-to/size-your-shards.asciidoc @@ -221,6 +221,15 @@ GET _cat/shards?v=true ---- // TEST[setup:my_index] +[discrete] +[[shard-count-per-node-recommendation]] +==== Add enough nodes to stay within the cluster shard limits + +The <> prevent creation of more than +1000 non-frozen shards per node, and 3000 frozen shards per dedicated frozen +node. Make sure you have enough nodes of each type in your cluster to handle +the number of shards you need. + [discrete] [[field-count-recommendation]] ==== Allow enough heap for field mappers and overheads diff --git a/docs/reference/modules/cluster/misc.asciidoc b/docs/reference/modules/cluster/misc.asciidoc index acaf2dea056fd..7eb1cf357498f 100644 --- a/docs/reference/modules/cluster/misc.asciidoc +++ b/docs/reference/modules/cluster/misc.asciidoc @@ -24,35 +24,46 @@ API can make the cluster read-write again. [discrete] [[cluster-shard-limit]] -==== Cluster shard limit +==== Cluster shard limits -There is a soft limit on the number of shards in a cluster, based on the number -of nodes in the cluster. This is intended to prevent operations which may -unintentionally destabilize the cluster. +There is a limit on the number of shards in a cluster, based on the number of +nodes in the cluster. This is intended to prevent a runaway process from +creating too many shards which can harm performance and in extreme cases may +destabilize your cluster. -IMPORTANT: This limit is intended as a safety net, not a sizing recommendation. The -exact number of shards your cluster can safely support depends on your hardware -configuration and workload, but should remain well below this limit in almost -all cases, as the default limit is set quite high. +[IMPORTANT] +==== -If an operation, such as creating a new index, restoring a snapshot of an index, -or opening a closed index would lead to the number of shards in the cluster -going over this limit, the operation will fail with an error indicating the -shard limit. +These limits are intended as a safety net to protect against runaway shard +creation and are not a sizing recommendation. The exact number of shards your +cluster can safely support depends on your hardware configuration and workload, +and may be smaller than the default limits. 
-If the cluster is already over the limit, due to changes in node membership or -setting changes, all operations that create or open indices will fail until -either the limit is increased as described below, or some indices are -<> or <> to bring the -number of shards below the limit. +We do not recommend increasing these limits beyond the defaults. Clusters with +more shards may appear to run well in normal operation, but may take a very +long time to recover from temporary disruptions such as a network partition or +an unexpected node restart, and may encounter problems when performing +maintenance activities such as a rolling restart or upgrade. -The cluster shard limit defaults to 1,000 shards per non-frozen data node for +==== + +If an operation, such as creating a new index, restoring a snapshot of an +index, or opening a closed index would lead to the number of shards in the +cluster going over this limit, the operation will fail with an error indicating +the shard limit. To resolve this, either scale out your cluster by adding +nodes, or <> to bring the number of +shards below the limit. + +If a cluster is already over the limit, perhaps due to changes in node +membership or setting changes, all operations that create or open indices will +fail. + +The cluster shard limit defaults to 1000 shards per non-frozen data node for normal (non-frozen) indices and 3000 shards per frozen data node for frozen -indices. -Both primary and replica shards of all open indices count toward the limit, -including unassigned shards. -For example, an open index with 5 primary shards and 2 replicas counts as 15 shards. -Closed indices do not contribute to the shard count. +indices. Both primary and replica shards of all open indices count toward the +limit, including unassigned shards. For example, an open index with 5 primary +shards and 2 replicas counts as 15 shards. Closed indices do not contribute to +the shard count. You can dynamically adjust the cluster shard limit with the following setting: @@ -99,12 +110,13 @@ For example, a cluster with a `cluster.max_shards_per_node.frozen` setting of `100` and three frozen data nodes has a frozen shard limit of 300. If the cluster already contains 296 shards, {es} rejects any request that adds five or more frozen shards to the cluster. +-- -NOTE: These setting do not limit shards for individual nodes. To limit the -number of shards for each node, use the +NOTE: These limits only apply to actions which create shards and do not limit +the number of shards assigned to each node. To limit the number of shards +assigned to each node, use the <> setting. 
--- [discrete] [[user-defined-data]] From b6c3cef82936e49854fd652c4ba57cb4e14d17e1 Mon Sep 17 00:00:00 2001 From: Jedr Blaszyk Date: Tue, 20 Feb 2024 11:18:16 +0100 Subject: [PATCH 05/49] [Connector API] Bugfix: support list type in filtering advanced snippet value (#105633) --- docs/changelog/105633.yaml | 6 ++ .../332_connector_update_filtering.yml | 13 +++- .../filtering/FilteringAdvancedSnippet.java | 29 ++++---- .../connector/ConnectorFilteringTests.java | 67 +++++++++++++++++++ .../application/connector/ConnectorTests.java | 18 ++++- 5 files changed, 116 insertions(+), 17 deletions(-) create mode 100644 docs/changelog/105633.yaml diff --git a/docs/changelog/105633.yaml b/docs/changelog/105633.yaml new file mode 100644 index 0000000000000..b19ec67f4602a --- /dev/null +++ b/docs/changelog/105633.yaml @@ -0,0 +1,6 @@ +pr: 105633 +summary: "[Connector API] Bugfix: support list type in filtering advenced snippet\ + \ value" +area: Application +type: bug +issues: [] diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/332_connector_update_filtering.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/332_connector_update_filtering.yml index c5634365db3ec..a693ba5431d4b 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/332_connector_update_filtering.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/332_connector_update_filtering.yml @@ -23,7 +23,10 @@ setup: advanced_snippet: created_at: "2023-05-25T12:30:00.000Z" updated_at: "2023-05-25T12:30:00.000Z" - value: {} + value: + - tables: + - some_table + query: 'SELECT id, st_geohash(coordinates) FROM my_db.some_table;' rules: - created_at: "2023-05-25T12:30:00.000Z" field: _ @@ -41,7 +44,13 @@ setup: advanced_snippet: created_at: "2023-05-25T12:30:00.000Z" updated_at: "2023-05-25T12:30:00.000Z" - value: {} + value: + - tables: + - some_table + query: 'SELECT id, st_geohash(coordinates) FROM my_db.some_table;' + - tables: + - another_table + query: 'SELECT id, st_geohash(coordinates) FROM my_db.another_table;' rules: - created_at: "2023-05-25T12:30:00.000Z" field: _ diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringAdvancedSnippet.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringAdvancedSnippet.java index 480eaf91bb23b..384fbc7bb5340 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringAdvancedSnippet.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringAdvancedSnippet.java @@ -15,12 +15,12 @@ import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParseException; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.application.connector.ConnectorUtils; import java.io.IOException; import java.time.Instant; -import java.util.Map; import java.util.Objects; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; @@ -34,18 +34,14 @@ public class FilteringAdvancedSnippet implements Writeable, ToXContentObject { private final Instant advancedSnippetCreatedAt; private final Instant advancedSnippetUpdatedAt; - private final 
Map<String, Object> advancedSnippetValue;
+    private final Object advancedSnippetValue;
 
     /**
      * @param advancedSnippetCreatedAt The creation timestamp of the advanced snippet.
      * @param advancedSnippetUpdatedAt The update timestamp of the advanced snippet.
      * @param advancedSnippetValue The value of the advanced snippet.
      */
-    private FilteringAdvancedSnippet(
-        Instant advancedSnippetCreatedAt,
-        Instant advancedSnippetUpdatedAt,
-        Map<String, Object> advancedSnippetValue
-    ) {
+    private FilteringAdvancedSnippet(Instant advancedSnippetCreatedAt, Instant advancedSnippetUpdatedAt, Object advancedSnippetValue) {
         this.advancedSnippetCreatedAt = advancedSnippetCreatedAt;
         this.advancedSnippetUpdatedAt = advancedSnippetUpdatedAt;
         this.advancedSnippetValue = advancedSnippetValue;
@@ -54,7 +50,7 @@ private FilteringAdvancedSnippet(
     public FilteringAdvancedSnippet(StreamInput in) throws IOException {
         this.advancedSnippetCreatedAt = in.readInstant();
         this.advancedSnippetUpdatedAt = in.readInstant();
-        this.advancedSnippetValue = in.readMap(StreamInput::readString, StreamInput::readGenericValue);
+        this.advancedSnippetValue = in.readGenericValue();
     }
 
     private static final ParseField CREATED_AT_FIELD = new ParseField("created_at");
@@ -67,7 +63,7 @@ public FilteringAdvancedSnippet(StreamInput in) throws IOException {
         true,
         args -> new Builder().setAdvancedSnippetCreatedAt((Instant) args[0])
             .setAdvancedSnippetUpdatedAt((Instant) args[1])
-            .setAdvancedSnippetValue((Map<String, Object>) args[2])
+            .setAdvancedSnippetValue(args[2])
             .build()
     );
 
@@ -84,7 +80,14 @@ public FilteringAdvancedSnippet(StreamInput in) throws IOException {
         UPDATED_AT_FIELD,
         ObjectParser.ValueType.STRING
     );
-    PARSER.declareField(constructorArg(), (p, c) -> p.map(), VALUE_FIELD, ObjectParser.ValueType.OBJECT);
+    PARSER.declareField(constructorArg(), (p, c) -> {
+        if (p.currentToken() == XContentParser.Token.START_ARRAY) {
+            return p.list();
+        } else if (p.currentToken() == XContentParser.Token.START_OBJECT) {
+            return p.map();
+        }
+        throw new XContentParseException("Unsupported token [" + p.currentToken() + "]. 
Expected an array or an object."); + }, VALUE_FIELD, ObjectParser.ValueType.OBJECT_ARRAY); } @Override @@ -107,7 +110,7 @@ public static FilteringAdvancedSnippet fromXContent(XContentParser parser) throw public void writeTo(StreamOutput out) throws IOException { out.writeInstant(advancedSnippetCreatedAt); out.writeInstant(advancedSnippetUpdatedAt); - out.writeMap(advancedSnippetValue, StreamOutput::writeString, StreamOutput::writeGenericValue); + out.writeGenericValue(advancedSnippetValue); } @Override @@ -129,7 +132,7 @@ public static class Builder { private Instant advancedSnippetCreatedAt; private Instant advancedSnippetUpdatedAt; - private Map advancedSnippetValue; + private Object advancedSnippetValue; public Builder setAdvancedSnippetCreatedAt(Instant advancedSnippetCreatedAt) { this.advancedSnippetCreatedAt = advancedSnippetCreatedAt; @@ -141,7 +144,7 @@ public Builder setAdvancedSnippetUpdatedAt(Instant advancedSnippetUpdatedAt) { return this; } - public Builder setAdvancedSnippetValue(Map advancedSnippetValue) { + public Builder setAdvancedSnippetValue(Object advancedSnippetValue) { this.advancedSnippetValue = advancedSnippetValue; return this; } diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorFilteringTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorFilteringTests.java index e65236e90d928..20c2200b26f2b 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorFilteringTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorFilteringTests.java @@ -110,6 +110,73 @@ public void testToXContent() throws IOException { } + public void testToXContent_WithAdvancedSnippetPopulated() throws IOException { + String content = XContentHelper.stripWhitespace(""" + { + "active": { + "advanced_snippet": { + "created_at": "2023-11-09T15:13:08.231Z", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": [ + {"service": "Incident", "query": "user_nameSTARTSWITHa"}, + {"service": "Incident", "query": "user_nameSTARTSWITHj"} + ] + }, + "rules": [ + { + "created_at": "2023-11-09T15:13:08.231Z", + "field": "_", + "id": "DEFAULT", + "order": 0, + "policy": "include", + "rule": "regex", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": ".*" + } + ], + "validation": { + "errors": [], + "state": "valid" + } + }, + "domain": "DEFAULT", + "draft": { + "advanced_snippet": { + "created_at": "2023-11-09T15:13:08.231Z", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": {} + }, + "rules": [ + { + "created_at": "2023-11-09T15:13:08.231Z", + "field": "_", + "id": "DEFAULT", + "order": 0, + "policy": "include", + "rule": "regex", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": ".*" + } + ], + "validation": { + "errors": [], + "state": "valid" + } + } + } + """); + + ConnectorFiltering filtering = ConnectorFiltering.fromXContentBytes(new BytesArray(content), XContentType.JSON); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(filtering, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + ConnectorFiltering parsed; + try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) { + parsed = ConnectorFiltering.fromXContent(parser); + } + assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON); + + } + private void 
assertTransportSerialization(ConnectorFiltering testInstance) throws IOException { ConnectorFiltering deserializedInstance = copyInstance(testInstance); assertNotSame(testInstance, deserializedInstance); diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java index 0fd590a4ce106..5525b4694ef04 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java @@ -147,7 +147,14 @@ public void testToXContent() throws IOException { "advanced_snippet":{ "created_at":"2023-11-09T15:13:08.231Z", "updated_at":"2023-11-09T15:13:08.231Z", - "value":{} + "value":[ + { + "tables": [ + "some_table" + ], + "query": "SELECT id, st_geohash(coordinates) FROM my_db.some_table;" + } + ] }, "rules":[ { @@ -171,7 +178,14 @@ public void testToXContent() throws IOException { "advanced_snippet":{ "created_at":"2023-11-09T15:13:08.231Z", "updated_at":"2023-11-09T15:13:08.231Z", - "value":{} + "value":[ + { + "tables": [ + "some_table" + ], + "query": "SELECT id, st_geohash(coordinates) FROM my_db.some_table;" + } + ] }, "rules":[ { From 1b7f3a0eae0e1f936f3c23e9e61548e5594adcff Mon Sep 17 00:00:00 2001 From: Carlos Delgado <6339205+carlosdelest@users.noreply.github.com> Date: Tue, 20 Feb 2024 12:01:04 +0100 Subject: [PATCH 06/49] Extract common inference ServiceSettings methods (#105553) --- .../inference/SemanticTextModelSettings.java | 91 +++++++++++++++++++ .../inference/ServiceSettings.java | 19 ++++ .../inference}/SimilarityMeasure.java | 7 +- .../inference/services/ServiceUtils.java | 2 +- .../services/cohere/CohereService.java | 2 +- .../cohere/CohereServiceSettings.java | 2 +- .../HuggingFaceServiceSettings.java | 2 +- .../services/openai/OpenAiService.java | 2 +- .../OpenAiEmbeddingsServiceSettings.java | 2 +- ...lingualE5SmallInternalServiceSettings.java | 5 + .../TextEmbeddingInternalService.java | 3 +- .../cohere/CohereServiceSettingsTests.java | 2 +- .../CohereEmbeddingsModelTests.java | 2 +- .../CohereEmbeddingsServiceSettingsTests.java | 2 +- .../HuggingFaceServiceSettingsTests.java | 2 +- .../services/openai/OpenAiServiceTests.java | 2 +- .../OpenAiEmbeddingsModelTests.java | 2 +- .../OpenAiEmbeddingsServiceSettingsTests.java | 2 +- 18 files changed, 133 insertions(+), 18 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/inference/SemanticTextModelSettings.java rename {x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common => server/src/main/java/org/elasticsearch/inference}/SimilarityMeasure.java (68%) diff --git a/server/src/main/java/org/elasticsearch/inference/SemanticTextModelSettings.java b/server/src/main/java/org/elasticsearch/inference/SemanticTextModelSettings.java new file mode 100644 index 0000000000000..78773bfb72a95 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/inference/SemanticTextModelSettings.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.inference; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** + * Model settings that are interesting for semantic_text inference fields. This class is used to serialize common + * ServiceSettings methods when building inference for semantic_text fields. + * + * @param taskType task type + * @param inferenceId inference id + * @param dimensions number of dimensions. May be null if not applicable + * @param similarity similarity used by the service. May be null if not applicable + */ +public record SemanticTextModelSettings( + TaskType taskType, + String inferenceId, + @Nullable Integer dimensions, + @Nullable SimilarityMeasure similarity +) { + + public static final String NAME = "model_settings"; + private static final ParseField TASK_TYPE_FIELD = new ParseField("task_type"); + private static final ParseField INFERENCE_ID_FIELD = new ParseField("inference_id"); + private static final ParseField DIMENSIONS_FIELD = new ParseField("dimensions"); + private static final ParseField SIMILARITY_FIELD = new ParseField("similarity"); + + public SemanticTextModelSettings(TaskType taskType, String inferenceId, Integer dimensions, SimilarityMeasure similarity) { + Objects.requireNonNull(taskType, "task type must not be null"); + Objects.requireNonNull(inferenceId, "inferenceId must not be null"); + this.taskType = taskType; + this.inferenceId = inferenceId; + this.dimensions = dimensions; + this.similarity = similarity; + } + + public SemanticTextModelSettings(Model model) { + this( + model.getTaskType(), + model.getInferenceEntityId(), + model.getServiceSettings().dimensions(), + model.getServiceSettings().similarity() + ); + } + + public static SemanticTextModelSettings parse(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, args -> { + TaskType taskType = TaskType.fromString((String) args[0]); + String inferenceId = (String) args[1]; + Integer dimensions = (Integer) args[2]; + SimilarityMeasure similarity = args[3] == null ? 
null : SimilarityMeasure.fromString((String) args[3]);
+        return new SemanticTextModelSettings(taskType, inferenceId, dimensions, similarity);
+    });
+    static {
+        PARSER.declareString(ConstructingObjectParser.constructorArg(), TASK_TYPE_FIELD);
+        PARSER.declareString(ConstructingObjectParser.constructorArg(), INFERENCE_ID_FIELD);
+        PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), DIMENSIONS_FIELD);
+        PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), SIMILARITY_FIELD);
+    }
+
+    public Map<String, Object> asMap() {
+        Map<String, Object> attrsMap = new HashMap<>();
+        attrsMap.put(TASK_TYPE_FIELD.getPreferredName(), taskType.toString());
+        attrsMap.put(INFERENCE_ID_FIELD.getPreferredName(), inferenceId);
+        if (dimensions != null) {
+            attrsMap.put(DIMENSIONS_FIELD.getPreferredName(), dimensions);
+        }
+        if (similarity != null) {
+            attrsMap.put(SIMILARITY_FIELD.getPreferredName(), similarity);
+        }
+        return Map.of(NAME, attrsMap);
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/inference/ServiceSettings.java b/server/src/main/java/org/elasticsearch/inference/ServiceSettings.java
index 6fed8bb7239e5..2e745635d0fd9 100644
--- a/server/src/main/java/org/elasticsearch/inference/ServiceSettings.java
+++ b/server/src/main/java/org/elasticsearch/inference/ServiceSettings.java
@@ -17,4 +17,23 @@ public interface ServiceSettings extends ToXContentObject, VersionedNamedWriteab
      * Returns a {@link ToXContentObject} that only writes the exposed fields. Any hidden fields are not written.
      */
     ToXContentObject getFilteredXContentObject();
+
+    /**
+     * Similarity used in the service. Will be null if not applicable.
+     *
+     * @return similarity
+     */
+    default SimilarityMeasure similarity() {
+        return null;
+    }
+
+    /**
+     * Number of dimensions the service works with. Will be null if not applicable.
+     *
+     * @return number of dimensions
+     */
+    default Integer dimensions() {
+        return null;
+    }
+
 }
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/SimilarityMeasure.java b/server/src/main/java/org/elasticsearch/inference/SimilarityMeasure.java
similarity index 68%
rename from x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/SimilarityMeasure.java
rename to server/src/main/java/org/elasticsearch/inference/SimilarityMeasure.java
index 3028ecd078597..cd81cc461bd1d 100644
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/SimilarityMeasure.java
+++ b/server/src/main/java/org/elasticsearch/inference/SimilarityMeasure.java
@@ -1,11 +1,12 @@
 /*
  * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
  * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
*/ -package org.elasticsearch.xpack.inference.common; +package org.elasticsearch.inference; import java.util.Locale; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java index 532fd2359ac2b..cfbb07cb940e7 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java @@ -16,11 +16,11 @@ import org.elasticsearch.inference.InferenceService; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.inference.results.TextEmbedding; import org.elasticsearch.xpack.core.inference.results.TextEmbeddingResults; -import org.elasticsearch.xpack.inference.common.SimilarityMeasure; import java.net.URI; import java.net.URISyntaxException; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java index bc511c043fdf3..35b245e9a657a 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java @@ -20,9 +20,9 @@ import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xpack.inference.common.SimilarityMeasure; import org.elasticsearch.xpack.inference.external.action.cohere.CohereActionCreator; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; import org.elasticsearch.xpack.inference.services.SenderService; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceSettings.java index eb6dbc352d36d..97ad1b575caa9 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceSettings.java @@ -17,9 +17,9 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.inference.common.SimilarityMeasure; import java.io.IOException; import java.net.URI; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceSettings.java index 92920e0b9224f..f176cf7580567 100644 --- 
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceSettings.java @@ -15,9 +15,9 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.inference.common.SimilarityMeasure; import java.io.IOException; import java.net.URI; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java index 7010b59990cd3..03781450fc08c 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java @@ -21,9 +21,9 @@ import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xpack.inference.common.SimilarityMeasure; import org.elasticsearch.xpack.inference.external.action.openai.OpenAiActionCreator; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; import org.elasticsearch.xpack.inference.services.SenderService; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java index 229e45a024458..468e82d4f0866 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java @@ -15,9 +15,9 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.inference.common.SimilarityMeasure; import org.elasticsearch.xpack.inference.services.ServiceUtils; import org.elasticsearch.xpack.inference.services.openai.OpenAiParseContext; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/textembedding/MultilingualE5SmallInternalServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/textembedding/MultilingualE5SmallInternalServiceSettings.java index cab9d9d863885..aa1de0e0beddc 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/textembedding/MultilingualE5SmallInternalServiceSettings.java +++ 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/textembedding/MultilingualE5SmallInternalServiceSettings.java @@ -103,4 +103,9 @@ public TransportVersion getMinimalSupportedVersion() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); } + + @Override + public Integer dimensions() { + return 384; + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/textembedding/TextEmbeddingInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/textembedding/TextEmbeddingInternalService.java index 59228c6dcbddf..06d6545a381bd 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/textembedding/TextEmbeddingInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/textembedding/TextEmbeddingInternalService.java @@ -37,7 +37,6 @@ import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; import org.elasticsearch.xpack.core.ml.inference.TrainedModelInput; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextEmbeddingConfigUpdate; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextExpansionConfigUpdate; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TokenizationConfigUpdate; import org.elasticsearch.xpack.inference.services.settings.InternalServiceSettings; @@ -252,7 +251,7 @@ public void chunkedInfer( var configUpdate = chunkingOptions.settingsArePresent() ? new TokenizationConfigUpdate(chunkingOptions.windowSize(), chunkingOptions.span()) - : TextExpansionConfigUpdate.EMPTY_UPDATE; + : TextEmbeddingConfigUpdate.EMPTY_INSTANCE; var request = InferTrainedModelDeploymentAction.Request.forTextInput( model.getConfigurations().getInferenceEntityId(), diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceSettingsTests.java index 321567dfa32e8..bbee8aa1de577 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceSettingsTests.java @@ -11,11 +11,11 @@ import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.inference.common.SimilarityMeasure; import org.elasticsearch.xpack.inference.services.ServiceFields; import org.elasticsearch.xpack.inference.services.ServiceUtils; import org.hamcrest.CoreMatchers; diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsModelTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsModelTests.java index 5570731dbe8d9..ec36ac5ce58d5 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsModelTests.java +++ 
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsModelTests.java @@ -10,9 +10,9 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.InputType; +import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.inference.common.SimilarityMeasure; import org.elasticsearch.xpack.inference.services.cohere.CohereServiceSettings; import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; import org.hamcrest.MatcherAssert; diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsServiceSettingsTests.java index 39aba9c281a0c..2f5eba676a314 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsServiceSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsServiceSettingsTests.java @@ -13,10 +13,10 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider; import org.elasticsearch.xpack.inference.InferenceNamedWriteablesProvider; -import org.elasticsearch.xpack.inference.common.SimilarityMeasure; import org.elasticsearch.xpack.inference.services.ServiceFields; import org.elasticsearch.xpack.inference.services.ServiceUtils; import org.elasticsearch.xpack.inference.services.cohere.CohereServiceSettings; diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceSettingsTests.java index 7e2a333685321..f32fafd493395 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceSettingsTests.java @@ -10,8 +10,8 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.test.AbstractWireSerializingTestCase; -import org.elasticsearch.xpack.inference.common.SimilarityMeasure; import org.elasticsearch.xpack.inference.services.ServiceFields; import org.elasticsearch.xpack.inference.services.ServiceUtils; diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java index 3fd4d17d7a6e4..b3d9a98bad189 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java +++ 
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java @@ -22,13 +22,13 @@ import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.http.MockResponse; import org.elasticsearch.test.http.MockWebServer; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.inference.common.SimilarityMeasure; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; import org.elasticsearch.xpack.inference.external.http.sender.Sender; diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModelTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModelTests.java index 60ed5a13d9c58..01b60fdb896d0 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModelTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModelTests.java @@ -9,9 +9,9 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.inference.common.SimilarityMeasure; import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; import java.util.Map; diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettingsTests.java index 51069b46afb94..18b5ab44f59ca 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettingsTests.java @@ -11,11 +11,11 @@ import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.inference.common.SimilarityMeasure; import org.elasticsearch.xpack.inference.services.ServiceFields; import org.elasticsearch.xpack.inference.services.ServiceUtils; import org.elasticsearch.xpack.inference.services.openai.OpenAiParseContext; From b8dc5c3041be8085f45cb2ff3cb7e01d5e65aa2c Mon Sep 17 00:00:00 2001 From: Panagiotis Bailis Date: Tue, 20 Feb 2024 13:50:02 +0200 Subject: [PATCH 07/49] Fix for SearchServiceTests#testWaitOnRefreshFailsIfCheckpointNotIndexed - increasing timeout for randomly failing test 
(#105395) --- .../java/org/elasticsearch/search/SearchServiceTests.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 551874a2d271a..b0c4ef00230d5 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -1767,7 +1767,9 @@ public void testWaitOnRefreshFailsIfCheckpointNotIndexed() { final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); final IndexShard indexShard = indexService.getShard(0); SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); - searchRequest.setWaitForCheckpointsTimeout(TimeValue.timeValueMillis(randomIntBetween(10, 100))); + // Increased timeout to avoid cancelling the search task prior to its completion, + // as we expect to raise an Exception. Timeout itself is tested on the following `testWaitOnRefreshTimeout` test. + searchRequest.setWaitForCheckpointsTimeout(TimeValue.timeValueMillis(randomIntBetween(200, 300))); searchRequest.setWaitForCheckpoints(Collections.singletonMap("index", new long[] { 1 })); final DocWriteResponse response = prepareIndex("index").setSource("id", "1").get(); From 410efb6fb6d71433bc242d3aed5d3fa51a5acf29 Mon Sep 17 00:00:00 2001 From: Panagiotis Bailis Date: Tue, 20 Feb 2024 15:10:22 +0200 Subject: [PATCH 08/49] Fixing NPE when requesting [_none_] for stored_fields (#104711) --- docs/changelog/104711.yaml | 5 ++++ .../search/builder/SearchSourceBuilder.java | 8 +++++-- .../builder/SearchSourceBuilderTests.java | 23 +++++++++++++++++++ 3 files changed, 34 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/104711.yaml diff --git a/docs/changelog/104711.yaml b/docs/changelog/104711.yaml new file mode 100644 index 0000000000000..f0f9bf7f10e45 --- /dev/null +++ b/docs/changelog/104711.yaml @@ -0,0 +1,5 @@ +pr: 104711 +summary: "Fixing NPE when requesting [_none_] for `stored_fields`" +area: Search +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 649c40c856fe8..72fd84cda760b 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -1337,7 +1337,10 @@ private SearchSourceBuilder parseXContent( SearchSourceBuilder.STORED_FIELDS_FIELD.getPreferredName(), parser ); - searchUsage.trackSectionUsage(STORED_FIELDS_FIELD.getPreferredName()); + if (storedFieldsContext.fetchFields() == false + || (storedFieldsContext.fieldNames() != null && storedFieldsContext.fieldNames().size() > 0)) { + searchUsage.trackSectionUsage(STORED_FIELDS_FIELD.getPreferredName()); + } } else if (SORT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { sort(parser.text()); } else if (PROFILE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { @@ -1493,7 +1496,8 @@ private SearchSourceBuilder parseXContent( } else if (token == XContentParser.Token.START_ARRAY) { if (STORED_FIELDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { storedFieldsContext = StoredFieldsContext.fromXContent(STORED_FIELDS_FIELD.getPreferredName(), parser); - if (storedFieldsContext.fieldNames().size() > 0 || storedFieldsContext.fetchFields() == 
false) { + if (storedFieldsContext.fetchFields() == false + || (storedFieldsContext.fieldNames() != null && storedFieldsContext.fieldNames().size() > 0)) { searchUsage.trackSectionUsage(STORED_FIELDS_FIELD.getPreferredName()); } } else if (DOCVALUE_FIELDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { diff --git a/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java b/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java index 8ee1c64ddbb22..26eefe850fc8f 100644 --- a/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java @@ -65,6 +65,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import java.util.function.ToLongFunction; @@ -592,6 +593,28 @@ public void testNegativeTrackTotalHits() throws IOException { } } + public void testStoredFieldsUsage() throws IOException { + Set storedFieldRestVariations = Set.of( + "{\"stored_fields\" : [\"_none_\"]}", + "{\"stored_fields\" : \"_none_\"}", + "{\"stored_fields\" : [\"field\"]}", + "{\"stored_fields\" : \"field\"}" + ); + for (String storedFieldRest : storedFieldRestVariations) { + SearchUsageHolder searchUsageHolder = new UsageService().getSearchUsageHolder(); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, storedFieldRest)) { + new SearchSourceBuilder().parseXContent(parser, true, searchUsageHolder); + SearchUsageStats searchUsageStats = searchUsageHolder.getSearchUsageStats(); + Map sectionsUsage = searchUsageStats.getSectionsUsage(); + assertEquals( + "Failed to correctly parse and record usage of '" + storedFieldRest + "'", + 1L, + sectionsUsage.get("stored_fields").longValue() + ); + } + } + } + public void testEmptySectionsAreNotTracked() throws IOException { SearchUsageHolder searchUsageHolder = new UsageService().getSearchUsageHolder(); From 7fb4b74b95da14e947c47d6ec3c2542e3bc1f1be Mon Sep 17 00:00:00 2001 From: Pat Whelan Date: Tue, 20 Feb 2024 08:22:27 -0500 Subject: [PATCH 09/49] [Transform] Test waits for next iteration (#105560) Currently, the `testStopWaitForCheckpoint` only verifies that the transform state is `stopped`, which might be the previous iteration's state. There is a small chance that we may exit the loop before the transform starts and stops for that iteration, where the test might fail the final `stopped` check. Now, we check the `trigger_count` to verify that the transform has at least had a chance to move from the `STARTED` state into the `INDEXING` and eventually `STOPPED` state before we finish the iteration. 
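In code, the added guard amounts to polling the transform stats until the
scheduler's trigger count advances past the value recorded for the previous
iteration. A minimal sketch of that wait, reusing this test's existing helpers
(`getBasicTransformStats`, `XContentMapValues.extractValue`) and the
`previousTriggerCount` accumulator introduced below:

    // Block until this iteration has actually been triggered: the trigger
    // count must move past the value recorded when the last iteration stopped.
    assertBusy(() -> {
        var stateAndStats = getBasicTransformStats(transformId);
        var currentTriggerCount = (int) XContentMapValues.extractValue("stats.trigger_count", stateAndStats);
        assertThat(previousTriggerCount.get(), lessThan(currentTriggerCount));
    });

Only once this assertion passes does the iteration proceed to stop the
transform and assert the `stopped` state.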
Fix #105388 Co-authored-by: Elastic Machine --- .../transform/integration/TransformIT.java | 34 +++++++++++++------ 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformIT.java b/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformIT.java index 394732742e528..073f604e608da 100644 --- a/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformIT.java +++ b/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformIT.java @@ -37,7 +37,9 @@ import java.util.HashMap; import java.util.Map; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -375,7 +377,7 @@ public void testStopWaitForCheckpoint() throws Exception { // wait until transform has been triggered and indexed at least 1 document assertBusy(() -> { - var stateAndStats = getBasicTransformStats(config.getId()); + var stateAndStats = getBasicTransformStats(transformId); assertThat((Integer) XContentMapValues.extractValue("stats.documents_indexed", stateAndStats), greaterThan(1)); }); @@ -384,39 +386,51 @@ public void testStopWaitForCheckpoint() throws Exception { // Wait until the first checkpoint waitUntilCheckpoint(config.getId(), 1L); + var previousTriggerCount = new AtomicInteger(0); // Even though we are continuous, we should be stopped now as we needed to stop at the first checkpoint assertBusy(() -> { - var stateAndStats = getBasicTransformStats(config.getId()); + var stateAndStats = getBasicTransformStats(transformId); assertThat(stateAndStats.get("state"), equalTo("stopped")); assertThat((Integer) XContentMapValues.extractValue("stats.documents_indexed", stateAndStats), equalTo(1000)); + previousTriggerCount.set((int) XContentMapValues.extractValue("stats.trigger_count", stateAndStats)); }); + // Create N additional runs of starting and stopping int additionalRuns = randomIntBetween(1, 10); for (int i = 0; i < additionalRuns; ++i) { + var testFailureMessage = format("Can't determine if Transform ran for iteration number [%d] out of [%d].", i, additionalRuns); // index some more docs using a new user - long timeStamp = Instant.now().toEpochMilli() - 1_000; - long user = 42 + i; + var timeStamp = Instant.now().toEpochMilli() - 1_000; + var user = 42 + i; indexMoreDocs(timeStamp, user, indexName); - startTransformWithRetryOnConflict(config.getId(), RequestOptions.DEFAULT); + startTransformWithRetryOnConflict(transformId, RequestOptions.DEFAULT); - boolean waitForCompletion = randomBoolean(); - stopTransform(transformId, waitForCompletion, null, true); + assertBusy(() -> { + var stateAndStats = getBasicTransformStats(transformId); + var currentTriggerCount = (int) XContentMapValues.extractValue("stats.trigger_count", stateAndStats); + // We should verify that we are retrieving the stats *after* this run had been started. + // If the trigger_count has increased, we know we have started this test iteration. 
+ assertThat(testFailureMessage, previousTriggerCount.get(), lessThan(currentTriggerCount)); + }); + var waitForCompletion = randomBoolean(); + stopTransform(transformId, waitForCompletion, null, true); assertBusy(() -> { - var stateAndStats = getBasicTransformStats(config.getId()); + var stateAndStats = getBasicTransformStats(transformId); assertThat(stateAndStats.get("state"), equalTo("stopped")); + previousTriggerCount.set((int) XContentMapValues.extractValue("stats.trigger_count", stateAndStats)); }); } - var stateAndStats = getBasicTransformStats(config.getId()); + var stateAndStats = getBasicTransformStats(transformId); assertThat(stateAndStats.get("state"), equalTo("stopped")); // Despite indexing new documents into the source index, the number of documents in the destination index stays the same. assertThat((Integer) XContentMapValues.extractValue("stats.documents_indexed", stateAndStats), equalTo(1000)); stopTransform(transformId); - deleteTransform(config.getId()); + deleteTransform(transformId); } public void testContinuousTransformRethrottle() throws Exception { From 0d0b319bf9714395aa7439b6fe8cea5d52a2bceb Mon Sep 17 00:00:00 2001 From: Panagiotis Bailis Date: Tue, 20 Feb 2024 16:28:27 +0200 Subject: [PATCH 10/49] Fixing compilation error in SearchSourceBuilderTests#testStoredFieldsUsage (#105656) --- .../elasticsearch/search/builder/SearchSourceBuilderTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java b/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java index 26eefe850fc8f..7b67bc5b94f7f 100644 --- a/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java @@ -603,7 +603,7 @@ public void testStoredFieldsUsage() throws IOException { for (String storedFieldRest : storedFieldRestVariations) { SearchUsageHolder searchUsageHolder = new UsageService().getSearchUsageHolder(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, storedFieldRest)) { - new SearchSourceBuilder().parseXContent(parser, true, searchUsageHolder); + new SearchSourceBuilder().parseXContent(parser, true, searchUsageHolder, nf -> false); SearchUsageStats searchUsageStats = searchUsageHolder.getSearchUsageStats(); Map sectionsUsage = searchUsageStats.getSectionsUsage(); assertEquals( From 5920c917aa933bf8078e3c88f43c217957bf9dd0 Mon Sep 17 00:00:00 2001 From: Felix Barnsteiner Date: Tue, 20 Feb 2024 15:53:14 +0100 Subject: [PATCH 11/49] Encapsulate Mapper.Builder#name and make it private (#105648) This is in preparation to make the field mutable, which is needed in the context of https://github.com/elastic/elasticsearch/pull/103542 --- .../legacygeo/mapper/LegacyGeoShapeFieldMapper.java | 4 ++-- .../index/mapper/extras/MatchOnlyTextFieldMapper.java | 4 ++-- .../index/mapper/extras/RankFeatureFieldMapper.java | 4 ++-- .../index/mapper/extras/RankFeaturesFieldMapper.java | 4 ++-- .../index/mapper/extras/ScaledFloatFieldMapper.java | 4 ++-- .../mapper/extras/SearchAsYouTypeFieldMapper.java | 8 ++++---- .../index/mapper/extras/TokenCountFieldMapper.java | 6 +++--- .../join/mapper/ParentJoinFieldMapper.java | 8 ++++---- .../percolator/PercolatorFieldMapper.java | 4 ++-- .../analysis/icu/ICUCollationKeywordFieldMapper.java | 4 ++-- .../mapper/annotatedtext/AnnotatedTextFieldMapper.java | 6 +++--- .../index/mapper/murmur3/Murmur3FieldMapper.java | 
4 ++-- .../elasticsearch/index/mapper/BinaryFieldMapper.java | 4 ++-- .../elasticsearch/index/mapper/BooleanFieldMapper.java | 6 +++--- .../index/mapper/CompletionFieldMapper.java | 4 ++-- .../elasticsearch/index/mapper/DateFieldMapper.java | 4 ++-- .../index/mapper/GeoPointFieldMapper.java | 10 +++++----- .../index/mapper/GeoShapeFieldMapper.java | 6 +++--- .../org/elasticsearch/index/mapper/IpFieldMapper.java | 6 +++--- .../elasticsearch/index/mapper/KeywordFieldMapper.java | 8 ++++---- .../java/org/elasticsearch/index/mapper/Mapper.java | 2 +- .../elasticsearch/index/mapper/NestedObjectMapper.java | 6 +++--- .../elasticsearch/index/mapper/NumberFieldMapper.java | 6 +++--- .../org/elasticsearch/index/mapper/ObjectMapper.java | 6 +++--- .../index/mapper/PassThroughObjectMapper.java | 6 +++--- .../index/mapper/PlaceHolderFieldMapper.java | 4 ++-- .../elasticsearch/index/mapper/RangeFieldMapper.java | 4 ++-- .../elasticsearch/index/mapper/RootObjectMapper.java | 2 +- .../elasticsearch/index/mapper/TextFieldMapper.java | 10 +++++----- .../index/mapper/flattened/FlattenedFieldMapper.java | 8 ++++---- .../index/mapper/vectors/DenseVectorFieldMapper.java | 4 ++-- .../index/mapper/vectors/SparseVectorFieldMapper.java | 4 ++-- .../index/mapper/ParametrizedMapperTests.java | 2 +- .../xpack/analytics/mapper/HistogramFieldMapper.java | 4 ++-- .../mapper/AggregateDoubleMetricFieldMapper.java | 6 +++--- .../mapper/ConstantKeywordFieldMapper.java | 4 ++-- .../countedkeyword/CountedKeywordFieldMapper.java | 6 +++--- .../xpack/unsignedlong/UnsignedLongFieldMapper.java | 4 ++-- .../xpack/versionfield/VersionStringFieldMapper.java | 4 ++-- .../index/mapper/GeoShapeWithDocValuesFieldMapper.java | 8 ++++---- .../xpack/spatial/index/mapper/PointFieldMapper.java | 6 +++--- .../xpack/spatial/index/mapper/ShapeFieldMapper.java | 4 ++-- .../xpack/wildcard/mapper/WildcardFieldMapper.java | 4 ++-- 43 files changed, 111 insertions(+), 111 deletions(-) diff --git a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java index afd969cc17ad4..4ef2b2e07bb26 100644 --- a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java +++ b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java @@ -324,7 +324,7 @@ private static void setupPrefixTrees(GeoShapeFieldType ft) { private GeoShapeFieldType buildFieldType(LegacyGeoShapeParser parser, MapperBuilderContext context) { GeoShapeFieldType ft = new GeoShapeFieldType( - context.buildFullName(name), + context.buildFullName(name()), indexed.get(), orientation.get().value(), parser, @@ -353,7 +353,7 @@ private static int getLevels(int treeLevels, double precisionInMeters, int defau public LegacyGeoShapeFieldMapper build(MapperBuilderContext context) { LegacyGeoShapeParser parser = new LegacyGeoShapeParser(); GeoShapeFieldType ft = buildFieldType(parser, context); - return new LegacyGeoShapeFieldMapper(name, ft, multiFieldsBuilder.build(this, context), copyTo, parser, this); + return new LegacyGeoShapeFieldMapper(name(), ft, multiFieldsBuilder.build(this, context), copyTo, parser, this); } } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java index fa83e2600de9b..a965b9a2bbce4 100644 --- 
a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java @@ -127,7 +127,7 @@ private MatchOnlyTextFieldType buildFieldType(MapperBuilderContext context) { NamedAnalyzer indexAnalyzer = analyzers.getIndexAnalyzer(); TextSearchInfo tsi = new TextSearchInfo(Defaults.FIELD_TYPE, null, searchAnalyzer, searchQuoteAnalyzer); MatchOnlyTextFieldType ft = new MatchOnlyTextFieldType( - context.buildFullName(name), + context.buildFullName(name()), tsi, indexAnalyzer, context.isSourceSynthetic(), @@ -140,7 +140,7 @@ private MatchOnlyTextFieldType buildFieldType(MapperBuilderContext context) { public MatchOnlyTextFieldMapper build(MapperBuilderContext context) { MatchOnlyTextFieldType tft = buildFieldType(context); MultiFields multiFields = multiFieldsBuilder.build(this, context); - return new MatchOnlyTextFieldMapper(name, Defaults.FIELD_TYPE, tft, multiFields, copyTo, context.isSourceSynthetic(), this); + return new MatchOnlyTextFieldMapper(name(), Defaults.FIELD_TYPE, tft, multiFields, copyTo, context.isSourceSynthetic(), this); } } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java index b5a5ce87d5096..f63f290bf58fc 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java @@ -92,9 +92,9 @@ protected Parameter[] getParameters() { @Override public RankFeatureFieldMapper build(MapperBuilderContext context) { return new RankFeatureFieldMapper( - name, + name(), new RankFeatureFieldType( - context.buildFullName(name), + context.buildFullName(name()), meta.getValue(), positiveScoreImpact.getValue(), nullValue.getValue() diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeaturesFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeaturesFieldMapper.java index f36dfb5605633..5f0d44d1fb796 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeaturesFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeaturesFieldMapper.java @@ -64,8 +64,8 @@ protected Parameter[] getParameters() { @Override public RankFeaturesFieldMapper build(MapperBuilderContext context) { return new RankFeaturesFieldMapper( - name, - new RankFeaturesFieldType(context.buildFullName(name), meta.getValue(), positiveScoreImpact.getValue()), + name(), + new RankFeaturesFieldType(context.buildFullName(name()), meta.getValue(), positiveScoreImpact.getValue()), multiFieldsBuilder.build(this, context), copyTo, positiveScoreImpact.getValue() diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java index cc2ceb3c017ba..e2b932b01a516 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java @@ -186,7 +186,7 @@ protected Parameter[] getParameters() { @Override public 
ScaledFloatFieldMapper build(MapperBuilderContext context) { ScaledFloatFieldType type = new ScaledFloatFieldType( - context.buildFullName(name), + context.buildFullName(name()), indexed.getValue(), stored.getValue(), hasDocValues.getValue(), @@ -196,7 +196,7 @@ public ScaledFloatFieldMapper build(MapperBuilderContext context) { metric.getValue(), indexMode ); - return new ScaledFloatFieldMapper(name, type, multiFieldsBuilder.build(this, context), copyTo, this); + return new ScaledFloatFieldMapper(name(), type, multiFieldsBuilder.build(this, context), copyTo, this); } } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/SearchAsYouTypeFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/SearchAsYouTypeFieldMapper.java index ca8231c46736f..a5e011d5772f0 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/SearchAsYouTypeFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/SearchAsYouTypeFieldMapper.java @@ -187,7 +187,7 @@ public SearchAsYouTypeFieldMapper build(MapperBuilderContext context) { NamedAnalyzer searchAnalyzer = analyzers.getSearchAnalyzer(); SearchAsYouTypeFieldType ft = new SearchAsYouTypeFieldType( - context.buildFullName(name), + context.buildFullName(name()), fieldType, similarity.getValue(), analyzers.getSearchAnalyzer(), @@ -202,7 +202,7 @@ public SearchAsYouTypeFieldMapper build(MapperBuilderContext context) { prefixft.setIndexOptions(fieldType.indexOptions()); prefixft.setOmitNorms(true); prefixft.setStored(false); - final String fullName = context.buildFullName(name); + final String fullName = context.buildFullName(name()); // wrap the root field's index analyzer with shingles and edge ngrams final Analyzer prefixIndexWrapper = SearchAsYouTypeAnalyzer.withShingleAndPrefix( indexAnalyzer.analyzer(), @@ -228,7 +228,7 @@ public SearchAsYouTypeFieldMapper build(MapperBuilderContext context) { final int shingleSize = i + 2; FieldType shingleft = new FieldType(fieldType); shingleft.setStored(false); - String fieldName = getShingleFieldName(context.buildFullName(name), shingleSize); + String fieldName = getShingleFieldName(context.buildFullName(name()), shingleSize); // wrap the root field's index, search, and search quote analyzers with shingles final SearchAsYouTypeAnalyzer shingleIndexWrapper = SearchAsYouTypeAnalyzer.withShingle( indexAnalyzer.analyzer(), @@ -260,7 +260,7 @@ public SearchAsYouTypeFieldMapper build(MapperBuilderContext context) { ft.setPrefixField(prefixFieldType); ft.setShingleFields(shingleFieldTypes); return new SearchAsYouTypeFieldMapper( - name, + name(), ft, copyTo, indexAnalyzers, diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java index 4d04e83361252..831306a8e8594 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java @@ -77,17 +77,17 @@ protected Parameter[] getParameters() { @Override public TokenCountFieldMapper build(MapperBuilderContext context) { if (analyzer.getValue() == null) { - throw new MapperParsingException("Analyzer must be set for field [" + name + "] but wasn't."); + throw new MapperParsingException("Analyzer must be set for field [" + name() + "] but 
wasn't."); } MappedFieldType ft = new TokenCountFieldType( - context.buildFullName(name), + context.buildFullName(name()), index.getValue(), store.getValue(), hasDocValues.getValue(), nullValue.getValue(), meta.getValue() ); - return new TokenCountFieldMapper(name, ft, multiFieldsBuilder.build(this, context), copyTo, this); + return new TokenCountFieldMapper(name(), ft, multiFieldsBuilder.build(this, context), copyTo, this); } } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java index 2bbd5e81444b7..d6b7ccad4f3c5 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java @@ -112,16 +112,16 @@ protected Parameter[] getParameters() { @Override public ParentJoinFieldMapper build(MapperBuilderContext context) { - checkObjectOrNested(context, name); + checkObjectOrNested(context, name()); final Map parentIdFields = new HashMap<>(); relations.get() .stream() - .map(relation -> new ParentIdFieldMapper(name + "#" + relation.parent(), eagerGlobalOrdinals.get())) + .map(relation -> new ParentIdFieldMapper(name() + "#" + relation.parent(), eagerGlobalOrdinals.get())) .forEach(mapper -> parentIdFields.put(mapper.name(), mapper)); Joiner joiner = new Joiner(name(), relations.get()); return new ParentJoinFieldMapper( - name, - new JoinFieldType(context.buildFullName(name), joiner, meta.get()), + name(), + new JoinFieldType(context.buildFullName(name()), joiner, meta.get()), Collections.unmodifiableMap(parentIdFields), eagerGlobalOrdinals.get(), relations.get() diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java index be8d342254afd..7ba83f9ce71b5 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java @@ -135,10 +135,10 @@ protected Parameter[] getParameters() { @Override public PercolatorFieldMapper build(MapperBuilderContext context) { - PercolatorFieldType fieldType = new PercolatorFieldType(context.buildFullName(name), meta.getValue()); + PercolatorFieldType fieldType = new PercolatorFieldType(context.buildFullName(name()), meta.getValue()); // TODO should percolator even allow multifields? 
MultiFields multiFields = multiFieldsBuilder.build(this, context); - context = context.createChildContext(name); + context = context.createChildContext(name()); KeywordFieldMapper extractedTermsField = createExtractQueryFieldBuilder( EXTRACTED_TERMS_FIELD_NAME, context, diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapper.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapper.java index 19f1d0455630d..1da274ff236da 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapper.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapper.java @@ -327,7 +327,7 @@ public ICUCollationKeywordFieldMapper build(MapperBuilderContext context) { final CollatorParams params = collatorParams(); final Collator collator = params.buildCollator(); CollationFieldType ft = new CollationFieldType( - context.buildFullName(name), + context.buildFullName(name()), indexed.getValue(), stored.getValue(), hasDocValues.getValue(), @@ -337,7 +337,7 @@ public ICUCollationKeywordFieldMapper build(MapperBuilderContext context) { meta.getValue() ); return new ICUCollationKeywordFieldMapper( - name, + name(), buildFieldType(), ft, multiFieldsBuilder.build(this, context), diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java index 7153fcf4d46b3..fae2ab19aee39 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java +++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java @@ -122,7 +122,7 @@ private AnnotatedTextFieldType buildFieldType(FieldType fieldType, MapperBuilder wrapAnalyzer(analyzers.getSearchQuoteAnalyzer()) ); return new AnnotatedTextFieldType( - context.buildFullName(name), + context.buildFullName(name()), store.getValue(), tsi, context.isSourceSynthetic(), @@ -139,12 +139,12 @@ public AnnotatedTextFieldMapper build(MapperBuilderContext context) { if (analyzers.positionIncrementGap.isConfigured()) { if (fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) { throw new IllegalArgumentException( - "Cannot set position_increment_gap on field [" + name + "] without positions enabled" + "Cannot set position_increment_gap on field [" + name() + "] without positions enabled" ); } } return new AnnotatedTextFieldMapper( - name, + name(), fieldType, buildFieldType(fieldType, context), multiFieldsBuilder.build(this, context), diff --git a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java index c1e2888c47c62..08a133bcb69c8 100644 --- a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java +++ b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java @@ -55,8 +55,8 @@ protected Parameter[] getParameters() { @Override public Murmur3FieldMapper build(MapperBuilderContext context) { return new Murmur3FieldMapper( - name, - new Murmur3FieldType(context.buildFullName(name), stored.getValue(), 
meta.getValue()), + name(), + new Murmur3FieldType(context.buildFullName(name()), stored.getValue(), meta.getValue()), multiFieldsBuilder.build(this, context), copyTo ); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java index 403156c95540e..948baf0dff830 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java @@ -64,8 +64,8 @@ public Parameter[] getParameters() { @Override public BinaryFieldMapper build(MapperBuilderContext context) { return new BinaryFieldMapper( - name, - new BinaryFieldType(context.buildFullName(name), stored.getValue(), hasDocValues.getValue(), meta.getValue()), + name(), + new BinaryFieldType(context.buildFullName(name()), stored.getValue(), hasDocValues.getValue(), meta.getValue()), multiFieldsBuilder.build(this, context), copyTo, this diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java index 43e6e662dc8f2..cc01a487ad7b8 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java @@ -115,7 +115,7 @@ protected Parameter[] getParameters() { @Override public BooleanFieldMapper build(MapperBuilderContext context) { MappedFieldType ft = new BooleanFieldType( - context.buildFullName(name), + context.buildFullName(name()), indexed.getValue() && indexCreatedVersion.isLegacyIndexVersion() == false, stored.getValue(), docValues.getValue(), @@ -123,7 +123,7 @@ public BooleanFieldMapper build(MapperBuilderContext context) { scriptValues(), meta.getValue() ); - return new BooleanFieldMapper(name, ft, multiFieldsBuilder.build(this, context), copyTo, context.isSourceSynthetic(), this); + return new BooleanFieldMapper(name(), ft, multiFieldsBuilder.build(this, context), copyTo, context.isSourceSynthetic(), this); } private FieldValues scriptValues() { @@ -133,7 +133,7 @@ private FieldValues scriptValues() { BooleanFieldScript.Factory scriptFactory = scriptCompiler.compile(script.get(), BooleanFieldScript.CONTEXT); return scriptFactory == null ? 
null - : (lookup, ctx, doc, consumer) -> scriptFactory.newFactory(name, script.get().getParams(), lookup, OnScriptError.FAIL) + : (lookup, ctx, doc, consumer) -> scriptFactory.newFactory(name(), script.get().getParams(), lookup, OnScriptError.FAIL) .newInstance(ctx) .runForDoc(doc, consumer); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java index 94b937c534491..5d5ef076852a8 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java @@ -205,9 +205,9 @@ public CompletionFieldMapper build(MapperBuilderContext context) { new CompletionAnalyzer(this.searchAnalyzer.getValue(), preserveSeparators.getValue(), preservePosInc.getValue()) ); - CompletionFieldType ft = new CompletionFieldType(context.buildFullName(name), completionAnalyzer, meta.getValue()); + CompletionFieldType ft = new CompletionFieldType(context.buildFullName(name()), completionAnalyzer, meta.getValue()); ft.setContextMappings(contexts.getValue()); - return new CompletionFieldMapper(name, ft, multiFieldsBuilder.build(this, context), copyTo, this); + return new CompletionFieldMapper(name(), ft, multiFieldsBuilder.build(this, context), copyTo, this); } private void checkCompletionContextsLimit() { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java index 0c54b58aae0e3..1b926734c1713 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java @@ -306,7 +306,7 @@ private FieldValues scriptValues() { return factory == null ? null : (lookup, ctx, doc, consumer) -> factory.newFactory( - name, + name(), script.get().getParams(), lookup, buildFormatter(), @@ -364,7 +364,7 @@ public DateFieldMapper build(MapperBuilderContext context) { && ignoreMalformed.isConfigured() == false) { ignoreMalformed.setValue(false); } - return new DateFieldMapper(name, ft, multiFieldsBuilder.build(this, context), copyTo, nullTimestamp, resolution, this); + return new DateFieldMapper(name(), ft, multiFieldsBuilder.build(this, context), copyTo, nullTimestamp, resolution, this); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java index 4effc380646ff..85a9b8377e6f0 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java @@ -186,7 +186,7 @@ private FieldValues scriptValues() { GeoPointFieldScript.Factory factory = scriptCompiler.compile(this.script.get(), GeoPointFieldScript.CONTEXT); return factory == null ? 
null - : (lookup, ctx, doc, consumer) -> factory.newFactory(name, script.get().getParams(), lookup, OnScriptError.FAIL) + : (lookup, ctx, doc, consumer) -> factory.newFactory(name(), script.get().getParams(), lookup, OnScriptError.FAIL) .newInstance(ctx) .runForDoc(doc, consumer); } @@ -194,7 +194,7 @@ private FieldValues scriptValues() { @Override public FieldMapper build(MapperBuilderContext context) { Parser geoParser = new GeoPointParser( - name, + name(), (parser) -> GeoUtils.parseGeoPoint(parser, ignoreZValue.get().value()), nullValue.get(), ignoreZValue.get().value(), @@ -202,7 +202,7 @@ public FieldMapper build(MapperBuilderContext context) { metric.get() != TimeSeriesParams.MetricType.POSITION ); GeoPointFieldType ft = new GeoPointFieldType( - context.buildFullName(name), + context.buildFullName(name()), indexed.get() && indexCreatedVersion.isLegacyIndexVersion() == false, stored.get(), hasDocValues.get(), @@ -214,9 +214,9 @@ public FieldMapper build(MapperBuilderContext context) { indexMode ); if (this.script.get() == null) { - return new GeoPointFieldMapper(name, ft, multiFieldsBuilder.build(this, context), copyTo, geoParser, this); + return new GeoPointFieldMapper(name(), ft, multiFieldsBuilder.build(this, context), copyTo, geoParser, this); } - return new GeoPointFieldMapper(name, ft, geoParser, this); + return new GeoPointFieldMapper(name(), ft, geoParser, this); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java index e39684705e26a..541538f65a550 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java @@ -99,18 +99,18 @@ public GeoShapeFieldMapper build(MapperBuilderContext context) { ); GeoShapeParser geoShapeParser = new GeoShapeParser(geometryParser, orientation.get().value()); GeoShapeFieldType ft = new GeoShapeFieldType( - context.buildFullName(name), + context.buildFullName(name()), indexed.get(), orientation.get().value(), geoShapeParser, meta.get() ); return new GeoShapeFieldMapper( - name, + name(), ft, multiFieldsBuilder.build(this, context), copyTo, - new GeoShapeIndexer(orientation.get().value(), context.buildFullName(name)), + new GeoShapeIndexer(orientation.get().value(), context.buildFullName(name())), geoShapeParser, this ); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index 8ce726b49ff66..355b38d4dcb96 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -154,7 +154,7 @@ private FieldValues scriptValues() { IpFieldScript.Factory factory = scriptCompiler.compile(this.script.get(), IpFieldScript.CONTEXT); return factory == null ? 
null - : (lookup, ctx, doc, consumer) -> factory.newFactory(name, script.get().getParams(), lookup, OnScriptError.FAIL) + : (lookup, ctx, doc, consumer) -> factory.newFactory(name(), script.get().getParams(), lookup, OnScriptError.FAIL) .newInstance(ctx) .runForDoc(doc, consumer); } @@ -170,9 +170,9 @@ public IpFieldMapper build(MapperBuilderContext context) { dimension.setValue(true); } return new IpFieldMapper( - name, + name(), new IpFieldType( - context.buildFullName(name), + context.buildFullName(name()), indexed.getValue() && indexCreatedVersion.isLegacyIndexVersion() == false, stored.getValue(), hasDocValues.getValue(), diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index a5a571fb82d85..06e689784b087 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -254,7 +254,7 @@ private FieldValues scriptValues() { StringFieldScript.Factory scriptFactory = scriptCompiler.compile(script.get(), StringFieldScript.CONTEXT); return scriptFactory == null ? null - : (lookup, ctx, doc, consumer) -> scriptFactory.newFactory(name, script.get().getParams(), lookup, OnScriptError.FAIL) + : (lookup, ctx, doc, consumer) -> scriptFactory.newFactory(name(), script.get().getParams(), lookup, OnScriptError.FAIL) .newInstance(ctx) .runForDoc(doc, consumer); } @@ -294,7 +294,7 @@ private KeywordFieldType buildFieldType(MapperBuilderContext context, FieldType ); normalizer = Lucene.KEYWORD_ANALYZER; } else { - throw new MapperParsingException("normalizer [" + normalizerName + "] not found for field [" + name + "]"); + throw new MapperParsingException("normalizer [" + normalizerName + "] not found for field [" + name() + "]"); } } searchAnalyzer = quoteAnalyzer = normalizer; @@ -308,7 +308,7 @@ private KeywordFieldType buildFieldType(MapperBuilderContext context, FieldType dimension(true); } return new KeywordFieldType( - context.buildFullName(name), + context.buildFullName(name()), fieldType, normalizer, searchAnalyzer, @@ -330,7 +330,7 @@ public KeywordFieldMapper build(MapperBuilderContext context) { fieldtype = Defaults.FIELD_TYPE; } return new KeywordFieldMapper( - name, + name(), fieldtype, buildFieldType(context, fieldtype), multiFieldsBuilder.build(this, context), diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java index 397f99f63030c..cf4025150584f 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -24,7 +24,7 @@ public abstract class Mapper implements ToXContentFragment, Iterable { public abstract static class Builder { - protected final String name; + private final String name; protected Builder(String name) { this.name = internFieldName(name); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java index a654819811621..1216618b1e986 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java @@ -62,8 +62,8 @@ public NestedObjectMapper build(MapperBuilderContext context) { this.includeInRoot = Explicit.IMPLICIT_FALSE; } } - NestedMapperBuilderContext 
nestedContext = new NestedMapperBuilderContext(context.buildFullName(name), parentIncludedInRoot); - final String fullPath = context.buildFullName(name); + NestedMapperBuilderContext nestedContext = new NestedMapperBuilderContext(context.buildFullName(name()), parentIncludedInRoot); + final String fullPath = context.buildFullName(name()); final String nestedTypePath; if (indexCreatedVersion.before(IndexVersions.V_8_0_0)) { nestedTypePath = "__" + fullPath; @@ -71,7 +71,7 @@ public NestedObjectMapper build(MapperBuilderContext context) { nestedTypePath = fullPath; } return new NestedObjectMapper( - name, + name(), fullPath, buildMappers(nestedContext), enabled, diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index 5935eaf2c3d14..2245e527c2aa2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -227,7 +227,7 @@ private FieldValues scriptValues() { if (this.script.get() == null) { return null; } - return type.compile(name, script.get(), scriptCompiler); + return type.compile(name(), script.get(), scriptCompiler); } public Builder dimension(boolean dimension) { @@ -271,8 +271,8 @@ public NumberFieldMapper build(MapperBuilderContext context) { dimension.setValue(true); } - MappedFieldType ft = new NumberFieldType(context.buildFullName(name), this); - return new NumberFieldMapper(name, ft, multiFieldsBuilder.build(this, context), copyTo, context.isSourceSynthetic(), this); + MappedFieldType ft = new NumberFieldType(context.buildFullName(name()), this); + return new NumberFieldMapper(name(), ft, multiFieldsBuilder.build(this, context), copyTo, context.isSourceSynthetic(), this); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index 7a807f767611b..a9de4bdd1467a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -172,12 +172,12 @@ protected final Map buildMappers(MapperBuilderContext mapperBuil @Override public ObjectMapper build(MapperBuilderContext context) { return new ObjectMapper( - name, - context.buildFullName(name), + name(), + context.buildFullName(name()), enabled, subobjects, dynamic, - buildMappers(context.createChildContext(name)) + buildMappers(context.createChildContext(name())) ); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java index b49c9328fcc79..4ce7f51ed7386 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java @@ -56,11 +56,11 @@ public PassThroughObjectMapper.Builder setContainsDimensions() { @Override public PassThroughObjectMapper build(MapperBuilderContext context) { return new PassThroughObjectMapper( - name, - context.buildFullName(name), + name(), + context.buildFullName(name()), enabled, dynamic, - buildMappers(context.createChildContext(name)), + buildMappers(context.createChildContext(name())), timeSeriesDimensionSubFields ); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/PlaceHolderFieldMapper.java 
b/server/src/main/java/org/elasticsearch/index/mapper/PlaceHolderFieldMapper.java index 98f8f21be704a..67260273bc5a5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/PlaceHolderFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/PlaceHolderFieldMapper.java @@ -90,8 +90,8 @@ protected Parameter[] getParameters() { @Override public PlaceHolderFieldMapper build(MapperBuilderContext context) { - PlaceHolderFieldType mappedFieldType = new PlaceHolderFieldType(context.buildFullName(name), type, Map.of()); - return new PlaceHolderFieldMapper(name, mappedFieldType, multiFieldsBuilder.build(this, context), copyTo, unknownParams); + PlaceHolderFieldType mappedFieldType = new PlaceHolderFieldType(context.buildFullName(name()), type, Map.of()); + return new PlaceHolderFieldMapper(name(), mappedFieldType, multiFieldsBuilder.build(this, context), copyTo, unknownParams); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java index fcd2a425a6625..3836915e65753 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java @@ -116,7 +116,7 @@ protected Parameter[] getParameters() { } protected RangeFieldType setupFieldType(MapperBuilderContext context) { - String fullName = context.buildFullName(name); + String fullName = context.buildFullName(name()); if (format.isConfigured()) { if (type != RangeType.DATE) { throw new IllegalArgumentException( @@ -163,7 +163,7 @@ protected RangeFieldType setupFieldType(MapperBuilderContext context) { @Override public RangeFieldMapper build(MapperBuilderContext context) { RangeFieldType ft = setupFieldType(context); - return new RangeFieldMapper(name, ft, multiFieldsBuilder.build(this, context), copyTo, type, this); + return new RangeFieldMapper(name(), ft, multiFieldsBuilder.build(this, context), copyTo, type, this); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java index d7cc9e8f7e71f..a730d8c2da89e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java @@ -114,7 +114,7 @@ public RootObjectMapper build(MapperBuilderContext context) { Map mappers = buildMappers(context); mappers.putAll(getAliasMappers(mappers, context)); return new RootObjectMapper( - name, + name(), enabled, subobjects, dynamic, diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index 1885869073711..faa840dacc732 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -355,18 +355,18 @@ private TextFieldType buildFieldType( if (analyzers.positionIncrementGap.isConfigured()) { if (fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) { throw new IllegalArgumentException( - "Cannot set position_increment_gap on field [" + name + "] without positions enabled" + "Cannot set position_increment_gap on field [" + name() + "] without positions enabled" ); } } TextSearchInfo tsi = new TextSearchInfo(fieldType, similarity.getValue(), searchAnalyzer, searchQuoteAnalyzer); TextFieldType ft; if 
(indexCreatedVersion.isLegacyIndexVersion()) { - ft = new LegacyTextFieldType(context.buildFullName(name), index.getValue(), store.getValue(), tsi, meta.getValue()); + ft = new LegacyTextFieldType(context.buildFullName(name()), index.getValue(), store.getValue(), tsi, meta.getValue()); // ignore fieldData and eagerGlobalOrdinals } else { ft = new TextFieldType( - context.buildFullName(name), + context.buildFullName(name()), index.getValue(), store.getValue(), tsi, @@ -412,7 +412,7 @@ private SubFieldInfo buildPrefixInfo(MapperBuilderContext context, FieldType fie * or a multi-field). This way search will continue to work on old indices and new indices * will use the expected full name. */ - String fullName = indexCreatedVersion.before(IndexVersions.V_7_2_1) ? name() : context.buildFullName(name); + String fullName = indexCreatedVersion.before(IndexVersions.V_7_2_1) ? name() : context.buildFullName(name()); // Copy the index options of the main field to allow phrase queries on // the prefix field. FieldType pft = new FieldType(fieldType); @@ -476,7 +476,7 @@ public TextFieldMapper build(MapperBuilderContext context) { throw new MapperParsingException("Cannot use reserved field name [" + mapper.name() + "]"); } } - return new TextFieldMapper(name, fieldType, tft, prefixFieldInfo, phraseFieldInfo, multiFields, copyTo, this); + return new TextFieldMapper(name(), fieldType, tft, prefixFieldInfo, phraseFieldInfo, multiFields, copyTo, this); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java index c15adfb3be116..5a8efb6c8ed59 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java @@ -202,13 +202,13 @@ protected Parameter[] getParameters() { public FlattenedFieldMapper build(MapperBuilderContext context) { MultiFields multiFields = multiFieldsBuilder.build(this, context); if (multiFields.iterator().hasNext()) { - throw new IllegalArgumentException(CONTENT_TYPE + " field [" + name + "] does not support [fields]"); + throw new IllegalArgumentException(CONTENT_TYPE + " field [" + name() + "] does not support [fields]"); } if (copyTo.copyToFields().isEmpty() == false) { - throw new IllegalArgumentException(CONTENT_TYPE + " field [" + name + "] does not support [copy_to]"); + throw new IllegalArgumentException(CONTENT_TYPE + " field [" + name() + "] does not support [copy_to]"); } MappedFieldType ft = new RootFlattenedFieldType( - context.buildFullName(name), + context.buildFullName(name()), indexed.get(), hasDocValues.get(), meta.get(), @@ -216,7 +216,7 @@ public FlattenedFieldMapper build(MapperBuilderContext context) { eagerGlobalOrdinals.get(), dimensions.get() ); - return new FlattenedFieldMapper(name, ft, this); + return new FlattenedFieldMapper(name(), ft, this); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java index d36ca9e0b25c1..598a6383bfdaa 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java @@ -213,9 +213,9 @@ protected Parameter[] getParameters() { @Override public DenseVectorFieldMapper build(MapperBuilderContext context) { 
return new DenseVectorFieldMapper( - name, + name(), new DenseVectorFieldType( - context.buildFullName(name), + context.buildFullName(name()), indexVersionCreated, elementType.getValue(), dims.getValue(), diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java index 3b892fc1647b6..6532abed19044 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java @@ -66,8 +66,8 @@ protected Parameter[] getParameters() { @Override public SparseVectorFieldMapper build(MapperBuilderContext context) { return new SparseVectorFieldMapper( - name, - new SparseVectorFieldType(context.buildFullName(name), meta.getValue()), + name(), + new SparseVectorFieldType(context.buildFullName(name()), meta.getValue()), multiFieldsBuilder.build(this, context), copyTo ); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java index 562a30ba4f389..b1b7f80ba865f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java @@ -175,7 +175,7 @@ protected Parameter[] getParameters() { @Override public FieldMapper build(MapperBuilderContext context) { - return new TestMapper(name(), context.buildFullName(name), multiFieldsBuilder.build(this, context), copyTo, this); + return new TestMapper(name(), context.buildFullName(name()), multiFieldsBuilder.build(this, context), copyTo, this); } } diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapper.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapper.java index 421973723837d..b8e4f77f7da7b 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapper.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapper.java @@ -94,8 +94,8 @@ protected Parameter[] getParameters() { @Override public HistogramFieldMapper build(MapperBuilderContext context) { return new HistogramFieldMapper( - name, - new HistogramFieldType(context.buildFullName(name), meta.getValue()), + name(), + new HistogramFieldType(context.buildFullName(name()), meta.getValue()), multiFieldsBuilder.build(this, context), copyTo, this diff --git a/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java b/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java index b5c35e758a65c..1581803920cdc 100644 --- a/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java +++ b/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java @@ -209,7 +209,7 @@ public AggregateDoubleMetricFieldMapper build(MapperBuilderContext context) { EnumMap metricMappers = new EnumMap<>(Metric.class); // Instantiate one NumberFieldMapper instance for each metric for (Metric m : this.metrics.getValue()) { - String fieldName = 
subfieldName(name, m); + String fieldName = subfieldName(name(), m); NumberFieldMapper.Builder builder; if (m == Metric.value_count) { @@ -245,14 +245,14 @@ public AggregateDoubleMetricFieldMapper build(MapperBuilderContext context) { }, () -> new EnumMap<>(Metric.class))); AggregateDoubleMetricFieldType metricFieldType = new AggregateDoubleMetricFieldType( - context.buildFullName(name), + context.buildFullName(name()), meta.getValue(), timeSeriesMetric.getValue() ); metricFieldType.setMetricFields(metricFields); metricFieldType.setDefaultMetric(defaultMetric.getValue()); - return new AggregateDoubleMetricFieldMapper(name, metricFieldType, metricMappers, this); + return new AggregateDoubleMetricFieldMapper(name(), metricFieldType, metricMappers, this); } } diff --git a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java index cee397d906149..f2b1f013212db 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java +++ b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java @@ -99,8 +99,8 @@ protected Parameter[] getParameters() { @Override public ConstantKeywordFieldMapper build(MapperBuilderContext context) { return new ConstantKeywordFieldMapper( - name, - new ConstantKeywordFieldType(context.buildFullName(name), value.getValue(), meta.getValue()) + name(), + new ConstantKeywordFieldType(context.buildFullName(name()), value.getValue(), meta.getValue()) ); } } diff --git a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapper.java b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapper.java index ad5e224efd5db..878a949a69841 100644 --- a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapper.java +++ b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapper.java @@ -289,14 +289,14 @@ protected Parameter[] getParameters() { @Override public FieldMapper build(MapperBuilderContext context) { - BinaryFieldMapper countFieldMapper = new BinaryFieldMapper.Builder(name + COUNT_FIELD_NAME_SUFFIX, true).build(context); + BinaryFieldMapper countFieldMapper = new BinaryFieldMapper.Builder(name() + COUNT_FIELD_NAME_SUFFIX, true).build(context); boolean isIndexed = indexed.getValue(); FieldType ft = isIndexed ? 
FIELD_TYPE_INDEXED : FIELD_TYPE_NOT_INDEXED; return new CountedKeywordFieldMapper( - name, + name(), ft, new CountedKeywordFieldType( - context.buildFullName(name), + context.buildFullName(name()), isIndexed, false, true, diff --git a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java index c468d7bcd6718..955d658b01bab 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java +++ b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java @@ -199,7 +199,7 @@ public UnsignedLongFieldMapper build(MapperBuilderContext context) { dimension.setValue(true); } UnsignedLongFieldType fieldType = new UnsignedLongFieldType( - context.buildFullName(name), + context.buildFullName(name()), indexed.getValue(), stored.getValue(), hasDocValues.getValue(), @@ -209,7 +209,7 @@ public UnsignedLongFieldMapper build(MapperBuilderContext context) { metric.getValue(), indexMode ); - return new UnsignedLongFieldMapper(name, fieldType, multiFieldsBuilder.build(this, context), copyTo, this); + return new UnsignedLongFieldMapper(name(), fieldType, multiFieldsBuilder.build(this, context), copyTo, this); } } diff --git a/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapper.java b/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapper.java index e233df2af3fbd..40b8bcf208a2d 100644 --- a/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapper.java +++ b/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapper.java @@ -112,14 +112,14 @@ static class Builder extends FieldMapper.Builder { } private VersionStringFieldType buildFieldType(MapperBuilderContext context, FieldType fieldtype) { - return new VersionStringFieldType(context.buildFullName(name), fieldtype, meta.getValue()); + return new VersionStringFieldType(context.buildFullName(name()), fieldtype, meta.getValue()); } @Override public VersionStringFieldMapper build(MapperBuilderContext context) { FieldType fieldtype = new FieldType(Defaults.FIELD_TYPE); return new VersionStringFieldMapper( - name, + name(), fieldtype, buildFieldType(context, fieldtype), multiFieldsBuilder.build(this, context), diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapper.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapper.java index 71fb9b0f3126a..a8f437f476ada 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapper.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapper.java @@ -173,7 +173,7 @@ private FieldValues scriptValues() { GeometryFieldScript.Factory factory = scriptCompiler.compile(this.script.get(), GeometryFieldScript.CONTEXT); return factory == null ? 
null - : (lookup, ctx, doc, consumer) -> factory.newFactory(name, script.get().getParams(), lookup, OnScriptError.FAIL) + : (lookup, ctx, doc, consumer) -> factory.newFactory(name(), script.get().getParams(), lookup, OnScriptError.FAIL) .newInstance(ctx) .runForDoc(doc, consumer); } @@ -194,7 +194,7 @@ public GeoShapeWithDocValuesFieldMapper build(MapperBuilderContext context) { ); GeoShapeParser parser = new GeoShapeParser(geometryParser, orientation.get().value()); GeoShapeWithDocValuesFieldType ft = new GeoShapeWithDocValuesFieldType( - context.buildFullName(name), + context.buildFullName(name()), indexed.get(), hasDocValues.get(), stored.get(), @@ -206,7 +206,7 @@ public GeoShapeWithDocValuesFieldMapper build(MapperBuilderContext context) { ); if (script.get() == null) { return new GeoShapeWithDocValuesFieldMapper( - name, + name(), ft, multiFieldsBuilder.build(this, context), copyTo, @@ -216,7 +216,7 @@ public GeoShapeWithDocValuesFieldMapper build(MapperBuilderContext context) { ); } return new GeoShapeWithDocValuesFieldMapper( - name, + name(), ft, multiFieldsBuilder.build(this, context), copyTo, diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java index 01a2b5f0e5598..1657a3bf7fbce 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java @@ -105,14 +105,14 @@ public FieldMapper build(MapperBuilderContext context) { ); } CartesianPointParser parser = new CartesianPointParser( - name, + name(), p -> CartesianPoint.parsePoint(p, ignoreZValue.get().value()), nullValue.get(), ignoreZValue.get().value(), ignoreMalformed.get().value() ); PointFieldType ft = new PointFieldType( - context.buildFullName(name), + context.buildFullName(name()), indexed.get(), stored.get(), hasDocValues.get(), @@ -120,7 +120,7 @@ public FieldMapper build(MapperBuilderContext context) { nullValue.get(), meta.get() ); - return new PointFieldMapper(name, ft, multiFieldsBuilder.build(this, context), copyTo, parser, this); + return new PointFieldMapper(name(), ft, multiFieldsBuilder.build(this, context), copyTo, parser, this); } } diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java index 0a1c0278d88d7..83e434f829591 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java @@ -118,14 +118,14 @@ public ShapeFieldMapper build(MapperBuilderContext context) { ); Parser parser = new ShapeParser(geometryParser); ShapeFieldType ft = new ShapeFieldType( - context.buildFullName(name), + context.buildFullName(name()), indexed.get(), hasDocValues.get(), orientation.get().value(), parser, meta.get() ); - return new ShapeFieldMapper(name, ft, multiFieldsBuilder.build(this, context), copyTo, parser, this); + return new ShapeFieldMapper(name(), ft, multiFieldsBuilder.build(this, context), copyTo, parser, this); } } diff --git a/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java 
b/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java index 1954e291b1a7f..62306a18d946b 100644 --- a/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java +++ b/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java @@ -240,8 +240,8 @@ Builder nullValue(String nullValue) { @Override public WildcardFieldMapper build(MapperBuilderContext context) { return new WildcardFieldMapper( - name, - new WildcardFieldType(context.buildFullName(name), nullValue.get(), ignoreAbove.get(), indexVersionCreated, meta.get()), + name(), + new WildcardFieldType(context.buildFullName(name()), nullValue.get(), ignoreAbove.get(), indexVersionCreated, meta.get()), ignoreAbove.get(), context.isSourceSynthetic(), multiFieldsBuilder.build(this, context), From 2dc9e89ed65dc2ecddedc87ae6e88a0c846e4fb7 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Tue, 20 Feb 2024 17:35:06 +0100 Subject: [PATCH 12/49] Make name getter method in Mapper.Builder final (#105661) FieldAliasMapper used to override it, but the override did not change any behaviour; the builder can instead call the existing getter. --- .../elasticsearch/index/mapper/FieldAliasMapper.java | 10 ++-------- .../java/org/elasticsearch/index/mapper/Mapper.java | 2 +- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldAliasMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldAliasMapper.java index 97d1b9368a6c9..8aa29e6317d51 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldAliasMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldAliasMapper.java @@ -138,16 +138,10 @@ public boolean supportsVersion(IndexVersion indexCreatedVersion) { } public static class Builder extends Mapper.Builder { - private String name; private String path; protected Builder(String name) { super(name); - this.name = name; - } - - public String name() { - return this.name; } public Builder path(String path) { @@ -157,8 +151,8 @@ public Builder path(String path) { @Override public FieldAliasMapper build(MapperBuilderContext context) { - String fullName = context.buildFullName(name); - return new FieldAliasMapper(name, fullName, path); + String fullName = context.buildFullName(name()); + return new FieldAliasMapper(name(), fullName, path); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java index cf4025150584f..14a71531c6abb 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -31,7 +31,7 @@ protected Builder(String name) { } // TODO rename this to leafName? - public String name() { + public final String name() { return this.name; } From ac08fe6076b12fa223e8a204e62bf59085ed0634 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 20 Feb 2024 17:13:49 +0000 Subject: [PATCH 13/49] Fix HTTP corner-case response leaks (#105617) Enhances `Netty4PipeliningIT` to demonstrate that the pipelined requests do run concurrently, and to explore some corner cases around failures (both client-side and server-side).
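(As a rough sketch of the invariant these tests exercise — simplified, hypothetical names, not the production `Netty4HttpPipeliningHandler` — responses that complete out of order must be buffered and written strictly in request order:)

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

// Hypothetical illustration: early responses wait in a priority queue until the
// response for the next expected sequence number arrives, mirroring the handler's
// writeSequence counter and outboundHoldingQueue.
class PipelineBuffer {
    record SeqResponse(int sequence, String body) {}

    private final PriorityQueue<SeqResponse> holding =
        new PriorityQueue<>(Comparator.comparingInt(SeqResponse::sequence));
    private int writeSequence; // next sequence number allowed onto the wire

    List<SeqResponse> offer(SeqResponse response) {
        holding.add(response);
        List<SeqResponse> writable = new ArrayList<>();
        while (holding.isEmpty() == false && holding.peek().sequence() == writeSequence) {
            writable.add(holding.poll());
            writeSequence++;
        }
        return writable; // possibly empty if earlier responses are still pending
    }
}
```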
This extra testing found two response leaks: one when the channel has closed before even starting to process a request, and a second when we throw an exception during serialization of a chunk in a chunked response with other pipelined responses enqueued for transmission behind it. --- docs/changelog/105617.yaml | 5 + .../http/netty4/Netty4PipeliningIT.java | 245 ++++++++++++++++-- .../netty4/Netty4HttpPipeliningHandler.java | 92 +++---- .../http/netty4/Netty4HttpClient.java | 28 +- .../http/AbstractHttpServerTransport.java | 9 +- .../rest/action/RestActionListener.java | 4 +- 6 files changed, 308 insertions(+), 75 deletions(-) create mode 100644 docs/changelog/105617.yaml diff --git a/docs/changelog/105617.yaml b/docs/changelog/105617.yaml new file mode 100644 index 0000000000000..7fd8203336fff --- /dev/null +++ b/docs/changelog/105617.yaml @@ -0,0 +1,5 @@ +pr: 105617 +summary: Fix HTTP corner-case response leaks +area: Network +type: bug +issues: [] diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4PipeliningIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4PipeliningIT.java index b381e0ea8bfb7..653733b064ba9 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4PipeliningIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4PipeliningIT.java @@ -8,43 +8,134 @@ package org.elasticsearch.http.netty4; -import io.netty.handler.codec.http.FullHttpResponse; import io.netty.util.ReferenceCounted; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.ESNetty4IntegTestCase; -import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.CountDownActionListener; +import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.bytes.ReleasableBytesReference; +import org.elasticsearch.common.bytes.ZeroBytesReference; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.recycler.Recycler; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.core.Strings; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.http.HttpServerTransport; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.test.ESIntegTestCase.Scope; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.ChunkedRestResponseBody; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import 
org.elasticsearch.xcontent.ToXContentObject; +import java.io.IOException; +import java.util.Arrays; import java.util.Collection; +import java.util.List; +import java.util.function.Predicate; +import java.util.function.Supplier; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS; +import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; -@ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class Netty4PipeliningIT extends ESNetty4IntegTestCase { + @Override + protected Collection> nodePlugins() { + return CollectionUtils.concatLists(List.of(CountDown3Plugin.class, ChunkAndFailPlugin.class), super.nodePlugins()); + } + + private static final int MAX_PIPELINE_EVENTS = 10; + + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal, otherSettings)) + .put(SETTING_PIPELINING_MAX_EVENTS.getKey(), MAX_PIPELINE_EVENTS) + .build(); + } + @Override protected boolean addMockHttpTransport() { return false; // enable http } public void testThatNettyHttpServerSupportsPipelining() throws Exception { - String[] requests = new String[] { "/", "/_nodes/stats", "/", "/_cluster/state", "/" }; + runPipeliningTest( + CountDown3Plugin.ROUTE, + "/_nodes", + "/_nodes/stats", + CountDown3Plugin.ROUTE, + "/_cluster/health", + "/_cluster/state", + CountDown3Plugin.ROUTE, + "/_cat/shards" + ); + } + + public void testChunkingFailures() throws Exception { + runPipeliningTest(0, ChunkAndFailPlugin.randomRequestUri()); + runPipeliningTest(0, ChunkAndFailPlugin.randomRequestUri(), "/_cluster/state"); + runPipeliningTest( + -1, // typically get the first 2 responses, but we can hit the failing chunk and close the channel soon enough to lose them too + CountDown3Plugin.ROUTE, + CountDown3Plugin.ROUTE, + ChunkAndFailPlugin.randomRequestUri(), + "/_cluster/health", + CountDown3Plugin.ROUTE + ); + } - HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); - TransportAddress[] boundAddresses = httpServerTransport.boundAddress().boundAddresses(); - TransportAddress transportAddress = randomFrom(boundAddresses); + public void testPipelineOverflow() throws Exception { + final var routes = new String[1 // the first request which never returns a response so doesn't consume a spot in the queue + + MAX_PIPELINE_EVENTS // the responses which fill up the queue + + 1 // to cause the overflow + + between(0, 5) // for good measure, to e.g. make sure we don't leak these responses + ]; + Arrays.fill(routes, "/_cluster/health"); + routes[0] = CountDown3Plugin.ROUTE; // never returns + runPipeliningTest(0, routes); + } - try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { - Collection responses = nettyHttpClient.get(transportAddress.address(), requests); - try { - assertThat(responses, hasSize(5)); + private void runPipeliningTest(String... routes) throws InterruptedException { + runPipeliningTest(routes.length, routes); + } - Collection opaqueIds = Netty4HttpClient.returnOpaqueIds(responses); - assertOpaqueIdsInOrder(opaqueIds); + private void runPipeliningTest(int expectedResponseCount, String... 
routes) throws InterruptedException { + try (var client = new Netty4HttpClient()) { + final var responses = client.get( + randomFrom(internalCluster().getInstance(HttpServerTransport.class).boundAddress().boundAddresses()).address(), + routes + ); + try { + logger.info("response codes: {}", responses.stream().mapToInt(r -> r.status().code()).toArray()); + if (expectedResponseCount >= 0) { + assertThat(responses, hasSize(expectedResponseCount)); + } + assertThat(responses.size(), lessThanOrEqualTo(routes.length)); + assertTrue(responses.stream().allMatch(r -> r.status().code() == 200)); + assertOpaqueIdsInOrder(Netty4HttpClient.returnOpaqueIds(responses)); } finally { responses.forEach(ReferenceCounted::release); } @@ -60,4 +151,128 @@ private void assertOpaqueIdsInOrder(Collection opaqueIds) { } } + private static final ToXContentObject EMPTY_RESPONSE = (builder, params) -> builder.startObject().endObject(); + + /** + * Adds an HTTP route that waits for 3 concurrent executions before returning any of them + */ + public static class CountDown3Plugin extends Plugin implements ActionPlugin { + + static final String ROUTE = "/_test/countdown_3"; + + @Override + public Collection getRestHandlers( + Settings settings, + NamedWriteableRegistry namedWriteableRegistry, + RestController restController, + ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, + SettingsFilter settingsFilter, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier nodesInCluster, + Predicate clusterSupportsFeature + ) { + return List.of(new BaseRestHandler() { + private final SubscribableListener subscribableListener = new SubscribableListener<>(); + private final CountDownActionListener countDownActionListener = new CountDownActionListener( + 3, + subscribableListener.map(v -> EMPTY_RESPONSE) + ); + + private void addListener(ActionListener listener) { + subscribableListener.addListener(listener); + countDownActionListener.onResponse(null); + } + + @Override + public String getName() { + return ROUTE; + } + + @Override + public List routes() { + return List.of(new Route(GET, ROUTE)); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { + return channel -> addListener(new RestToXContentListener<>(channel)); + } + }); + } + } + + /** + * Adds an HTTP route that sends a chunked response which deliberately fails partway through, after emitting the number of bytes given by the fail_after_bytes parameter + */ + public static class ChunkAndFailPlugin extends Plugin implements ActionPlugin { + + static final String ROUTE = "/_test/chunk_and_fail"; + static final String FAIL_AFTER_BYTES_PARAM = "fail_after_bytes"; + + static String randomRequestUri() { + return ROUTE + '?'
+ FAIL_AFTER_BYTES_PARAM + '=' + between(0, ByteSizeUnit.MB.toIntBytes(2)); + } + + @Override + public Collection getRestHandlers( + Settings settings, + NamedWriteableRegistry namedWriteableRegistry, + RestController restController, + ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, + SettingsFilter settingsFilter, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier nodesInCluster, + Predicate clusterSupportsFeature + ) { + return List.of(new BaseRestHandler() { + @Override + public String getName() { + return ROUTE; + } + + @Override + public List routes() { + return List.of(new Route(GET, ROUTE)); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { + final var failAfterBytes = request.paramAsInt(FAIL_AFTER_BYTES_PARAM, -1); + if (failAfterBytes < 0) { + throw new IllegalArgumentException("[" + FAIL_AFTER_BYTES_PARAM + "] must be present and non-negative"); + } + return channel -> client.threadPool() + .executor(randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC)) + .execute(() -> channel.sendResponse(RestResponse.chunked(RestStatus.OK, new ChunkedRestResponseBody() { + int bytesRemaining = failAfterBytes; + + @Override + public boolean isDone() { + return false; + } + + @Override + public ReleasableBytesReference encodeChunk(int sizeHint, Recycler recycler) throws IOException { + assert bytesRemaining >= 0 : "already failed"; + if (bytesRemaining == 0) { + bytesRemaining = -1; + throw new IOException("simulated failure"); + } else { + final var bytesToSend = between(1, bytesRemaining); + bytesRemaining -= bytesToSend; + return ReleasableBytesReference.wrap(new ZeroBytesReference(bytesToSend)); + } + } + + @Override + public String getResponseContentTypeString() { + return RestResponse.TEXT_CONTENT_TYPE; + } + }, null))); + } + }); + } + } } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java index 908bfa8a9fc3b..86fa635078d4f 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java @@ -28,6 +28,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.core.Booleans; import org.elasticsearch.core.Nullable; @@ -41,9 +42,7 @@ import java.io.IOException; import java.nio.channels.ClosedChannelException; import java.util.ArrayDeque; -import java.util.ArrayList; import java.util.Comparator; -import java.util.List; import java.util.PriorityQueue; import java.util.Queue; @@ -165,8 +164,10 @@ public void write(final ChannelHandlerContext ctx, final Object msg, final Chann } catch (IllegalStateException e) { ctx.channel().close(); } finally { - if (success == false) { - promise.setFailure(new ClosedChannelException()); + if (success == false && promise.isDone() == false) { + // The preceding failure may already have failed the promise; use tryFailure() to avoid log noise about double-completion, + // but also check isDone() first to avoid even constructing another exception in most cases. 
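+            // (Netty's tryFailure() just returns false if the promise is already complete, whereas setFailure() would throw an IllegalStateException.)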
+ promise.tryFailure(new ClosedChannelException()); } } } @@ -190,7 +191,7 @@ private void doWriteQueued(ChannelHandlerContext ctx) throws IOException { SPLIT_THRESHOLD = (int) (NettyAllocator.suggestedMaxAllocationSize() * 0.99); } - private void doWrite(ChannelHandlerContext ctx, Netty4HttpResponse readyResponse, ChannelPromise promise) throws IOException { + private void doWrite(ChannelHandlerContext ctx, Netty4HttpResponse readyResponse, ChannelPromise promise) { assert currentChunkedWrite == null : "unexpected existing write [" + currentChunkedWrite + "]"; assert readyResponse != null : "cannot write null response"; assert readyResponse.getSequence() == writeSequence; @@ -216,8 +217,7 @@ private void doWriteFullResponse(ChannelHandlerContext ctx, Netty4FullHttpRespon writeSequence++; } - private void doWriteChunkedResponse(ChannelHandlerContext ctx, Netty4ChunkedHttpResponse readyResponse, ChannelPromise promise) - throws IOException { + private void doWriteChunkedResponse(ChannelHandlerContext ctx, Netty4ChunkedHttpResponse readyResponse, ChannelPromise promise) { final PromiseCombiner combiner = new PromiseCombiner(ctx.executor()); final ChannelPromise first = ctx.newPromise(); combiner.add((Future) first); @@ -228,7 +228,7 @@ private void doWriteChunkedResponse(ChannelHandlerContext ctx, Netty4ChunkedHttp // We were able to write out the first chunk directly, try writing out subsequent chunks until the channel becomes unwritable. // NB "writable" means there's space in the downstream ChannelOutboundBuffer, we aren't trying to saturate the physical channel. while (ctx.channel().isWritable()) { - if (writeChunk(ctx, combiner, responseBody)) { + if (writeChunk(ctx, currentChunkedWrite)) { finishChunkedWrite(); return; } @@ -237,12 +237,15 @@ private void doWriteChunkedResponse(ChannelHandlerContext ctx, Netty4ChunkedHttp } private void finishChunkedWrite() { - assert currentChunkedWrite != null; + if (currentChunkedWrite == null) { + // failure during chunked response serialization, we're closing the channel + return; + } assert currentChunkedWrite.responseBody().isDone(); final var finishingWrite = currentChunkedWrite; currentChunkedWrite = null; writeSequence++; - finishingWrite.combiner.finish(finishingWrite.onDone()); + finishingWrite.combiner().finish(finishingWrite.onDone()); } private void splitAndWrite(ChannelHandlerContext ctx, Netty4FullHttpResponse msg, ChannelPromise promise) { @@ -286,7 +289,7 @@ private boolean doFlush(ChannelHandlerContext ctx) throws IOException { assert ctx.executor().inEventLoop(); final Channel channel = ctx.channel(); if (channel.isActive() == false) { - failQueuedWrites(); + failQueuedWrites(ctx); return false; } while (channel.isWritable()) { @@ -302,7 +305,7 @@ private boolean doFlush(ChannelHandlerContext ctx) throws IOException { if (currentWrite == null) { // no bytes were found queued, check if a chunked message might have become writable if (currentChunkedWrite != null) { - if (writeChunk(ctx, currentChunkedWrite.combiner, currentChunkedWrite.responseBody())) { + if (writeChunk(ctx, currentChunkedWrite)) { finishChunkedWrite(); } continue; @@ -313,17 +316,21 @@ private boolean doFlush(ChannelHandlerContext ctx) throws IOException { } ctx.flush(); if (channel.isActive() == false) { - failQueuedWrites(); + failQueuedWrites(ctx); } return true; } - private boolean writeChunk(ChannelHandlerContext ctx, PromiseCombiner combiner, ChunkedRestResponseBody body) throws IOException { + private boolean writeChunk(ChannelHandlerContext ctx, 
ChunkedWrite chunkedWrite) { + final var body = chunkedWrite.responseBody(); + final var combiner = chunkedWrite.combiner(); assert body.isDone() == false : "should not continue to try and serialize once done"; - final ReleasableBytesReference bytes = body.encodeChunk( - Netty4WriteThrottlingHandler.MAX_BYTES_PER_WRITE, - serverTransport.recycler() - ); + final ReleasableBytesReference bytes; + try { + bytes = body.encodeChunk(Netty4WriteThrottlingHandler.MAX_BYTES_PER_WRITE, serverTransport.recycler()); + } catch (Exception e) { + return handleChunkingFailure(ctx, chunkedWrite, e); + } final ByteBuf content = Netty4Utils.toByteBuf(bytes); final boolean done = body.isDone(); final ChannelFuture f = ctx.write(done ? new DefaultLastHttpContent(content) : new DefaultHttpContent(content)); @@ -332,39 +339,30 @@ private boolean writeChunk(ChannelHandlerContext ctx, PromiseCombiner combiner, return done; } - private void failQueuedWrites() { + private boolean handleChunkingFailure(ChannelHandlerContext ctx, ChunkedWrite chunkedWrite, Exception e) { + logger.error(Strings.format("caught exception while encoding response chunk, closing connection %s", ctx.channel()), e); + assert currentChunkedWrite == chunkedWrite; + currentChunkedWrite = null; + chunkedWrite.combiner().add(ctx.channel().close()); + chunkedWrite.combiner().add(ctx.newFailedFuture(e)); + chunkedWrite.combiner().finish(chunkedWrite.onDone()); + return true; + } + + private void failQueuedWrites(ChannelHandlerContext ctx) { WriteOperation queuedWrite; while ((queuedWrite = queuedWrites.poll()) != null) { queuedWrite.failAsClosedChannel(); } if (currentChunkedWrite != null) { - safeFailPromise(currentChunkedWrite.onDone, new ClosedChannelException()); - currentChunkedWrite = null; - } - } - - @Override - public void close(ChannelHandlerContext ctx, ChannelPromise promise) { - if (currentChunkedWrite != null) { - safeFailPromise(currentChunkedWrite.onDone, new ClosedChannelException()); + final var chunkedWrite = currentChunkedWrite; currentChunkedWrite = null; + chunkedWrite.combiner().add(ctx.newFailedFuture(new ClosedChannelException())); + chunkedWrite.combiner().finish(chunkedWrite.onDone()); } - List> inflightResponses = removeAllInflightResponses(); - - if (inflightResponses.isEmpty() == false) { - ClosedChannelException closedChannelException = new ClosedChannelException(); - for (Tuple inflightResponse : inflightResponses) { - safeFailPromise(inflightResponse.v2(), closedChannelException); - } - } - ctx.close(promise); - } - - private void safeFailPromise(ChannelPromise promise, Exception ex) { - try { - promise.setFailure(ex); - } catch (RuntimeException e) { - logger.error("unexpected error while releasing pipelined http responses", e); + Tuple pipelinedWrite; + while ((pipelinedWrite = outboundHoldingQueue.poll()) != null) { + pipelinedWrite.v2().tryFailure(new ClosedChannelException()); } } @@ -398,12 +396,6 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { } } - private List> removeAllInflightResponses() { - ArrayList> responses = new ArrayList<>(outboundHoldingQueue); - outboundHoldingQueue.clear(); - return responses; - } - private record WriteOperation(HttpObject msg, ChannelPromise promise) { void failAsClosedChannel() { diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java index 2524be154414e..d6ee096b8dfd8 100644 --- 
a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java @@ -18,6 +18,7 @@ import io.netty.channel.SimpleChannelInboundHandler; import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.SocketChannel; +import io.netty.handler.codec.PrematureChannelClosureException; import io.netty.handler.codec.http.DefaultFullHttpRequest; import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpResponse; @@ -31,8 +32,8 @@ import io.netty.handler.codec.http.HttpResponse; import io.netty.handler.codec.http.HttpVersion; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.netty4.NettyAllocator; @@ -173,10 +174,9 @@ private static class CountDownLatchHandler extends ChannelInitializer() { @Override protected void channelRead0(ChannelHandlerContext ctx, HttpObject msg) { @@ -189,9 +189,25 @@ protected void channelRead0(ChannelHandlerContext ctx, HttpObject msg) { } @Override - public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { - super.exceptionCaught(ctx, cause); - latch.countDown(); + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { + if (cause instanceof PrematureChannelClosureException) { + // no more requests coming, so fast-forward the latch + fastForward(); + } else { + ExceptionsHelper.maybeDieOnAnotherThread(new AssertionError(cause)); + } + } + + @Override + public void channelInactive(ChannelHandlerContext ctx) throws Exception { + fastForward(); + super.channelInactive(ctx); + } + + private void fastForward() { + while (latch.getCount() > 0) { + latch.countDown(); + } } }); } diff --git a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java index cfd72bf6ae4a5..f9005f6e37889 100644 --- a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java @@ -424,7 +424,14 @@ public void incomingRequest(final HttpRequest httpRequest, final HttpChannel htt // The channel may not be present if the close listener (set in serverAcceptedChannel) runs before this method because the // connection closed early if (trackingChannel == null) { - logger.warn("http channel [{}] missing tracking channel", httpChannel); + httpRequest.release(); + logger.warn( + "http channel [{}] closed before starting to handle [{}][{}][{}]", + httpChannel, + httpRequest.header(Task.X_OPAQUE_ID_HTTP_HEADER), + httpRequest.method(), + httpRequest.uri() + ); return; } trackingChannel.incomingRequest(); diff --git a/server/src/main/java/org/elasticsearch/rest/action/RestActionListener.java b/server/src/main/java/org/elasticsearch/rest/action/RestActionListener.java index c893a43417069..13155d00c8368 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/RestActionListener.java +++ b/server/src/main/java/org/elasticsearch/rest/action/RestActionListener.java @@ -21,9 +21,7 @@ */ public abstract class RestActionListener implements ActionListener { - // we use static here so we won't have to pass the actual logger each time for a very rare case of logging - // where the settings don't 
matter that much - private static final Logger logger = LogManager.getLogger(RestResponseListener.class); + private static final Logger logger = LogManager.getLogger(RestActionListener.class); protected final RestChannel channel; From f4702fa2f0b97afbfbc37999e6f082ad36a47186 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 20 Feb 2024 09:32:55 -0800 Subject: [PATCH 14/49] Fix dynamic mapping condition when create tsid (#105636) We accidentally reversed the dynamicMappersExists condition. The impact of this bug is minor, primarily resulting in the return of a different error message. --- docs/changelog/105636.yaml | 5 ++ .../rest-api-spec/test/tsdb/20_mapping.yml | 6 +- .../mapper/TsidExtractingIdFieldMapper.java | 2 +- .../mapper/TimeSeriesIdFieldMapperTests.java | 72 +++++++++++++++++++ 4 files changed, 81 insertions(+), 4 deletions(-) create mode 100644 docs/changelog/105636.yaml diff --git a/docs/changelog/105636.yaml b/docs/changelog/105636.yaml new file mode 100644 index 0000000000000..01f27199771d4 --- /dev/null +++ b/docs/changelog/105636.yaml @@ -0,0 +1,5 @@ +pr: 105636 +summary: Flip dynamic mapping condition when create tsid +area: TSDB +type: bug +issues: [] diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml index 7edae8f264c76..1ff32192b9e08 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml @@ -241,8 +241,8 @@ runtime field matching routing path: --- "dynamic: runtime matches routing_path": - skip: - version: " - 8.7.99" - reason: routing_path error message updated in 8.8.0 + version: " - 8.13.99" + reason: routing_path error message updated in 8.8.0 and has_dynamic_mapping condition fixed in 8.14.0 - do: indices.create: @@ -272,7 +272,7 @@ runtime field matching routing path: index: test body: - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:04.467Z", "dim_kw": "dim", "dim": {"foo": "a"}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "dim_kw": "dim", "dim": {"foo": "a"}, "extra_field": 100}' - match: {items.0.index.error.reason: "All fields that match routing_path must be keywords with [time_series_dimension: true] or flattened fields with a list of dimensions in [time_series_dimensions] and without the [script] parameter. [dim.foo] was a runtime [keyword]."} --- diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapper.java index 54c495a2c9a6c..1e613767c2c89 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapper.java @@ -53,7 +53,7 @@ public static void createField(DocumentParserContext context, IndexRouting.Extra } long timestamp = timestampField.numericValue().longValue(); byte[] suffix = new byte[16]; - String id = createId(context.hasDynamicMappers() == false, routingBuilder, tsid, timestamp, suffix); + String id = createId(context.hasDynamicMappers(), routingBuilder, tsid, timestamp, suffix); /* * Make sure that _id from extracting the tsid matches that _id * from extracting the _source. 
This should be true for all valid diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java index d4dc03d22441b..94a0f2296bbfb 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java @@ -12,16 +12,23 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.ByteArrayStreamInput; +import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; import java.io.IOException; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -653,4 +660,69 @@ public void testFewerDimensions() throws IOException { ParsedDocument doc2 = parseDocument(docMapper, d -> d.field("a", a).field("b", b).field("c", c)); assertThat(doc1.rootDoc().getBinaryValue("_tsid").bytes, not(doc2.rootDoc().getBinaryValue("_tsid").bytes)); } + + public void testParseWithDynamicMapping() { + Settings indexSettings = Settings.builder() + .put(IndexSettings.MODE.getKey(), "time_series") + .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "dim") + .build(); + // without _id + { + MapperService mapper = createMapperService(IndexVersion.current(), indexSettings, () -> false); + SourceToParse source = new SourceToParse(null, new BytesArray(""" + { + "@timestamp": 1609459200000, + "dim": "6a841a21", + "value": 100 + }"""), XContentType.JSON); + Engine.Index index = IndexShard.prepareIndex( + mapper, + source, + UNASSIGNED_SEQ_NO, + randomNonNegativeLong(), + Versions.MATCH_ANY, + VersionType.INTERNAL, + Engine.Operation.Origin.PRIMARY, + -1, + false, + UNASSIGNED_SEQ_NO, + 0, + System.nanoTime() + ); + assertNotNull(index.parsedDoc().dynamicMappingsUpdate()); + } + // with _id + { + MapperService mapper = createMapperService(IndexVersion.current(), indexSettings, () -> false); + SourceToParse source = new SourceToParse("no-such-tsid", new BytesArray(""" + { + "@timestamp": 1609459200000, + "dim": "6a841a21", + "value": 100 + }"""), XContentType.JSON); + var failure = expectThrows(DocumentParsingException.class, () -> { + IndexShard.prepareIndex( + mapper, + source, + UNASSIGNED_SEQ_NO, + randomNonNegativeLong(), + Versions.MATCH_ANY, + VersionType.INTERNAL, + Engine.Operation.Origin.PRIMARY, + -1, + false, + UNASSIGNED_SEQ_NO, + 0, + System.nanoTime() + ); + }); + assertThat( + failure.getMessage(), + equalTo( + "[5:1] failed to parse: _id must be unset or set to [AAAAAMpxfIC8Wpr0AAABdrs-cAA]" + + " but was [no-such-tsid] because [index] is in time_series mode" + ) + ); + } + } } From 065158e2229d02f4fe71b07a3a197195417ab312 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits 
Date: Tue, 20 Feb 2024 20:55:27 +0200 Subject: [PATCH 15/49] Expose owner realm_type in the returned API key information (#105629) When querying or getting API key information, ES returns the key owner's username and realm (i.e. the realm name that authenticated the username that last updated the API key). This PR adds the realm_type to the information on the key's owner. --- docs/changelog/105629.yaml | 5 + .../rest-api/security/get-api-keys.asciidoc | 2 + .../rest-api/security/query-api-key.asciidoc | 2 + .../core/security/action/apikey/ApiKey.java | 36 ++++- .../security/action/apikey/ApiKeyTests.java | 3 + .../action/apikey/GetApiKeyResponseTests.java | 9 ++ .../xpack/security/apikey/ApiKeyRestIT.java | 4 + .../authc/apikey/ApiKeySingleNodeTests.java | 9 +- .../xpack/security/authc/ApiKeyService.java | 2 +- .../security/authc/ApiKeyServiceTests.java | 142 +++++++++++++++++- .../apikey/RestGetApiKeyActionTests.java | 4 + 11 files changed, 205 insertions(+), 13 deletions(-) create mode 100644 docs/changelog/105629.yaml diff --git a/docs/changelog/105629.yaml b/docs/changelog/105629.yaml new file mode 100644 index 0000000000000..00fa73a759558 --- /dev/null +++ b/docs/changelog/105629.yaml @@ -0,0 +1,5 @@ +pr: 105629 +summary: Show owner `realm_type` for returned API keys +area: Security +type: enhancement +issues: [] diff --git a/docs/reference/rest-api/security/get-api-keys.asciidoc b/docs/reference/rest-api/security/get-api-keys.asciidoc index d75edda9296a5..a02e8adb67b4f 100644 --- a/docs/reference/rest-api/security/get-api-keys.asciidoc +++ b/docs/reference/rest-api/security/get-api-keys.asciidoc @@ -134,6 +134,7 @@ A successful call returns a JSON structure that contains the information of the "invalidated": false, <6> "username": "myuser", <7> "realm": "native1", <8> + "realm_type": "native", "metadata": { <9> "application": "myapp" }, @@ -289,6 +290,7 @@ A successful call returns a JSON structure that contains the information of one "invalidated": false, "username": "myuser", "realm": "native1", + "realm_type": "native", "metadata": { "application": "myapp" }, diff --git a/docs/reference/rest-api/security/query-api-key.asciidoc b/docs/reference/rest-api/security/query-api-key.asciidoc index 88fef9a21ff88..e16ba267203b8 100644 --- a/docs/reference/rest-api/security/query-api-key.asciidoc +++ b/docs/reference/rest-api/security/query-api-key.asciidoc @@ -299,6 +299,7 @@ retrieved from one or more API keys: "invalidated": false, "username": "elastic", "realm": "reserved", + "realm_type": "reserved", "metadata": { "letter": "a" }, @@ -411,6 +412,7 @@ A successful call returns a JSON structure for API key information including its "invalidated": false, "username": "myuser", "realm": "native1", + "realm_type": "native", "metadata": { "application": "my-application" }, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java index 3ab487560f2b8..ae345870e718b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java @@ -16,6 +16,7 @@ import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; import 
org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.RoleDescriptorsIntersection; @@ -88,6 +89,8 @@ public String value() { private final Instant invalidation; private final String username; private final String realm; + @Nullable + private final String realmType; private final Map metadata; @Nullable private final List roleDescriptors; @@ -104,6 +107,7 @@ public ApiKey( @Nullable Instant invalidation, String username, String realm, + @Nullable String realmType, @Nullable Map metadata, @Nullable List roleDescriptors, @Nullable List limitedByRoleDescriptors @@ -118,6 +122,7 @@ public ApiKey( invalidation, username, realm, + realmType, metadata, roleDescriptors, limitedByRoleDescriptors == null ? null : new RoleDescriptorsIntersection(List.of(Set.copyOf(limitedByRoleDescriptors))) @@ -134,6 +139,7 @@ private ApiKey( Instant invalidation, String username, String realm, + @Nullable String realmType, @Nullable Map metadata, @Nullable List roleDescriptors, @Nullable RoleDescriptorsIntersection limitedBy @@ -150,6 +156,7 @@ private ApiKey( this.invalidation = (invalidation != null) ? Instant.ofEpochMilli(invalidation.toEpochMilli()) : null; this.username = username; this.realm = realm; + this.realmType = realmType; this.metadata = metadata == null ? Map.of() : metadata; this.roleDescriptors = roleDescriptors != null ? List.copyOf(roleDescriptors) : null; // This assertion will need to be changed (or removed) when derived keys are properly supported @@ -193,6 +200,17 @@ public String getRealm() { return realm; } + public @Nullable String getRealmType() { + return realmType; + } + + public @Nullable RealmConfig.RealmIdentifier getRealmIdentifier() { + if (realm != null && realmType != null) { + return new RealmConfig.RealmIdentifier(realmType, realm); + } + return null; + } + public Map getMetadata() { return metadata; } @@ -223,7 +241,11 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t if (invalidation != null) { builder.field("invalidation", invalidation.toEpochMilli()); } - builder.field("username", username).field("realm", realm).field("metadata", (metadata == null ? Map.of() : metadata)); + builder.field("username", username).field("realm", realm); + if (realmType != null) { + builder.field("realm_type", realmType); + } + builder.field("metadata", (metadata == null ? Map.of() : metadata)); if (roleDescriptors != null) { builder.startObject("role_descriptors"); for (var roleDescriptor : roleDescriptors) { @@ -287,6 +309,7 @@ public int hashCode() { invalidation, username, realm, + realmType, metadata, roleDescriptors, limitedBy @@ -314,6 +337,7 @@ public boolean equals(Object obj) { && Objects.equals(invalidation, other.invalidation) && Objects.equals(username, other.username) && Objects.equals(realm, other.realm) + && Objects.equals(realmType, other.realmType) && Objects.equals(metadata, other.metadata) && Objects.equals(roleDescriptors, other.roleDescriptors) && Objects.equals(limitedBy, other.limitedBy); @@ -331,9 +355,10 @@ public boolean equals(Object obj) { (args[6] == null) ? null : Instant.ofEpochMilli((Long) args[6]), (String) args[7], (String) args[8], - (args[9] == null) ? null : (Map) args[9], - (List) args[10], - (RoleDescriptorsIntersection) args[11] + (String) args[9], + (args[10] == null) ? 
null : (Map) args[10], + (List) args[11], + (RoleDescriptorsIntersection) args[12] ); }); static { @@ -346,6 +371,7 @@ public boolean equals(Object obj) { PARSER.declareLong(optionalConstructorArg(), new ParseField("invalidation")); PARSER.declareString(constructorArg(), new ParseField("username")); PARSER.declareString(constructorArg(), new ParseField("realm")); + PARSER.declareStringOrNull(optionalConstructorArg(), new ParseField("realm_type")); PARSER.declareObject(optionalConstructorArg(), (p, c) -> p.map(), new ParseField("metadata")); PARSER.declareNamedObjects(optionalConstructorArg(), (p, c, n) -> { p.nextToken(); @@ -383,6 +409,8 @@ public String toString() { + username + ", realm=" + realm + + ", realm_type=" + + realmType + ", metadata=" + metadata + ", role_descriptors=" diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKeyTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKeyTests.java index 02bce50ed3483..361928590556a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKeyTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKeyTests.java @@ -68,6 +68,7 @@ public void testXContent() throws IOException { assertThat(map.get("invalidated"), is(apiKey.isInvalidated())); assertThat(map.get("username"), equalTo(apiKey.getUsername())); assertThat(map.get("realm"), equalTo(apiKey.getRealm())); + assertThat(map.get("realm_type"), equalTo(apiKey.getRealmType())); assertThat(map.get("metadata"), equalTo(Objects.requireNonNullElseGet(apiKey.getMetadata(), Map::of))); if (apiKey.getRoleDescriptors() == null) { @@ -172,6 +173,7 @@ public static ApiKey randomApiKeyInstance() { : null; final String username = randomAlphaOfLengthBetween(4, 10); final String realmName = randomAlphaOfLengthBetween(3, 8); + final String realmType = randomFrom(randomAlphaOfLengthBetween(3, 8), null); final Map metadata = randomMetadata(); final List roleDescriptors = type == ApiKey.Type.CROSS_CLUSTER ? 
List.of(randomCrossClusterAccessRoleDescriptor()) @@ -190,6 +192,7 @@ public static ApiKey randomApiKeyInstance() { invalidation, username, realmName, + realmType, metadata, roleDescriptors, limitedByRoleDescriptors diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyResponseTests.java index 0b287f2fb6329..d5de84045096a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyResponseTests.java @@ -61,6 +61,7 @@ public void testToXContent() throws IOException { "realm-x", null, null, + null, List.of() // empty limited-by role descriptor to simulate derived keys ); ApiKey apiKeyInfo2 = createApiKeyInfo( @@ -73,6 +74,7 @@ public void testToXContent() throws IOException { Instant.ofEpochMilli(100000000L), "user-b", "realm-y", + "realm-type-y", Map.of(), List.of(), limitedByRoleDescriptors @@ -87,6 +89,7 @@ public void testToXContent() throws IOException { Instant.ofEpochMilli(100000000L), "user-c", "realm-z", + "realm-type-z", Map.of("foo", "bar"), roleDescriptors, limitedByRoleDescriptors @@ -111,6 +114,7 @@ public void testToXContent() throws IOException { Instant.ofEpochMilli(100000000L), "user-c", "realm-z", + "realm-type-z", Map.of("foo", "bar"), crossClusterAccessRoleDescriptors, null @@ -145,6 +149,7 @@ public void testToXContent() throws IOException { "invalidation": 100000000, "username": "user-b", "realm": "realm-y", + "realm_type": "realm-type-y", "metadata": {}, "role_descriptors": {}, "limited_by": [ @@ -185,6 +190,7 @@ public void testToXContent() throws IOException { "invalidation": 100000000, "username": "user-c", "realm": "realm-z", + "realm_type": "realm-type-z", "metadata": { "foo": "bar" }, @@ -252,6 +258,7 @@ public void testToXContent() throws IOException { "invalidation": 100000000, "username": "user-c", "realm": "realm-z", + "realm_type": "realm-type-z", "metadata": { "foo": "bar" }, @@ -321,6 +328,7 @@ private ApiKey createApiKeyInfo( Instant invalidation, String username, String realm, + String realmType, Map metadata, List roleDescriptors, List limitedByRoleDescriptors @@ -335,6 +343,7 @@ private ApiKey createApiKeyInfo( invalidation, username, realm, + realmType, metadata, roleDescriptors, limitedByRoleDescriptors diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java index 4e639e14eda6e..850dfe5dffa99 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java @@ -300,6 +300,8 @@ public void testGrantApiKeyForOtherUserWithPassword() throws IOException { ApiKey apiKey = getApiKey((String) responseBody.get("id")); assertThat(apiKey.getUsername(), equalTo(END_USER)); + assertThat(apiKey.getRealm(), equalTo("default_native")); + assertThat(apiKey.getRealmType(), equalTo("native")); } public void testGrantApiKeyForOtherUserWithAccessToken() throws IOException { @@ -329,6 +331,8 @@ public void testGrantApiKeyForOtherUserWithAccessToken() 
throws IOException { ApiKey apiKey = getApiKey((String) responseBody.get("id")); assertThat(apiKey.getUsername(), equalTo(END_USER)); + assertThat(apiKey.getRealm(), equalTo("default_native")); + assertThat(apiKey.getRealmType(), equalTo("native")); Instant minExpiry = before.plus(2, ChronoUnit.HOURS); Instant maxExpiry = after.plus(2, ChronoUnit.HOURS); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java index bffc6c165c818..707e7b2846a9b 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java @@ -312,7 +312,10 @@ public void testGrantApiKeyForUserWithRunAs() throws IOException { final String apiKeyId = createApiKeyResponse.getId(); final String base64ApiKeyKeyValue = Base64.getEncoder() .encodeToString((apiKeyId + ":" + createApiKeyResponse.getKey().toString()).getBytes(StandardCharsets.UTF_8)); - assertThat(securityClient.getApiKey(apiKeyId).getUsername(), equalTo("user2")); + ApiKey apiKey = securityClient.getApiKey(apiKeyId); + assertThat(apiKey.getUsername(), equalTo("user2")); + assertThat(apiKey.getRealm(), equalTo("index")); + assertThat(apiKey.getRealmType(), equalTo("native")); final Client clientWithGrantedKey = client().filterWithHeader(Map.of("Authorization", "ApiKey " + base64ApiKeyKeyValue)); // The API key has privileges (inherited from user2) to check cluster health clientWithGrantedKey.execute(TransportClusterHealthAction.TYPE, new ClusterHealthRequest()).actionGet(); @@ -618,6 +621,7 @@ public void testCreateCrossClusterApiKey() throws IOException { assertThat(getApiKeyInfo.getMetadata(), anEmptyMap()); assertThat(getApiKeyInfo.getUsername(), equalTo("test_user")); assertThat(getApiKeyInfo.getRealm(), equalTo("file")); + assertThat(getApiKeyInfo.getRealmType(), equalTo("file")); // Check the API key attributes with Query API final QueryApiKeyRequest queryApiKeyRequest = new QueryApiKeyRequest( @@ -638,6 +642,7 @@ public void testCreateCrossClusterApiKey() throws IOException { assertThat(queryApiKeyInfo.getMetadata(), anEmptyMap()); assertThat(queryApiKeyInfo.getUsername(), equalTo("test_user")); assertThat(queryApiKeyInfo.getRealm(), equalTo("file")); + assertThat(queryApiKeyInfo.getRealmType(), equalTo("file")); } public void testUpdateCrossClusterApiKey() throws IOException { @@ -672,6 +677,7 @@ public void testUpdateCrossClusterApiKey() throws IOException { assertThat(getApiKeyInfo.getMetadata(), anEmptyMap()); assertThat(getApiKeyInfo.getUsername(), equalTo("test_user")); assertThat(getApiKeyInfo.getRealm(), equalTo("file")); + assertThat(getApiKeyInfo.getRealmType(), equalTo("file")); final CrossClusterApiKeyRoleDescriptorBuilder roleDescriptorBuilder; final boolean shouldUpdateAccess = randomBoolean(); @@ -745,6 +751,7 @@ public void testUpdateCrossClusterApiKey() throws IOException { assertThat(queryApiKeyInfo.getMetadata(), equalTo(updateMetadata == null ? 
Map.of() : updateMetadata)); assertThat(queryApiKeyInfo.getUsername(), equalTo("test_user")); assertThat(queryApiKeyInfo.getRealm(), equalTo("file")); + assertThat(queryApiKeyInfo.getRealmType(), equalTo("file")); } // Cross-cluster API keys cannot be created by an API key even if it has manage_security privilege diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java index 7cf045ad0f9f5..fea0c812e7e42 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java @@ -1937,7 +1937,6 @@ public void getApiKeys( public void queryApiKeys(SearchRequest searchRequest, boolean withLimitedBy, ActionListener listener) { ensureEnabled(); - final SecurityIndexManager frozenSecurityIndex = securityIndex.defensiveCopy(); if (frozenSecurityIndex.indexExists() == false) { logger.debug("security index does not exist"); @@ -2004,6 +2003,7 @@ private ApiKey convertSearchHitToApiKeyInfo(SearchHit hit, boolean withLimitedBy apiKeyDoc.invalidation != -1 ? Instant.ofEpochMilli(apiKeyDoc.invalidation) : null, (String) apiKeyDoc.creator.get("principal"), (String) apiKeyDoc.creator.get("realm"), + (String) apiKeyDoc.creator.get("realm_type"), metadata, roleDescriptors, limitedByRoleDescriptors diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index ac11dee8d4a48..df454ddffe96f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -30,12 +30,14 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.update.UpdateRequestBuilder; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -54,6 +56,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.Tuple; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; @@ -91,12 +94,14 @@ import org.elasticsearch.xpack.core.security.action.apikey.CrossClusterApiKeyRoleDescriptorBuilder; import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyResponse; import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.QueryApiKeyResponse; import org.elasticsearch.xpack.core.security.authc.Authentication; import 
org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; import org.elasticsearch.xpack.core.security.authc.AuthenticationField; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.AuthenticationTestHelper; import org.elasticsearch.xpack.core.security.authc.AuthenticationTests; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmDomain; import org.elasticsearch.xpack.core.security.authc.support.AuthenticationContextSerializer; import org.elasticsearch.xpack.core.security.authc.support.Hasher; @@ -324,6 +329,109 @@ public void testGetApiKeys() throws Exception { assertThat(getApiKeyResponse.getApiKeyInfos(), emptyArray()); } + @SuppressWarnings("unchecked") + public void testApiKeysOwnerRealmIdentifier() throws Exception { + String realm1 = randomAlphaOfLength(4); + String realm1Type = randomAlphaOfLength(4); + String realm2 = randomAlphaOfLength(4); + when(clock.instant()).thenReturn(Instant.ofEpochMilli(randomMillisUpToYear9999())); + when(client.threadPool()).thenReturn(threadPool); + when(client.prepareSearch(eq(SECURITY_MAIN_ALIAS))).thenReturn(new SearchRequestBuilder(client)); + ApiKeyService service = createApiKeyService( + Settings.builder().put(XPackSettings.API_KEY_SERVICE_ENABLED_SETTING.getKey(), true).build() + ); + CheckedSupplier searchResponseSupplier = () -> { + // 2 API keys, one with a "null" (missing) realm type + SearchHit[] searchHits = new SearchHit[2]; + searchHits[0] = SearchHit.unpooled(randomIntBetween(0, Integer.MAX_VALUE), "0"); + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + Map apiKeySourceDoc = buildApiKeySourceDoc("some_hash".toCharArray()); + ((Map) apiKeySourceDoc.get("creator")).put("realm", realm1); + ((Map) apiKeySourceDoc.get("creator")).put("realm_type", realm1Type); + builder.map(apiKeySourceDoc); + searchHits[0].sourceRef(BytesReference.bytes(builder)); + } + searchHits[1] = SearchHit.unpooled(randomIntBetween(0, Integer.MAX_VALUE), "1"); + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + Map apiKeySourceDoc = buildApiKeySourceDoc("some_hash".toCharArray()); + ((Map) apiKeySourceDoc.get("creator")).put("realm", realm2); + if (randomBoolean()) { + ((Map) apiKeySourceDoc.get("creator")).put("realm_type", null); + } else { + ((Map) apiKeySourceDoc.get("creator")).remove("realm_type"); + } + builder.map(apiKeySourceDoc); + searchHits[1].sourceRef(BytesReference.bytes(builder)); + } + return new SearchResponse( + SearchHits.unpooled( + searchHits, + new TotalHits(searchHits.length, TotalHits.Relation.EQUAL_TO), + randomFloat(), + null, + null, + null + ), + null, + null, + false, + null, + null, + 0, + randomAlphaOfLengthBetween(3, 8), + 1, + 1, + 0, + 10, + null, + null + ); + }; + doAnswer(invocation -> { + ActionListener.respondAndRelease((ActionListener) invocation.getArguments()[1], searchResponseSupplier.get()); + return null; + }).when(client).search(any(SearchRequest.class), anyActionListener()); + doAnswer(invocation -> { + ActionListener.respondAndRelease((ActionListener) invocation.getArguments()[2], searchResponseSupplier.get()); + return null; + }).when(client).execute(eq(TransportSearchAction.TYPE), any(SearchRequest.class), anyActionListener()); + { + PlainActionFuture getApiKeyResponsePlainActionFuture = new PlainActionFuture<>(); + service.getApiKeys( + generateRandomStringArray(4, 4, true, true), + randomFrom(randomAlphaOfLengthBetween(3, 8), null), 
+ randomFrom(randomAlphaOfLengthBetween(3, 8), null), + generateRandomStringArray(4, 4, true, true), + randomBoolean(), + randomBoolean(), + getApiKeyResponsePlainActionFuture + ); + GetApiKeyResponse getApiKeyResponse = getApiKeyResponsePlainActionFuture.get(); + assertThat(getApiKeyResponse.getApiKeyInfos().length, is(2)); + assertThat(getApiKeyResponse.getApiKeyInfos()[0].getRealm(), is(realm1)); + assertThat(getApiKeyResponse.getApiKeyInfos()[0].getRealmType(), is(realm1Type)); + assertThat(getApiKeyResponse.getApiKeyInfos()[0].getRealmIdentifier(), is(new RealmConfig.RealmIdentifier(realm1Type, realm1))); + assertThat(getApiKeyResponse.getApiKeyInfos()[1].getRealm(), is(realm2)); + assertThat(getApiKeyResponse.getApiKeyInfos()[1].getRealmType(), nullValue()); + assertThat(getApiKeyResponse.getApiKeyInfos()[1].getRealmIdentifier(), nullValue()); + } + { + PlainActionFuture queryApiKeyResponsePlainActionFuture = new PlainActionFuture<>(); + service.queryApiKeys(new SearchRequest(".security"), false, queryApiKeyResponsePlainActionFuture); + QueryApiKeyResponse queryApiKeyResponse = queryApiKeyResponsePlainActionFuture.get(); + assertThat(queryApiKeyResponse.getItems().length, is(2)); + assertThat(queryApiKeyResponse.getItems()[0].getApiKey().getRealm(), is(realm1)); + assertThat(queryApiKeyResponse.getItems()[0].getApiKey().getRealmType(), is(realm1Type)); + assertThat( + queryApiKeyResponse.getItems()[0].getApiKey().getRealmIdentifier(), + is(new RealmConfig.RealmIdentifier(realm1Type, realm1)) + ); + assertThat(queryApiKeyResponse.getItems()[1].getApiKey().getRealm(), is(realm2)); + assertThat(queryApiKeyResponse.getItems()[1].getApiKey().getRealmType(), nullValue()); + assertThat(queryApiKeyResponse.getItems()[1].getApiKey().getRealmIdentifier(), nullValue()); + } + } + @SuppressWarnings("unchecked") public void testInvalidateApiKeys() throws Exception { final Settings settings = Settings.builder().put(XPackSettings.API_KEY_SERVICE_ENABLED_SETTING.getKey(), true).build(); @@ -890,6 +998,24 @@ private Map mockKeyDocument( Duration expiry, @Nullable List keyRoles, ApiKey.Type type + ) throws IOException { + var apiKeyDoc = newApiKeyDocument(key, user, authUser, invalidated, expiry, keyRoles, type); + SecurityMocks.mockGetRequest( + client, + id, + BytesReference.bytes(XContentBuilder.builder(XContentType.JSON.xContent()).map(apiKeyDoc.v1())) + ); + return apiKeyDoc.v2(); + } + + private static Tuple, Map> newApiKeyDocument( + String key, + User user, + @Nullable User authUser, + boolean invalidated, + Duration expiry, + @Nullable List keyRoles, + ApiKey.Type type ) throws IOException { final Authentication authentication; if (authUser != null) { @@ -906,7 +1032,7 @@ private Map mockKeyDocument( .realmRef(new RealmRef("realm1", "native", "node01")) .build(false); } - final Map metadata = ApiKeyTests.randomMetadata(); + Map metadataMap = ApiKeyTests.randomMetadata(); XContentBuilder docSource = ApiKeyService.newDocument( getFastStoredHashAlgoForTests().hash(new SecureString(key.toCharArray())), "test", @@ -917,15 +1043,13 @@ private Map mockKeyDocument( keyRoles, type, Version.CURRENT, - metadata + metadataMap ); + Map keyMap = XContentHelper.convertToMap(BytesReference.bytes(docSource), true, XContentType.JSON).v2(); if (invalidated) { - Map map = XContentHelper.convertToMap(BytesReference.bytes(docSource), true, XContentType.JSON).v2(); - map.put("api_key_invalidated", true); - docSource = XContentBuilder.builder(XContentType.JSON.xContent()).map(map); + 
keyMap.put("api_key_invalidated", true); } - SecurityMocks.mockGetRequest(client, id, BytesReference.bytes(docSource)); - return metadata; + return new Tuple<>(keyMap, metadataMap); } private AuthenticationResult tryAuthenticate(ApiKeyService service, String id, String key, ApiKey.Type type) throws Exception { @@ -2860,6 +2984,10 @@ private Map buildApiKeySourceDoc(char[] hash) { creatorMap.put("full_name", "test user"); creatorMap.put("email", "test@user.com"); creatorMap.put("metadata", Collections.emptyMap()); + creatorMap.put("realm", randomAlphaOfLength(4)); + if (randomBoolean()) { + creatorMap.put("realm_type", randomAlphaOfLength(4)); + } sourceMap.put("creator", creatorMap); sourceMap.put("api_key_invalidated", false); // noinspection unchecked diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java index 2ee42b360f02a..76a01f100b8ad 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java @@ -117,6 +117,7 @@ public void sendResponse(RestResponse restResponse) { null, "user-x", "realm-1", + "realm-type-1", metadata, roleDescriptors, limitedByRoleDescriptors @@ -176,6 +177,7 @@ public void doE null, "user-x", "realm-1", + "realm-type-1", metadata, roleDescriptors, limitedByRoleDescriptors @@ -226,6 +228,7 @@ public void sendResponse(RestResponse restResponse) { null, "user-x", "realm-1", + "realm-type-1", ApiKeyTests.randomMetadata(), type == ApiKey.Type.CROSS_CLUSTER ? List.of(randomCrossClusterAccessRoleDescriptor()) : @@ -242,6 +245,7 @@ public void sendResponse(RestResponse restResponse) { null, "user-y", "realm-1", + "realm-type-1", ApiKeyTests.randomMetadata(), type == ApiKey.Type.CROSS_CLUSTER ? List.of(randomCrossClusterAccessRoleDescriptor()) From b3fc714b8729c8e08c300e96e5a4eb3c703a44f6 Mon Sep 17 00:00:00 2001 From: Stuart Tettemer Date: Tue, 20 Feb 2024 13:50:48 -0600 Subject: [PATCH 16/49] Painless: Apply true regex limit factor with FIND and MATCH operator (#105670) Setting `script.painless.regex.enabled` to `true` changes the effective `script.painless.regex.limit-factor`. This worked when the limit factor was looked up via the static `$COMPILERSETTINGS`, such as during a method call, but it did not work when the factor was applied via a binary operation such as `FIND` (`=~`) or `MATCH` (`==~`). Only expose the applied limit factor and use that everywhere.
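For illustration, a standalone sketch of the factor selection this change centralizes (the enum and method names merely mirror the shape of the fix; the constant values are placeholders, not the real `Augmentation` values):

```
// Hedged sketch: one accessor decides the effective regex limit factor,
// so method calls and the FIND/MATCH binary operators can no longer disagree.
enum RegexEnabled { TRUE, FALSE, LIMITED }

class AppliedRegexLimitSketch {
    static final int UNLIMITED_PATTERN_FACTOR = 0;  // placeholder for "no limit"
    static final int DISABLED_PATTERN_FACTOR = -1;  // placeholder for "regexes disabled"

    static int appliedRegexLimitFactor(RegexEnabled regexesEnabled, int configuredFactor) {
        return switch (regexesEnabled) {
            case TRUE -> UNLIMITED_PATTERN_FACTOR;   // regexes fully enabled: ignore the factor
            case FALSE -> DISABLED_PATTERN_FACTOR;   // regexes disabled entirely
            case LIMITED -> configuredFactor;        // honor the configured limit factor
        };
    }
}
```

Every caller reads this single value, so the node-level enabled/disabled state consistently overrides the configured factor.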
Fixes: #105669 --- docs/changelog/105670.yaml | 5 +++++ .../painless/CompilerSettings.java | 20 +++++++++---------- .../painless/PainlessScriptEngine.java | 2 +- .../phase/DefaultUserTreeToIRTreePhase.java | 2 +- .../painless/RegexLimitTests.java | 10 ++++++++++ 5 files changed, 26 insertions(+), 13 deletions(-) create mode 100644 docs/changelog/105670.yaml diff --git a/docs/changelog/105670.yaml b/docs/changelog/105670.yaml new file mode 100644 index 0000000000000..234f4b6af5a73 --- /dev/null +++ b/docs/changelog/105670.yaml @@ -0,0 +1,5 @@ +pr: 105670 +summary: "Painless: Apply true regex limit factor with FIND and MATCH operation" +area: Infra/Scripting +type: bug +issues: [] diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/CompilerSettings.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/CompilerSettings.java index 5dfe2f19604c0..4080507a4e893 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/CompilerSettings.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/CompilerSettings.java @@ -160,10 +160,14 @@ public void setRegexLimitFactor(int regexLimitFactor) { } /** - * What is the limit factor for regexes? - */ - public int getRegexLimitFactor() { - return regexLimitFactor; + * What is the effective limit factor for regexes? + */ + public int getAppliedRegexLimitFactor() { + return switch (regexesEnabled) { + case TRUE -> Augmentation.UNLIMITED_PATTERN_FACTOR; + case FALSE -> Augmentation.DISABLED_PATTERN_FACTOR; + case LIMITED -> regexLimitFactor; + }; } /** @@ -171,14 +175,8 @@ public int getRegexLimitFactor() { * annotation. */ public Map asMap() { - int regexLimitFactorToApply = this.regexLimitFactor; - if (regexesEnabled == RegexEnabled.TRUE) { - regexLimitFactorToApply = Augmentation.UNLIMITED_PATTERN_FACTOR; - } else if (regexesEnabled == RegexEnabled.FALSE) { - regexLimitFactorToApply = Augmentation.DISABLED_PATTERN_FACTOR; - } Map map = new HashMap<>(); - map.put("regex_limit_factor", regexLimitFactorToApply); + map.put("regex_limit_factor", getAppliedRegexLimitFactor()); // for testing only map.put("testInject0", testInject0); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java index 7b84e3c9f1417..005148a6fcd5d 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java @@ -425,7 +425,7 @@ private CompilerSettings buildCompilerSettings(Map params) { // Except regexes enabled - this is a node level setting and can't be changed in the request. 
compilerSettings.setRegexesEnabled(defaultCompilerSettings.areRegexesEnabled()); - compilerSettings.setRegexLimitFactor(defaultCompilerSettings.getRegexLimitFactor()); + compilerSettings.setRegexLimitFactor(defaultCompilerSettings.getAppliedRegexLimitFactor()); Map copy = new HashMap<>(params); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultUserTreeToIRTreePhase.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultUserTreeToIRTreePhase.java index 76babcdb9d26e..5e9ba3601e11c 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultUserTreeToIRTreePhase.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultUserTreeToIRTreePhase.java @@ -1076,7 +1076,7 @@ public void visitBinary(EBinary userBinaryNode, ScriptScope scriptScope) { irBinaryMathNode.attachDecoration(new IRDOperation(operation)); if (operation == Operation.MATCH || operation == Operation.FIND) { - irBinaryMathNode.attachDecoration(new IRDRegexLimit(scriptScope.getCompilerSettings().getRegexLimitFactor())); + irBinaryMathNode.attachDecoration(new IRDRegexLimit(scriptScope.getCompilerSettings().getAppliedRegexLimitFactor())); } irBinaryMathNode.attachDecoration(new IRDBinaryType(binaryType)); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexLimitTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexLimitTests.java index e2c1d186e4cb9..265fba1327191 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexLimitTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexLimitTests.java @@ -73,6 +73,16 @@ public void testMethodRegexInject_Ref_Matcher() { assertTrue(cbe.getMessage().contains(regexCircuitMessage)); } + public void testInjectBinary() { + String script = "Pattern p = /.*a.*b.*c.*/; return 'abcxyz123abc' =~ p;"; + Settings settings = Settings.builder() + .put(CompilerSettings.REGEX_LIMIT_FACTOR.getKey(), 1) + .put(CompilerSettings.REGEX_ENABLED.getKey(), "true") + .build(); + scriptEngine = new PainlessScriptEngine(settings, scriptContexts()); + assertEquals(Boolean.TRUE, exec(script)); + } + public void testRegexInject_DefMethodRef_Matcher() { String script = "boolean isMatch(Function func) { func.apply(" + charSequence From 35df385ef961390745174f45b1213ded94dad33d Mon Sep 17 00:00:00 2001 From: Pat Whelan Date: Tue, 20 Feb 2024 15:06:56 -0500 Subject: [PATCH 17/49] [Transform] Fix testStopAtCheckpoint (#105664) Currently, there is a small chance that testStopAtCheckpoint will fail to correctly count the number of times `doSaveState` is invoked: ``` Expected: <5> but: was <4> ``` There are two potential issues: 1. The test thread starts the Transform thread, which starts a Search thread. If the Search thread starts reading from `saveStateListeners` while the test thread is still writing to them, there is a chance our test logic will not be able to count every read. 2. The counter is a plain, non-volatile int, so a concurrent read-modify-write can lose updates. Two fixes: 1. The test thread now blocks the Transform thread until after the test thread has written all the listeners. The subsequent test continues to verify that reading and writing can be safely interleaved. 2. The counter is now an AtomicInteger to provide thread safety.
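As a hedged, self-contained illustration of the counting pattern the fix adopts (hypothetical class and field names; the real change is in `TransformIndexerStateTests` below):

```
import java.util.Collection;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

// Sketch: safely accumulate the size of whatever listener batch is visible
// at the moment of the call, even if another thread swaps the batch.
class SaveStateCounterSketch {
    private final AtomicInteger callCount = new AtomicInteger(0);
    private final AtomicReference<Collection<Runnable>> saveStateListeners = new AtomicReference<>();

    void onSaveState() {
        // Read the reference once, so the null check and size() observe the same batch.
        Collection<Runnable> listenersNow = saveStateListeners.get();
        if (listenersNow != null) {
            // Atomic read-modify-write; a plain `count += n` on an int can lose updates.
            callCount.updateAndGet(count -> count + listenersNow.size());
        }
    }
}
```

`updateAndGet` retries on contention, so concurrent callers cannot overwrite each other's increments the way unsynchronized int updates can.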
Fixes #90549 --- .../TransformIndexerStateTests.java | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerStateTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerStateTests.java index 3e5f4bc929083..fceba25afc7fd 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerStateTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerStateTests.java @@ -64,6 +64,7 @@ import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import java.util.stream.Stream; @@ -106,7 +107,7 @@ class MockedTransformIndexer extends TransformIndexer { private final ThreadPool threadPool; private TransformState persistedState; - private int saveStateListenerCallCount = 0; + private AtomicInteger saveStateListenerCallCount = new AtomicInteger(0); // used for synchronizing with the test private CountDownLatch searchLatch; private CountDownLatch doProcessLatch; @@ -206,10 +207,10 @@ protected void doNextBulk(BulkRequest request, ActionListener next @Override protected void doSaveState(IndexerState state, TransformIndexerPosition position, Runnable next) { - Collection> saveStateListenersAtTheMomentOfCalling = saveStateListeners.get(); - saveStateListenerCallCount += (saveStateListenersAtTheMomentOfCalling != null) - ? saveStateListenersAtTheMomentOfCalling.size() - : 0; + var saveStateListenersAtTheMomentOfCalling = saveStateListeners.get(); + if (saveStateListenersAtTheMomentOfCalling != null) { + saveStateListenerCallCount.updateAndGet(count -> count + saveStateListenersAtTheMomentOfCalling.size()); + } super.doSaveState(state, position, next); } @@ -225,7 +226,7 @@ public boolean waitingForNextSearch() { } public int getSaveStateListenerCallCount() { - return saveStateListenerCallCount; + return saveStateListenerCallCount.get(); } public int getSaveStateListenerCount() { @@ -592,13 +593,13 @@ public void testStopAtCheckpoint() throws Exception { new TransformIndexerStats(), context ); + + // stop the indexer before it dispatches a search thread so we can load the listeners first + CountDownLatch searchLatch = indexer.createAwaitForSearchLatch(1); indexer.start(); assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); assertEquals(indexer.getState(), IndexerState.INDEXING); - // slow down the indexer - CountDownLatch searchLatch = indexer.createAwaitForSearchLatch(1); - // this time call 5 times and change stopAtCheckpoint every time List responseLatches = new ArrayList<>(); for (int i = 0; i < 5; ++i) { From f65f8ecd29aaf0c5e5962f31bafe394b0ca005e5 Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Wed, 21 Feb 2024 08:30:44 +0100 Subject: [PATCH 18/49] Remove unused field (#105640) This change removes a field in TransportPutShutdownNodeAction that is no longer used after the refactoring --- .../xpack/shutdown/TransportPutShutdownNodeAction.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeAction.java index 
fcd70d5c215f1..750bb9227cff6 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeAction.java @@ -45,8 +45,6 @@ public class TransportPutShutdownNodeAction extends AcknowledgedTransportMasterN private final AllocationService allocationService; private final MasterServiceTaskQueue taskQueue; - private final PutShutdownNodeExecutor executor = new PutShutdownNodeExecutor(); - private static boolean putShutdownNodeState( Map shutdownMetadata, Predicate nodeExists, From 4c21c96b7031302d4c5b2878aa0951a3c133c18c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?= Date: Wed, 21 Feb 2024 08:37:28 +0100 Subject: [PATCH 19/49] Set disk watermarks to low values to prevent tests from failing on nodes without enough disk space (#105663) --- .../src/test/java/org/elasticsearch/node/NodeTests.java | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/node/NodeTests.java b/server/src/test/java/org/elasticsearch/node/NodeTests.java index 986ed9184f3e7..b36cafd694378 100644 --- a/server/src/test/java/org/elasticsearch/node/NodeTests.java +++ b/server/src/test/java/org/elasticsearch/node/NodeTests.java @@ -8,11 +8,11 @@ package org.elasticsearch.node; import org.apache.lucene.tests.util.LuceneTestCase; -import org.apache.lucene.util.Constants; import org.apache.lucene.util.SetOnce; import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.bootstrap.BootstrapContext; import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings; import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesReference; @@ -189,6 +189,10 @@ private static Settings.Builder baseSettings() { .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), InternalTestCluster.clusterName("single-node-cluster", randomLong())) .put(Environment.PATH_HOME_SETTING.getKey(), tempDir) .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType()) + // default the watermarks low values to prevent tests from failing on nodes without enough disk space + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b") + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "1b") + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), "1b") .put(dataNode()); } @@ -304,7 +308,6 @@ public void testCloseOnInterruptibleTask() throws Exception { } public void testCloseOnLeakedIndexReaderReference() throws Exception { - assumeFalse("AwaitsFix https://github.com/elastic/elasticsearch/issues/105236", Constants.MAC_OS_X); Node node = new MockNode(baseSettings().build(), basePlugins()); node.start(); IndicesService indicesService = node.injector().getInstance(IndicesService.class); @@ -320,7 +323,6 @@ public void testCloseOnLeakedIndexReaderReference() throws Exception { } public void testCloseOnLeakedStoreReference() throws Exception { - assumeFalse("AwaitsFix https://github.com/elastic/elasticsearch/issues/105236", Constants.MAC_OS_X); Node node = new MockNode(baseSettings().build(), basePlugins()); node.start(); IndicesService indicesService = node.injector().getInstance(IndicesService.class); From 3cde13cae0c256bbc913ad107c5a209f38062d60 Mon Sep 17 00:00:00 2001 
From: David Turner Date: Wed, 21 Feb 2024 07:57:35 +0000 Subject: [PATCH 20/49] Distinguish different snapshot failures by log level (#105622) Today all snapshot failures are reported in the logs at `WARN`, including a stack trace, but most of them are in fact benign or expected and do not need any further action. To make it easier to track actionable problems, this commit downgrades the non-actionable ones to `INFO` level and suppresses their stack traces. --- docs/changelog/105622.yaml | 5 + .../snapshots/SnapshotsService.java | 46 ++++- .../snapshots/SnapshotResiliencyTests.java | 178 +++++++++++++++++- 3 files changed, 219 insertions(+), 10 deletions(-) create mode 100644 docs/changelog/105622.yaml diff --git a/docs/changelog/105622.yaml b/docs/changelog/105622.yaml new file mode 100644 index 0000000000000..33093f5ffceb5 --- /dev/null +++ b/docs/changelog/105622.yaml @@ -0,0 +1,5 @@ +pr: 105622 +summary: Distinguish different snapshot failures by log level +area: Snapshot/Restore +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 3f8b19d72070b..3b2868298cf65 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -76,6 +76,7 @@ import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.shard.ShardId; @@ -3835,14 +3836,51 @@ private record CreateSnapshotTask( @Override public void onFailure(Exception e) { - logger.warn( - () -> format("[%s][%s] failed to create snapshot", snapshot.getRepository(), snapshot.getSnapshotId().getName()), - e - ); + final var logLevel = snapshotFailureLogLevel(e); + if (logLevel == Level.INFO && logger.isDebugEnabled() == false) { + // suppress stack trace at INFO unless extra verbosity is configured + logger.info( + format( + "[%s][%s] failed to create snapshot: %s", + snapshot.getRepository(), + snapshot.getSnapshotId().getName(), + e.getMessage() + ) + ); + } else { + logger.log( + logLevel, + () -> format("[%s][%s] failed to create snapshot", snapshot.getRepository(), snapshot.getSnapshotId().getName()), + e + ); + } listener.onFailure(e); } } + private static Level snapshotFailureLogLevel(Exception e) { + if (MasterService.isPublishFailureException(e)) { + // no action needed, the new master will take things from here + return Level.INFO; + } else if (e instanceof InvalidSnapshotNameException) { + // no action needed, typically ILM-related, or a user error + return Level.INFO; + } else if (e instanceof IndexNotFoundException) { + // not worrying, most likely a user error + return Level.INFO; + } else if (e instanceof SnapshotException) { + if (e.getMessage().contains(ReferenceDocs.UNASSIGNED_SHARDS.toString())) { + // non-partial snapshot requested but cluster health is not yellow or green; the health is tracked elsewhere so no need to + // make more noise here + return Level.INFO; + } + } else if (e instanceof IllegalArgumentException) { + // some other user error + return Level.INFO; + } + return Level.WARN; + } + private class SnapshotTaskExecutor implements ClusterStateTaskExecutor { @Override public ClusterState execute(BatchExecutionContext batchExecutionContext) 
throws Exception { diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index 54c97ea8dc1a3..edde9f0164a6e 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.snapshots; +import org.apache.logging.log4j.Level; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; @@ -140,6 +141,7 @@ import org.elasticsearch.gateway.MetaStateService; import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexSettingProviders; import org.elasticsearch.index.IndexingPressure; import org.elasticsearch.index.analysis.AnalysisRegistry; @@ -185,6 +187,7 @@ import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.BytesRefRecycler; import org.elasticsearch.transport.DisruptableMockTransport; @@ -232,6 +235,7 @@ import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.iterableWithSize; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -1391,7 +1395,6 @@ public void testFullSnapshotUnassignedShards() { final var indices = IntStream.range(0, between(1, 4)).mapToObj(i -> "index-" + i).sorted().toList(); final var repoName = "repo"; - final var originalSnapshotName = "original-snapshot"; var testListener = SubscribableListener @@ -1423,11 +1426,11 @@ public void testFullSnapshotUnassignedShards() { } }) - // Take a full snapshot for use as the source for future clones + // Take the snapshot to check the reaction to having unassigned shards .andThen( (l, ignored) -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, originalSnapshotName) + .prepareCreateSnapshot(repoName, randomIdentifier()) .setWaitForCompletion(randomBoolean()) .execute(new ActionListener<>() { @Override @@ -1451,9 +1454,172 @@ public void onFailure(Exception e) { }) ); - deterministicTaskQueue.runAllRunnableTasks(); - assertTrue("executed all runnable tasks but test steps are still incomplete", testListener.isDone()); - safeAwait(testListener); // shouldn't throw + MockLogAppender.assertThatLogger(() -> { + deterministicTaskQueue.runAllRunnableTasks(); + assertTrue("executed all runnable tasks but test steps are still incomplete", testListener.isDone()); + safeAwait(testListener); // shouldn't throw + }, + SnapshotsService.class, + new MockLogAppender.SeenEventExpectation( + "INFO log", + SnapshotsService.class.getCanonicalName(), + Level.INFO, + "*failed to create snapshot*the following indices have unassigned primary shards*" + ) + ); + } + + public void testSnapshotNameAlreadyInUseExceptionLogging() { + setupTestCluster(1, 1); + + final var repoName = "repo"; + final var snapshotName = "test-snapshot"; + + final var testListener = createRepoAndIndex(repoName, "index", between(1, 2)) + // take snapshot once + .andThen( + (l, 
ignored) -> client().admin() + .cluster() + .prepareCreateSnapshot(repoName, snapshotName) + .setWaitForCompletion(true) + .execute(l) + ) + // take snapshot again + .andThen( + (l, ignored) -> client().admin() + .cluster() + .prepareCreateSnapshot(repoName, snapshotName) + .setWaitForCompletion(randomBoolean()) + .execute(new ActionListener<>() { + @Override + public void onResponse(CreateSnapshotResponse createSnapshotResponse) { + fail("snapshot should not have started"); + } + + @Override + public void onFailure(Exception e) { + assertThat(ExceptionsHelper.unwrapCause(e), instanceOf(SnapshotNameAlreadyInUseException.class)); + l.onResponse(null); + } + }) + ); + + MockLogAppender.assertThatLogger(() -> { + deterministicTaskQueue.runAllRunnableTasks(); + assertTrue("executed all runnable tasks but test steps are still incomplete", testListener.isDone()); + safeAwait(testListener); // shouldn't throw + }, + SnapshotsService.class, + new MockLogAppender.SeenEventExpectation( + "INFO log", + SnapshotsService.class.getCanonicalName(), + Level.INFO, + Strings.format("*failed to create snapshot*Invalid snapshot name [%s]*", snapshotName) + ) + ); + } + + public void testIndexNotFoundExceptionLogging() { + setupTestCluster(1, 0); // no need for data nodes here + + final var repoName = "repo"; + final var indexName = "does-not-exist"; + + final var testListener = SubscribableListener + // create repo + .newForked( + l -> client().admin() + .cluster() + .preparePutRepository(repoName) + .setType(FsRepository.TYPE) + .setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) + .execute(l) + ) + // take snapshot of index that does not exist + .andThen( + (l, ignored) -> client().admin() + .cluster() + .prepareCreateSnapshot(repoName, randomIdentifier()) + .setIndices(indexName) + .setWaitForCompletion(randomBoolean()) + .execute(new ActionListener<>() { + @Override + public void onResponse(CreateSnapshotResponse createSnapshotResponse) { + fail("snapshot should not have started"); + } + + @Override + public void onFailure(Exception e) { + assertThat(ExceptionsHelper.unwrapCause(e), instanceOf(IndexNotFoundException.class)); + l.onResponse(null); + } + }) + ); + + MockLogAppender.assertThatLogger(() -> { + deterministicTaskQueue.runAllRunnableTasks(); + assertTrue("executed all runnable tasks but test steps are still incomplete", testListener.isDone()); + safeAwait(testListener); // shouldn't throw + }, + SnapshotsService.class, + new MockLogAppender.SeenEventExpectation( + "INFO log", + SnapshotsService.class.getCanonicalName(), + Level.INFO, + Strings.format("failed to create snapshot: no such index [%s]", indexName) + ) + ); + } + + public void testIllegalArgumentExceptionLogging() { + setupTestCluster(1, 0); // no need for data nodes here + + final var repoName = "repo"; + + final var testListener = SubscribableListener + // create repo + .newForked( + l -> client().admin() + .cluster() + .preparePutRepository(repoName) + .setType(FsRepository.TYPE) + .setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) + .execute(l) + ) + // attempt to take snapshot with illegal config ('none' is allowed as a feature state iff it's the only one in the list) + .andThen( + (l, ignored) -> client().admin() + .cluster() + .prepareCreateSnapshot(repoName, randomIdentifier()) + .setFeatureStates("none", "none") + .setWaitForCompletion(randomBoolean()) + .execute(new ActionListener<>() { + @Override + public void onResponse(CreateSnapshotResponse createSnapshotResponse) { + 
fail("snapshot should not have started"); + } + + @Override + public void onFailure(Exception e) { + assertThat(ExceptionsHelper.unwrapCause(e), instanceOf(IllegalArgumentException.class)); + l.onResponse(null); + } + }) + ); + + MockLogAppender.assertThatLogger(() -> { + deterministicTaskQueue.runAllRunnableTasks(); + assertTrue("executed all runnable tasks but test steps are still incomplete", testListener.isDone()); + safeAwait(testListener); // shouldn't throw + }, + SnapshotsService.class, + new MockLogAppender.SeenEventExpectation( + "INFO log", + SnapshotsService.class.getCanonicalName(), + Level.INFO, + Strings.format("*failed to create snapshot*other feature states were requested: [none, none]", "") + ) + ); } private RepositoryData getRepositoryData(Repository repository) { From 7cbdb6cc197541da584256034ede895d0311a44e Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 21 Feb 2024 07:57:50 +0000 Subject: [PATCH 21/49] Drop dead code from get-snapshots request & response (#105608) Removes all the now-dead code related to reading pre-7.16 get-snapshots requests and responses, and also moves the `XContent` response parsing out of production and into the only test suite that uses it. --- .../http/snapshots/RestGetSnapshotsIT.java | 37 ++++++- .../snapshots/get/GetSnapshotsRequest.java | 101 ++++-------------- .../snapshots/get/GetSnapshotsResponse.java | 75 ++----------- .../elasticsearch/snapshots/SnapshotInfo.java | 13 +-- .../get/GetSnapshotsResponseTests.java | 44 -------- 5 files changed, 64 insertions(+), 206 deletions(-) diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java index 5993978f9bd60..e9f4106433771 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java @@ -9,6 +9,7 @@ package org.elasticsearch.http.snapshots; import org.apache.http.client.methods.HttpGet; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; @@ -23,6 +24,8 @@ import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.json.JsonXContent; @@ -31,6 +34,7 @@ import java.io.InputStream; import java.util.ArrayList; import java.util.Collection; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -444,7 +448,7 @@ private static GetSnapshotsResponse readSnapshotInfos(Response response) throws InputStream input = response.getEntity().getContent(); XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, input) ) { - return GetSnapshotsResponse.fromXContent(parser); + return GET_SNAPSHOT_PARSER.parse(parser, null); } } @@ -501,4 +505,35 @@ private static GetSnapshotsResponse sortedWithLimit( final Response response = getRestClient().performRequest(request); return 
readSnapshotInfos(response); } + + private static final int UNKNOWN_COUNT = -1; + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser GET_SNAPSHOT_PARSER = new ConstructingObjectParser<>( + GetSnapshotsResponse.class.getName(), + true, + (args) -> new GetSnapshotsResponse( + (List) args[0], + (Map) args[1], + (String) args[2], + args[3] == null ? UNKNOWN_COUNT : (int) args[3], + args[4] == null ? UNKNOWN_COUNT : (int) args[4] + ) + ); + + static { + GET_SNAPSHOT_PARSER.declareObjectArray( + ConstructingObjectParser.constructorArg(), + (p, c) -> SnapshotInfo.SNAPSHOT_INFO_PARSER.apply(p, c).build(), + new ParseField("snapshots") + ); + GET_SNAPSHOT_PARSER.declareObject( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> p.map(HashMap::new, ElasticsearchException::fromXContent), + new ParseField("failures") + ); + GET_SNAPSHOT_PARSER.declareStringOrNull(ConstructingObjectParser.optionalConstructorArg(), new ParseField("next")); + GET_SNAPSHOT_PARSER.declareIntOrNull(ConstructingObjectParser.optionalConstructorArg(), UNKNOWN_COUNT, new ParseField("total")); + GET_SNAPSHOT_PARSER.declareIntOrNull(ConstructingObjectParser.optionalConstructorArg(), UNKNOWN_COUNT, new ParseField("remaining")); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java index c3e2dd6e3b536..fda371f9364f9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java @@ -41,18 +41,6 @@ public class GetSnapshotsRequest extends MasterNodeRequest public static final String NO_POLICY_PATTERN = "_none"; public static final boolean DEFAULT_VERBOSE_MODE = true; - public static final TransportVersion SLM_POLICY_FILTERING_VERSION = TransportVersions.V_7_16_0; - - public static final TransportVersion FROM_SORT_VALUE_VERSION = TransportVersions.V_7_16_0; - - public static final TransportVersion MULTIPLE_REPOSITORIES_SUPPORT_ADDED = TransportVersions.V_7_14_0; - - public static final TransportVersion PAGINATED_GET_SNAPSHOTS_VERSION = TransportVersions.V_7_14_0; - - public static final TransportVersion NUMERIC_PAGINATION_VERSION = TransportVersions.V_7_15_0; - - private static final TransportVersion SORT_BY_SHARDS_OR_REPO_VERSION = TransportVersions.V_7_16_0; - private static final TransportVersion INDICES_FLAG_VERSION = TransportVersions.V_8_3_0; public static final int NO_LIMIT = -1; @@ -113,89 +101,36 @@ public GetSnapshotsRequest(String... 
repositories) { public GetSnapshotsRequest(StreamInput in) throws IOException { super(in); - if (in.getTransportVersion().onOrAfter(MULTIPLE_REPOSITORIES_SUPPORT_ADDED)) { - repositories = in.readStringArray(); - } else { - repositories = new String[] { in.readString() }; - } + repositories = in.readStringArray(); snapshots = in.readStringArray(); ignoreUnavailable = in.readBoolean(); verbose = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(PAGINATED_GET_SNAPSHOTS_VERSION)) { - after = in.readOptionalWriteable(After::new); - sort = in.readEnum(SortBy.class); - size = in.readVInt(); - order = SortOrder.readFromStream(in); - if (in.getTransportVersion().onOrAfter(NUMERIC_PAGINATION_VERSION)) { - offset = in.readVInt(); - } - if (in.getTransportVersion().onOrAfter(SLM_POLICY_FILTERING_VERSION)) { - policies = in.readStringArray(); - } - if (in.getTransportVersion().onOrAfter(FROM_SORT_VALUE_VERSION)) { - fromSortValue = in.readOptionalString(); - } - if (in.getTransportVersion().onOrAfter(INDICES_FLAG_VERSION)) { - includeIndexNames = in.readBoolean(); - } + after = in.readOptionalWriteable(After::new); + sort = in.readEnum(SortBy.class); + size = in.readVInt(); + order = SortOrder.readFromStream(in); + offset = in.readVInt(); + policies = in.readStringArray(); + fromSortValue = in.readOptionalString(); + if (in.getTransportVersion().onOrAfter(INDICES_FLAG_VERSION)) { + includeIndexNames = in.readBoolean(); } } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getTransportVersion().onOrAfter(MULTIPLE_REPOSITORIES_SUPPORT_ADDED)) { - out.writeStringArray(repositories); - } else { - if (repositories.length != 1) { - throw new IllegalArgumentException( - "Requesting snapshots from multiple repositories is not supported in versions prior " - + "to " - + MULTIPLE_REPOSITORIES_SUPPORT_ADDED.toString() - ); - } - out.writeString(repositories[0]); - } + out.writeStringArray(repositories); out.writeStringArray(snapshots); out.writeBoolean(ignoreUnavailable); out.writeBoolean(verbose); - if (out.getTransportVersion().onOrAfter(PAGINATED_GET_SNAPSHOTS_VERSION)) { - out.writeOptionalWriteable(after); - if ((sort == SortBy.SHARDS || sort == SortBy.FAILED_SHARDS || sort == SortBy.REPOSITORY) - && out.getTransportVersion().before(SORT_BY_SHARDS_OR_REPO_VERSION)) { - throw new IllegalArgumentException( - "can't use sort by shard count or repository name in transport version [" + out.getTransportVersion() + "]" - ); - } - out.writeEnum(sort); - out.writeVInt(size); - order.writeTo(out); - if (out.getTransportVersion().onOrAfter(NUMERIC_PAGINATION_VERSION)) { - out.writeVInt(offset); - } else if (offset != 0) { - throw new IllegalArgumentException( - "can't use numeric offset in get snapshots request in transport version [" + out.getTransportVersion() + "]" - ); - } - } else if (sort != SortBy.START_TIME || size != NO_LIMIT || after != null || order != SortOrder.ASC) { - throw new IllegalArgumentException( - "can't use paginated get snapshots request in transport version [" + out.getTransportVersion() + "]" - ); - } - if (out.getTransportVersion().onOrAfter(SLM_POLICY_FILTERING_VERSION)) { - out.writeStringArray(policies); - } else if (policies.length > 0) { - throw new IllegalArgumentException( - "can't use slm policy filter in snapshots request in transport version [" + out.getTransportVersion() + "]" - ); - } - if (out.getTransportVersion().onOrAfter(FROM_SORT_VALUE_VERSION)) { - out.writeOptionalString(fromSortValue); - } else if (fromSortValue 
!= null) { - throw new IllegalArgumentException( - "can't use after-value in snapshot request in transport version [" + out.getTransportVersion() + "]" - ); - } + out.writeOptionalWriteable(after); + out.writeEnum(sort); + out.writeVInt(size); + order.writeTo(out); + out.writeVInt(offset); + out.writeStringArray(policies); + out.writeOptionalString(fromSortValue); if (out.getTransportVersion().onOrAfter(INDICES_FLAG_VERSION)) { out.writeBoolean(includeIndexNames); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java index 3257ed1b986c3..85c2ff2806ace 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java @@ -17,14 +17,10 @@ import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.core.Nullable; import org.elasticsearch.snapshots.SnapshotInfo; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Collections; -import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -35,37 +31,6 @@ */ public class GetSnapshotsResponse extends ActionResponse implements ChunkedToXContentObject { - private static final int UNKNOWN_COUNT = -1; - - @SuppressWarnings("unchecked") - private static final ConstructingObjectParser GET_SNAPSHOT_PARSER = new ConstructingObjectParser<>( - GetSnapshotsResponse.class.getName(), - true, - (args) -> new GetSnapshotsResponse( - (List) args[0], - (Map) args[1], - (String) args[2], - args[3] == null ? UNKNOWN_COUNT : (int) args[3], - args[4] == null ? 
UNKNOWN_COUNT : (int) args[4] - ) - ); - - static { - GET_SNAPSHOT_PARSER.declareObjectArray( - ConstructingObjectParser.constructorArg(), - (p, c) -> SnapshotInfo.SNAPSHOT_INFO_PARSER.apply(p, c).build(), - new ParseField("snapshots") - ); - GET_SNAPSHOT_PARSER.declareObject( - ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> p.map(HashMap::new, ElasticsearchException::fromXContent), - new ParseField("failures") - ); - GET_SNAPSHOT_PARSER.declareStringOrNull(ConstructingObjectParser.optionalConstructorArg(), new ParseField("next")); - GET_SNAPSHOT_PARSER.declareIntOrNull(ConstructingObjectParser.optionalConstructorArg(), UNKNOWN_COUNT, new ParseField("total")); - GET_SNAPSHOT_PARSER.declareIntOrNull(ConstructingObjectParser.optionalConstructorArg(), UNKNOWN_COUNT, new ParseField("remaining")); - } - private final List snapshots; private final Map failures; @@ -93,21 +58,10 @@ public GetSnapshotsResponse( public GetSnapshotsResponse(StreamInput in) throws IOException { this.snapshots = in.readCollectionAsImmutableList(SnapshotInfo::readFrom); - if (in.getTransportVersion().onOrAfter(GetSnapshotsRequest.MULTIPLE_REPOSITORIES_SUPPORT_ADDED)) { - final Map failedResponses = in.readMap(StreamInput::readException); - this.failures = Collections.unmodifiableMap(failedResponses); - this.next = in.readOptionalString(); - } else { - this.failures = Collections.emptyMap(); - this.next = null; - } - if (in.getTransportVersion().onOrAfter(GetSnapshotsRequest.NUMERIC_PAGINATION_VERSION)) { - this.total = in.readVInt(); - this.remaining = in.readVInt(); - } else { - this.total = UNKNOWN_COUNT; - this.remaining = UNKNOWN_COUNT; - } + this.failures = Collections.unmodifiableMap(in.readMap(StreamInput::readException)); + this.next = in.readOptionalString(); + this.total = in.readVInt(); + this.remaining = in.readVInt(); } /** @@ -149,19 +103,10 @@ public int remaining() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeCollection(snapshots); - if (out.getTransportVersion().onOrAfter(GetSnapshotsRequest.MULTIPLE_REPOSITORIES_SUPPORT_ADDED)) { - out.writeMap(failures, StreamOutput::writeException); - out.writeOptionalString(next); - } else { - if (failures.isEmpty() == false) { - assert false : "transport action should have thrown directly for old version but saw " + failures; - throw failures.values().iterator().next(); - } - } - if (out.getTransportVersion().onOrAfter(GetSnapshotsRequest.NUMERIC_PAGINATION_VERSION)) { - out.writeVInt(total); - out.writeVInt(remaining); - } + out.writeMap(failures, StreamOutput::writeException); + out.writeOptionalString(next); + out.writeVInt(total); + out.writeVInt(remaining); } @Override @@ -198,10 +143,6 @@ public Iterator toXContentChunked(ToXContent.Params params) { })); } - public static GetSnapshotsResponse fromXContent(XContentParser parser) throws IOException { - return GET_SNAPSHOT_PARSER.parse(parser, null); - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java index c09719ec48039..243df88cfab00 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java @@ -496,12 +496,7 @@ public SnapshotInfo maybeWithoutIndices(boolean retainIndices) { * Constructs snapshot information from stream input */ public static SnapshotInfo readFrom(final StreamInput in) throws 
IOException { - final Snapshot snapshot; - if (in.getTransportVersion().onOrAfter(GetSnapshotsRequest.PAGINATED_GET_SNAPSHOTS_VERSION)) { - snapshot = new Snapshot(in); - } else { - snapshot = new Snapshot(UNKNOWN_REPO_NAME, new SnapshotId(in)); - } + final Snapshot snapshot = new Snapshot(in); final List indices = in.readStringCollectionAsImmutableList(); final SnapshotState state = in.readBoolean() ? SnapshotState.fromValue(in.readByte()) : null; final String reason = in.readOptionalString(); @@ -1015,11 +1010,7 @@ public static SnapshotInfo fromXContentInternal(final String repoName, final XCo @Override public void writeTo(final StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(GetSnapshotsRequest.PAGINATED_GET_SNAPSHOTS_VERSION)) { - snapshot.writeTo(out); - } else { - snapshot.getSnapshotId().writeTo(out); - } + snapshot.writeTo(out); out.writeStringCollection(indices); if (state != null) { out.writeBoolean(true); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponseTests.java index f64f6a5d8275b..32a72bd0f7a76 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponseTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamOutput; @@ -24,9 +23,6 @@ import org.elasticsearch.snapshots.SnapshotShardFailure; import org.elasticsearch.test.AbstractChunkedSerializingTestCase; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentType; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -39,11 +35,7 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.function.Predicate; -import java.util.regex.Pattern; -import static org.elasticsearch.snapshots.SnapshotInfo.INDEX_DETAILS_XCONTENT_PARAM; -import static org.elasticsearch.test.AbstractXContentTestCase.chunkedXContentTester; import static org.hamcrest.CoreMatchers.containsString; public class GetSnapshotsResponseTests extends ESTestCase { @@ -53,10 +45,6 @@ public class GetSnapshotsResponseTests extends ESTestCase { // It does not override equals and hashCode, because it // contains ElasticsearchException, which does not override equals and hashCode. 
- private GetSnapshotsResponse doParseInstance(XContentParser parser) throws IOException { - return GetSnapshotsResponse.fromXContent(parser); - } - private GetSnapshotsResponse copyInstance(GetSnapshotsResponse instance) throws IOException { return copyInstance( instance, @@ -146,38 +134,6 @@ public void testSerialization() throws IOException { assertEqualInstances(testInstance, deserializedInstance); } - public void testFromXContent() throws IOException { - // Explicitly include the index details, excluded by default, since this is required for a faithful round-trip - final ToXContent.MapParams params = new ToXContent.MapParams(Map.of(INDEX_DETAILS_XCONTENT_PARAM, "true")); - - // Don't inject random fields into the custom snapshot metadata, because the metadata map is equality-checked after doing a - // round-trip through xContent serialization/deserialization. Even though the rest of the object ignores unknown fields, - // `metadata` doesn't ignore unknown fields (it just includes them in the parsed object, because the keys are arbitrary), - // so any new fields added to the metadata before it gets deserialized that weren't in the serialized version will - // cause the equality check to fail. - // - // Also don't inject random junk into the index details section, since this is keyed by index name but the values - // are required to be a valid IndexSnapshotDetails - // - // The actual fields are nested in an array, so this regex matches fields with names of the form - // `snapshots.3.metadata` - final Predicate predicate = Pattern.compile("snapshots\\.\\d+\\.metadata.*") - .asMatchPredicate() - .or(Pattern.compile("snapshots\\.\\d+\\.index_details").asMatchPredicate()) - .or(Pattern.compile("failures\\.*").asMatchPredicate()); - chunkedXContentTester(this::createParser, (XContentType t) -> createTestInstance(), params, this::doParseInstance).numberOfTestRuns( - 1 - ) - .supportsUnknownFields(true) - .shuffleFieldsExceptions(Strings.EMPTY_ARRAY) - .randomFieldsExcludeFilter(predicate) - .assertEqualsConsumer(this::assertEqualInstances) - // We set it to false, because GetSnapshotsResponse contains - // ElasticsearchException, whose xContent creation/parsing are not stable. 
- .assertToXContentEquivalence(false) - .test(); - } - public void testChunking() { AbstractChunkedSerializingTestCase.assertChunkCount(createTestInstance(), response -> response.getSnapshots().size() + 2); } From f06a580eb7a84144707d6ef761218dc44604dba4 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Wed, 21 Feb 2024 09:25:25 +0100 Subject: [PATCH 22/49] ES|QL: Set default query LIMIT to 1000 (#105618) --- docs/reference/esql/esql-get-started.asciidoc | 8 +- docs/reference/esql/esql-kibana.asciidoc | 2 +- docs/reference/esql/esql-limitations.asciidoc | 2 +- .../esql/source-commands/from.asciidoc | 4 +- .../xpack/esql/qa/rest/RestEsqlTestCase.java | 5 +- .../xpack/esql/EsqlTestUtils.java | 2 +- .../xpack/esql/action/EsqlActionTaskIT.java | 2 +- .../xpack/esql/plugin/EsqlPlugin.java | 2 +- .../LocalPhysicalPlanOptimizerTests.java | 36 +++---- .../optimizer/LogicalPlanOptimizerTests.java | 90 ++++++++-------- .../optimizer/PhysicalPlanOptimizerTests.java | 54 +++++----- .../rest-api-spec/test/esql/100_bug_fix.yml | 15 +-- .../rest-api-spec/test/esql/20_aggs.yml | 98 ++++++++--------- .../rest-api-spec/test/esql/30_types.yml | 102 +++++++++--------- .../test/esql/90_non_indexed.yml | 9 +- 15 files changed, 218 insertions(+), 213 deletions(-) diff --git a/docs/reference/esql/esql-get-started.asciidoc b/docs/reference/esql/esql-get-started.asciidoc index 631a961b023ab..4dae9ffcddd7f 100644 --- a/docs/reference/esql/esql-get-started.asciidoc +++ b/docs/reference/esql/esql-get-started.asciidoc @@ -37,7 +37,7 @@ image::images/esql/source-command.svg[A source command producing a table from {e The <> source command returns a table with documents from a data stream, index, or alias. Each row in the resulting table represents a document. -This query returns up to 500 documents from the `sample_data` index: +This query returns up to 1000 documents from the `sample_data` index: [source,esql] ---- @@ -237,7 +237,7 @@ include::{esql-specs}/eval.csv-spec[tag=gs-eval-stats-backticks] To track statistics over time, {esql} enables you to create histograms using the <> function. `AUTO_BUCKET` creates human-friendly bucket sizes and returns a value for each row that corresponds to the resulting bucket the -row falls into. +row falls into. For example, to create hourly buckets for the data on October 23rd: @@ -272,7 +272,7 @@ image::images/esql/esql-enrich.png[align="center"] Before you can use `ENRICH`, you first need to <> and <> -an <>. +an <>. include::{es-repo-dir}/tab-widgets/esql/esql-getting-started-widget-enrich-policy.asciidoc[] @@ -344,4 +344,4 @@ For more about data processing with {esql}, refer to [[esql-getting-learn-more]] === Learn more -To learn more about {esql}, refer to <> and <>. \ No newline at end of file +To learn more about {esql}, refer to <> and <>. diff --git a/docs/reference/esql/esql-kibana.asciidoc b/docs/reference/esql/esql-kibana.asciidoc index 07502add5a620..67827d32ce29c 100644 --- a/docs/reference/esql/esql-kibana.asciidoc +++ b/docs/reference/esql/esql-kibana.asciidoc @@ -103,7 +103,7 @@ detailed warning, expand the query bar, and click *warnings*. === The results table For the example query, the results table shows 10 rows. Omitting the `LIMIT` -command, the results table defaults to up to 500 rows. Using `LIMIT`, you can +command, the results table defaults to up to 1000 rows. Using `LIMIT`, you can increase the limit to up to 10,000 rows. 
NOTE: the 10,000 row limit only applies to the number of rows that are retrieved diff --git a/docs/reference/esql/esql-limitations.asciidoc b/docs/reference/esql/esql-limitations.asciidoc index 94bd38cd0ec28..788177df64dc9 100644 --- a/docs/reference/esql/esql-limitations.asciidoc +++ b/docs/reference/esql/esql-limitations.asciidoc @@ -9,7 +9,7 @@ [[esql-max-rows]] === Result set size limit -By default, an {esql} query returns up to 500 rows. You can increase the number +By default, an {esql} query returns up to 1000 rows. You can increase the number of rows up to 10,000 using the <> command. include::processing-commands/limit.asciidoc[tag=limitation] diff --git a/docs/reference/esql/source-commands/from.asciidoc b/docs/reference/esql/source-commands/from.asciidoc index 5263a17b48df9..dbb5010060257 100644 --- a/docs/reference/esql/source-commands/from.asciidoc +++ b/docs/reference/esql/source-commands/from.asciidoc @@ -26,7 +26,7 @@ corresponds to a field, and can be accessed by the name of that field. [NOTE] ==== By default, an {esql} query without an explicit <> uses an implicit -limit of 500. This applies to `FROM` too. A `FROM` command without `LIMIT`: +limit of 1000. This applies to `FROM` too. A `FROM` command without `LIMIT`: [source,esql] ---- @@ -38,7 +38,7 @@ is executed as: [source,esql] ---- FROM employees -| LIMIT 500 +| LIMIT 1000 ---- ==== diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java index 6dea60476c3d1..cccd1a3f8854b 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java @@ -817,7 +817,10 @@ private static HttpEntity assertWarnings(Response response, List allowed } private static Set mutedWarnings() { - return Set.of("No limit defined, adding default limit of [500]"); + return Set.of( + "No limit defined, adding default limit of [1000]", + "No limit defined, adding default limit of [500]" // this is for bwc tests, the limit in v 8.12.x is 500 + ); } private static void bulkLoadTestData(int count) throws IOException { diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java index bcfe5ce9787ad..8c5c79b98767e 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java @@ -185,7 +185,7 @@ public static List> getValuesList(Iterator> values public static List withDefaultLimitWarning(List warnings) { List result = warnings == null ? 
new ArrayList<>() : new ArrayList<>(warnings); - result.add("No limit defined, adding default limit of [500]"); + result.add("No limit defined, adding default limit of [1000]"); return result; } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java index 276539a5bbeac..60f174773a1b8 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java @@ -71,7 +71,7 @@ public void setup() { \\_ExchangeSourceOperator[] \\_AggregationOperator[mode = FINAL, aggs = sum of longs] \\_ProjectOperator[projection = [0]] - \\_LimitOperator[limit = 500] + \\_LimitOperator[limit = 1000] \\_OutputOperator[columns = [sum(pause_me)]]"""; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java index 17dad71401119..14ebf3da2cd7e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java @@ -79,7 +79,7 @@ public class EsqlPlugin extends Plugin implements ActionPlugin { public static final Setting QUERY_RESULT_TRUNCATION_DEFAULT_SIZE = Setting.intSetting( "esql.query.result_truncation_default_size", - 500, + 1000, 1, 10000, Setting.Property.NodeScope, diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java index 80cc7d9a52a4b..55320cfbeca32 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java @@ -152,7 +152,7 @@ private Analyzer makeAnalyzer(String mappingFileName, EnrichResolution enrichRes /** * Expects - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_AggregateExec[[],[COUNT([2a][KEYWORD]) AS c],FINAL,null] * \_ExchangeExec[[count{r}#24, seen{r}#25],true] * \_EsStatsQueryExec[test], stats[Stat[name=*, type=COUNT, query=null]]], query[{"esql_single_value":{"field":"emp_no","next": @@ -171,7 +171,7 @@ public void testCountAllWithEval() { /** * Expects - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_AggregateExec[[],[COUNT([2a][KEYWORD]) AS c],FINAL,null] * \_ExchangeExec[[count{r}#14, seen{r}#15],true] * \_EsStatsQueryExec[test], stats[Stat[name=*, type=COUNT, query=null]]], @@ -187,7 +187,7 @@ public void testCountAllWithFilter() { /** * Expects - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_AggregateExec[[],[COUNT(emp_no{f}#5) AS c],FINAL,null] * \_ExchangeExec[[count{r}#15, seen{r}#16],true] * \_EsStatsQueryExec[test], stats[Stat[name=emp_no, type=COUNT, query={ @@ -207,7 +207,7 @@ public void testCountFieldWithFilter() { /** * Expects - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_AggregateExec[[],[COUNT(salary{f}#20) AS c],FINAL,null] * \_ExchangeExec[[count{r}#25, seen{r}#26],true] * \_EsStatsQueryExec[test], stats[Stat[name=salary, type=COUNT, query={ @@ -302,7 +302,7 @@ public void 
testAnotherCountAllWithFilter() { /** * Expected * ProjectExec[[c{r}#3, c{r}#3 AS call, c_literal{r}#7]] - * \_LimitExec[500[INTEGER]] + * \_LimitExec[1000[INTEGER]] * \_AggregateExec[[],[COUNT([2a][KEYWORD]) AS c, COUNT(1[INTEGER]) AS c_literal],FINAL,null] * \_ExchangeExec[[count{r}#18, seen{r}#19, count{r}#20, seen{r}#21],true] * \_EsStatsQueryExec[test], stats[Stat[name=*, type=COUNT, query=null], Stat[name=*, type=COUNT, query=null]]], @@ -346,7 +346,7 @@ public void testCountFieldsAndAllWithFilter() { /** * Expecting - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_AggregateExec[[],[COUNT([2a][KEYWORD]) AS c],FINAL,null] * \_ExchangeExec[[count{r}#14, seen{r}#15],true] * \_LocalSourceExec[[c{r}#3],[LongVectorBlock[vector=ConstantLongVector[positions=1, value=0]]]] @@ -377,12 +377,12 @@ public boolean exists(String field) { /** * Expects - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_ExchangeExec[[],false] * \_ProjectExec[[_meta_field{f}#9, emp_no{f}#3, first_name{f}#4, gender{f}#5, job{f}#10, job.raw{f}#11, languages{f}#6, last_n * ame{f}#7, long_noidx{f}#12, salary{f}#8]] * \_FieldExtractExec[_meta_field{f}#9, emp_no{f}#3, first_name{f}#4, gen..] - * \_EsQueryExec[test], query[{"exists":{"field":"emp_no","boost":1.0}}][_doc{f}#13], limit[500], sort[] estimatedRowSize[324] + * \_EsQueryExec[test], query[{"exists":{"field":"emp_no","boost":1.0}}][_doc{f}#13], limit[1000], sort[] estimatedRowSize[324] */ public void testIsNotNullPushdownFilter() { var plan = plan("from test | where emp_no is not null"); @@ -392,20 +392,20 @@ public void testIsNotNullPushdownFilter() { var project = as(exchange.child(), ProjectExec.class); var field = as(project.child(), FieldExtractExec.class); var query = as(field.child(), EsQueryExec.class); - assertThat(query.limit().fold(), is(500)); + assertThat(query.limit().fold(), is(1000)); var expected = QueryBuilders.existsQuery("emp_no"); assertThat(query.query().toString(), is(expected.toString())); } /** * Expects - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_ExchangeExec[[],false] * \_ProjectExec[[_meta_field{f}#9, emp_no{f}#3, first_name{f}#4, gender{f}#5, job{f}#10, job.raw{f}#11, languages{f}#6, last_n * ame{f}#7, long_noidx{f}#12, salary{f}#8]] * \_FieldExtractExec[_meta_field{f}#9, emp_no{f}#3, first_name{f}#4, gen..] 
* \_EsQueryExec[test], query[{"bool":{"must_not":[{"exists":{"field":"emp_no","boost":1.0}}],"boost":1.0}}][_doc{f}#13], - * limit[500], sort[] estimatedRowSize[324] + * limit[1000], sort[] estimatedRowSize[324] */ public void testIsNullPushdownFilter() { var plan = plan("from test | where emp_no is null"); @@ -415,21 +415,21 @@ public void testIsNullPushdownFilter() { var project = as(exchange.child(), ProjectExec.class); var field = as(project.child(), FieldExtractExec.class); var query = as(field.child(), EsQueryExec.class); - assertThat(query.limit().fold(), is(500)); + assertThat(query.limit().fold(), is(1000)); var expected = QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery("emp_no")); assertThat(query.query().toString(), is(expected.toString())); } /** * Expects - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_ExchangeExec[[],false] * \_ProjectExec[[!alias_integer, boolean{f}#4, byte{f}#5, constant_keyword-foo{f}#6, date{f}#7, double{f}#8, float{f}#9, * half_float{f}#10, integer{f}#12, ip{f}#13, keyword{f}#14, long{f}#15, scaled_float{f}#11, short{f}#17, text{f}#18, * unsigned_long{f}#16, version{f}#19, wildcard{f}#20]] * \_FieldExtractExec[!alias_integer, boolean{f}#4, byte{f}#5, constant_k..][] * \_EsQueryExec[test], query[{"esql_single_value":{"field":"ip","next":{"terms":{"ip":["127.0.0.0/24"],"boost":1.0}},"source": - * "cidr_match(ip, \"127.0.0.0/24\")@1:19"}}][_doc{f}#21], limit[500], sort[] estimatedRowSize[389] + * "cidr_match(ip, \"127.0.0.0/24\")@1:19"}}][_doc{f}#21], limit[1000], sort[] estimatedRowSize[389] */ public void testCidrMatchPushdownFilter() { var allTypeMappingAnalyzer = makeAnalyzer("mapping-ip.json", new EnrichResolution()); @@ -451,7 +451,7 @@ public void testCidrMatchPushdownFilter() { var project = as(exchange.child(), ProjectExec.class); var field = as(project.child(), FieldExtractExec.class); var queryExec = as(field.child(), EsQueryExec.class); - assertThat(queryExec.limit().fold(), is(500)); + assertThat(queryExec.limit().fold(), is(1000)); var expectedInnerQuery = QueryBuilders.termsQuery(fieldName, cidrBlocks); var expectedQuery = wrapWithSingleQuery(expectedInnerQuery, fieldName, new Source(1, 18, cidrMatch)); @@ -549,7 +549,7 @@ public void testOutOfRangeFilterPushdown() { /** * Expects e.g. - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_ExchangeExec[[],false] * \_ProjectExec[[!alias_integer, boolean{f}#190, byte{f}#191, constant_keyword-foo{f}#192, date{f}#193, double{f}#194, ...]] * \_FieldExtractExec[!alias_integer, boolean{f}#190, byte{f}#191, consta..][] @@ -569,13 +569,13 @@ private EsQueryExec doTestOutOfRangeFilterPushdown(String query, Analyzer analyz /** * Expects - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_ExchangeExec[[],false] * \_ProjectExec[[_meta_field{f}#8, emp_no{r}#2, first_name{r}#3, gender{f}#4, job{f}#9, job.raw{f}#10, languages{f}#5, first_n * ame{r}#3 AS last_name, long_noidx{f}#11, emp_no{r}#2 AS salary]] * \_FieldExtractExec[_meta_field{f}#8, gender{f}#4, job{f}#9, job.raw{f}..] 
* \_EvalExec[[null[INTEGER] AS emp_no, null[KEYWORD] AS first_name]] - * \_EsQueryExec[test], query[][_doc{f}#12], limit[500], sort[] estimatedRowSize[270] + * \_EsQueryExec[test], query[][_doc{f}#12], limit[1000], sort[] estimatedRowSize[270] */ public void testMissingFieldsDoNotGetExtracted() { var stats = EsqlTestUtils.statsForMissingField("first_name", "last_name", "emp_no", "salary"); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index e04344ca86732..9dfcffbf48e6e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -238,7 +238,7 @@ public void testCombineProjectionWithAggregation() { /** * Project[[s{r}#4 AS d, s{r}#4, last_name{f}#21, first_name{f}#18]] - * \_Limit[500[INTEGER]] + * \_Limit[1000[INTEGER]] * \_Aggregate[[last_name{f}#21, first_name{f}#18],[SUM(salary{f}#22) AS s, last_name{f}#21, first_name{f}#18]] * \_EsRelation[test][_meta_field{f}#23, emp_no{f}#17, first_name{f}#18, ..] */ @@ -297,7 +297,7 @@ public void testCombineProjectionWithPruning() { /** * Expects - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[f{r}#7],[SUM(emp_no{f}#15) AS s, COUNT(first_name{f}#16) AS c, first_name{f}#16 AS f]] * \_EsRelation[test][_meta_field{f}#21, emp_no{f}#15, first_name{f}#16, ..] */ @@ -327,7 +327,7 @@ public void testCombineProjectionWithAggregationFirstAndAliasedGroupingUsedInAgg /** * Expects - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[f{r}#7],[SUM(emp_no{f}#15) AS s, first_name{f}#16 AS f]] * \_EsRelation[test][_meta_field{f}#21, emp_no{f}#15, first_name{f}#16, ..] */ @@ -987,9 +987,9 @@ public void testCombineOrderByThroughFilter() { /** * Expected - * TopN[[Order[first_name{f}#170,ASC,LAST]],500[INTEGER]] + * TopN[[Order[first_name{f}#170,ASC,LAST]],1000[INTEGER]] * \_MvExpand[first_name{f}#170] - * \_TopN[[Order[emp_no{f}#169,ASC,LAST]],500[INTEGER]] + * \_TopN[[Order[emp_no{f}#169,ASC,LAST]],1000[INTEGER]] * \_EsRelation[test][avg_worked_seconds{f}#167, birth_date{f}#168, emp_n..] */ public void testDontCombineOrderByThroughMvExpand() { @@ -1009,10 +1009,10 @@ public void testDontCombineOrderByThroughMvExpand() { /** * Expected - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_MvExpand[x{r}#159] * \_EsqlProject[[first_name{f}#162 AS x]] - * \_Limit[500[INTEGER]] + * \_Limit[1000[INTEGER]] * \_EsRelation[test][first_name{f}#162] */ public void testCopyDefaultLimitPastMvExpand() { @@ -1969,7 +1969,7 @@ public void testPruneChainedEval() { /** * Expects - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[],[COUNT(salary{f}#1345) AS c]] * \_EsRelation[test][_meta_field{f}#1346, emp_no{f}#1340, first_name{f}#..] */ @@ -2008,7 +2008,7 @@ public void testPruneUnusedAggSimple() { /** * Expects - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[],[COUNT(salary{f}#19) AS x]] * \_EsRelation[test][_meta_field{f}#20, emp_no{f}#14, first_name{f}#15, ..] 
*/ @@ -2053,7 +2053,7 @@ public void testPruneUnusedAggsChainedAgg() { /** * Expects * Project[[c{r}#342]] - * \_Limit[500[INTEGER]] + * \_Limit[1000[INTEGER]] * \_Filter[min{r}#348 > 10[INTEGER]] * \_Aggregate[[],[COUNT(salary{f}#367) AS c, MIN(salary{f}#367) AS min]] * \_EsRelation[test][_meta_field{f}#368, emp_no{f}#362, first_name{f}#36..] @@ -2084,7 +2084,7 @@ public void testPruneMixedAggInsideUnusedEval() { /** * Expects * Eval[[max{r}#6 + min{r}#9 + c{r}#3 AS x, min{r}#9 AS y, c{r}#3 AS z]] - * \_Limit[500[INTEGER]] + * \_Limit[1000[INTEGER]] * \_Aggregate[[],[COUNT(salary{f}#26) AS c, MAX(salary{f}#26) AS max, MIN(salary{f}#26) AS min]] * \_EsRelation[test][_meta_field{f}#27, emp_no{f}#21, first_name{f}#22, ..] */ @@ -2106,7 +2106,7 @@ public void testNoPruningWhenDealingJustWithEvals() { * Expects * Project[[y{r}#6 AS z]] * \_Eval[[emp_no{f}#11 + 1[INTEGER] AS y]] - * \_Limit[500[INTEGER]] + * \_Limit[1000[INTEGER]] * \_EsRelation[test][_meta_field{f}#17, emp_no{f}#11, first_name{f}#12, ..] */ public void testNoPruningWhenChainedEvals() { @@ -2127,7 +2127,7 @@ public void testNoPruningWhenChainedEvals() { /** * Expects * Project[[salary{f}#20 AS x, emp_no{f}#15 AS y]] - * \_Limit[500[INTEGER]] + * \_Limit[1000[INTEGER]] * \_EsRelation[test][_meta_field{f}#21, emp_no{f}#15, first_name{f}#16, ..] */ public void testPruningDuplicateEvals() { @@ -2153,7 +2153,7 @@ public void testPruningDuplicateEvals() { /** * Expects - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[],[COUNT(salary{f}#24) AS cx, COUNT(emp_no{f}#19) AS cy]] * \_EsRelation[test][_meta_field{f}#25, emp_no{f}#19, first_name{f}#20, ..] */ @@ -2177,7 +2177,7 @@ public void testPruneEvalAliasOnAggUngrouped() { /** * Expects - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[x{r}#6],[COUNT(emp_no{f}#17) AS cy, salary{f}#22 AS x]] * \_EsRelation[test][_meta_field{f}#23, emp_no{f}#17, first_name{f}#18, ..] */ @@ -2202,7 +2202,7 @@ public void testPruneEvalAliasOnAggGroupedByAlias() { /** * Expects - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[gender{f}#22],[COUNT(emp_no{f}#20) AS cy, MIN(salary{f}#25) AS cx, gender{f}#22]] * \_EsRelation[test][_meta_field{f}#26, emp_no{f}#20, first_name{f}#21, ..] */ @@ -2228,7 +2228,7 @@ public void testPruneEvalAliasOnAggGrouped() { /** * Expects - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[gender{f}#21],[COUNT(emp_no{f}#19) AS cy, MIN(salary{f}#24) AS cx, gender{f}#21]] * \_EsRelation[test][_meta_field{f}#25, emp_no{f}#19, first_name{f}#20, ..] */ @@ -2254,7 +2254,7 @@ public void testPruneEvalAliasMixedWithRenameOnAggGrouped() { /** * Expects - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[gender{f}#19],[COUNT(x{r}#3) AS cy, MIN(x{r}#3) AS cx, gender{f}#19]] * \_Eval[[emp_no{f}#17 + 1[INTEGER] AS x]] * \_EsRelation[test][_meta_field{f}#23, emp_no{f}#17, first_name{f}#18, ..] @@ -2283,7 +2283,7 @@ public void testEvalAliasingAcrossCommands() { /** * Expects - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[gender{f}#19],[COUNT(x{r}#3) AS cy, MIN(x{r}#3) AS cx, gender{f}#19]] * \_Eval[[emp_no{f}#17 + 1[INTEGER] AS x]] * \_EsRelation[test][_meta_field{f}#23, emp_no{f}#17, first_name{f}#18, ..] 
@@ -2310,7 +2310,7 @@ public void testEvalAliasingInsideSameCommand() { /** * Expects - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[gender{f}#22],[COUNT(z{r}#9) AS cy, MIN(x{r}#3) AS cx, gender{f}#22]] * \_Eval[[emp_no{f}#20 + 1[INTEGER] AS x, x{r}#3 + 1[INTEGER] AS z]] * \_EsRelation[test][_meta_field{f}#26, emp_no{f}#20, first_name{f}#21, ..] @@ -2355,7 +2355,7 @@ public void testPruneRenameOnAgg() { /** * Expects - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[gender{f}#14],[COUNT(salary{f}#17) AS cy, MIN(emp_no{f}#12) AS cx, gender{f}#14]] * \_EsRelation[test][_meta_field{f}#18, emp_no{f}#12, first_name{f}#13, ..] */ @@ -2383,7 +2383,7 @@ public void testPruneRenameOnAggBy() { * Expects * Project[[c1{r}#2, c2{r}#4, cs{r}#6, cm{r}#8, cexp{r}#10]] * \_Eval[[c1{r}#2 AS c2, c1{r}#2 AS cs, c1{r}#2 AS cm, c1{r}#2 AS cexp]] - * \_Limit[500[INTEGER]] + * \_Limit[1000[INTEGER]] * \_Aggregate[[],[COUNT([2a][KEYWORD]) AS c1]] * \_EsRelation[test][_meta_field{f}#17, emp_no{f}#11, first_name{f}#12, ..] */ @@ -2414,7 +2414,7 @@ public void testEliminateDuplicateAggsCountAll() { * Expects * Project[[c1{r}#7, cx{r}#10, cs{r}#12, cy{r}#15]] * \_Eval[[c1{r}#7 AS cx, c1{r}#7 AS cs, c1{r}#7 AS cy]] - * \_Limit[500[INTEGER]] + * \_Limit[1000[INTEGER]] * \_Aggregate[[],[COUNT([2a][KEYWORD]) AS c1]] * \_EsRelation[test][_meta_field{f}#22, emp_no{f}#16, first_name{f}#17, ..] */ @@ -2446,7 +2446,7 @@ public void testEliminateDuplicateAggsWithAliasedFields() { /** * Expects * Project[[min{r}#1385, max{r}#1388, min{r}#1385 AS min2, max{r}#1388 AS max2, gender{f}#1398]] - * \_Limit[500[INTEGER]] + * \_Limit[1000[INTEGER]] * \_Aggregate[[gender{f}#1398],[MIN(salary{f}#1401) AS min, MAX(salary{f}#1401) AS max, gender{f}#1398]] * \_EsRelation[test][_meta_field{f}#1402, emp_no{f}#1396, first_name{f}#..] */ @@ -2492,7 +2492,7 @@ public void testEliminateDuplicateAggWithNull() { /** * Expects * Project[[max(x){r}#11, max(x){r}#11 AS max(y), max(x){r}#11 AS max(z)]] - * \_Limit[500[INTEGER]] + * \_Limit[1000[INTEGER]] * \_Aggregate[[],[MAX(salary{f}#21) AS max(x)]] * \_EsRelation[test][_meta_field{f}#22, emp_no{f}#16, first_name{f}#17, ..] */ @@ -2546,7 +2546,7 @@ public void testMvExpandFoldable() { /** * Expected - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[a{r}#2],[COUNT([2a][KEYWORD]) AS bar]] * \_Row[[1[INTEGER] AS a]] */ @@ -2565,7 +2565,7 @@ public void testRenameStatsDropGroup() { /** * Expected - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[a{r}#2, bar{r}#8],[COUNT([2a][KEYWORD]) AS baz, b{r}#4 AS bar]] * \_Row[[1[INTEGER] AS a, 2[INTEGER] AS b]] */ @@ -2584,7 +2584,7 @@ public void testMultipleRenameStatsDropGroup() { /** * Expected - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[emp_no{f}#11, bar{r}#4],[MAX(salary{f}#16) AS baz, gender{f}#13 AS bar]] * \_EsRelation[test][_meta_field{f}#17, emp_no{f}#11, first_name{f}#12, ..] */ @@ -2626,7 +2626,7 @@ private void aggFieldName(Expression exp, Class /** * Expects - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[],[SUM(emp_no{f}#4) AS sum(emp_no)]] * \_EsRelation[test][_meta_field{f}#10, emp_no{f}#4, first_name{f}#5, ge..] */ @@ -2658,7 +2658,7 @@ public void testIsNotNullConstraintForStatsWithGrouping() { /** * Expected - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[salary{f}#1185],[SUM(salary{f}#1185) AS sum(salary), salary{f}#1185]] * \_EsRelation[test][_meta_field{f}#1186, emp_no{f}#1180, first_name{f}#..] 
*/ @@ -2677,7 +2677,7 @@ public void testIsNotNullConstraintForStatsWithAndOnGrouping() { /** * Expects - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[x{r}#4],[SUM(salary{f}#13) AS sum(salary), salary{f}#13 AS x]] * \_EsRelation[test][_meta_field{f}#14, emp_no{f}#8, first_name{f}#9, ge..] */ @@ -2697,7 +2697,7 @@ public void testIsNotNullConstraintForStatsWithAndOnGroupingAlias() { /** * Expects - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[salary{f}#13],[SUM(emp_no{f}#8) AS sum(x), salary{f}#13]] * \_EsRelation[test][_meta_field{f}#14, emp_no{f}#8, first_name{f}#9, ge..] */ @@ -2719,7 +2719,7 @@ public void testIsNotNullConstraintSkippedForStatsWithAlias() { /** * Expects - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[],[SUM(emp_no{f}#8) AS a, MIN(salary{f}#13) AS b]] * \_EsRelation[test][_meta_field{f}#14, emp_no{f}#8, first_name{f}#9, ge..] */ @@ -2738,7 +2738,7 @@ public void testIsNotNullConstraintForStatsWithMultiAggWithoutGrouping() { /** * Expects - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[gender{f}#11],[SUM(emp_no{f}#9) AS a, MIN(salary{f}#14) AS b, gender{f}#11]] * \_EsRelation[test][_meta_field{f}#15, emp_no{f}#9, first_name{f}#10, g..] */ @@ -2757,7 +2757,7 @@ public void testIsNotNullConstraintForStatsWithMultiAggWithGrouping() { /** * Expects - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[emp_no{f}#9],[SUM(emp_no{f}#9) AS a, MIN(salary{f}#14) AS b, emp_no{f}#9]] * \_EsRelation[test][_meta_field{f}#15, emp_no{f}#9, first_name{f}#10, g..] */ @@ -2776,7 +2776,7 @@ public void testIsNotNullConstraintForStatsWithMultiAggWithAndOnGrouping() { /** * Expects - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[w{r}#14, g{r}#16],[COUNT(b{r}#24) AS c, w{r}#14, gender{f}#32 AS g]] * \_Eval[[emp_no{f}#30 / 10[INTEGER] AS x, x{r}#4 + salary{f}#35 AS y, y{r}#8 / 4[INTEGER] AS z, z{r}#11 * 2[INTEGER] + * 3[INTEGER] AS w, salary{f}#35 + 4[INTEGER] / 2[INTEGER] AS a, a{r}#21 + 3[INTEGER] AS b]] @@ -2804,7 +2804,7 @@ public void testIsNotNullConstraintForAliasedExpressions() { /** * Expects - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[],[SPATIALCENTROID(location{f}#9) AS centroid]] * \_EsRelation[airports][abbrev{f}#5, location{f}#9, name{f}#6, scalerank{f}..] */ @@ -2829,7 +2829,7 @@ public void testSpatialTypesAndStatsUseDocValues() { /** * Expects - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[emp_no%2{r}#6],[COUNT(salary{f}#12) AS c, emp_no%2{r}#6]] * \_Eval[[emp_no{f}#7 % 2[INTEGER] AS emp_no%2]] * \_EsRelation[test][_meta_field{f}#13, emp_no{f}#7, first_name{f}#8, ge..] @@ -2854,7 +2854,7 @@ public void testNestedExpressionsInGroups() { /** * Expects - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[emp_no{f}#6],[COUNT(__c_COUNT@1bd45f36{r}#16) AS c, emp_no{f}#6]] * \_Eval[[salary{f}#11 + 1[INTEGER] AS __c_COUNT@1bd45f36]] * \_EsRelation[test][_meta_field{f}#12, emp_no{f}#6, first_name{f}#7, ge..] 
@@ -2879,7 +2879,7 @@ public void testNestedExpressionsInAggs() { } /** - * Limit[500[INTEGER]] + * Limit[1000[INTEGER]] * \_Aggregate[[emp_no%2{r}#7],[COUNT(__c_COUNT@fb7855b0{r}#18) AS c, emp_no%2{r}#7]] * \_Eval[[emp_no{f}#8 % 2[INTEGER] AS emp_no%2, 100[INTEGER] / languages{f}#11 + salary{f}#13 + 1[INTEGER] AS __c_COUNT * @fb7855b0]] @@ -2952,7 +2952,7 @@ public void testLogicalPlanOptimizerVerificationException() { * Project[[x{r}#5]] * \_Eval[[____x_AVG@9efc3cf3_SUM@daf9f221{r}#18 / ____x_AVG@9efc3cf3_COUNT@53cd08ed{r}#19 AS __x_AVG@9efc3cf3, __x_AVG@ * 9efc3cf3{r}#16 / 2[INTEGER] + __x_MAX@475d0e4d{r}#17 AS x]] - * \_Limit[500[INTEGER]] + * \_Limit[1000[INTEGER]] * \_Aggregate[[],[SUM(salary{f}#11) AS ____x_AVG@9efc3cf3_SUM@daf9f221, COUNT(salary{f}#11) AS ____x_AVG@9efc3cf3_COUNT@53cd0 * 8ed, MAX(salary{f}#11) AS __x_MAX@475d0e4d]] * \_EsRelation[test][_meta_field{f}#12, emp_no{f}#6, first_name{f}#7, ge..] @@ -2990,7 +2990,7 @@ public void testStatsExpOverAggs() { * \_Eval[[$$SUM$$$AVG$avg(salary_%_3)>$0$0{r}#29 / $$COUNT$$$AVG$avg(salary_%_3)>$0$1{r}#30 AS $$AVG$avg(salary_%_3)>$0, * $$AVG$avg(salary_%_3)>$0{r}#23 + $$MAX$avg(salary_%_3)>$1{r}#24 AS x, * $$MIN$min(emp_no_/_3)>$2{r}#25 + 10[INTEGER] - $$MEDIAN$min(emp_no_/_3)>$3{r}#26 AS y]] - * \_Limit[500[INTEGER]] + * \_Limit[1000[INTEGER]] * \_Aggregate[[z{r}#12],[SUM($$salary_%_3$AVG$0{r}#27) AS $$SUM$$$AVG$avg(salary_%_3)>$0$0, * COUNT($$salary_%_3$AVG$0{r}#27) AS $$COUNT$$$AVG$avg(salary_%_3)>$0$1, * MAX(emp_no{f}#13) AS $$MAX$avg(salary_%_3)>$1, @@ -3047,7 +3047,7 @@ public void testStatsExpOverAggsMulti() { * CONCAT(TOSTRING($$AVG$CONCAT(TO_STRIN>$0{r}#23),TOSTRING($$MAX$CONCAT(TO_STRIN>$1{r}#24)) AS x, * $$MIN$(MIN(emp_no_/_3>$2{r}#25 + 3.141592653589793[DOUBLE] - $$MEDIAN$(MIN(emp_no_/_3>$3{r}#26 / 2.718281828459045[DOUBLE] * AS y]] - * \_Limit[500[INTEGER]] + * \_Limit[1000[INTEGER]] * \_Aggregate[[z{r}#12],[SUM($$salary_%_3$AVG$0{r}#27) AS $$SUM$$$AVG$CONCAT(TO_STRIN>$0$0, * COUNT($$salary_%_3$AVG$0{r}#27) AS $$COUNT$$$AVG$CONCAT(TO_STRIN>$0$1, * MAX(emp_no{f}#13) AS $$MAX$CONCAT(TO_STRIN>$1, @@ -3109,7 +3109,7 @@ public void testStatsExpOverAggsWithScalars() { * \_Eval[[$$$$avg(salary)_+_m>$AVG$0$SUM$0{r}#48 / $$max(salary)_+_3>$COUNT$2{r}#46 AS $$avg(salary)_+_m>$AVG$0, $$avg( * salary)_+_m>$AVG$0{r}#44 + $$avg(salary)_+_m>$MAX$1{r}#45 AS a, $$avg(salary)_+_m>$MAX$1{r}#45 + 3[INTEGER] + * 3.141592653589793[DOUBLE] + $$max(salary)_+_3>$COUNT$2{r}#46 AS b]] - * \_Limit[500[INTEGER]] + * \_Limit[1000[INTEGER]] * \_Aggregate[[w{r}#28],[SUM(salary{f}#39) AS $$$$avg(salary)_+_m>$AVG$0$SUM$0, MAX(salary{f}#39) AS $$avg(salary)_+_m>$MAX$1 * , COUNT(salary{f}#39) AS $$max(salary)_+_3>$COUNT$2, MIN(salary{f}#39) AS $$count(salary)_->$MIN$3]] * \_Eval[[languages{f}#37 % 2[INTEGER] AS w]] @@ -3176,7 +3176,7 @@ public void testStatsExpOverAggsWithScalarAndDuplicateAggs() { /** * Expects * Project[[a{r}#5, a{r}#5 AS b, w{r}#12]] - * \_Limit[500[INTEGER]] + * \_Limit[1000[INTEGER]] * \_Aggregate[[w{r}#12],[SUM($$salary_/_2_+_la>$SUM$0{r}#26) AS a, w{r}#12]] * \_Eval[[emp_no{f}#16 % 2[INTEGER] AS w, salary{f}#21 / 2[INTEGER] + languages{f}#19 AS $$salary_/_2_+_la>$SUM$0]] * \_EsRelation[test][_meta_field{f}#22, emp_no{f}#16, first_name{f}#17, ..] 
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index 4030e7e0bcbef..0c87db5e5c6db 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -349,7 +349,7 @@ public void testExactlyOneExtractorPerFieldWithPruning() { /** * Expects - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_AggregateExec[[],[SUM(salary{f}#882) AS x],FINAL,null] * \_ExchangeExec[[sum{r}#887, seen{r}#888],true] * \_FragmentExec[filter=null, estimatedRowSize=0, fragment=[ @@ -615,7 +615,7 @@ public void testExtractGroupingFieldsIfAggdWithEval() { /** * Expects * EvalExec[[agg_emp{r}#4 + 7[INTEGER] AS x]] - * \_LimitExec[500[INTEGER]] + * \_LimitExec[1000[INTEGER]] * \_AggregateExec[[],[SUM(emp_no{f}#8) AS agg_emp],FINAL,16] * \_ExchangeExec[[sum{r}#18, seen{r}#19],true] * \_AggregateExec[[],[SUM(emp_no{f}#8) AS agg_emp],PARTIAL,8] @@ -647,7 +647,7 @@ public void testQueryWithAggregation() { /** * Expects * EvalExec[[agg_emp{r}#4 + 7[INTEGER] AS x]] - * \_LimitExec[500[INTEGER]] + * \_LimitExec[1000[INTEGER]] * \_AggregateExec[[],[SUM(emp_no{f}#8) AS agg_emp],FINAL,16] * \_ExchangeExec[[sum{r}#18, seen{r}#19],true] * \_AggregateExec[[],[SUM(emp_no{f}#8) AS agg_emp],PARTIAL,8] @@ -1562,14 +1562,14 @@ public void testPushDownRLike() { } /** - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_ExchangeExec[[],false] * \_ProjectExec[[_meta_field{f}#9, emp_no{f}#3, first_name{f}#4, gender{f}#5, job{f}#10, job.raw{f}#11, languages{f}#6, last_n * ame{f}#7, long_noidx{f}#12, salary{f}#8]] * \_FieldExtractExec[_meta_field{f}#9, emp_no{f}#3, first_name{f}#4, gen..] * \_EsQueryExec[test], query[{"esql_single_value":{"field":"first_name","next": * {"term":{"first_name":{"value":"foo","case_insensitive":true}}},"source":"first_name =~ \"foo\"@2:9"}}] - * [_doc{f}#23], limit[500], sort[] estimatedRowSize[324] + * [_doc{f}#23], limit[1000], sort[] estimatedRowSize[324] */ @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103599") public void testPushDownEqualsIgnoreCase() { @@ -1591,12 +1591,12 @@ public void testPushDownEqualsIgnoreCase() { } /** - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_ExchangeExec[[],false] * \_ProjectExec[[_meta_field{f}#12, emp_no{f}#6, first_name{f}#7, gender{f}#8, job{f}#13, job.raw{f}#14, languages{f}#9, last_ * name{f}#10, long_noidx{f}#15, salary{f}#11, x{r}#4]] * \_FieldExtractExec[_meta_field{f}#12, emp_no{f}#6, gender{f}#8, job{f}..] 
- * \_LimitExec[500[INTEGER]] + * \_LimitExec[1000[INTEGER]] * \_FilterExec[x{r}#4 =~ [66 6f 6f][KEYWORD]] * \_EvalExec[[CONCAT(first_name{f}#7,[66 6f 6f][KEYWORD]) AS x]] * \_FieldExtractExec[first_name{f}#7] @@ -2081,7 +2081,7 @@ public void testAvgSurrogateFunctionAfterRenameAndLimit() { /** * Expects - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_AggregateExec[[languages{f}#9],[MIN(salary{f}#11) AS m, languages{f}#9],FINAL,8] * \_ExchangeExec[[languages{f}#9, min{r}#16, seen{r}#17],true] * \_LocalSourceExec[[languages{f}#9, min{r}#16, seen{r}#17],EMPTY] @@ -2113,7 +2113,7 @@ public boolean exists(String field) { /** * Expects * intermediate plan - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_AggregateExec[[],[COUNT(emp_no{f}#6) AS c],FINAL,null] * \_ExchangeExec[[count{r}#16, seen{r}#17],true] * \_FragmentExec[filter=null, estimatedRowSize=0, fragment=[ @@ -2122,7 +2122,7 @@ public boolean exists(String field) { * \_EsRelation[test][_meta_field{f}#12, emp_no{f}#6, first_name{f}#7, ge..]]] * * and final plan is - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_AggregateExec[[],[COUNT(emp_no{f}#6) AS c],FINAL,8] * \_ExchangeExec[[count{r}#16, seen{r}#17],true] * \_LocalSourceExec[[count{r}#16, seen{r}#17],[LongVectorBlock[vector=ConstantLongVector[positions=1, value=0]]]] @@ -2184,7 +2184,7 @@ public void testGlobalAggFoldingOutput() { * Expects * ProjectExec[[a{r}#5]] * \_EvalExec[[__a_SUM@734e2841{r}#16 / __a_COUNT@12536eab{r}#17 AS a]] - * \_LimitExec[500[INTEGER]] + * \_LimitExec[1000[INTEGER]] * \_AggregateExec[[],[SUM(emp_no{f}#6) AS __a_SUM@734e2841, COUNT(emp_no{f}#6) AS __a_COUNT@12536eab],FINAL,24] * \_ExchangeExec[[sum{r}#18, seen{r}#19, count{r}#20, seen{r}#21],true] * \_LocalSourceExec[[sum{r}#18, seen{r}#19, count{r}#20, seen{r}#21],[LongArrayBlock[positions=1, mvOrdering=UNORDERED, @@ -2217,7 +2217,7 @@ public void testPartialAggFoldingOutputForSyntheticAgg() { /** * Before local optimizations: * - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_AggregateExec[[],[SPATIALCENTROID(location{f}#9) AS centroid],FINAL,null] * \_ExchangeExec[[xVal{r}#10, xDel{r}#11, yVal{r}#12, yDel{r}#13, count{r}#14],true] * \_FragmentExec[filter=null, estimatedRowSize=0, fragment=[ @@ -2226,7 +2226,7 @@ public void testPartialAggFoldingOutputForSyntheticAgg() { * * After local optimizations: * - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_AggregateExec[[],[SPATIALCENTROID(location{f}#9) AS centroid],FINAL,50] * \_ExchangeExec[[xVal{r}#10, xDel{r}#11, yVal{r}#12, yDel{r}#13, count{r}#14],true] * \_AggregateExec[[],[SPATIALCENTROID(location{f}#9) AS centroid],PARTIAL,50] @@ -2274,7 +2274,7 @@ public void testSpatialTypesAndStatsUseDocValues() { /** * Before local optimizations: * - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_AggregateExec[[],[SPATIALCENTROID(__centroid_SPATIALCENTROID@b54a93a7{r}#10) AS centroid],FINAL,null] * \_ExchangeExec[[xVal{r}#11, xDel{r}#12, yVal{r}#13, yDel{r}#14, count{r}#15],true] * \_FragmentExec[filter=null, estimatedRowSize=0, fragment=[ @@ -2284,7 +2284,7 @@ public void testSpatialTypesAndStatsUseDocValues() { * * After local optimizations: * - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_AggregateExec[[],[SPATIALCENTROID(__centroid_SPATIALCENTROID@ad2847b6{r}#10) AS centroid],FINAL,50] * \_ExchangeExec[[xVal{r}#11, xDel{r}#12, yVal{r}#13, yDel{r}#14, count{r}#15],true] * \_AggregateExec[[],[SPATIALCENTROID(__centroid_SPATIALCENTROID@ad2847b6{r}#10) AS 
centroid],PARTIAL,50] @@ -2337,7 +2337,7 @@ public void testSpatialTypesAndStatsUseDocValuesNested() { /** * Before local optimizations: * - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_AggregateExec[[],[SPATIALCENTROID(__centroid_SPATIALCENTROID@ec8dd77e{r}#7) AS centroid],FINAL,null] * \_AggregateExec[[],[SPATIALCENTROID(__centroid_SPATIALCENTROID@ec8dd77e{r}#7) AS centroid],PARTIAL,null] * \_EvalExec[[[1 1 0 0 0 0 0 30 e2 4c 7c 45 40 0 0 e0 92 b0 82 2d 40][GEO_POINT] AS __centroid_SPATIALCENTROID@ec8dd77e]] @@ -2346,7 +2346,7 @@ public void testSpatialTypesAndStatsUseDocValuesNested() { * * After local optimizations we expect no changes because field is extracted: * - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_AggregateExec[[],[SPATIALCENTROID(__centroid_SPATIALCENTROID@7ff910a{r}#7) AS centroid],FINAL,50] * \_AggregateExec[[],[SPATIALCENTROID(__centroid_SPATIALCENTROID@7ff910a{r}#7) AS centroid],PARTIAL,50] * \_EvalExec[[[1 1 0 0 0 0 0 30 e2 4c 7c 45 40 0 0 e0 92 b0 82 2d 40][GEO_POINT] AS __centroid_SPATIALCENTROID@7ff910a]] @@ -2389,7 +2389,7 @@ public void testSpatialTypesAndStatsUseDocValuesNestedLiteral() { /** * Before local optimizations: * - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_AggregateExec[[],[SPATIALCENTROID(location{f}#11) AS centroid, COUNT([2a][KEYWORD]) AS count],FINAL,null] * \_ExchangeExec[[xVal{r}#12, xDel{r}#13, yVal{r}#14, yDel{r}#15, count{r}#16, count{r}#17, seen{r}#18],true] * \_FragmentExec[filter=null, estimatedRowSize=0, fragment=[ @@ -2398,7 +2398,7 @@ public void testSpatialTypesAndStatsUseDocValuesNestedLiteral() { * * After local optimizations: * - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_AggregateExec[[],[SPATIALCENTROID(location{f}#11) AS centroid, COUNT([2a][KEYWORD]) AS count],FINAL,58] * \_ExchangeExec[[xVal{r}#12, xDel{r}#13, yVal{r}#14, yDel{r}#15, count{r}#16, count{r}#17, seen{r}#18],true] * \_AggregateExec[[],[COUNT([2a][KEYWORD]) AS count, SPATIALCENTROID(location{f}#11) AS centroid],PARTIAL,58] @@ -2449,7 +2449,7 @@ public void testSpatialTypesAndStatsUseDocValuesMultiAggregations() { /** * Before local optimizations: * - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_AggregateExec[[],[SPATIALCENTROID(location{f}#14) AS airports, SPATIALCENTROID(city_location{f}#17) AS cities, COUNT([2a][KEY * WORD]) AS count],FINAL,null] * \_ExchangeExec[[xVal{r}#18, xDel{r}#19, yVal{r}#20, yDel{r}#21, count{r}#22, xVal{r}#23, xDel{r}#24, yVal{r}#25, yDel{r}#26, @@ -2461,7 +2461,7 @@ public void testSpatialTypesAndStatsUseDocValuesMultiAggregations() { * * After local optimizations: * - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_AggregateExec[[],[SPATIALCENTROID(location{f}#14) AS airports, SPATIALCENTROID(city_location{f}#17) AS cities, COUNT([2a][KEY * WORD]) AS count],FINAL,108] * \_ExchangeExec[[xVal{r}#18, xDel{r}#19, yVal{r}#20, yDel{r}#21, count{r}#22, xVal{r}#23, xDel{r}#24, yVal{r}#25, yDel{r}#26, @@ -2518,7 +2518,7 @@ public void testSpatialTypesAndStatsUseDocValuesMultiSpatialAggregations() { /** * Before local optimizations: * - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_AggregateExec[[],[SPATIALCENTROID(location{f}#12) AS centroid, COUNT([2a][KEYWORD]) AS count],FINAL,null] * \_ExchangeExec[[xVal{r}#13, xDel{r}#14, yVal{r}#15, yDel{r}#16, count{r}#17, count{r}#18, seen{r}#19],true] * \_FragmentExec[filter=null, estimatedRowSize=0, fragment=[ @@ -2528,7 +2528,7 @@ public void 
testSpatialTypesAndStatsUseDocValuesMultiSpatialAggregations() { * * After local optimizations: * - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_AggregateExec[[],[SPATIALCENTROID(location{f}#11) AS centroid, COUNT([2a][KEYWORD]) AS count],FINAL,58] * \_ExchangeExec[[xVal{r}#12, xDel{r}#13, yVal{r}#14, yDel{r}#15, count{r}#16, count{r}#17, seen{r}#18],true] * \_AggregateExec[[],[COUNT([2a][KEYWORD]) AS count, SPATIALCENTROID(location{f}#11) AS centroid],PARTIAL,58] @@ -2585,7 +2585,7 @@ public void testSpatialTypesAndStatsUseDocValuesMultiAggregationsFiltered() { /** * Before local optimizations: * - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_AggregateExec[[scalerank{f}#10],[SPATIALCENTROID(location{f}#12) AS centroid, COUNT([2a][KEYWORD]) AS count, scalerank{f}#10], * FINAL,null] * \_ExchangeExec[[scalerank{f}#10, xVal{r}#13, xDel{r}#14, yVal{r}#15, yDel{r}#16, count{r}#17, count{r}#18, seen{r}#19],true] @@ -2595,7 +2595,7 @@ public void testSpatialTypesAndStatsUseDocValuesMultiAggregationsFiltered() { * * After local optimizations: * - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_AggregateExec[[scalerank{f}#10],[SPATIALCENTROID(location{f}#12) AS centroid, COUNT([2a][KEYWORD]) AS count, scalerank{f}#10], * FINAL,62] * \_ExchangeExec[[scalerank{f}#10, xVal{r}#13, xDel{r}#14, yVal{r}#15, yDel{r}#16, count{r}#17, count{r}#18, seen{r}#19],true] @@ -2654,7 +2654,7 @@ public void testSpatialTypesAndStatsUseDocValuesMultiAggregationsGrouped() { /** * Before local optimizations: * - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_AggregateExec[[],[SPATIALCENTROID(centroid{r}#4) AS centroid, SUM(count{r}#6) AS count],FINAL,null] * \_AggregateExec[[],[SPATIALCENTROID(centroid{r}#4) AS centroid, SUM(count{r}#6) AS count],PARTIAL,null] * \_AggregateExec[[scalerank{f}#16],[SPATIALCENTROID(location{f}#18) AS centroid, COUNT([2a][KEYWORD]) AS count],FINAL,null] @@ -2665,7 +2665,7 @@ public void testSpatialTypesAndStatsUseDocValuesMultiAggregationsGrouped() { * * After local optimizations: * - * LimitExec[500[INTEGER]] + * LimitExec[1000[INTEGER]] * \_AggregateExec[[],[SPATIALCENTROID(centroid{r}#4) AS centroid, SUM(count{r}#6) AS count],FINAL,58] * \_AggregateExec[[],[SPATIALCENTROID(centroid{r}#4) AS centroid, SUM(count{r}#6) AS count],PARTIAL,58] * \_AggregateExec[[scalerank{f}#16],[SPATIALCENTROID(location{f}#18) AS centroid, COUNT([2a][KEYWORD]) AS count],FINAL,58] diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/100_bug_fix.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/100_bug_fix.yml index f44b45a8be1d2..44d7290cbc002 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/100_bug_fix.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/100_bug_fix.yml @@ -3,7 +3,7 @@ - skip: version: " - 8.11.99" reason: "fixes in 8.12 or later" - features: warnings + features: allowed_warnings_regex - do: bulk: index: test @@ -14,10 +14,11 @@ - { "index": { } } - { "emp_no": 20 } - do: - warnings: - - "Line 1:37: evaluation of [to_ip(coalesce(ip1.keyword, \"255.255.255.255\"))] failed, treating result as null. Only first 20 failures recorded." + allowed_warnings_regex: + - "Line 1:37: evaluation of \\[to_ip\\(coalesce\\(ip1.keyword, \\\\\"255.255.255.255\\\\\"\\)\\)\\] failed, treating result as null. Only first 20 failures recorded." - "Line 1:37: java.lang.IllegalArgumentException: '127.0' is not an IP string literal." 
- - "No limit defined, adding default limit of [500]" + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: body: query: 'FROM test | sort emp_no | eval ip = to_ip(coalesce(ip1.keyword, "255.255.255.255")) | keep emp_no, ip' @@ -33,10 +34,10 @@ - do: - warnings: - - "Line 1:98: evaluation of [to_ip(x2)] failed, treating result as null. Only first 20 failures recorded." + allowed_warnings_regex: + - "Line 1:98: evaluation of \\[to_ip\\(x2\\)\\] failed, treating result as null. Only first 20 failures recorded." - "Line 1:98: java.lang.IllegalArgumentException: '127.00.1' is not an IP string literal." - - "No limit defined, adding default limit of [500]" + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'FROM test | sort emp_no | eval x1 = concat(ip1, ip2), x2 = coalesce(x1, "255.255.255.255"), x3 = to_ip(x2) | keep emp_no, x*' diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/20_aggs.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/20_aggs.yml index 4019b3a303345..820df8a3ff066 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/20_aggs.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/20_aggs.yml @@ -3,7 +3,7 @@ setup: - skip: version: " - 8.10.99" reason: "ESQL is available in 8.11+" - features: warnings + features: allowed_warnings_regex - do: indices.create: index: test @@ -115,8 +115,8 @@ setup: --- "Test From": - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test' @@ -140,8 +140,8 @@ setup: --- "Test simple grouping avg": - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | where color == "red" | stats avg(data) by color' @@ -156,8 +156,8 @@ setup: --- "Test From Stats Avg": - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | stats avg(count)' @@ -170,8 +170,8 @@ setup: --- "Test From Stats Avg With Alias": - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | stats f1 = avg(count)' @@ -184,8 +184,8 @@ setup: --- "Test From Stats Count": - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | stats count(data)' @@ -198,8 +198,8 @@ setup: --- "Test From Stats Count With Alias": - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | stats dataCount = count(data)' @@ -212,8 +212,8 @@ setup: --- "Test From Stats Min": - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | stats min(count)' @@ -226,8 +226,8 @@ setup: --- "Test From Stats Min With Alias": - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default 
limit of \\[.*\\]" esql.query: body: query: 'from test | stats minCount=min(count)' @@ -240,8 +240,8 @@ setup: --- "Test From Stats Max": - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | stats max(count)' @@ -254,8 +254,8 @@ setup: --- "Test From Stats Max With Alias": - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | stats maxCount=max(count)' @@ -283,8 +283,8 @@ setup: --- "Test Median On Long": - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | stats med=median(count)' @@ -297,8 +297,8 @@ setup: --- "Test Median On Double": - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | stats med=median(count_d)' @@ -311,8 +311,8 @@ setup: --- "Test Grouping Median On Long": - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | stats med=median(count) by color | sort med' @@ -328,8 +328,8 @@ setup: --- "Test Grouping Median On Double": - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | stats med=median(count_d) by color | sort med' @@ -345,8 +345,8 @@ setup: --- "Test Median Absolute Deviation On Long": - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | stats med=median_absolute_deviation(count)' @@ -359,8 +359,8 @@ setup: --- "Test Median Absolute Deviation On Double": - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | stats med=median_absolute_deviation(count_d)' @@ -373,8 +373,8 @@ setup: --- "Test Grouping Median Absolute Deviation On Long": - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | stats med=median_absolute_deviation(count) by color | sort color' @@ -390,8 +390,8 @@ setup: --- "Test Grouping Median Absolute Deviation On Double": - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | stats med=median_absolute_deviation(count_d) by color | sort color' @@ -407,8 +407,8 @@ setup: --- "Test From Stats Eval": - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | stats avg_count = avg(count) | eval x = avg_count + 7' @@ -420,8 +420,8 @@ setup: --- "Test Stats Where": - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - 
"No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | stats x = avg(count) | where x > 100' @@ -444,8 +444,8 @@ setup: --- "Test Eval Row With Null": - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'row a = 1, b = 2, c = null | eval z = c + b + a' @@ -469,8 +469,8 @@ setup: --- "Test Eval With Null And Count": - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | eval nullsum = count_d + null | stats count(nullsum)' @@ -485,8 +485,8 @@ setup: --- "Test Eval With Multiple Expressions": - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'row l=1, d=1.0, ln=1 + null, dn=1.0 + null | stats sum(l), sum(d), sum(ln), sum(dn)' @@ -510,8 +510,8 @@ setup: --- grouping on text: - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'FROM test | STATS med=median(count) BY text | SORT med' diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml index 41e6d6b2cca77..8f1d64e169fde 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml @@ -3,7 +3,7 @@ setup: - skip: version: " - 8.11.99" reason: "more field loading added in 8.12+" - features: warnings + features: allowed_warnings_regex --- constant_keyword: @@ -30,8 +30,8 @@ constant_keyword: - { "color": "red" } - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test' @@ -44,8 +44,8 @@ constant_keyword: - match: { values.0.1: wow such constant } - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | eval l=length(kind) | keep l' @@ -108,8 +108,8 @@ multivalued keyword: - { "card": ["jack", "of", "diamonds"] } - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test' @@ -139,8 +139,8 @@ keyword no doc_values: - { "card": ["jack", "of", "diamonds"] } - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test' @@ -169,8 +169,8 @@ wildcard: - { "card": "jack of diamonds" } - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test' @@ -180,8 +180,8 @@ wildcard: - match: {values.0.0: jack of diamonds} - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | eval l=length(card) | keep l' 
@@ -220,8 +220,8 @@ numbers: - { i: 123, l: -1234567891011121131, d: 1.234567891234568, mv_i: [123456, -123456], mv_l: [1234567891011121131, -1234567891011121131], mv_d: [1.234567891234568, -1.234567891234568] } - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test' @@ -271,8 +271,8 @@ small_numbers: - { b: 1, s: 1245, hf: 12.01, f: 112.0 } - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test' @@ -291,8 +291,8 @@ small_numbers: - match: {values.0.3: 1245} - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | eval sum_d = b + f + hf + s, sum_i = b + s | keep sum_d, sum_i' @@ -305,8 +305,8 @@ small_numbers: - match: {values.0.1: 1246} - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | eval r_f = round(f), r_hf = round(hf) | keep r_f, r_hf' @@ -341,8 +341,8 @@ scaled_float: - { f: 112.01, d: 1.0 } - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test' @@ -355,8 +355,8 @@ scaled_float: - match: {values.0.1: 112.01} - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | eval sum = d + f | keep sum' @@ -385,8 +385,8 @@ multivalued boolean: - { "booleans": [ true, false, false, false ] } - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test' @@ -417,8 +417,8 @@ ip: - { "ip": "127.0.0.1", "keyword": "127.0.0.2" } - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test' @@ -431,8 +431,8 @@ ip: - match: { values.0.1: "127.0.0.2" } - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | where keyword == "127.0.0.2" | rename ip as IP | drop keyword' @@ -487,8 +487,8 @@ alias: - { "foo": "def", "level1": {"level2": 50}, "some_long": 15, "some_date": "2015-01-01T12:00:00.000Z" } - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | keep foo, bar, level1.level2, level2_alias, some_long, some_long_alias, some_long_alias2, some_date, some_date_alias | sort level2_alias' @@ -531,8 +531,8 @@ alias: - match: { values.1.8: 2015-01-01T12:00:00.000Z } - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | where bar == "abc" | keep foo, bar, level1.level2, level2_alias' @@ -551,8 +551,8 @@ alias: - match: { values.0.3: 10 } - 
do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | where level2_alias == 10 | keep foo, bar, level1.level2, level2_alias' @@ -571,16 +571,16 @@ alias: - match: { values.0.3: 10 } - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | where level2_alias == 20' - length: { values: 0 } - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test | stats x = max(level2_alias)' @@ -609,8 +609,8 @@ version: - { "version": [ "1.2.3", "4.5.6-SNOOPY" ] } - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test' @@ -642,8 +642,8 @@ id: - { "kw": "keyword1" } - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test metadata _id | keep _id, kw' @@ -673,8 +673,8 @@ unsigned_long: - { "number": [ "1", "9223372036854775808", "0", "18446744073709551615" ] } - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test' @@ -892,8 +892,8 @@ geo_point: - { "location": "POINT(1 -1)" } - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test' @@ -925,8 +925,8 @@ cartesian_point: - { "location": "POINT(4321 -1234)" } - do: - warnings: - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: query: 'from test' diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/90_non_indexed.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/90_non_indexed.yml index 80f15b9cb7414..c10554cebf300 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/90_non_indexed.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/90_non_indexed.yml @@ -2,7 +2,7 @@ setup: - skip: version: " - 8.11.99" reason: "extracting non-indexed fields available in 8.12+" - features: allowed_warnings + features: allowed_warnings_regex - do: indices.create: index: test @@ -95,9 +95,10 @@ setup: --- fetch: - do: - allowed_warnings: - - "Field [ip_noidx] cannot be retrieved, it is unsupported or not indexed; returning null" - - "No limit defined, adding default limit of [500]" + allowed_warnings_regex: + - "Field \\[ip_noidx\\] cannot be retrieved, it is unsupported or not indexed; returning null" + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: body: query: 'from test' From 16bdbe4be15fc474348636b8ee977f4147f11b46 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 21 Feb 2024 09:44:00 +0100 Subject: [PATCH 23/49] Remove needless allocations of ReducedRequestInfo from TransportBulkAction (#105646) These things accounted for a couple of GB in needless allocations during bulk indexing in the TSDB benchmark. 
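At its core the change swaps a freshly allocated record per request (and
per merge) for four interned enum constants. A minimal sketch of that
pattern, with hypothetical names rather than the production types:

```java
// Sketch only: of(...) hands back one of four preexisting constants, so
// neither mapping a request nor merging two entries allocates anything.
enum Flags {
    BOTH(true, true),
    FIRST_ONLY(true, false),
    SECOND_ONLY(false, true),
    NEITHER(false, false);

    final boolean first;
    final boolean second;

    Flags(boolean first, boolean second) {
        this.first = first;
        this.second = second;
    }

    static Flags of(boolean first, boolean second) {
        if (first) {
            return second ? BOTH : FIRST_ONLY;
        }
        return second ? SECOND_ONLY : NEITHER;
    }
}
```

Merging two entries then reduces to `Flags.of(a.first || b.first,
a.second || b.second)`, which again only selects an existing constant.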
We could represent the logic
here more cleanly by changing the algorithm to not require collecting a map
etc., but for a 10 minute fix this is fine and saves non-trivial GC.
---
 .../action/bulk/TransportBulkAction.java      | 35 ++++++++++++++-----
 1 file changed, 27 insertions(+), 8 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java
index 32566b559410d..3e661c2efe72f 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java
@@ -353,8 +353,11 @@ protected void doInternalExecute(Task task, BulkRequest bulkRequest, String exec
             .collect(
                 Collectors.toMap(
                     DocWriteRequest::index,
-                    request -> new ReducedRequestInfo(request.isRequireAlias(), request.isRequireDataStream()),
-                    ReducedRequestInfo::merge
+                    request -> ReducedRequestInfo.of(request.isRequireAlias(), request.isRequireDataStream()),
+                    (existing, updated) -> ReducedRequestInfo.of(
+                        existing.isRequireAlias || updated.isRequireAlias,
+                        existing.isRequireDataStream || updated.isRequireDataStream
+                    )
                 )
             );
 
@@ -601,13 +604,29 @@ protected long buildTookInMillis(long startTimeNanos) {
         return TimeUnit.NANOSECONDS.toMillis(relativeTime() - startTimeNanos);
     }
 
-    private record ReducedRequestInfo(boolean isRequireAlias, boolean isRequireDataStream) {
-        private ReducedRequestInfo merge(ReducedRequestInfo other) {
-            return new ReducedRequestInfo(
-                this.isRequireAlias || other.isRequireAlias,
-                this.isRequireDataStream || other.isRequireDataStream
-            );
+    private enum ReducedRequestInfo {
+
+        REQUIRE_ALIAS_AND_DATA_STREAM(true, true),
+        REQUIRE_ALIAS_NOT_DATA_STREAM(true, false),
+
+        REQUIRE_DATA_STREAM_NOT_ALIAS(false, true),
+        REQUIRE_NOTHING(false, false);
+
+        private final boolean isRequireAlias;
+        private final boolean isRequireDataStream;
+
+        ReducedRequestInfo(boolean isRequireAlias, boolean isRequireDataStream) {
+            this.isRequireAlias = isRequireAlias;
+            this.isRequireDataStream = isRequireDataStream;
         }
+
+        static ReducedRequestInfo of(boolean isRequireAlias, boolean isRequireDataStream) {
+            if (isRequireAlias) {
+                return isRequireDataStream ? REQUIRE_ALIAS_AND_DATA_STREAM : REQUIRE_ALIAS_NOT_DATA_STREAM;
+            }
+            return isRequireDataStream ? REQUIRE_DATA_STREAM_NOT_ALIAS : REQUIRE_NOTHING;
+        }
+
+    }
 
     void executeBulk(

From 954c428cde5f3dcab7b1dc99c24ee0ea00a724fa Mon Sep 17 00:00:00 2001
From: Moritz Mack
Date: Wed, 21 Feb 2024 09:47:48 +0100
Subject: [PATCH 24/49] Fix EsAbortPolicy to not force execution if executor
 is already shutting down (#105666)

Submitting a task during shutdown is highly unreliable and in almost all
cases the task will be rejected (removed) anyway. Not forcing execution
if the executor is already shutting down leads to more deterministic
behavior and fixes EsExecutorsTests.testFixedBoundedRejectOnShutdown.
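In plain JDK terms the new guard amounts to the following sketch
(illustrative only; the marker interface below stands in for the real
AbstractRunnable#isForceExecution check):

```java
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadPoolExecutor;

class GuardedAbortPolicy implements RejectedExecutionHandler {

    // hypothetical marker for tasks that must run even when the queue is full
    interface ForceExecution extends Runnable {}

    @Override
    public void rejectedExecution(Runnable task, ThreadPoolExecutor executor) {
        // Once shutdown has started, a force-queued task would almost
        // certainly be drained and dropped anyway, so reject it immediately
        // instead of queueing it non-deterministically.
        if (executor.isShutdown() == false && task instanceof ForceExecution) {
            executor.getQueue().add(task); // stand-in for the real force path
        } else {
            throw new RejectedExecutionException("rejected execution of " + task);
        }
    }
}
```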
--- .../org/elasticsearch/common/util/concurrent/EsAbortPolicy.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java index 52bd736f2bcf4..5dbacbb16aeea 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java @@ -14,7 +14,7 @@ public class EsAbortPolicy extends EsRejectedExecutionHandler { @Override public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) { - if (r instanceof AbstractRunnable abstractRunnable) { + if (executor.isShutdown() == false && r instanceof AbstractRunnable abstractRunnable) { if (abstractRunnable.isForceExecution()) { if (executor.getQueue() instanceof SizeBlockingQueue sizeBlockingQueue) { try {
From 0dca71eff96a4def23a3cc349e4399c6317565c7 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 21 Feb 2024 09:49:37 +0100 Subject: [PATCH 25/49] Avoid allocations in security's Automaton cache (#105654)

We can just cast here instead of capturing the key in the lambda. --- .../xpack/core/security/support/Automatons.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Automatons.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Automatons.java index a364b9cdbb227..5d7a4b279298c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Automatons.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Automatons.java @@ -80,6 +80,7 @@ public static Automaton patterns(String... patterns) { /** * Builds and returns an automaton that will represent the union of all the given patterns. */ + @SuppressWarnings("unchecked") public static Automaton patterns(Collection patterns) { if (patterns.isEmpty()) { return EMPTY; @@ -88,7 +89,7 @@ public static Automaton patterns(Collection patterns) { return buildAutomaton(patterns); } else { try { - return cache.computeIfAbsent(Sets.newHashSet(patterns), ignore -> buildAutomaton(patterns)); + return cache.computeIfAbsent(Sets.newHashSet(patterns), p -> buildAutomaton((Set) p)); } catch (ExecutionException e) { throw unwrapCacheException(e); } @@ -184,7 +185,7 @@ static Automaton pattern(String pattern) { return buildAutomaton(pattern); } else { try { - return cache.computeIfAbsent(pattern, ignore -> buildAutomaton(pattern)); + return cache.computeIfAbsent(pattern, p -> buildAutomaton((String) p)); } catch (ExecutionException e) { throw unwrapCacheException(e); }
From d1ec0d2544e8cfeb9b57ccb208d6080465ee818c Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Wed, 21 Feb 2024 11:02:36 +0100 Subject: [PATCH 26/49] Add protected method to allow overriding the computation of the size of a cache file region (#105570)

This change introduces a protected method in SharedBlobCacheService that allows initializing all cache file regions with the full region size. It causes the underlying SparseFileTracker to always track a full region, and therefore makes it possible to write more bytes to a region whose initial cache file length has changed.
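For example, an extension can pin every region to the full region size, mirroring the new test added below (a usage sketch only: the String key type and the variables nodeEnvironment, settings and threadPool are assumed to be in scope):

    // Override the new protected hook so that every region is sized to the full
    // configured region size; the region's SparseFileTracker can then accept
    // writes beyond the blob length known when the region was first requested.
    var cacheService = new SharedBlobCacheService<String>(
        nodeEnvironment,
        settings,
        threadPool,
        ThreadPool.Names.GENERIC,
        ThreadPool.Names.GENERIC,
        BlobCacheMetrics.NOOP
    ) {
        @Override
        protected int computeCacheFileRegionSize(long fileLength, int region) {
            return super.getRegionSize(); // always a full region, never trimmed to fileLength
        }
    };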
--- .../blobcache/BlobCacheUtils.java | 12 +++++ .../shared/SharedBlobCacheService.java | 11 ++++- .../shared/SharedBlobCacheServiceTests.java | 45 ++++++++++++++++++- 3 files changed, 65 insertions(+), 3 deletions(-)
diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheUtils.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheUtils.java index c4dff2cb4457b..be2971bfa319a 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheUtils.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheUtils.java @@ -9,6 +9,7 @@ import org.apache.lucene.store.IndexInput; import org.elasticsearch.blobcache.common.ByteRange; +import org.elasticsearch.blobcache.shared.SharedBytes; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.core.Streams; @@ -31,6 +32,17 @@ public static int toIntBytes(long l) { return ByteSizeUnit.BYTES.toIntBytes(l); } + /** + * Rounds the length up so that it is aligned on the next page size (defined by SharedBytes.PAGE_SIZE). For example, with a page size of 4096, a length of 4097 is rounded up to 8192 while a length of 4096 is returned unchanged. + */ + public static long toPageAlignedSize(long length) { + int remainder = (int) (length % SharedBytes.PAGE_SIZE); + if (remainder > 0L) { + return length + (SharedBytes.PAGE_SIZE - remainder); + } + return length; + } + public static void throwEOF(long channelPos, long len) throws EOFException { throw new EOFException(format("unexpected EOF reading [%d-%d]", channelPos, channelPos + len)); }
diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java index 2c5997e479209..f2ebe61906258 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java @@ -435,7 +435,14 @@ private ByteRange mapSubRangeToRegion(ByteRange range, int region) { ); } - private int getRegionSize(long fileLength, int region) { + /** + * Compute the size of a cache file region. + * + * @param fileLength the length of the file/blob to cache + * @param region the region number + * @return the size in bytes of the cache file region + */ + protected int computeCacheFileRegionSize(long fileLength, int region) { assert fileLength > 0; final int maxRegion = getEndingRegion(fileLength); assert region >= 0 && region <= maxRegion : region + " - " + maxRegion; @@ -1209,7 +1216,7 @@ public LFUCacheEntry get(KeyType cacheKey, long fileLength, int region) { // if we did not find an entry var entry = keyMapping.get(regionKey); if (entry == null) { - final int effectiveRegionSize = getRegionSize(fileLength, region); + final int effectiveRegionSize = computeCacheFileRegionSize(fileLength, region); entry = keyMapping.computeIfAbsent(regionKey, key -> new LFUCacheEntry(new CacheFileRegion(key, effectiveRegionSize), now)); } // io is volatile, double locking is fine, as long as we assign it last.
diff --git a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java index 049197edd97df..5cdd44ad86332 100644 --- a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java +++ b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.blobcache.BlobCacheMetrics; +import org.elasticsearch.blobcache.BlobCacheUtils; import org.elasticsearch.blobcache.common.ByteRange; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.common.settings.Setting; @@ -1052,7 +1053,6 @@ public void testPopulate() throws Exception { .put("path.home", createTempDir()) .build(); - final AtomicLong relativeTimeInMillis = new AtomicLong(0L); final DeterministicTaskQueue taskQueue = new DeterministicTaskQueue(); try ( NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings)); @@ -1136,4 +1136,47 @@ public void testNonPositiveRecoveryRangeSizeRejected() { assertThatNonPositiveRecoveryRangeSizeRejected(SharedBlobCacheService.SHARED_CACHE_RECOVERY_RANGE_SIZE_SETTING); } + public void testUseFullRegionSize() throws IOException { + final long regionSize = size(randomIntBetween(1, 100)); + final long cacheSize = regionSize * randomIntBetween(1, 10); + + Settings settings = Settings.builder() + .put(NODE_NAME_SETTING.getKey(), "node") + .put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(regionSize).getStringRep()) + .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(cacheSize).getStringRep()) + .put("path.home", createTempDir()) + .build(); + final DeterministicTaskQueue taskQueue = new DeterministicTaskQueue(); + try ( + NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings)); + var cacheService = new SharedBlobCacheService<>( + environment, + settings, + taskQueue.getThreadPool(), + ThreadPool.Names.GENERIC, + ThreadPool.Names.GENERIC, + BlobCacheMetrics.NOOP + ) { + @Override + protected int computeCacheFileRegionSize(long fileLength, int region) { + // use full region + return super.getRegionSize(); + } + } + ) { + final var cacheKey = generateCacheKey(); + final var blobLength = randomLongBetween(1L, cacheSize); + + int regions = Math.toIntExact(blobLength / regionSize); + regions += (blobLength % regionSize == 0L ? 0L : 1L); + assertThat( + cacheService.computeCacheFileRegionSize(blobLength, randomFrom(regions)), + equalTo(BlobCacheUtils.toIntBytes(regionSize)) + ); + for (int region = 0; region < regions; region++) { + var cacheFileRegion = cacheService.get(cacheKey, blobLength, region); + assertThat(cacheFileRegion.tracker.getLength(), equalTo(regionSize)); + } + } + } } From 76cf92718660a886b6e25b9eec02e19f1af6c019 Mon Sep 17 00:00:00 2001 From: Moritz Mack Date: Wed, 21 Feb 2024 11:29:24 +0100 Subject: [PATCH 27/49] Fix Setting.exists if key doesn't equal key in settings keys. (#105652) Fix `exists` for affix settings and list settings if using the index syntax. In these cases the equality check fails. 
Additionally, this fixes inconsistencies in different implementations of `exists` and `existsOrFallbackExists` to make sure secure setting keys are consistently excluded unless using `SecureSetting.exists`. --- .../common/settings/SecureSetting.java | 9 ++- .../common/settings/Setting.java | 68 +++++++++++++------ .../common/settings/SettingTests.java | 52 +++++++++++++- 3 files changed, 106 insertions(+), 23 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/settings/SecureSetting.java b/server/src/main/java/org/elasticsearch/common/settings/SecureSetting.java index b69f05ea62fcb..6fe2c71c15e00 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/SecureSetting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/SecureSetting.java @@ -13,6 +13,7 @@ import java.io.InputStream; import java.security.GeneralSecurityException; +import java.util.Collections; import java.util.EnumSet; import java.util.Set; @@ -67,7 +68,13 @@ String innerGetRaw(final Settings settings) { @Override public boolean exists(Settings settings) { final SecureSettings secureSettings = settings.getSecureSettings(); - return secureSettings != null && secureSettings.getSettingNames().contains(getKey()); + return secureSettings != null && getRawKey().exists(secureSettings.getSettingNames(), Collections.emptySet()); + } + + @Override + public boolean exists(Settings.Builder builder) { + final SecureSettings secureSettings = builder.getSecureSettings(); + return secureSettings != null && getRawKey().exists(secureSettings.getSettingNames(), Collections.emptySet()); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java index f6dd5532a3aea..aa1c25a3f1952 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -15,7 +15,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.VersionId; import org.elasticsearch.common.logging.DeprecationCategory; -import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.MemorySizeValue; import org.elasticsearch.common.xcontent.XContentParserUtils; @@ -49,6 +48,7 @@ import java.util.function.Consumer; import java.util.function.Function; import java.util.function.IntFunction; +import java.util.function.Predicate; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; @@ -503,16 +503,13 @@ public T getDefault(Settings settings) { * @return true if the setting is present in the given settings instance, otherwise false */ public boolean exists(final Settings settings) { - return exists(settings.keySet(), settings.getSecureSettings()); + SecureSettings secureSettings = settings.getSecureSettings(); + return key.exists(settings.keySet(), secureSettings == null ? Collections.emptySet() : secureSettings.getSettingNames()); } public boolean exists(final Settings.Builder builder) { - return exists(builder.keys(), builder.getSecureSettings()); - } - - private boolean exists(final Set keys, final SecureSettings secureSettings) { - final String key = getKey(); - return keys.contains(key) && (secureSettings == null || secureSettings.getSettingNames().contains(key) == false); + SecureSettings secureSettings = builder.getSecureSettings(); + return key.exists(builder.keys(), secureSettings == null ? 
Collections.emptySet() : secureSettings.getSettingNames()); } /** @@ -522,7 +519,7 @@ private boolean exists(final Set keys, final SecureSettings secureSettin * @return true if the setting including fallback settings is present in the given settings instance, otherwise false */ public boolean existsOrFallbackExists(final Settings settings) { - return settings.keySet().contains(getKey()) || (fallbackSetting != null && fallbackSetting.existsOrFallbackExists(settings)); + return exists(settings) || (fallbackSetting != null && fallbackSetting.existsOrFallbackExists(settings)); } /** @@ -1164,21 +1161,12 @@ public String innerGetRaw(final Settings settings) { @Override public Settings get(Settings settings) { + // TODO should we be checking for deprecations here? Settings byPrefix = settings.getByPrefix(getKey()); validator.accept(byPrefix); return byPrefix; } - @Override - public boolean exists(Settings settings) { - for (String settingsKey : settings.keySet()) { - if (settingsKey.startsWith(key)) { - return true; - } - } - return false; - } - @Override public void diff(Settings.Builder builder, Settings source, Settings defaultSettings) { Set leftGroup = get(source).keySet(); @@ -2108,6 +2096,13 @@ private static AffixSetting affixKeySetting( public interface Key { boolean match(String key); + + /** + * Returns true if and only if this key is present in the given settings instance (ignoring given exclusions). + * @param keys keys to check + * @param exclusions exclusions to ignore + */ + boolean exists(Set keys, Set exclusions); } public static class SimpleKey implements Key { @@ -2139,9 +2134,15 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(key); } + + @Override + public boolean exists(Set keys, Set exclusions) { + return keys.contains(key) && exclusions.contains(key) == false; + } } public static final class GroupKey extends SimpleKey { + public GroupKey(String key) { super(key); if (key.endsWith(".") == false) { @@ -2151,7 +2152,15 @@ public GroupKey(String key) { @Override public boolean match(String toTest) { - return Regex.simpleMatch(key + "*", toTest); + return toTest != null && toTest.startsWith(key); + } + + @Override + public boolean exists(Set keys, Set exclusions) { + if (exclusions.isEmpty()) { + return keys.stream().anyMatch(this::match); + } + return keys.stream().filter(Predicate.not(exclusions::contains)).anyMatch(this::match); } } @@ -2167,6 +2176,17 @@ public ListKey(String key) { public boolean match(String toTest) { return pattern.matcher(toTest).matches(); } + + @Override + public boolean exists(Set keys, Set exclusions) { + if (keys.contains(key)) { + return exclusions.contains(key) == false; + } + if (exclusions.isEmpty()) { + return keys.stream().anyMatch(this::match); + } + return keys.stream().filter(Predicate.not(exclusions::contains)).anyMatch(this::match); + } } /** @@ -2224,6 +2244,14 @@ public boolean match(String key) { return pattern.matcher(key).matches(); } + @Override + public boolean exists(Set keys, Set exclusions) { + if (exclusions.isEmpty()) { + return keys.stream().anyMatch(this::match); + } + return keys.stream().filter(Predicate.not(exclusions::contains)).anyMatch(this::match); + } + /** * Does this key have a fallback prefix? 
*/ diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java index 1b3d741a6ea44..13f789a8b5fae 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -552,6 +552,13 @@ public void testGroups() { } } + public void testGroupKeyExists() { + Setting setting = Setting.groupSetting("foo.deprecated.", Property.NodeScope); + + assertFalse(setting.exists(Settings.EMPTY)); + assertTrue(setting.exists(Settings.builder().put("foo.deprecated.1.value", "1").build())); + } + public void testFilteredGroups() { AtomicReference ref = new AtomicReference<>(null); Setting setting = Setting.groupSetting("foo.bar.", Property.Filtered, Property.Dynamic); @@ -659,6 +666,22 @@ public void testCompositeValidator() { } + public void testListKeyExists() { + final Setting> listSetting = Setting.listSetting( + "foo", + Collections.singletonList("bar"), + Function.identity(), + Property.NodeScope + ); + Settings settings = Settings.builder().put("foo", "bar1,bar2").build(); + assertFalse(listSetting.exists(Settings.EMPTY)); + assertTrue(listSetting.exists(settings)); + + settings = Settings.builder().put("foo.0", "foo1").put("foo.1", "foo2").build(); + assertFalse(listSetting.exists(Settings.EMPTY)); + assertTrue(listSetting.exists(settings)); + } + public void testListSettingsDeprecated() { final Setting> deprecatedListSetting = Setting.listSetting( "foo.deprecated", @@ -673,9 +696,19 @@ public void testListSettingsDeprecated() { Function.identity(), Property.NodeScope ); - final Settings settings = Settings.builder() + Settings settings = Settings.builder() .put("foo.deprecated", "foo.deprecated1,foo.deprecated2") - .put("foo.deprecated", "foo.non_deprecated1,foo.non_deprecated2") + .put("foo.non_deprecated", "foo.non_deprecated1,foo.non_deprecated2") + .build(); + deprecatedListSetting.get(settings); + nonDeprecatedListSetting.get(settings); + assertSettingDeprecationsAndWarnings(new Setting[] { deprecatedListSetting }); + + settings = Settings.builder() + .put("foo.deprecated.0", "foo.deprecated1") + .put("foo.deprecated.1", "foo.deprecated2") + .put("foo.non_deprecated.0", "foo.non_deprecated1") + .put("foo.non_deprecated.1", "foo.non_deprecated2") .build(); deprecatedListSetting.get(settings); nonDeprecatedListSetting.get(settings); @@ -881,6 +914,21 @@ public void testAffixKeySetting() { assertFalse(listAffixSetting.match("foo")); } + public void testAffixKeyExists() { + Setting setting = Setting.affixKeySetting("foo.", "enable", (key) -> Setting.boolSetting(key, false, Property.NodeScope)); + + assertFalse(setting.exists(Settings.EMPTY)); + assertTrue(setting.exists(Settings.builder().put("foo.test.enable", "true").build())); + } + + public void testAffixKeyExistsWithSecure() { + Setting setting = Setting.affixKeySetting("foo.", "enable", (key) -> Setting.boolSetting(key, false, Property.NodeScope)); + + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("foo.test.enabled", "true"); + assertFalse(setting.exists(Settings.builder().setSecureSettings(secureSettings).build())); + } + public void testAffixSettingNamespaces() { Setting.AffixSetting setting = Setting.affixKeySetting( "foo.", From 550b5bb76f98ca35fed5a8cb76870f9a4be4d499 Mon Sep 17 00:00:00 2001 From: Tim Grein Date: Wed, 21 Feb 2024 11:29:44 +0100 Subject: [PATCH 28/49] [Connectors API] Unify 
enum error messages and add more tests (#105569) --- .../connector/ConnectorStatus.java | 2 +- .../connector/ConnectorSyncStatus.java | 4 +-- .../ConfigurationDisplayType.java | 2 +- .../configuration/ConfigurationFieldType.java | 2 +- .../ConfigurationValidationType.java | 2 +- .../connector/filtering/FilteringPolicy.java | 2 +- .../filtering/FilteringRuleCondition.java | 2 +- .../filtering/FilteringValidationState.java | 2 +- .../ConnectorSyncJobTriggerMethod.java | 4 ++- .../syncjob/ConnectorSyncJobType.java | 2 +- .../connector/ConnectorStatusTests.java | 26 ++++++++++++++++ .../connector/ConnectorSyncStatusTests.java | 26 ++++++++++++++++ .../connector/ConnectorTestUtils.java | 16 +++++----- .../ConfigurationDisplayTypeTests.java | 26 ++++++++++++++++ .../ConfigurationFieldTypeTests.java | 27 +++++++++++++++++ .../ConfigurationValidationTypeTests.java | 27 +++++++++++++++++ .../filtering/FilteringPolicyTests.java | 27 +++++++++++++++++ .../FilteringRuleConditionTests.java | 30 +++++++++++++++++++ .../FilteringValidationStateTests.java | 30 +++++++++++++++++++ 19 files changed, 240 insertions(+), 19 deletions(-) create mode 100644 x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorStatusTests.java create mode 100644 x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorSyncStatusTests.java create mode 100644 x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationDisplayTypeTests.java create mode 100644 x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationFieldTypeTests.java create mode 100644 x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationValidationTypeTests.java create mode 100644 x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/filtering/FilteringPolicyTests.java create mode 100644 x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/filtering/FilteringRuleConditionTests.java create mode 100644 x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/filtering/FilteringValidationStateTests.java diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorStatus.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorStatus.java index 5ebbab668890b..b64da63adcd50 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorStatus.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorStatus.java @@ -37,6 +37,6 @@ public static ConnectorStatus connectorStatus(String status) { return connectorStatus; } } - throw new IllegalArgumentException("Unknown ConnectorStatus: " + status); + throw new IllegalArgumentException("Unknown " + ConnectorStatus.class.getSimpleName() + " [" + status + "]."); } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorSyncStatus.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorSyncStatus.java index 30fca79f78876..eedb585e3ad6c 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorSyncStatus.java +++ 
b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorSyncStatus.java @@ -37,7 +37,7 @@ public static ConnectorSyncStatus fromString(String syncStatusString) { } } - throw new IllegalArgumentException("Unknown sync status '" + syncStatusString + "'."); + throw new IllegalArgumentException("Unknown " + ConnectorSyncStatus.class.getSimpleName() + " [" + syncStatusString + "]."); } @Override @@ -51,6 +51,6 @@ public static ConnectorSyncStatus connectorSyncStatus(String status) { return connectorSyncStatus; } } - throw new IllegalArgumentException("Unknown ConnectorSyncStatus: " + status); + throw new IllegalArgumentException("Unknown " + ConnectorSyncStatus.class.getSimpleName() + " [" + status + "]."); } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationDisplayType.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationDisplayType.java index df8dee04d61b9..c6c87d18a4939 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationDisplayType.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationDisplayType.java @@ -28,6 +28,6 @@ public static ConfigurationDisplayType displayType(String type) { return displayType; } } - throw new IllegalArgumentException("Unknown DisplayType: " + type); + throw new IllegalArgumentException("Unknown " + ConfigurationDisplayType.class.getSimpleName() + " [" + type + "]."); } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationFieldType.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationFieldType.java index 20162735985c6..2d59f23e7aec8 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationFieldType.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationFieldType.java @@ -30,6 +30,6 @@ public static ConfigurationFieldType fieldType(String type) { return fieldType; } } - throw new IllegalArgumentException("Unknown FieldType: " + type); + throw new IllegalArgumentException("Unknown " + ConfigurationFieldType.class.getSimpleName() + " [" + type + "]."); } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationValidationType.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationValidationType.java index 7c064014a95ba..182be36a473f7 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationValidationType.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationValidationType.java @@ -27,6 +27,6 @@ public static ConfigurationValidationType validationType(String type) { return displayType; } } - throw new IllegalArgumentException("Unknown ValidationType: " + type); + throw new IllegalArgumentException("Unknown " + ConfigurationValidationType.class.getSimpleName() + " [" + type + "]."); } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringPolicy.java 
b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringPolicy.java index 48170cfc8fae4..a59a7e3fd4831 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringPolicy.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringPolicy.java @@ -24,6 +24,6 @@ public static FilteringPolicy filteringPolicy(String policy) { return filteringPolicy; } } - throw new IllegalArgumentException("Unknown FilteringPolicy: " + policy); + throw new IllegalArgumentException("Unknown " + FilteringPolicy.class.getSimpleName() + " [" + policy + "]."); } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringRuleCondition.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringRuleCondition.java index 967107961b0d4..2d640d58732dd 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringRuleCondition.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringRuleCondition.java @@ -33,6 +33,6 @@ public static FilteringRuleCondition filteringRuleCondition(String condition) { return filteringRuleCondition; } } - throw new IllegalArgumentException("Unknown FilteringRuleCondition: " + condition); + throw new IllegalArgumentException("Unknown " + FilteringRuleCondition.class.getSimpleName() + " [" + condition + "]."); } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringValidationState.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringValidationState.java index e2d370e3b9ed8..d033a1189ae00 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringValidationState.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringValidationState.java @@ -25,6 +25,6 @@ public static FilteringValidationState filteringValidationState(String validatio return filteringValidationState; } } - throw new IllegalArgumentException("Unknown FilteringValidationState: " + validationState); + throw new IllegalArgumentException("Unknown " + FilteringValidationState.class.getSimpleName() + " [" + validationState + "]."); } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTriggerMethod.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTriggerMethod.java index 110748795fb77..890c8db018cdf 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTriggerMethod.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTriggerMethod.java @@ -20,7 +20,9 @@ public static ConnectorSyncJobTriggerMethod fromString(String triggerMethodStrin } } - throw new IllegalArgumentException("Unknown trigger method '" + triggerMethodString + "'."); + throw new IllegalArgumentException( + "Unknown " + ConnectorSyncJobTriggerMethod.class.getSimpleName() + " [" + triggerMethodString + "]." 
+ ); } @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobType.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobType.java index 2d0a18da6fec5..7a6dc22f409cd 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobType.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobType.java @@ -21,7 +21,7 @@ public static ConnectorSyncJobType fromString(String syncJobTypeString) { } } - throw new IllegalArgumentException("Unknown sync job type '" + syncJobTypeString + "'."); + throw new IllegalArgumentException("Unknown " + ConnectorSyncJobType.class.getSimpleName() + " [" + syncJobTypeString + "]."); } @Override diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorStatusTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorStatusTests.java new file mode 100644 index 0000000000000..a08a015158198 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorStatusTests.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; + +public class ConnectorStatusTests extends ESTestCase { + + public void testConnectorStatus_WithValidConnectorStatusString() { + ConnectorStatus connectorStatus = ConnectorTestUtils.getRandomConnectorStatus(); + + assertThat(ConnectorStatus.connectorStatus(connectorStatus.toString()), equalTo(connectorStatus)); + } + + public void testConnectorStatus_WithInvalidConnectorStatusString_ExpectIllegalArgumentException() { + assertThrows(IllegalArgumentException.class, () -> ConnectorStatus.connectorStatus("invalid connector status")); + } + +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorSyncStatusTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorSyncStatusTests.java new file mode 100644 index 0000000000000..ae341eb1f8d2f --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorSyncStatusTests.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; + +public class ConnectorSyncStatusTests extends ESTestCase { + + public void testConnectorSyncStatus_WithValidConnectorSyncStatusString() { + ConnectorSyncStatus connectorSyncStatus = ConnectorTestUtils.getRandomSyncStatus(); + + assertThat(ConnectorSyncStatus.connectorSyncStatus(connectorSyncStatus.toString()), equalTo(connectorSyncStatus)); + } + + public void testConnectorSyncStatus_WithInvalidConnectorSyncStatusString_ExpectIllegalArgumentException() { + assertThrows(IllegalArgumentException.class, () -> ConnectorSyncStatus.connectorSyncStatus("invalid connector sync status")); + } + +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java index 3e17c33834989..6d94cdc3ebe35 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java @@ -152,7 +152,7 @@ public static ConnectorFiltering getRandomConnectorFiltering() { .setId(randomAlphaOfLength(10)) .setOrder(randomInt()) .setPolicy(getRandomFilteringPolicy()) - .setRule(getRandomFilteringRule()) + .setRule(getRandomFilteringRuleCondition()) .setUpdatedAt(currentTimestamp) .setValue(randomAlphaOfLength(10)) .build() @@ -180,7 +180,7 @@ public static ConnectorFiltering getRandomConnectorFiltering() { .setId(randomAlphaOfLength(10)) .setOrder(randomInt()) .setPolicy(getRandomFilteringPolicy()) - .setRule(getRandomFilteringRule()) + .setRule(getRandomFilteringRuleCondition()) .setUpdatedAt(currentTimestamp) .setValue(randomAlphaOfLength(10)) .build() @@ -378,32 +378,32 @@ public static ConnectorStatus getRandomConnectorStatus() { return values[randomInt(values.length - 1)]; } - private static FilteringPolicy getRandomFilteringPolicy() { + public static FilteringPolicy getRandomFilteringPolicy() { FilteringPolicy[] values = FilteringPolicy.values(); return values[randomInt(values.length - 1)]; } - private static FilteringRuleCondition getRandomFilteringRule() { + public static FilteringRuleCondition getRandomFilteringRuleCondition() { FilteringRuleCondition[] values = FilteringRuleCondition.values(); return values[randomInt(values.length - 1)]; } - private static FilteringValidationState getRandomFilteringValidationState() { + public static FilteringValidationState getRandomFilteringValidationState() { FilteringValidationState[] values = FilteringValidationState.values(); return values[randomInt(values.length - 1)]; } - private static ConfigurationDisplayType getRandomConfigurationDisplayType() { + public static ConfigurationDisplayType getRandomConfigurationDisplayType() { ConfigurationDisplayType[] values = ConfigurationDisplayType.values(); return values[randomInt(values.length - 1)]; } - private static ConfigurationFieldType getRandomConfigurationFieldType() { + public static ConfigurationFieldType getRandomConfigurationFieldType() { ConfigurationFieldType[] values = ConfigurationFieldType.values(); return values[randomInt(values.length - 1)]; } - private static ConfigurationValidationType getRandomConfigurationValidationType() { + public static ConfigurationValidationType getRandomConfigurationValidationType() { 
ConfigurationValidationType[] values = ConfigurationValidationType.values(); return values[randomInt(values.length - 1)]; } diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationDisplayTypeTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationDisplayTypeTests.java new file mode 100644 index 0000000000000..e9fb15ba33082 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationDisplayTypeTests.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector.configuration; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.application.connector.ConnectorTestUtils; + +import static org.hamcrest.Matchers.equalTo; + +public class ConfigurationDisplayTypeTests extends ESTestCase { + + public void testDisplayType_WithValidConfigurationDisplayTypeString() { + ConfigurationDisplayType displayType = ConnectorTestUtils.getRandomConfigurationDisplayType(); + + assertThat(ConfigurationDisplayType.displayType(displayType.toString()), equalTo(displayType)); + } + + public void testDisplayType_WithInvalidConfigurationDisplayTypeString_ExpectIllegalArgumentException() { + expectThrows(IllegalArgumentException.class, () -> ConfigurationDisplayType.displayType("invalid configuration display type")); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationFieldTypeTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationFieldTypeTests.java new file mode 100644 index 0000000000000..eeab9e77c586f --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationFieldTypeTests.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector.configuration; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.application.connector.ConnectorTestUtils; + +import static org.hamcrest.Matchers.equalTo; + +public class ConfigurationFieldTypeTests extends ESTestCase { + + public void testFieldType_WithValidConfigurationFieldTypeString() { + ConfigurationFieldType fieldType = ConnectorTestUtils.getRandomConfigurationFieldType(); + + assertThat(ConfigurationFieldType.fieldType(fieldType.toString()), equalTo(fieldType)); + } + + public void testFieldType_WithInvalidConfigurationFieldTypeString_ExpectIllegalArgumentException() { + assertThrows(IllegalArgumentException.class, () -> ConfigurationFieldType.fieldType("invalid field type")); + } + +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationValidationTypeTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationValidationTypeTests.java new file mode 100644 index 0000000000000..69b845d0c99d8 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationValidationTypeTests.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector.configuration; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.application.connector.ConnectorTestUtils; + +import static org.hamcrest.Matchers.equalTo; + +public class ConfigurationValidationTypeTests extends ESTestCase { + + public void testValidationType_WithValidConfigurationValidationTypeString() { + ConfigurationValidationType validationType = ConnectorTestUtils.getRandomConfigurationValidationType(); + + assertThat(ConfigurationValidationType.validationType(validationType.toString()), equalTo(validationType)); + } + + public void testValidationType_WithInvalidConfigurationValidationTypeString_ExpectIllegalArgumentException() { + assertThrows(IllegalArgumentException.class, () -> ConfigurationValidationType.validationType("invalid validation type")); + } + +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/filtering/FilteringPolicyTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/filtering/FilteringPolicyTests.java new file mode 100644 index 0000000000000..4bf53661caf95 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/filtering/FilteringPolicyTests.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector.filtering; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.application.connector.ConnectorTestUtils; + +import static org.hamcrest.Matchers.equalTo; + +public class FilteringPolicyTests extends ESTestCase { + + public void testFilteringPolicy_WithValidFilteringPolicyString() { + FilteringPolicy filteringPolicy = ConnectorTestUtils.getRandomFilteringPolicy(); + + assertThat(FilteringPolicy.filteringPolicy(filteringPolicy.toString()), equalTo(filteringPolicy)); + } + + public void testFilteringPolicy_WithInvalidFilteringPolicyString_ExpectIllegalArgumentException() { + assertThrows(IllegalArgumentException.class, () -> FilteringPolicy.filteringPolicy("invalid filtering policy")); + } + +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/filtering/FilteringRuleConditionTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/filtering/FilteringRuleConditionTests.java new file mode 100644 index 0000000000000..8d8ffaf4fe02c --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/filtering/FilteringRuleConditionTests.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector.filtering; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.application.connector.ConnectorTestUtils; + +import static org.hamcrest.Matchers.equalTo; + +public class FilteringRuleConditionTests extends ESTestCase { + + public void testFilteringRuleCondition_WithValidFilteringRuleConditionString() { + FilteringRuleCondition ruleCondition = ConnectorTestUtils.getRandomFilteringRuleCondition(); + + assertThat(FilteringRuleCondition.filteringRuleCondition(ruleCondition.toString()), equalTo(ruleCondition)); + } + + public void testFilteringRuleCondition_WithInvalidFilteringRuleConditionString_ExpectIllegalArgumentException() { + assertThrows( + IllegalArgumentException.class, + () -> FilteringRuleCondition.filteringRuleCondition("invalid filtering rule condition") + ); + } + +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/filtering/FilteringValidationStateTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/filtering/FilteringValidationStateTests.java new file mode 100644 index 0000000000000..67cd86b2b8aef --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/filtering/FilteringValidationStateTests.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector.filtering; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.application.connector.ConnectorTestUtils; + +import static org.hamcrest.Matchers.equalTo; + +public class FilteringValidationStateTests extends ESTestCase { + + public void testFilteringValidationState_WithValidFilteringValidationStateString() { + FilteringValidationState validationState = ConnectorTestUtils.getRandomFilteringValidationState(); + + assertThat(FilteringValidationState.filteringValidationState(validationState.toString()), equalTo(validationState)); + } + + public void testFilteringValidationState_WithInvalidFilteringValidationStateString_ExpectIllegalArgumentException() { + assertThrows( + IllegalArgumentException.class, + () -> FilteringValidationState.filteringValidationState("invalid filtering validation state") + ); + } + +} From 5d4bb6ef46523b5c0e914a2157ec30db23bb3b25 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 21 Feb 2024 10:36:26 +0000 Subject: [PATCH 29/49] Slight readability improvement in EsAbortPolicy (#105680) Reviewing #105666 I was tripped up by how the check for force execution is split into two nested `if` statements, the first of which now has another condition. By grouping the conditions like this it makes it look like `AbstractRunnable` is somehow not special on a shut-down executor. It is still special, but that specialness is implemented elsewhere in `EsThreadPoolExecutor#execute`. This commit regroups the conditions and extracts a method to limit the scope of the `AbstractRunnable`. --- .../common/util/concurrent/EsAbortPolicy.java | 30 ++++++++++--------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java index 5dbacbb16aeea..0f77326967ebb 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java @@ -14,24 +14,26 @@ public class EsAbortPolicy extends EsRejectedExecutionHandler { @Override public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) { - if (executor.isShutdown() == false && r instanceof AbstractRunnable abstractRunnable) { - if (abstractRunnable.isForceExecution()) { - if (executor.getQueue() instanceof SizeBlockingQueue sizeBlockingQueue) { - try { - sizeBlockingQueue.forcePut(r); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new IllegalStateException("forced execution, but got interrupted", e); - } - if ((executor.isShutdown() && sizeBlockingQueue.remove(r)) == false) { - return; - } // else fall through and reject the task since the executor is shut down - } else { - throw new IllegalStateException("expected but did not find SizeBlockingQueue: " + executor); + if (executor.isShutdown() == false && isForceExecution(r)) { + if (executor.getQueue() instanceof SizeBlockingQueue sizeBlockingQueue) { + try { + sizeBlockingQueue.forcePut(r); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new IllegalStateException("forced execution, but got interrupted", e); } + if ((executor.isShutdown() && sizeBlockingQueue.remove(r)) == false) { + return; + } // else fall through and reject the task since the executor is shut down + } else { + throw new IllegalStateException("expected but did not find SizeBlockingQueue: " 
+ executor); } } incrementRejections(); throw newRejectedException(r, executor, executor.isShutdown()); } + + private static boolean isForceExecution(Runnable r) { + return r instanceof AbstractRunnable abstractRunnable && abstractRunnable.isForceExecution(); + } } From 48ceed06616eaaa0bd999ae845bdf63733a5ccec Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Wed, 21 Feb 2024 11:54:43 +0100 Subject: [PATCH 30/49] Additional roles and privileges APIs customization (#105503) This PR folds together the following: * Support fetching native-only roles, i.e., excluding reserved roles for the Get Roles API * Switch the Delete Roles API to public protection scope * Support injecting a response translator for the Get Builtin Privileges API Depends on: https://github.com/elastic/elasticsearch/pull/105336 Relates: ES-7826, ES-7828, ES-7845 --- .../GetBuiltinPrivilegesRequest.java | 7 -- .../GetBuiltinPrivilegesResponse.java | 28 +++--- ...etBuiltinPrivilegesResponseTranslator.java | 20 +++++ .../security/action/role/GetRolesRequest.java | 18 ++-- .../action/role/GetRolesRequestBuilder.java | 5 ++ .../action/role/GetRolesResponse.java | 18 +--- .../GetBuiltinPrivilegesResponseTests.java | 32 ------- .../xpack/security/Security.java | 46 +++++----- .../TransportGetBuiltinPrivilegesAction.java | 15 +--- .../action/role/TransportGetRolesAction.java | 54 +++++++---- .../RestGetBuiltinPrivilegesAction.java | 49 ++++++++-- .../action/role/RestDeleteRoleAction.java | 2 +- .../rest/action/role/RestGetRolesAction.java | 65 +++++++++----- .../role/TransportGetRolesActionTests.java | 89 +++++++++++++++++++ 14 files changed, 285 insertions(+), 163 deletions(-) create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesResponseTranslator.java delete mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesResponseTests.java diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesRequest.java index 1fdf8ee35d1b6..bbcd2bbe255ce 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesRequest.java @@ -8,19 +8,12 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.common.io.stream.StreamInput; - -import java.io.IOException; /** * Request to retrieve built-in (cluster/index) privileges. 
*/ public final class GetBuiltinPrivilegesRequest extends ActionRequest { - public GetBuiltinPrivilegesRequest(StreamInput in) throws IOException { - super(in); - } - public GetBuiltinPrivilegesRequest() {} @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesResponse.java index d4d99d0b25b7d..328089a73b2f5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesResponse.java @@ -7,8 +7,8 @@ package org.elasticsearch.xpack.core.security.action.privilege; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; @@ -17,32 +17,25 @@ import java.util.Objects; /** - * Response containing one or more application privileges retrieved from the security index + * Response containing built-in (cluster/index) privileges */ public final class GetBuiltinPrivilegesResponse extends ActionResponse { - private String[] clusterPrivileges; - private String[] indexPrivileges; - - public GetBuiltinPrivilegesResponse(String[] clusterPrivileges, String[] indexPrivileges) { - this.clusterPrivileges = Objects.requireNonNull(clusterPrivileges, "Cluster privileges cannot be null"); - this.indexPrivileges = Objects.requireNonNull(indexPrivileges, "Index privileges cannot be null"); - } + private final String[] clusterPrivileges; + private final String[] indexPrivileges; public GetBuiltinPrivilegesResponse(Collection clusterPrivileges, Collection indexPrivileges) { - this(clusterPrivileges.toArray(Strings.EMPTY_ARRAY), indexPrivileges.toArray(Strings.EMPTY_ARRAY)); + this.clusterPrivileges = Objects.requireNonNull( + clusterPrivileges.toArray(Strings.EMPTY_ARRAY), + "Cluster privileges cannot be null" + ); + this.indexPrivileges = Objects.requireNonNull(indexPrivileges.toArray(Strings.EMPTY_ARRAY), "Index privileges cannot be null"); } public GetBuiltinPrivilegesResponse() { this(Collections.emptySet(), Collections.emptySet()); } - public GetBuiltinPrivilegesResponse(StreamInput in) throws IOException { - super(in); - this.clusterPrivileges = in.readStringArray(); - this.indexPrivileges = in.readStringArray(); - } - public String[] getClusterPrivileges() { return clusterPrivileges; } @@ -53,7 +46,6 @@ public String[] getIndexPrivileges() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeStringArray(clusterPrivileges); - out.writeStringArray(indexPrivileges); + TransportAction.localOnly(); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesResponseTranslator.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesResponseTranslator.java new file mode 100644 index 0000000000000..2d018ae2f1b2f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesResponseTranslator.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.action.privilege; + +public interface GetBuiltinPrivilegesResponseTranslator { + + GetBuiltinPrivilegesResponse translate(GetBuiltinPrivilegesResponse response, boolean restrictResponse); + + class Default implements GetBuiltinPrivilegesResponseTranslator { + public GetBuiltinPrivilegesResponse translate(GetBuiltinPrivilegesResponse response, boolean restrictResponse) { + assert false == restrictResponse; + return response; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesRequest.java index f5239f18c256a..310bd6c707796 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesRequest.java @@ -9,12 +9,12 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.action.support.TransportAction.localOnly; /** * Request to retrieve roles from the security index @@ -23,10 +23,7 @@ public class GetRolesRequest extends ActionRequest { private String[] names = Strings.EMPTY_ARRAY; - public GetRolesRequest(StreamInput in) throws IOException { - super(in); - names = in.readStringArray(); - } + private boolean nativeOnly = false; public GetRolesRequest() {} @@ -47,9 +44,16 @@ public String[] names() { return names; } + public void nativeOnly(boolean nativeOnly) { + this.nativeOnly = nativeOnly; + } + + public boolean nativeOnly() { + return this.nativeOnly; + } + @Override public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeStringArray(names); + localOnly(); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesRequestBuilder.java index 693a497d05087..bd3b5784e5ba0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesRequestBuilder.java @@ -22,4 +22,9 @@ public GetRolesRequestBuilder names(String... 
names) { request.names(names); return this; } + + public GetRolesRequestBuilder nativeOnly(boolean nativeOnly) { + request.nativeOnly(nativeOnly); + return this; + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesResponse.java index 86e74952c5956..e00c85749ca76 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesResponse.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.core.security.action.role; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; @@ -18,16 +18,7 @@ */ public class GetRolesResponse extends ActionResponse { - private RoleDescriptor[] roles; - - public GetRolesResponse(StreamInput in) throws IOException { - super(in); - int size = in.readVInt(); - roles = new RoleDescriptor[size]; - for (int i = 0; i < size; i++) { - roles[i] = new RoleDescriptor(in); - } - } + private final RoleDescriptor[] roles; public GetRolesResponse(RoleDescriptor... roles) { this.roles = roles; @@ -43,9 +34,6 @@ public boolean hasRoles() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(roles.length); - for (RoleDescriptor role : roles) { - role.writeTo(out); - } + TransportAction.localOnly(); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesResponseTests.java deleted file mode 100644 index c8d14a4d71db1..0000000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/GetBuiltinPrivilegesResponseTests.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.core.security.action.privilege; - -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.test.ESTestCase; -import org.hamcrest.Matchers; - -import java.io.IOException; - -public class GetBuiltinPrivilegesResponseTests extends ESTestCase { - - public void testSerialization() throws IOException { - final String[] cluster = generateRandomStringArray(8, randomIntBetween(3, 8), false, true); - final String[] index = generateRandomStringArray(8, randomIntBetween(3, 8), false, true); - final GetBuiltinPrivilegesResponse original = new GetBuiltinPrivilegesResponse(cluster, index); - - final BytesStreamOutput out = new BytesStreamOutput(); - original.writeTo(out); - - final GetBuiltinPrivilegesResponse copy = new GetBuiltinPrivilegesResponse(out.bytes().streamInput()); - - assertThat(copy.getClusterPrivileges(), Matchers.equalTo(cluster)); - assertThat(copy.getIndexPrivileges(), Matchers.equalTo(index)); - } - -} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 763eb2616175c..3beff69849a58 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -135,6 +135,7 @@ import org.elasticsearch.xpack.core.security.action.privilege.ClearPrivilegesCacheAction; import org.elasticsearch.xpack.core.security.action.privilege.DeletePrivilegesAction; import org.elasticsearch.xpack.core.security.action.privilege.GetBuiltinPrivilegesAction; +import org.elasticsearch.xpack.core.security.action.privilege.GetBuiltinPrivilegesResponseTranslator; import org.elasticsearch.xpack.core.security.action.privilege.GetPrivilegesAction; import org.elasticsearch.xpack.core.security.action.privilege.PutPrivilegesAction; import org.elasticsearch.xpack.core.security.action.profile.ActivateProfileAction; @@ -560,6 +561,7 @@ public class Security extends Plugin private final SetOnce scriptServiceReference = new SetOnce<>(); private final SetOnce operatorOnlyRegistry = new SetOnce<>(); private final SetOnce putRoleRequestBuilderFactory = new SetOnce<>(); + private final SetOnce getBuiltinPrivilegesResponseTranslator = new SetOnce<>(); private final SetOnce fileRolesStore = new SetOnce<>(); private final SetOnce operatorPrivilegesService = new SetOnce<>(); private final SetOnce reservedRoleMappingAction = new SetOnce<>(); @@ -820,6 +822,10 @@ Collection createComponents( putRoleRequestBuilderFactory.set(new PutRoleRequestBuilderFactory.Default()); } + if (getBuiltinPrivilegesResponseTranslator.get() == null) { + getBuiltinPrivilegesResponseTranslator.set(new GetBuiltinPrivilegesResponseTranslator.Default()); + } + final Map, ActionListener>>> customRoleProviders = new LinkedHashMap<>(); for (SecurityExtension extension : securityExtensions) { final List, ActionListener>> providers = extension.getRolesProviders( @@ -1446,7 +1452,7 @@ public List getRestHandlers( new RestOpenIdConnectPrepareAuthenticationAction(settings, getLicenseState()), new RestOpenIdConnectAuthenticateAction(settings, getLicenseState()), new RestOpenIdConnectLogoutAction(settings, getLicenseState()), - new RestGetBuiltinPrivilegesAction(settings, getLicenseState()), + new RestGetBuiltinPrivilegesAction(settings, getLicenseState(), getBuiltinPrivilegesResponseTranslator.get()), new RestGetPrivilegesAction(settings, 
getLicenseState()), new RestPutPrivilegesAction(settings, getLicenseState()), new RestDeletePrivilegesAction(settings, getLicenseState()), @@ -2030,33 +2036,21 @@ public void accept(DiscoveryNode node, ClusterState state) { @Override public void loadExtensions(ExtensionLoader loader) { securityExtensions.addAll(loader.loadExtensions(SecurityExtension.class)); + loadSingletonExtensionAndSetOnce(loader, operatorOnlyRegistry, OperatorOnlyRegistry.class); + loadSingletonExtensionAndSetOnce(loader, putRoleRequestBuilderFactory, PutRoleRequestBuilderFactory.class); + loadSingletonExtensionAndSetOnce(loader, getBuiltinPrivilegesResponseTranslator, GetBuiltinPrivilegesResponseTranslator.class); + } - // operator registry SPI - List operatorOnlyRegistries = loader.loadExtensions(OperatorOnlyRegistry.class); - if (operatorOnlyRegistries.size() > 1) { - throw new IllegalStateException(OperatorOnlyRegistry.class + " may not have multiple implementations"); - } else if (operatorOnlyRegistries.size() == 1) { - OperatorOnlyRegistry operatorOnlyRegistry = operatorOnlyRegistries.get(0); - this.operatorOnlyRegistry.set(operatorOnlyRegistry); - logger.debug( - "Loaded implementation [{}] for interface OperatorOnlyRegistry", - operatorOnlyRegistry.getClass().getCanonicalName() - ); - } - - List builderFactories = loader.loadExtensions(PutRoleRequestBuilderFactory.class); - if (builderFactories.size() > 1) { - throw new IllegalStateException(PutRoleRequestBuilderFactory.class + " may not have multiple implementations"); - } else if (builderFactories.size() == 1) { - PutRoleRequestBuilderFactory builderFactory = builderFactories.get(0); - this.putRoleRequestBuilderFactory.set(builderFactory); - logger.debug( - "Loaded implementation [{}] for interface [{}]", - builderFactory.getClass().getCanonicalName(), - PutRoleRequestBuilderFactory.class - ); + private void loadSingletonExtensionAndSetOnce(ExtensionLoader loader, SetOnce setOnce, Class clazz) { + final List loaded = loader.loadExtensions(clazz); + if (loaded.size() > 1) { + throw new IllegalStateException(clazz + " may not have multiple implementations"); + } else if (loaded.size() == 1) { + final T singleLoaded = loaded.get(0); + setOnce.set(singleLoaded); + logger.debug("Loaded implementation [{}] for interface [{}]", singleLoaded.getClass().getCanonicalName(), clazz); } else { - logger.debug("Will fall back on default implementation for interface [{}]", PutRoleRequestBuilderFactory.class); + logger.debug("Will fall back on default implementation for interface [{}]", clazz); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/privilege/TransportGetBuiltinPrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/privilege/TransportGetBuiltinPrivilegesAction.java index 6494c5b7c9230..8ea8ec3e0dcd9 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/privilege/TransportGetBuiltinPrivilegesAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/privilege/TransportGetBuiltinPrivilegesAction.java @@ -8,9 +8,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.tasks.Task; import 
org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.security.action.privilege.GetBuiltinPrivilegesAction; @@ -22,19 +21,13 @@ import java.util.TreeSet; /** - * Transport action to retrieve one or more application privileges from the security index + * Transport action to retrieve built-in (cluster/index) privileges */ -public class TransportGetBuiltinPrivilegesAction extends HandledTransportAction { +public class TransportGetBuiltinPrivilegesAction extends TransportAction { @Inject public TransportGetBuiltinPrivilegesAction(ActionFilters actionFilters, TransportService transportService) { - super( - GetBuiltinPrivilegesAction.NAME, - transportService, - actionFilters, - GetBuiltinPrivilegesRequest::new, - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + super(GetBuiltinPrivilegesAction.NAME, actionFilters, transportService.getTaskManager()); } @Override diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesAction.java index 3d63364f85664..eadae3bfc0baf 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesAction.java @@ -8,9 +8,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.security.action.role.GetRolesAction; @@ -21,11 +20,14 @@ import org.elasticsearch.xpack.security.authz.store.NativeRolesStore; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.stream.Collectors; -public class TransportGetRolesAction extends HandledTransportAction { +public class TransportGetRolesAction extends TransportAction { private final NativeRolesStore nativeRolesStore; private final ReservedRolesStore reservedRolesStore; @@ -37,7 +39,7 @@ public TransportGetRolesAction( TransportService transportService, ReservedRolesStore reservedRolesStore ) { - super(GetRolesAction.NAME, transportService, actionFilters, GetRolesRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); + super(GetRolesAction.NAME, actionFilters, transportService.getTaskManager()); this.nativeRolesStore = nativeRolesStore; this.reservedRolesStore = reservedRolesStore; } @@ -46,15 +48,23 @@ public TransportGetRolesAction( protected void doExecute(Task task, final GetRolesRequest request, final ActionListener listener) { final String[] requestedRoles = request.names(); final boolean specificRolesRequested = requestedRoles != null && requestedRoles.length > 0; - final Set rolesToSearchFor = new HashSet<>(); - final List roles = new ArrayList<>(); + if (request.nativeOnly()) { + final Set rolesToSearchFor = specificRolesRequested + ? 
Arrays.stream(requestedRoles).collect(Collectors.toSet()) + : Collections.emptySet(); + getNativeRoles(rolesToSearchFor, listener); + return; + } + + final Set rolesToSearchFor = new HashSet<>(); + final List reservedRoles = new ArrayList<>(); if (specificRolesRequested) { for (String role : requestedRoles) { if (ReservedRolesStore.isReserved(role)) { RoleDescriptor rd = ReservedRolesStore.roleDescriptor(role); if (rd != null) { - roles.add(rd); + reservedRoles.add(rd); } else { listener.onFailure(new IllegalStateException("unable to obtain reserved role [" + role + "]")); return; @@ -64,21 +74,29 @@ protected void doExecute(Task task, final GetRolesRequest request, final ActionL } } } else { - roles.addAll(ReservedRolesStore.roleDescriptors()); + reservedRoles.addAll(ReservedRolesStore.roleDescriptors()); } if (specificRolesRequested && rolesToSearchFor.isEmpty()) { - // specific roles were requested but they were built in only, no need to hit the store - listener.onResponse(new GetRolesResponse(roles.toArray(new RoleDescriptor[roles.size()]))); + // specific roles were requested, but they were built in only, no need to hit the store + listener.onResponse(new GetRolesResponse(reservedRoles.toArray(new RoleDescriptor[0]))); } else { - nativeRolesStore.getRoleDescriptors(rolesToSearchFor, ActionListener.wrap((retrievalResult) -> { - if (retrievalResult.isSuccess()) { - roles.addAll(retrievalResult.getDescriptors()); - listener.onResponse(new GetRolesResponse(roles.toArray(new RoleDescriptor[roles.size()]))); - } else { - listener.onFailure(retrievalResult.getFailure()); - } - }, listener::onFailure)); + getNativeRoles(rolesToSearchFor, reservedRoles, listener); } } + + private void getNativeRoles(Set rolesToSearchFor, ActionListener listener) { + getNativeRoles(rolesToSearchFor, new ArrayList<>(), listener); + } + + private void getNativeRoles(Set rolesToSearchFor, List foundRoles, ActionListener listener) { + nativeRolesStore.getRoleDescriptors(rolesToSearchFor, ActionListener.wrap((retrievalResult) -> { + if (retrievalResult.isSuccess()) { + foundRoles.addAll(retrievalResult.getDescriptors()); + listener.onResponse(new GetRolesResponse(foundRoles.toArray(new RoleDescriptor[0]))); + } else { + listener.onFailure(retrievalResult.getFailure()); + } + }, listener::onFailure)); + } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestGetBuiltinPrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestGetBuiltinPrivilegesAction.java index fe3b5cab38444..334e560312db1 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestGetBuiltinPrivilegesAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestGetBuiltinPrivilegesAction.java @@ -6,7 +6,11 @@ */ package org.elasticsearch.xpack.security.rest.action.privilege; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.RestRequest; @@ -19,6 +23,8 @@ import org.elasticsearch.xpack.core.security.action.privilege.GetBuiltinPrivilegesAction; import 
org.elasticsearch.xpack.core.security.action.privilege.GetBuiltinPrivilegesRequest; import org.elasticsearch.xpack.core.security.action.privilege.GetBuiltinPrivilegesResponse; +import org.elasticsearch.xpack.core.security.action.privilege.GetBuiltinPrivilegesResponseTranslator; +import org.elasticsearch.xpack.security.authz.store.NativeRolesStore; import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; @@ -27,13 +33,21 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; /** - * Rest action to retrieve an application privilege from the security index + * Rest action to retrieve built-in (cluster/index) privileges */ -@ServerlessScope(Scope.INTERNAL) +@ServerlessScope(Scope.PUBLIC) public class RestGetBuiltinPrivilegesAction extends SecurityBaseRestHandler { - public RestGetBuiltinPrivilegesAction(Settings settings, XPackLicenseState licenseState) { + private static final Logger logger = LogManager.getLogger(RestGetBuiltinPrivilegesAction.class); + private final GetBuiltinPrivilegesResponseTranslator responseTranslator; + + public RestGetBuiltinPrivilegesAction( + Settings settings, + XPackLicenseState licenseState, + GetBuiltinPrivilegesResponseTranslator responseTranslator + ) { super(settings, licenseState); + this.responseTranslator = responseTranslator; } @Override @@ -48,15 +62,17 @@ public String getName() { @Override public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + final boolean restrictResponse = request.hasParam(RestRequest.PATH_RESTRICTED); return channel -> client.execute( GetBuiltinPrivilegesAction.INSTANCE, new GetBuiltinPrivilegesRequest(), new RestBuilderListener<>(channel) { @Override public RestResponse buildResponse(GetBuiltinPrivilegesResponse response, XContentBuilder builder) throws Exception { + final var translatedResponse = responseTranslator.translate(response, restrictResponse); builder.startObject(); - builder.array("cluster", response.getClusterPrivileges()); - builder.array("index", response.getIndexPrivileges()); + builder.array("cluster", translatedResponse.getClusterPrivileges()); + builder.array("index", translatedResponse.getIndexPrivileges()); builder.endObject(); return new RestResponse(RestStatus.OK, builder); } @@ -64,4 +80,27 @@ public RestResponse buildResponse(GetBuiltinPrivilegesResponse response, XConten ); } + @Override + protected Exception innerCheckFeatureAvailable(RestRequest request) { + final boolean restrictPath = request.hasParam(RestRequest.PATH_RESTRICTED); + assert false == restrictPath || DiscoveryNode.isStateless(settings); + if (false == restrictPath) { + return super.innerCheckFeatureAvailable(request); + } + // This is a temporary hack: we are re-using the native roles setting as an overall feature flag for custom roles. 
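+        // When the flag is off, the restricted request is rejected below with 410 GONE rather than being dispatched to the transport action.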
+ final Boolean nativeRolesEnabled = settings.getAsBoolean(NativeRolesStore.NATIVE_ROLES_ENABLED, true); + if (nativeRolesEnabled == false) { + logger.debug( + "Attempt to call [{} {}] but [{}] is [{}]", + request.method(), + request.rawPath(), + NativeRolesStore.NATIVE_ROLES_ENABLED, + settings.get(NativeRolesStore.NATIVE_ROLES_ENABLED) + ); + return new ElasticsearchStatusException("This API is not enabled on this Elasticsearch instance", RestStatus.GONE); + } else { + return null; + } + } + } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestDeleteRoleAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestDeleteRoleAction.java index 88f53c999dfb9..cf5e4d12e7b37 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestDeleteRoleAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestDeleteRoleAction.java @@ -28,7 +28,7 @@ /** * Rest endpoint to delete a Role from the security index */ -@ServerlessScope(Scope.INTERNAL) +@ServerlessScope(Scope.PUBLIC) public class RestDeleteRoleAction extends NativeRoleBaseRestHandler { public RestDeleteRoleAction(Settings settings, XPackLicenseState licenseState) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestGetRolesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestGetRolesAction.java index 4b2660658a38f..232d74d16725d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestGetRolesAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestGetRolesAction.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.security.rest.action.role; import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.RestApiVersion; @@ -21,7 +22,6 @@ import org.elasticsearch.xpack.core.security.action.role.GetRolesRequestBuilder; import org.elasticsearch.xpack.core.security.action.role.GetRolesResponse; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; -import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; import java.util.List; @@ -30,12 +30,9 @@ /** * Rest endpoint to retrieve a Role from the security index - * - * Note: This class does not extend {@link NativeRoleBaseRestHandler} because it handles both reserved roles and native - * roles, and should still be available even if native role management is disabled. 
*/ -@ServerlessScope(Scope.INTERNAL) -public class RestGetRolesAction extends SecurityBaseRestHandler { +@ServerlessScope(Scope.PUBLIC) +public class RestGetRolesAction extends NativeRoleBaseRestHandler { public RestGetRolesAction(Settings settings, XPackLicenseState licenseState) { super(settings, licenseState); @@ -57,25 +54,47 @@ public String getName() { @Override public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { final String[] roles = request.paramAsStringArray("name", Strings.EMPTY_ARRAY); - return channel -> new GetRolesRequestBuilder(client).names(roles).execute(new RestBuilderListener<>(channel) { - @Override - public RestResponse buildResponse(GetRolesResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - for (RoleDescriptor role : response.roles()) { - builder.field(role.getName(), role); - } - builder.endObject(); + final boolean restrictRequest = isPathRestricted(request); + return channel -> new GetRolesRequestBuilder(client).names(roles) + .nativeOnly(restrictRequest) + .execute(new RestBuilderListener<>(channel) { + @Override + public RestResponse buildResponse(GetRolesResponse response, XContentBuilder builder) throws Exception { + builder.startObject(); + for (RoleDescriptor role : response.roles()) { + builder.field(role.getName(), role); + } + builder.endObject(); + + // if the user asked for specific roles, but none of them were found + // we'll return an empty result and 404 status code + if (roles.length != 0 && response.roles().length == 0) { + return new RestResponse(RestStatus.NOT_FOUND, builder); + } - // if the user asked for specific roles, but none of them were found - // we'll return an empty result and 404 status code - if (roles.length != 0 && response.roles().length == 0) { - return new RestResponse(RestStatus.NOT_FOUND, builder); + // either the user asked for all roles, or at least one of the roles + // the user asked for was found + return new RestResponse(RestStatus.OK, builder); } + }); + } + + @Override + protected Exception innerCheckFeatureAvailable(RestRequest request) { + // Note: For non-restricted requests this action handles both reserved roles and native + // roles, and should still be available even if native role management is disabled. 
+ // For restricted requests it should only be available if native role management is enabled + final boolean restrictPath = isPathRestricted(request); + if (false == restrictPath) { + return null; + } else { + return super.innerCheckFeatureAvailable(request); + } + } - // either the user asked for all roles, or at least one of the roles - // the user asked for was found - return new RestResponse(RestStatus.OK, builder); - } - }); + private boolean isPathRestricted(RestRequest request) { + final boolean restrictRequest = request.hasParam(RestRequest.PATH_RESTRICTED); + assert false == restrictRequest || DiscoveryNode.isStateless(settings); + return restrictRequest; } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesActionTests.java index 2b7125a411d61..0348ff6df90b2 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesActionTests.java @@ -276,6 +276,95 @@ public void onFailure(Exception e) { } } + public void testGetWithNativeOnly() { + final boolean all = randomBoolean(); + final List storeRoleDescriptors = randomRoleDescriptors(); + final List storeNames = storeRoleDescriptors.stream().map(RoleDescriptor::getName).collect(Collectors.toList()); + + final List requestedNames = new ArrayList<>(); + final List requestedStoreNames = new ArrayList<>(); + if (all == false) { + // Add some reserved roles; we don't expect these to be returned by the native role store + requestedNames.addAll(randomSubsetOf(randomIntBetween(1, ReservedRolesStore.names().size()), ReservedRolesStore.names())); + requestedStoreNames.addAll(randomSubsetOf(randomIntBetween(1, storeNames.size()), storeNames)); + requestedNames.addAll(requestedStoreNames); + } + + final NativeRolesStore rolesStore = mockNativeRolesStore(requestedNames, storeRoleDescriptors); + + final TransportService transportService = new TransportService( + Settings.EMPTY, + mock(Transport.class), + mock(ThreadPool.class), + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, + null, + Collections.emptySet() + ); + final TransportGetRolesAction action = new TransportGetRolesAction( + mock(ActionFilters.class), + rolesStore, + transportService, + new ReservedRolesStore() + ); + + final GetRolesRequest request = new GetRolesRequest(); + request.names(requestedNames.toArray(Strings.EMPTY_ARRAY)); + request.nativeOnly(true); + + final List actualRoleNames = doExecuteSuccessfully(action, request); + if (all) { + assertThat(actualRoleNames, containsInAnyOrder(storeNames.toArray(Strings.EMPTY_ARRAY))); + verify(rolesStore, times(1)).getRoleDescriptors(eq(new HashSet<>()), anyActionListener()); + } else { + assertThat(actualRoleNames, containsInAnyOrder(requestedStoreNames.toArray(Strings.EMPTY_ARRAY))); + verify(rolesStore, times(1)).getRoleDescriptors(eq(new HashSet<>(requestedNames)), anyActionListener()); + } + } + + private List doExecuteSuccessfully(TransportGetRolesAction action, GetRolesRequest request) { + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(mock(Task.class), request, new ActionListener<>() { + @Override + public void onResponse(GetRolesResponse response) { + 
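+                // the mocked roles store responds inline, so this callback runs synchronously within doExecute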
responseRef.set(response); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(throwableRef.get(), is(nullValue())); + assertThat(responseRef.get(), is(notNullValue())); + return Arrays.stream(responseRef.get().roles()).map(RoleDescriptor::getName).collect(Collectors.toList()); + } + + private NativeRolesStore mockNativeRolesStore(List expectedStoreNames, List storeRoleDescriptors) { + NativeRolesStore rolesStore = mock(NativeRolesStore.class); + doAnswer(invocation -> { + Object[] args = invocation.getArguments(); + assert args.length == 2; + @SuppressWarnings("unchecked") + Set requestedNames = (Set) args[0]; + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) args[1]; + if (requestedNames.size() == 0) { + listener.onResponse(RoleRetrievalResult.success(new HashSet<>(storeRoleDescriptors))); + } else { + listener.onResponse( + RoleRetrievalResult.success( + storeRoleDescriptors.stream().filter(r -> requestedNames.contains(r.getName())).collect(Collectors.toSet()) + ) + ); + } + return null; + }).when(rolesStore).getRoleDescriptors(eq(new HashSet<>(expectedStoreNames)), anyActionListener()); + return rolesStore; + } + public void testException() { final Exception e = randomFrom(new ElasticsearchSecurityException(""), new IllegalStateException()); final List storeRoleDescriptors = randomRoleDescriptors(); From d4263c2d4eaa2049a0364039ca350ea1af7b1d3d Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 21 Feb 2024 12:00:37 +0000 Subject: [PATCH 31/49] Accept `SocketException` in `Netty4HttpClient` (#105690) It's also possible to get a `Connection reset` if the server closes the channel while we're still sending requests. This commit handles that case in these tests. --- .../java/org/elasticsearch/http/netty4/Netty4HttpClient.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java index d6ee096b8dfd8..56ba3ae1958f7 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java @@ -40,6 +40,7 @@ import java.io.Closeable; import java.net.SocketAddress; +import java.net.SocketException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collection; @@ -190,7 +191,7 @@ protected void channelRead0(ChannelHandlerContext ctx, HttpObject msg) { @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { - if (cause instanceof PrematureChannelClosureException) { + if (cause instanceof PrematureChannelClosureException || cause instanceof SocketException) { // no more requests coming, so fast-forward the latch fastForward(); } else { From 9c72157bb7b55e4ba2b7411f325a8b3d214a7370 Mon Sep 17 00:00:00 2001 From: Carlos Delgado <6339205+carlosdelest@users.noreply.github.com> Date: Wed, 21 Feb 2024 13:50:29 +0100 Subject: [PATCH 32/49] Add dense vector inference mock service for testing (#105655) --- .../inference/InferenceBaseRestTest.java | 39 ++- .../xpack/inference/InferenceCrudIT.java | 12 +- .../MockDenseInferenceServiceIT.java | 65 +++++ ...java => MockSparseInferenceServiceIT.java} | 12 +- .../mock/AbstractTestInferenceService.java | 206 ++++++++++++++++ .../TestDenseInferenceServiceExtension.java | 224 ++++++++++++++++++ 
.../mock/TestInferenceServicePlugin.java | 23 +- ... TestSparseInferenceServiceExtension.java} | 184 +------------- ...search.inference.InferenceServiceExtension | 3 +- 9 files changed, 558 insertions(+), 210 deletions(-) create mode 100644 x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/MockDenseInferenceServiceIT.java rename x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/{MockInferenceServiceIT.java => MockSparseInferenceServiceIT.java} (88%) create mode 100644 x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/AbstractTestInferenceService.java create mode 100644 x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestDenseInferenceServiceExtension.java rename x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/{TestInferenceServiceExtension.java => TestSparseInferenceServiceExtension.java} (56%) diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java index 11a5bdf045f21..a9096f9059c5b 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java @@ -50,11 +50,11 @@ protected Settings restClientSettings() { return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); } - static String mockServiceModelConfig() { - return mockServiceModelConfig(null); + static String mockSparseServiceModelConfig() { + return mockSparseServiceModelConfig(null); } - static String mockServiceModelConfig(@Nullable TaskType taskTypeInBody) { + static String mockSparseServiceModelConfig(@Nullable TaskType taskTypeInBody) { var taskType = taskTypeInBody == null ? "" : "\"task_type\": \"" + taskTypeInBody + "\","; return Strings.format(""" { @@ -72,7 +72,7 @@ static String mockServiceModelConfig(@Nullable TaskType taskTypeInBody) { """, taskType); } - static String mockServiceModelConfig(@Nullable TaskType taskTypeInBody, boolean shouldReturnHiddenField) { + static String mockSparseServiceModelConfig(@Nullable TaskType taskTypeInBody, boolean shouldReturnHiddenField) { var taskType = taskTypeInBody == null ? 
"" : "\"task_type\": \"" + taskTypeInBody + "\","; return Strings.format(""" { @@ -91,6 +91,22 @@ static String mockServiceModelConfig(@Nullable TaskType taskTypeInBody, boolean """, taskType, shouldReturnHiddenField); } + static String mockDenseServiceModelConfig() { + return """ + { + "task_type": "text_embedding", + "service": "text_embedding_test_service", + "service_settings": { + "model": "my_dense_vector_model", + "api_key": "abc64", + "dimensions": 246 + }, + "task_settings": { + } + } + """; + } + protected void deleteModel(String modelId) throws IOException { var request = new Request("DELETE", "_inference/" + modelId); var response = client().performRequest(request); @@ -200,11 +216,16 @@ private Map inferOnMockServiceInternal(String endpoint, List resultMap, int expectedNumberOfResults, TaskType taskType) { - if (taskType == TaskType.SPARSE_EMBEDDING) { - var results = (List>) resultMap.get(TaskType.SPARSE_EMBEDDING.toString()); - assertThat(results, hasSize(expectedNumberOfResults)); - } else { - fail("test with task type [" + taskType + "] are not supported yet"); + switch (taskType) { + case SPARSE_EMBEDDING -> { + var results = (List>) resultMap.get(TaskType.SPARSE_EMBEDDING.toString()); + assertThat(results, hasSize(expectedNumberOfResults)); + } + case TEXT_EMBEDDING -> { + var results = (List>) resultMap.get(TaskType.TEXT_EMBEDDING.toString()); + assertThat(results, hasSize(expectedNumberOfResults)); + } + default -> fail("test with task type [" + taskType + "] are not supported yet"); } } diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java index f6718afd2f879..1ecc7980cea99 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java @@ -25,10 +25,10 @@ public class InferenceCrudIT extends InferenceBaseRestTest { @SuppressWarnings("unchecked") public void testGet() throws IOException { for (int i = 0; i < 5; i++) { - putModel("se_model_" + i, mockServiceModelConfig(), TaskType.SPARSE_EMBEDDING); + putModel("se_model_" + i, mockSparseServiceModelConfig(), TaskType.SPARSE_EMBEDDING); } for (int i = 0; i < 4; i++) { - putModel("te_model_" + i, mockServiceModelConfig(), TaskType.TEXT_EMBEDDING); + putModel("te_model_" + i, mockSparseServiceModelConfig(), TaskType.TEXT_EMBEDDING); } var getAllModels = (List>) getAllModels().get("models"); @@ -59,7 +59,7 @@ public void testGet() throws IOException { } public void testGetModelWithWrongTaskType() throws IOException { - putModel("sparse_embedding_model", mockServiceModelConfig(), TaskType.SPARSE_EMBEDDING); + putModel("sparse_embedding_model", mockSparseServiceModelConfig(), TaskType.SPARSE_EMBEDDING); var e = expectThrows(ResponseException.class, () -> getModels("sparse_embedding_model", TaskType.TEXT_EMBEDDING)); assertThat( e.getMessage(), @@ -68,7 +68,7 @@ public void testGetModelWithWrongTaskType() throws IOException { } public void testDeleteModelWithWrongTaskType() throws IOException { - putModel("sparse_embedding_model", mockServiceModelConfig(), TaskType.SPARSE_EMBEDDING); + putModel("sparse_embedding_model", mockSparseServiceModelConfig(), TaskType.SPARSE_EMBEDDING); var e = 
expectThrows(ResponseException.class, () -> deleteModel("sparse_embedding_model", TaskType.TEXT_EMBEDDING)); assertThat( e.getMessage(), @@ -79,7 +79,7 @@ public void testDeleteModelWithWrongTaskType() throws IOException { @SuppressWarnings("unchecked") public void testGetModelWithAnyTaskType() throws IOException { String inferenceEntityId = "sparse_embedding_model"; - putModel(inferenceEntityId, mockServiceModelConfig(), TaskType.SPARSE_EMBEDDING); + putModel(inferenceEntityId, mockSparseServiceModelConfig(), TaskType.SPARSE_EMBEDDING); var singleModel = (List>) getModels(inferenceEntityId, TaskType.ANY).get("models"); assertEquals(inferenceEntityId, singleModel.get(0).get("model_id")); assertEquals(TaskType.SPARSE_EMBEDDING.toString(), singleModel.get(0).get("task_type")); @@ -88,7 +88,7 @@ public void testGetModelWithAnyTaskType() throws IOException { @SuppressWarnings("unchecked") public void testApisWithoutTaskType() throws IOException { String modelId = "no_task_type_in_url"; - putModel(modelId, mockServiceModelConfig(TaskType.SPARSE_EMBEDDING)); + putModel(modelId, mockSparseServiceModelConfig(TaskType.SPARSE_EMBEDDING)); var singleModel = (List>) getModel(modelId).get("models"); assertEquals(modelId, singleModel.get(0).get("model_id")); assertEquals(TaskType.SPARSE_EMBEDDING.toString(), singleModel.get(0).get("task_type")); diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/MockDenseInferenceServiceIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/MockDenseInferenceServiceIT.java new file mode 100644 index 0000000000000..a8c0a45f3f9db --- /dev/null +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/MockDenseInferenceServiceIT.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference; + +import org.elasticsearch.inference.TaskType; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +public class MockDenseInferenceServiceIT extends InferenceBaseRestTest { + + @SuppressWarnings("unchecked") + public void testMockService() throws IOException { + String inferenceEntityId = "test-mock"; + var putModel = putModel(inferenceEntityId, mockDenseServiceModelConfig(), TaskType.TEXT_EMBEDDING); + var getModels = getModels(inferenceEntityId, TaskType.TEXT_EMBEDDING); + var model = ((List>) getModels.get("models")).get(0); + + for (var modelMap : List.of(putModel, model)) { + assertEquals(inferenceEntityId, modelMap.get("model_id")); + assertEquals(TaskType.TEXT_EMBEDDING, TaskType.fromString((String) modelMap.get("task_type"))); + assertEquals("text_embedding_test_service", modelMap.get("service")); + } + + // The response is randomly generated, the input can be anything + var inference = inferOnMockService(inferenceEntityId, List.of(randomAlphaOfLength(10))); + assertNonEmptyInferenceResults(inference, 1, TaskType.TEXT_EMBEDDING); + } + + public void testMockServiceWithMultipleInputs() throws IOException { + String inferenceEntityId = "test-mock-with-multi-inputs"; + putModel(inferenceEntityId, mockDenseServiceModelConfig(), TaskType.TEXT_EMBEDDING); + + // The response is randomly generated, the input can be anything + var inference = inferOnMockService( + inferenceEntityId, + TaskType.TEXT_EMBEDDING, + List.of(randomAlphaOfLength(5), randomAlphaOfLength(10), randomAlphaOfLength(15)) + ); + + assertNonEmptyInferenceResults(inference, 3, TaskType.TEXT_EMBEDDING); + } + + @SuppressWarnings("unchecked") + public void testMockService_DoesNotReturnSecretsInGetResponse() throws IOException { + String inferenceEntityId = "test-mock"; + var putModel = putModel(inferenceEntityId, mockDenseServiceModelConfig(), TaskType.TEXT_EMBEDDING); + var getModels = getModels(inferenceEntityId, TaskType.TEXT_EMBEDDING); + var model = ((List>) getModels.get("models")).get(0); + + var serviceSettings = (Map) model.get("service_settings"); + assertNull(serviceSettings.get("api_key")); + assertNotNull(serviceSettings.get("model")); + + var putServiceSettings = (Map) putModel.get("service_settings"); + assertNull(putServiceSettings.get("api_key")); + assertNotNull(putServiceSettings.get("model")); + } +} diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/MockInferenceServiceIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/MockSparseInferenceServiceIT.java similarity index 88% rename from x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/MockInferenceServiceIT.java rename to x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/MockSparseInferenceServiceIT.java index c226612d7a6e5..616947eae4d72 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/MockInferenceServiceIT.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/MockSparseInferenceServiceIT.java @@ -15,12 +15,12 @@ import static org.hamcrest.Matchers.is; -public class MockInferenceServiceIT extends InferenceBaseRestTest { +public class MockSparseInferenceServiceIT extends InferenceBaseRestTest { 
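+// Exercises the sparse-embedding flavour of the mock inference service; see MockDenseInferenceServiceIT for the dense counterpart.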
@SuppressWarnings("unchecked") public void testMockService() throws IOException { String inferenceEntityId = "test-mock"; - var putModel = putModel(inferenceEntityId, mockServiceModelConfig(), TaskType.SPARSE_EMBEDDING); + var putModel = putModel(inferenceEntityId, mockSparseServiceModelConfig(), TaskType.SPARSE_EMBEDDING); var getModels = getModels(inferenceEntityId, TaskType.SPARSE_EMBEDDING); var model = ((List>) getModels.get("models")).get(0); @@ -37,7 +37,7 @@ public void testMockService() throws IOException { public void testMockServiceWithMultipleInputs() throws IOException { String inferenceEntityId = "test-mock-with-multi-inputs"; - putModel(inferenceEntityId, mockServiceModelConfig(), TaskType.SPARSE_EMBEDDING); + putModel(inferenceEntityId, mockSparseServiceModelConfig(), TaskType.SPARSE_EMBEDDING); // The response is randomly generated, the input can be anything var inference = inferOnMockService( @@ -52,7 +52,7 @@ public void testMockServiceWithMultipleInputs() throws IOException { @SuppressWarnings("unchecked") public void testMockService_DoesNotReturnSecretsInGetResponse() throws IOException { String inferenceEntityId = "test-mock"; - var putModel = putModel(inferenceEntityId, mockServiceModelConfig(), TaskType.SPARSE_EMBEDDING); + var putModel = putModel(inferenceEntityId, mockSparseServiceModelConfig(), TaskType.SPARSE_EMBEDDING); var getModels = getModels(inferenceEntityId, TaskType.SPARSE_EMBEDDING); var model = ((List>) getModels.get("models")).get(0); @@ -68,7 +68,7 @@ public void testMockService_DoesNotReturnSecretsInGetResponse() throws IOExcepti @SuppressWarnings("unchecked") public void testMockService_DoesNotReturnHiddenField_InModelResponses() throws IOException { String inferenceEntityId = "test-mock"; - var putModel = putModel(inferenceEntityId, mockServiceModelConfig(), TaskType.SPARSE_EMBEDDING); + var putModel = putModel(inferenceEntityId, mockSparseServiceModelConfig(), TaskType.SPARSE_EMBEDDING); var getModels = getModels(inferenceEntityId, TaskType.SPARSE_EMBEDDING); var model = ((List>) getModels.get("models")).get(0); @@ -87,7 +87,7 @@ public void testMockService_DoesNotReturnHiddenField_InModelResponses() throws I @SuppressWarnings("unchecked") public void testMockService_DoesReturnHiddenField_InModelResponses() throws IOException { String inferenceEntityId = "test-mock"; - var putModel = putModel(inferenceEntityId, mockServiceModelConfig(null, true), TaskType.SPARSE_EMBEDDING); + var putModel = putModel(inferenceEntityId, mockSparseServiceModelConfig(null, true), TaskType.SPARSE_EMBEDDING); var getModels = getModels(inferenceEntityId, TaskType.SPARSE_EMBEDDING); var model = ((List>) getModels.get("models")).get(0); diff --git a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/AbstractTestInferenceService.java b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/AbstractTestInferenceService.java new file mode 100644 index 0000000000000..99dfc9582eb05 --- /dev/null +++ b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/AbstractTestInferenceService.java @@ -0,0 +1,206 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.mock; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.inference.InferenceService; +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SecretSettings; +import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.inference.TaskSettings; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; + +public abstract class AbstractTestInferenceService implements InferenceService { + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.current(); // fine for these tests but will not work for cluster upgrade tests + } + + @SuppressWarnings("unchecked") + protected static Map getTaskSettingsMap(Map settings) { + Map taskSettingsMap; + // task settings are optional + if (settings.containsKey(ModelConfigurations.TASK_SETTINGS)) { + taskSettingsMap = (Map) settings.remove(ModelConfigurations.TASK_SETTINGS); + } else { + taskSettingsMap = Map.of(); + } + + return taskSettingsMap; + } + + @Override + @SuppressWarnings("unchecked") + public TestServiceModel parsePersistedConfigWithSecrets( + String modelId, + TaskType taskType, + Map config, + Map secrets + ) { + var serviceSettingsMap = (Map) config.remove(ModelConfigurations.SERVICE_SETTINGS); + var secretSettingsMap = (Map) secrets.remove(ModelSecrets.SECRET_SETTINGS); + + var serviceSettings = getServiceSettingsFromMap(serviceSettingsMap); + var secretSettings = TestSecretSettings.fromMap(secretSettingsMap); + + var taskSettingsMap = getTaskSettingsMap(config); + var taskSettings = TestTaskSettings.fromMap(taskSettingsMap); + + return new TestServiceModel(modelId, taskType, name(), serviceSettings, taskSettings, secretSettings); + } + + @Override + @SuppressWarnings("unchecked") + public Model parsePersistedConfig(String modelId, TaskType taskType, Map config) { + var serviceSettingsMap = (Map) config.remove(ModelConfigurations.SERVICE_SETTINGS); + + var serviceSettings = getServiceSettingsFromMap(serviceSettingsMap); + + var taskSettingsMap = getTaskSettingsMap(config); + var taskSettings = TestTaskSettings.fromMap(taskSettingsMap); + + return new TestServiceModel(modelId, taskType, name(), serviceSettings, taskSettings, null); + } + + protected abstract ServiceSettings getServiceSettingsFromMap(Map serviceSettingsMap); + + @Override + public void start(Model model, ActionListener listener) { + listener.onResponse(true); + } + + @Override + public void close() throws IOException {} + + public static class TestServiceModel extends Model { + + public TestServiceModel( + String modelId, + TaskType taskType, + String service, + ServiceSettings serviceSettings, + TestTaskSettings taskSettings, + TestSecretSettings secretSettings + ) { + super(new ModelConfigurations(modelId, taskType, service, serviceSettings, taskSettings), new ModelSecrets(secretSettings)); + } + + @Override + public TestDenseInferenceServiceExtension.TestServiceSettings getServiceSettings() { + return (TestDenseInferenceServiceExtension.TestServiceSettings) super.getServiceSettings(); + } + + @Override + public 
TestTaskSettings getTaskSettings() { + return (TestTaskSettings) super.getTaskSettings(); + } + + @Override + public TestSecretSettings getSecretSettings() { + return (TestSecretSettings) super.getSecretSettings(); + } + } + + public record TestTaskSettings(Integer temperature) implements TaskSettings { + + static final String NAME = "test_task_settings"; + + public static TestTaskSettings fromMap(Map map) { + Integer temperature = (Integer) map.remove("temperature"); + return new TestTaskSettings(temperature); + } + + public TestTaskSettings(StreamInput in) throws IOException { + this(in.readOptionalVInt()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalVInt(temperature); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (temperature != null) { + builder.field("temperature", temperature); + } + builder.endObject(); + return builder; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.current(); // fine for these tests but will not work for cluster upgrade tests + } + } + + public record TestSecretSettings(String apiKey) implements SecretSettings { + + static final String NAME = "test_secret_settings"; + + public static TestSecretSettings fromMap(Map map) { + ValidationException validationException = new ValidationException(); + + String apiKey = (String) map.remove("api_key"); + + if (apiKey == null) { + validationException.addValidationError("missing api_key"); + } + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new TestSecretSettings(apiKey); + } + + public TestSecretSettings(StreamInput in) throws IOException { + this(in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(apiKey); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("api_key", apiKey); + builder.endObject(); + return builder; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.current(); // fine for these tests but will not work for cluster upgrade tests + } + } +} diff --git a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestDenseInferenceServiceExtension.java b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestDenseInferenceServiceExtension.java new file mode 100644 index 0000000000000..54fe6e01946b4 --- /dev/null +++ b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestDenseInferenceServiceExtension.java @@ -0,0 +1,224 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.mock; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.inference.ChunkedInferenceServiceResults; +import org.elasticsearch.inference.ChunkingOptions; +import org.elasticsearch.inference.InferenceServiceExtension; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.inference.InputType; +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.inference.results.TextEmbeddingResults; +import org.elasticsearch.xpack.core.ml.inference.results.ChunkedTextEmbeddingResults; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Set; + +public class TestDenseInferenceServiceExtension implements InferenceServiceExtension { + @Override + public List getInferenceServiceFactories() { + return List.of(TestInferenceService::new); + } + + public static class TestInferenceService extends AbstractTestInferenceService { + private static final String NAME = "text_embedding_test_service"; + + public TestInferenceService(InferenceServiceFactoryContext context) {} + + @Override + public String name() { + return NAME; + } + + @Override + @SuppressWarnings("unchecked") + public void parseRequestConfig( + String modelId, + TaskType taskType, + Map config, + Set platformArchitectures, + ActionListener parsedModelListener + ) { + var serviceSettingsMap = (Map) config.remove(ModelConfigurations.SERVICE_SETTINGS); + var serviceSettings = TestServiceSettings.fromMap(serviceSettingsMap); + var secretSettings = TestSecretSettings.fromMap(serviceSettingsMap); + + var taskSettingsMap = getTaskSettingsMap(config); + var taskSettings = TestTaskSettings.fromMap(taskSettingsMap); + + parsedModelListener.onResponse(new TestServiceModel(modelId, taskType, name(), serviceSettings, taskSettings, secretSettings)); + } + + @Override + public void infer( + Model model, + List input, + Map taskSettings, + InputType inputType, + ActionListener listener + ) { + switch (model.getConfigurations().getTaskType()) { + case ANY, TEXT_EMBEDDING -> listener.onResponse( + makeResults(input, ((TestServiceModel) model).getServiceSettings().dimensions()) + ); + default -> listener.onFailure( + new ElasticsearchStatusException( + TaskType.unsupportedTaskTypeErrorMsg(model.getConfigurations().getTaskType(), name()), + RestStatus.BAD_REQUEST + ) + ); + } + } + + @Override + public void chunkedInfer( + Model model, + List input, + Map taskSettings, + InputType inputType, + ChunkingOptions chunkingOptions, + ActionListener> listener + ) { + switch (model.getConfigurations().getTaskType()) { + case ANY, TEXT_EMBEDDING -> listener.onResponse( + makeChunkedResults(input, ((TestServiceModel) model).getServiceSettings().dimensions()) + ); + default -> listener.onFailure( + new ElasticsearchStatusException( + 
TaskType.unsupportedTaskTypeErrorMsg(model.getConfigurations().getTaskType(), name()), + RestStatus.BAD_REQUEST + ) + ); + } + } + + private TextEmbeddingResults makeResults(List input, int dimensions) { + List embeddings = new ArrayList<>(); + for (int i = 0; i < input.size(); i++) { + List values = new ArrayList<>(); + for (int j = 0; j < dimensions; j++) { + values.add((float) j); + } + embeddings.add(new TextEmbeddingResults.Embedding(values)); + } + return new TextEmbeddingResults(embeddings); + } + + private List makeChunkedResults(List input, int dimensions) { + var results = new ArrayList(); + for (int i = 0; i < input.size(); i++) { + double[] values = new double[dimensions]; + for (int j = 0; j < 5; j++) { + values[j] = j; + } + results.add( + new org.elasticsearch.xpack.core.inference.results.ChunkedTextEmbeddingResults( + List.of(new ChunkedTextEmbeddingResults.EmbeddingChunk(input.get(i), values)) + ) + ); + } + return results; + } + + protected ServiceSettings getServiceSettingsFromMap(Map serviceSettingsMap) { + return TestServiceSettings.fromMap(serviceSettingsMap); + } + } + + public record TestServiceSettings(String model, Integer dimensions, SimilarityMeasure similarity) implements ServiceSettings { + + static final String NAME = "test_text_embedding_service_settings"; + + public static TestServiceSettings fromMap(Map map) { + ValidationException validationException = new ValidationException(); + + String model = (String) map.remove("model"); + if (model == null) { + validationException.addValidationError("missing model"); + } + + Integer dimensions = (Integer) map.remove("dimensions"); + if (dimensions == null) { + validationException.addValidationError("missing dimensions"); + } + + SimilarityMeasure similarity = null; + String similarityStr = (String) map.remove("similarity"); + if (similarityStr != null) { + similarity = SimilarityMeasure.valueOf(similarityStr); + } + + return new TestServiceSettings(model, dimensions, similarity); + } + + public TestServiceSettings(StreamInput in) throws IOException { + this(in.readString(), in.readOptionalInt(), in.readOptionalEnum(SimilarityMeasure.class)); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("model", model); + builder.field("dimensions", dimensions); + if (similarity != null) { + builder.field("similarity", similarity); + } + builder.endObject(); + return builder; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.current(); // fine for these tests but will not work for cluster upgrade tests + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(model); + out.writeInt(dimensions); + out.writeOptionalEnum(similarity); + } + + @Override + public ToXContentObject getFilteredXContentObject() { + return (builder, params) -> { + builder.startObject(); + builder.field("model", model); + builder.field("dimensions", dimensions); + if (similarity != null) { + builder.field("similarity", similarity); + } + builder.endObject(); + return builder; + }; + } + + } + +} diff --git a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestInferenceServicePlugin.java b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestInferenceServicePlugin.java index 
0345d7b6e5926..6460b06f13800 100644 --- a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestInferenceServicePlugin.java +++ b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestInferenceServicePlugin.java @@ -20,20 +20,25 @@ public class TestInferenceServicePlugin extends Plugin { @Override public List getNamedWriteables() { return List.of( - new NamedWriteableRegistry.Entry( - ServiceSettings.class, - TestInferenceServiceExtension.TestServiceSettings.NAME, - TestInferenceServiceExtension.TestServiceSettings::new - ), new NamedWriteableRegistry.Entry( TaskSettings.class, - TestInferenceServiceExtension.TestTaskSettings.NAME, - TestInferenceServiceExtension.TestTaskSettings::new + AbstractTestInferenceService.TestTaskSettings.NAME, + AbstractTestInferenceService.TestTaskSettings::new ), new NamedWriteableRegistry.Entry( SecretSettings.class, - TestInferenceServiceExtension.TestSecretSettings.NAME, - TestInferenceServiceExtension.TestSecretSettings::new + AbstractTestInferenceService.TestSecretSettings.NAME, + AbstractTestInferenceService.TestSecretSettings::new + ), + new NamedWriteableRegistry.Entry( + ServiceSettings.class, + TestDenseInferenceServiceExtension.TestServiceSettings.NAME, + TestDenseInferenceServiceExtension.TestServiceSettings::new + ), + new NamedWriteableRegistry.Entry( + ServiceSettings.class, + TestSparseInferenceServiceExtension.TestServiceSettings.NAME, + TestSparseInferenceServiceExtension.TestServiceSettings::new ) ); } diff --git a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestInferenceServiceExtension.java b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestSparseInferenceServiceExtension.java similarity index 56% rename from x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestInferenceServiceExtension.java rename to x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestSparseInferenceServiceExtension.java index 215125960c4fc..e5020774a70f3 100644 --- a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestInferenceServiceExtension.java +++ b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestSparseInferenceServiceExtension.java @@ -15,16 +15,12 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; -import org.elasticsearch.inference.InferenceService; import org.elasticsearch.inference.InferenceServiceExtension; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; -import org.elasticsearch.inference.ModelSecrets; -import org.elasticsearch.inference.SecretSettings; import org.elasticsearch.inference.ServiceSettings; -import org.elasticsearch.inference.TaskSettings; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xcontent.ToXContentObject; @@ -40,13 +36,13 @@ import java.util.Map; import java.util.Set; -public class TestInferenceServiceExtension implements InferenceServiceExtension { +public class 
TestSparseInferenceServiceExtension implements InferenceServiceExtension { @Override public List getInferenceServiceFactories() { return List.of(TestInferenceService::new); } - public static class TestInferenceService implements InferenceService { + public static class TestInferenceService extends AbstractTestInferenceService { private static final String NAME = "test_service"; public TestInferenceService(InferenceServiceExtension.InferenceServiceFactoryContext context) {} @@ -56,31 +52,13 @@ public String name() { return NAME; } - @Override - public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.current(); // fine for these tests but will not work for cluster upgrade tests - } - - @SuppressWarnings("unchecked") - private static Map getTaskSettingsMap(Map settings) { - Map taskSettingsMap; - // task settings are optional - if (settings.containsKey(ModelConfigurations.TASK_SETTINGS)) { - taskSettingsMap = (Map) settings.remove(ModelConfigurations.TASK_SETTINGS); - } else { - taskSettingsMap = Map.of(); - } - - return taskSettingsMap; - } - @Override @SuppressWarnings("unchecked") public void parseRequestConfig( String modelId, TaskType taskType, Map config, - Set platfromArchitectures, + Set platformArchitectures, ActionListener parsedModelListener ) { var serviceSettingsMap = (Map) config.remove(ModelConfigurations.SERVICE_SETTINGS); @@ -93,39 +71,6 @@ public void parseRequestConfig( parsedModelListener.onResponse(new TestServiceModel(modelId, taskType, name(), serviceSettings, taskSettings, secretSettings)); } - @Override - @SuppressWarnings("unchecked") - public TestServiceModel parsePersistedConfigWithSecrets( - String modelId, - TaskType taskType, - Map config, - Map secrets - ) { - var serviceSettingsMap = (Map) config.remove(ModelConfigurations.SERVICE_SETTINGS); - var secretSettingsMap = (Map) secrets.remove(ModelSecrets.SECRET_SETTINGS); - - var serviceSettings = TestServiceSettings.fromMap(serviceSettingsMap); - var secretSettings = TestSecretSettings.fromMap(secretSettingsMap); - - var taskSettingsMap = getTaskSettingsMap(config); - var taskSettings = TestTaskSettings.fromMap(taskSettingsMap); - - return new TestServiceModel(modelId, taskType, name(), serviceSettings, taskSettings, secretSettings); - } - - @Override - @SuppressWarnings("unchecked") - public Model parsePersistedConfig(String modelId, TaskType taskType, Map config) { - var serviceSettingsMap = (Map) config.remove(ModelConfigurations.SERVICE_SETTINGS); - - var serviceSettings = TestServiceSettings.fromMap(serviceSettingsMap); - - var taskSettingsMap = getTaskSettingsMap(config); - var taskSettings = TestTaskSettings.fromMap(taskSettingsMap); - - return new TestServiceModel(modelId, taskType, name(), serviceSettings, taskSettings, null); - } - @Override public void infer( Model model, @@ -189,42 +134,10 @@ private List makeChunkedResults(List inp return List.of(new ChunkedSparseEmbeddingResults(chunks)); } - @Override - public void start(Model model, ActionListener listener) { - listener.onResponse(true); - } - - @Override - public void close() throws IOException {} - } - - public static class TestServiceModel extends Model { - - public TestServiceModel( - String modelId, - TaskType taskType, - String service, - TestServiceSettings serviceSettings, - TestTaskSettings taskSettings, - TestSecretSettings secretSettings - ) { - super(new ModelConfigurations(modelId, taskType, service, serviceSettings, taskSettings), new ModelSecrets(secretSettings)); - } - - @Override - public 
TestServiceSettings getServiceSettings() { - return (TestServiceSettings) super.getServiceSettings(); - } - - @Override - public TestTaskSettings getTaskSettings() { - return (TestTaskSettings) super.getTaskSettings(); + protected ServiceSettings getServiceSettingsFromMap(Map serviceSettingsMap) { + return TestServiceSettings.fromMap(serviceSettingsMap); } - @Override - public TestSecretSettings getSecretSettings() { - return (TestSecretSettings) super.getSecretSettings(); - } } public record TestServiceSettings(String model, String hiddenField, boolean shouldReturnHiddenField) implements ServiceSettings { @@ -300,91 +213,4 @@ public ToXContentObject getFilteredXContentObject() { }; } } - - public record TestTaskSettings(Integer temperature) implements TaskSettings { - - static final String NAME = "test_task_settings"; - - public static TestTaskSettings fromMap(Map map) { - Integer temperature = (Integer) map.remove("temperature"); - return new TestTaskSettings(temperature); - } - - public TestTaskSettings(StreamInput in) throws IOException { - this(in.readOptionalVInt()); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalVInt(temperature); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - if (temperature != null) { - builder.field("temperature", temperature); - } - builder.endObject(); - return builder; - } - - @Override - public String getWriteableName() { - return NAME; - } - - @Override - public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.current(); // fine for these tests but will not work for cluster upgrade tests - } - } - - public record TestSecretSettings(String apiKey) implements SecretSettings { - - static final String NAME = "test_secret_settings"; - - public static TestSecretSettings fromMap(Map map) { - ValidationException validationException = new ValidationException(); - - String apiKey = (String) map.remove("api_key"); - - if (apiKey == null) { - validationException.addValidationError("missing api_key"); - } - - if (validationException.validationErrors().isEmpty() == false) { - throw validationException; - } - - return new TestSecretSettings(apiKey); - } - - public TestSecretSettings(StreamInput in) throws IOException { - this(in.readString()); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(apiKey); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field("api_key", apiKey); - builder.endObject(); - return builder; - } - - @Override - public String getWriteableName() { - return NAME; - } - - @Override - public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.current(); // fine for these tests but will not work for cluster upgrade tests - } - } } diff --git a/x-pack/plugin/inference/qa/test-service-plugin/src/main/resources/META-INF/services/org.elasticsearch.inference.InferenceServiceExtension b/x-pack/plugin/inference/qa/test-service-plugin/src/main/resources/META-INF/services/org.elasticsearch.inference.InferenceServiceExtension index 019a6dad7be85..c1908dc788251 100644 --- a/x-pack/plugin/inference/qa/test-service-plugin/src/main/resources/META-INF/services/org.elasticsearch.inference.InferenceServiceExtension +++ 
b/x-pack/plugin/inference/qa/test-service-plugin/src/main/resources/META-INF/services/org.elasticsearch.inference.InferenceServiceExtension @@ -1 +1,2 @@ -org.elasticsearch.xpack.inference.mock.TestInferenceServiceExtension +org.elasticsearch.xpack.inference.mock.TestSparseInferenceServiceExtension +org.elasticsearch.xpack.inference.mock.TestDenseInferenceServiceExtension From 1f1636e1f7665fcfdf8dca491d9ca828bc659526 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 21 Feb 2024 14:09:54 +0100 Subject: [PATCH 33/49] Fix error 500 on invalid ParentIdQuery (#105693) We need to enforce non-null values here; otherwise we error out and return a 500 when a user fails to set either the id or the type. Closes #105366 --- docs/changelog/105693.yaml | 6 ++++++ .../org/elasticsearch/join/query/ParentIdQueryBuilder.java | 4 ++-- .../elasticsearch/join/query/ParentIdQueryBuilderTests.java | 5 +++++ 3 files changed, 13 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/105693.yaml diff --git a/docs/changelog/105693.yaml b/docs/changelog/105693.yaml new file mode 100644 index 0000000000000..8d14d611e19a3 --- /dev/null +++ b/docs/changelog/105693.yaml @@ -0,0 +1,6 @@ +pr: 105693 +summary: Fix error 500 on invalid `ParentIdQuery` +area: Search +type: bug +issues: + - 105366 diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentIdQueryBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentIdQueryBuilder.java index 8fb72ddce1935..89850862cd63f 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentIdQueryBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentIdQueryBuilder.java @@ -51,8 +51,8 @@ public final class ParentIdQueryBuilder extends AbstractQueryBuilder new ParentIdQueryBuilder(null, randomAlphaOfLength(5))); + expectThrows(IllegalArgumentException.class, () -> new ParentIdQueryBuilder(randomAlphaOfLength(5), null)); + } + public void testDisallowExpensiveQueries() { SearchExecutionContext searchExecutionContext = mock(SearchExecutionContext.class); when(searchExecutionContext.allowExpensiveQueries()).thenReturn(false); From d693fc8b1939dbf94d8ee34c467ca8d3e2a9e4a1 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 21 Feb 2024 14:12:52 +0100 Subject: [PATCH 34/49] Fix search response leaks in async search tests (#105675) This fixes all of these muted test classes. I tried my best to keep indentation changes to a minimum, but it wasn't possible to avoid them in all cases, unfortunately.
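The fix applies one pattern throughout the tests below: every ref-counted response a test builds is released in a finally block, including the intermediate SearchResponse that exists only to construct an AsyncSearchResponse. A minimal sketch of that shape, with hypothetical newSearchResponse(), wrap() and runAssertions() helpers standing in for the actual test setup:

    SearchResponse searchResponse = newSearchResponse();   // hypothetical factory for a test response
    AsyncSearchResponse asyncSearchResponse;
    try {
        // the wrapping response retains its own reference, as the
        // construct-then-decRef pattern in the diff below implies
        asyncSearchResponse = wrap(searchResponse);         // hypothetical wrapping helper
    } finally {
        searchResponse.decRef();                            // release the local reference either way
    }
    try {
        runAssertions(asyncSearchResponse);                 // hypothetical assertions
    } finally {
        asyncSearchResponse.decRef();                       // always release the outer response when done
    }
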
--- .../test/rest/RestActionTestCase.java | 5 +- .../search/AsyncSearchResponseTests.java | 694 ++++++++++-------- .../search/AsyncStatusResponseTests.java | 45 +- .../RestSubmitAsyncSearchActionTests.java | 2 - 4 files changed, 403 insertions(+), 343 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/RestActionTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/RestActionTestCase.java index 9e638425d5c5c..fad8575ae1d58 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/RestActionTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/RestActionTestCase.java @@ -13,6 +13,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.Releasables; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; @@ -68,6 +69,8 @@ protected void dispatchRequest(RestRequest request) { ThreadContext threadContext = verifyingClient.threadPool().getThreadContext(); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { controller.dispatchRequest(request, channel, threadContext); + } finally { + Releasables.close(channel.capturedResponse()); } } @@ -154,7 +157,7 @@ public Task exe ) { @SuppressWarnings("unchecked") // Callers are responsible for lining this up Response response = (Response) executeLocallyVerifier.get().apply(action, request); - listener.onResponse(response); + ActionListener.respondAndRelease(listener, response); return request.createTask( taskIdGenerator.incrementAndGet(), "transport", diff --git a/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchResponseTests.java b/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchResponseTests.java index afabd8c7a7bc3..98513f611a5d8 100644 --- a/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchResponseTests.java +++ b/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchResponseTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.search; import org.apache.lucene.index.CorruptIndexException; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.TransportVersion; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.SearchResponse; @@ -33,6 +32,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.search.action.AsyncSearchResponse; +import org.junit.After; import org.junit.Before; import java.io.IOException; @@ -48,7 +48,6 @@ import static java.util.Collections.emptyList; import static org.elasticsearch.xpack.core.async.GetAsyncResultRequestTests.randomSearchId; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/104838") public class AsyncSearchResponseTests extends ESTestCase { private final SearchResponse searchResponse = randomSearchResponse(randomBoolean()); private NamedWriteableRegistry namedWriteableRegistry; @@ -61,6 +60,11 @@ public void registerNamedObjects() { namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables); } + @After + public void releaseResponse() { + searchResponse.decRef(); + } + protected Writeable.Reader instanceReader() { return AsyncSearchResponse::new; } @@ -77,7 +81,11 
@@ protected void assertEqualInstances(AsyncSearchResponse expectedInstance, AsyncS public final void testSerialization() throws IOException { for (int runs = 0; runs < 10; runs++) { AsyncSearchResponse testInstance = createTestInstance(); - assertSerialization(testInstance); + try { + assertSerialization(testInstance).decRef(); + } finally { + testInstance.decRef(); + } } } @@ -160,44 +168,47 @@ static void assertEqualResponses(AsyncSearchResponse expected, AsyncSearchRespon public void testToXContentWithoutSearchResponse() throws IOException { Date date = new Date(); AsyncSearchResponse asyncSearchResponse = new AsyncSearchResponse("id", true, true, date.getTime(), date.getTime()); + try { + try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { + builder.prettyPrint(); + ChunkedToXContent.wrapAsToXContent(asyncSearchResponse).toXContent(builder, ToXContent.EMPTY_PARAMS); + assertEquals(Strings.format(""" + { + "id" : "id", + "is_partial" : true, + "is_running" : true, + "start_time_in_millis" : %s, + "expiration_time_in_millis" : %s + }""", date.getTime(), date.getTime()), Strings.toString(builder)); + } - try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { - builder.prettyPrint(); - ChunkedToXContent.wrapAsToXContent(asyncSearchResponse).toXContent(builder, ToXContent.EMPTY_PARAMS); - assertEquals(Strings.format(""" - { - "id" : "id", - "is_partial" : true, - "is_running" : true, - "start_time_in_millis" : %s, - "expiration_time_in_millis" : %s - }""", date.getTime(), date.getTime()), Strings.toString(builder)); - } - - try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { - builder.prettyPrint(); - builder.humanReadable(true); - ChunkedToXContent.wrapAsToXContent(asyncSearchResponse) - .toXContent(builder, new ToXContent.MapParams(Collections.singletonMap("human", "true"))); - assertEquals( - Strings.format( - """ - { - "id" : "id", - "is_partial" : true, - "is_running" : true, - "start_time" : "%s", - "start_time_in_millis" : %s, - "expiration_time" : "%s", - "expiration_time_in_millis" : %s - }""", - XContentElasticsearchExtension.DEFAULT_FORMATTER.format(date.toInstant()), - date.getTime(), - XContentElasticsearchExtension.DEFAULT_FORMATTER.format(date.toInstant()), - date.getTime() - ), - Strings.toString(builder) - ); + try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { + builder.prettyPrint(); + builder.humanReadable(true); + ChunkedToXContent.wrapAsToXContent(asyncSearchResponse) + .toXContent(builder, new ToXContent.MapParams(Collections.singletonMap("human", "true"))); + assertEquals( + Strings.format( + """ + { + "id" : "id", + "is_partial" : true, + "is_running" : true, + "start_time" : "%s", + "start_time_in_millis" : %s, + "expiration_time" : "%s", + "expiration_time_in_millis" : %s + }""", + XContentElasticsearchExtension.DEFAULT_FORMATTER.format(date.toInstant()), + date.getTime(), + XContentElasticsearchExtension.DEFAULT_FORMATTER.format(date.toInstant()), + date.getTime() + ), + Strings.toString(builder) + ); + } + } finally { + asyncSearchResponse.decRef(); } } @@ -227,89 +238,98 @@ public void testToXContentWithSearchResponseAfterCompletion() throws IOException SearchResponse.Clusters.EMPTY ); - AsyncSearchResponse asyncSearchResponse = new AsyncSearchResponse( - "id", - searchResponse, - null, - false, - isRunning, - startTimeMillis, - expirationTimeMillis - ); - - try (XContentBuilder builder = 
XContentBuilder.builder(XContentType.JSON.xContent())) { - builder.prettyPrint(); - ChunkedToXContent.wrapAsToXContent(asyncSearchResponse).toXContent(builder, ToXContent.EMPTY_PARAMS); - assertEquals(Strings.format(""" - { - "id" : "id", - "is_partial" : false, - "is_running" : false, - "start_time_in_millis" : %s, - "expiration_time_in_millis" : %s, - "completion_time_in_millis" : %s, - "response" : { - "took" : %s, - "timed_out" : false, - "num_reduce_phases" : 2, - "_shards" : { - "total" : 10, - "successful" : 9, - "skipped" : 1, - "failed" : 0 - }, - "hits" : { - "max_score" : 0.0, - "hits" : [ ] - } - } - }""", startTimeMillis, expirationTimeMillis, expectedCompletionTime, took), Strings.toString(builder)); + AsyncSearchResponse asyncSearchResponse; + try { + asyncSearchResponse = new AsyncSearchResponse( + "id", + searchResponse, + null, + false, + isRunning, + startTimeMillis, + expirationTimeMillis + ); + } finally { + searchResponse.decRef(); } - try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { - builder.prettyPrint(); - builder.humanReadable(true); - ChunkedToXContent.wrapAsToXContent(asyncSearchResponse) - .toXContent(builder, new ToXContent.MapParams(Collections.singletonMap("human", "true"))); - assertEquals( - Strings.format( - """ - { - "id" : "id", - "is_partial" : false, - "is_running" : false, - "start_time" : "%s", - "start_time_in_millis" : %s, - "expiration_time" : "%s", - "expiration_time_in_millis" : %s, - "completion_time" : "%s", - "completion_time_in_millis" : %s, - "response" : { - "took" : %s, - "timed_out" : false, - "num_reduce_phases" : 2, - "_shards" : { - "total" : 10, - "successful" : 9, - "skipped" : 1, - "failed" : 0 - }, - "hits" : { - "max_score" : 0.0, - "hits" : [ ] - } - } - }""", - XContentElasticsearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(startTimeMillis)), - startTimeMillis, - XContentElasticsearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(expirationTimeMillis)), - expirationTimeMillis, - XContentElasticsearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(expectedCompletionTime)), - expectedCompletionTime, - took - ), - Strings.toString(builder) - ); + try { + try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { + builder.prettyPrint(); + ChunkedToXContent.wrapAsToXContent(asyncSearchResponse).toXContent(builder, ToXContent.EMPTY_PARAMS); + assertEquals(Strings.format(""" + { + "id" : "id", + "is_partial" : false, + "is_running" : false, + "start_time_in_millis" : %s, + "expiration_time_in_millis" : %s, + "completion_time_in_millis" : %s, + "response" : { + "took" : %s, + "timed_out" : false, + "num_reduce_phases" : 2, + "_shards" : { + "total" : 10, + "successful" : 9, + "skipped" : 1, + "failed" : 0 + }, + "hits" : { + "max_score" : 0.0, + "hits" : [ ] + } + } + }""", startTimeMillis, expirationTimeMillis, expectedCompletionTime, took), Strings.toString(builder)); + } + + try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { + builder.prettyPrint(); + builder.humanReadable(true); + ChunkedToXContent.wrapAsToXContent(asyncSearchResponse) + .toXContent(builder, new ToXContent.MapParams(Collections.singletonMap("human", "true"))); + assertEquals( + Strings.format( + """ + { + "id" : "id", + "is_partial" : false, + "is_running" : false, + "start_time" : "%s", + "start_time_in_millis" : %s, + "expiration_time" : "%s", + "expiration_time_in_millis" : %s, + "completion_time" : "%s", + 
"completion_time_in_millis" : %s, + "response" : { + "took" : %s, + "timed_out" : false, + "num_reduce_phases" : 2, + "_shards" : { + "total" : 10, + "successful" : 9, + "skipped" : 1, + "failed" : 0 + }, + "hits" : { + "max_score" : 0.0, + "hits" : [ ] + } + } + }""", + XContentElasticsearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(startTimeMillis)), + startTimeMillis, + XContentElasticsearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(expirationTimeMillis)), + expirationTimeMillis, + XContentElasticsearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(expectedCompletionTime)), + expectedCompletionTime, + took + ), + Strings.toString(builder) + ); + } + } finally { + asyncSearchResponse.decRef(); } } @@ -339,135 +359,143 @@ public void testToXContentWithCCSSearchResponseWhileRunning() throws IOException ShardSearchFailure.EMPTY_ARRAY, clusters ); + AsyncSearchResponse asyncSearchResponse; + try { + asyncSearchResponse = new AsyncSearchResponse( + "id", + searchResponse, + null, + true, + isRunning, + startTimeMillis, + expirationTimeMillis + ); + } finally { + searchResponse.decRef(); + } - AsyncSearchResponse asyncSearchResponse = new AsyncSearchResponse( - "id", - searchResponse, - null, - true, - isRunning, - startTimeMillis, - expirationTimeMillis - ); - - try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { - builder.prettyPrint(); - ChunkedToXContent.wrapAsToXContent(asyncSearchResponse).toXContent(builder, ToXContent.EMPTY_PARAMS); - assertEquals(Strings.format(""" - { - "id" : "id", - "is_partial" : true, - "is_running" : true, - "start_time_in_millis" : %s, - "expiration_time_in_millis" : %s, - "response" : { - "took" : %s, - "timed_out" : false, - "num_reduce_phases" : 2, - "_shards" : { - "total" : 10, - "successful" : 9, - "skipped" : 1, - "failed" : 0 - }, - "_clusters" : { - "total" : 3, - "successful" : 0, - "skipped" : 0, - "running" : 3, - "partial" : 0, - "failed" : 0, - "details" : { - "cluster_1" : { - "status" : "running", - "indices" : "foo,bar*", - "timed_out" : false + try { + try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { + builder.prettyPrint(); + ChunkedToXContent.wrapAsToXContent(asyncSearchResponse).toXContent(builder, ToXContent.EMPTY_PARAMS); + assertEquals(Strings.format(""" + { + "id" : "id", + "is_partial" : true, + "is_running" : true, + "start_time_in_millis" : %s, + "expiration_time_in_millis" : %s, + "response" : { + "took" : %s, + "timed_out" : false, + "num_reduce_phases" : 2, + "_shards" : { + "total" : 10, + "successful" : 9, + "skipped" : 1, + "failed" : 0 }, - "cluster_2" : { - "status" : "running", - "indices" : "foo,bar*", - "timed_out" : false + "_clusters" : { + "total" : 3, + "successful" : 0, + "skipped" : 0, + "running" : 3, + "partial" : 0, + "failed" : 0, + "details" : { + "cluster_1" : { + "status" : "running", + "indices" : "foo,bar*", + "timed_out" : false + }, + "cluster_2" : { + "status" : "running", + "indices" : "foo,bar*", + "timed_out" : false + }, + "cluster_0" : { + "status" : "running", + "indices" : "foo,bar*", + "timed_out" : false + } + } }, - "cluster_0" : { - "status" : "running", - "indices" : "foo,bar*", - "timed_out" : false + "hits" : { + "max_score" : 0.0, + "hits" : [ ] } } - }, - "hits" : { - "max_score" : 0.0, - "hits" : [ ] - } - } - }""", startTimeMillis, expirationTimeMillis, took), Strings.toString(builder)); - } + }""", startTimeMillis, expirationTimeMillis, took), Strings.toString(builder)); + } - 
try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { - builder.prettyPrint(); - builder.humanReadable(true); - ChunkedToXContent.wrapAsToXContent(asyncSearchResponse) - .toXContent(builder, new ToXContent.MapParams(Collections.singletonMap("human", "true"))); - assertEquals( - Strings.format( - """ - { - "id" : "id", - "is_partial" : true, - "is_running" : true, - "start_time" : "%s", - "start_time_in_millis" : %s, - "expiration_time" : "%s", - "expiration_time_in_millis" : %s, - "response" : { - "took" : %s, - "timed_out" : false, - "num_reduce_phases" : 2, - "_shards" : { - "total" : 10, - "successful" : 9, - "skipped" : 1, - "failed" : 0 - }, - "_clusters" : { - "total" : 3, - "successful" : 0, - "skipped" : 0, - "running" : 3, - "partial" : 0, - "failed" : 0, - "details" : { - "cluster_1" : { - "status" : "running", - "indices" : "foo,bar*", - "timed_out" : false + try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { + builder.prettyPrint(); + builder.humanReadable(true); + ChunkedToXContent.wrapAsToXContent(asyncSearchResponse) + .toXContent(builder, new ToXContent.MapParams(Collections.singletonMap("human", "true"))); + assertEquals( + Strings.format( + """ + { + "id" : "id", + "is_partial" : true, + "is_running" : true, + "start_time" : "%s", + "start_time_in_millis" : %s, + "expiration_time" : "%s", + "expiration_time_in_millis" : %s, + "response" : { + "took" : %s, + "timed_out" : false, + "num_reduce_phases" : 2, + "_shards" : { + "total" : 10, + "successful" : 9, + "skipped" : 1, + "failed" : 0 }, - "cluster_2" : { - "status" : "running", - "indices" : "foo,bar*", - "timed_out" : false + "_clusters" : { + "total" : 3, + "successful" : 0, + "skipped" : 0, + "running" : 3, + "partial" : 0, + "failed" : 0, + "details" : { + "cluster_1" : { + "status" : "running", + "indices" : "foo,bar*", + "timed_out" : false + }, + "cluster_2" : { + "status" : "running", + "indices" : "foo,bar*", + "timed_out" : false + }, + "cluster_0" : { + "status" : "running", + "indices" : "foo,bar*", + "timed_out" : false + } + } }, - "cluster_0" : { - "status" : "running", - "indices" : "foo,bar*", - "timed_out" : false + "hits" : { + "max_score" : 0.0, + "hits" : [ ] } } - }, - "hits" : { - "max_score" : 0.0, - "hits" : [ ] - } - } - }""", - XContentElasticsearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(startTimeMillis)), - startTimeMillis, - XContentElasticsearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(expirationTimeMillis)), - expirationTimeMillis, - took - ), - Strings.toString(builder) - ); + }""", + XContentElasticsearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(startTimeMillis)), + startTimeMillis, + XContentElasticsearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(expirationTimeMillis)), + expirationTimeMillis, + took + ), + Strings.toString(builder) + ); + } + } finally { + asyncSearchResponse.decRef(); } } @@ -566,15 +594,20 @@ public void testToXContentWithCCSSearchResponseAfterCompletion() throws IOExcept clusters ); - AsyncSearchResponse asyncSearchResponse = new AsyncSearchResponse( - "id", - searchResponse, - null, - false, - isRunning, - startTimeMillis, - expirationTimeMillis - ); + AsyncSearchResponse asyncSearchResponse; + try { + asyncSearchResponse = new AsyncSearchResponse( + "id", + searchResponse, + null, + false, + isRunning, + startTimeMillis, + expirationTimeMillis + ); + } finally { + searchResponse.decRef(); + } try (XContentBuilder builder = 
XContentBuilder.builder(XContentType.JSON.xContent())) { builder.prettyPrint(); @@ -680,6 +713,8 @@ public void testToXContentWithCCSSearchResponseAfterCompletion() throws IOExcept } } }""", startTimeMillis, expirationTimeMillis, expectedCompletionTime, took), Strings.toString(builder)); + } finally { + asyncSearchResponse.decRef(); } } @@ -707,85 +742,92 @@ public void testToXContentWithSearchResponseWhileRunning() throws IOException { ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY ); - - AsyncSearchResponse asyncSearchResponse = new AsyncSearchResponse( - "id", - searchResponse, - null, - true, - isRunning, - startTimeMillis, - expirationTimeMillis - ); - - try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { - builder.prettyPrint(); - ChunkedToXContent.wrapAsToXContent(asyncSearchResponse).toXContent(builder, ToXContent.EMPTY_PARAMS); - assertEquals(Strings.format(""" - { - "id" : "id", - "is_partial" : true, - "is_running" : true, - "start_time_in_millis" : %s, - "expiration_time_in_millis" : %s, - "response" : { - "took" : %s, - "timed_out" : false, - "num_reduce_phases" : 2, - "_shards" : { - "total" : 10, - "successful" : 9, - "skipped" : 1, - "failed" : 0 - }, - "hits" : { - "max_score" : 0.0, - "hits" : [ ] - } - } - }""", startTimeMillis, expirationTimeMillis, took), Strings.toString(builder)); + AsyncSearchResponse asyncSearchResponse; + try { + asyncSearchResponse = new AsyncSearchResponse( + "id", + searchResponse, + null, + true, + isRunning, + startTimeMillis, + expirationTimeMillis + ); + } finally { + searchResponse.decRef(); } + try { + try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { + builder.prettyPrint(); + ChunkedToXContent.wrapAsToXContent(asyncSearchResponse).toXContent(builder, ToXContent.EMPTY_PARAMS); + assertEquals(Strings.format(""" + { + "id" : "id", + "is_partial" : true, + "is_running" : true, + "start_time_in_millis" : %s, + "expiration_time_in_millis" : %s, + "response" : { + "took" : %s, + "timed_out" : false, + "num_reduce_phases" : 2, + "_shards" : { + "total" : 10, + "successful" : 9, + "skipped" : 1, + "failed" : 0 + }, + "hits" : { + "max_score" : 0.0, + "hits" : [ ] + } + } + }""", startTimeMillis, expirationTimeMillis, took), Strings.toString(builder)); + } - try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { - builder.prettyPrint(); - builder.humanReadable(true); - ChunkedToXContent.wrapAsToXContent(asyncSearchResponse) - .toXContent(builder, new ToXContent.MapParams(Collections.singletonMap("human", "true"))); - assertEquals( - Strings.format( - """ - { - "id" : "id", - "is_partial" : true, - "is_running" : true, - "start_time" : "%s", - "start_time_in_millis" : %s, - "expiration_time" : "%s", - "expiration_time_in_millis" : %s, - "response" : { - "took" : %s, - "timed_out" : false, - "num_reduce_phases" : 2, - "_shards" : { - "total" : 10, - "successful" : 9, - "skipped" : 1, - "failed" : 0 - }, - "hits" : { - "max_score" : 0.0, - "hits" : [ ] - } - } - }""", - XContentElasticsearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(startTimeMillis)), - startTimeMillis, - XContentElasticsearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(expirationTimeMillis)), - expirationTimeMillis, - took - ), - Strings.toString(builder) - ); + try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { + builder.prettyPrint(); + builder.humanReadable(true); + 
ChunkedToXContent.wrapAsToXContent(asyncSearchResponse) + .toXContent(builder, new ToXContent.MapParams(Collections.singletonMap("human", "true"))); + assertEquals( + Strings.format( + """ + { + "id" : "id", + "is_partial" : true, + "is_running" : true, + "start_time" : "%s", + "start_time_in_millis" : %s, + "expiration_time" : "%s", + "expiration_time_in_millis" : %s, + "response" : { + "took" : %s, + "timed_out" : false, + "num_reduce_phases" : 2, + "_shards" : { + "total" : 10, + "successful" : 9, + "skipped" : 1, + "failed" : 0 + }, + "hits" : { + "max_score" : 0.0, + "hits" : [ ] + } + } + }""", + XContentElasticsearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(startTimeMillis)), + startTimeMillis, + XContentElasticsearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(expirationTimeMillis)), + expirationTimeMillis, + took + ), + Strings.toString(builder) + ); + } + } finally { + asyncSearchResponse.decRef(); } } diff --git a/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncStatusResponseTests.java b/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncStatusResponseTests.java index 2786d9772108a..6be128ac733b4 100644 --- a/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncStatusResponseTests.java +++ b/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncStatusResponseTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.search; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.Strings; @@ -27,7 +26,6 @@ import static org.elasticsearch.xpack.core.async.GetAsyncResultRequestTests.randomSearchId; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/104838") public class AsyncStatusResponseTests extends AbstractWireSerializingTestCase { @Override @@ -266,10 +264,13 @@ public void testToXContent() throws IOException { public void testGetStatusFromStoredSearchRandomizedInputs() { boolean ccs = randomBoolean(); String searchId = randomSearchId(); - AsyncSearchResponse asyncSearchResponse = AsyncSearchResponseTests.randomAsyncSearchResponse( - searchId, - AsyncSearchResponseTests.randomSearchResponse(ccs) - ); + SearchResponse searchResponse = AsyncSearchResponseTests.randomSearchResponse(ccs); + AsyncSearchResponse asyncSearchResponse; + try { + asyncSearchResponse = AsyncSearchResponseTests.randomAsyncSearchResponse(searchId, searchResponse); + } finally { + searchResponse.decRef(); + } try { if (asyncSearchResponse.getSearchResponse() == null && asyncSearchResponse.getFailure() == null @@ -339,8 +340,12 @@ public void testGetStatusFromStoredSearchFailedShardsScenario() { new ShardSearchFailure[] { new ShardSearchFailure(new RuntimeException("foo")) }, clusters ); - - AsyncSearchResponse asyncSearchResponse = new AsyncSearchResponse(searchId, searchResponse, null, false, false, 100, 200); + AsyncSearchResponse asyncSearchResponse; + try { + asyncSearchResponse = new AsyncSearchResponse(searchId, searchResponse, null, false, false, 100, 200); + } finally { + searchResponse.decRef(); + } try { AsyncStatusResponse statusFromStoredSearch = AsyncStatusResponse.getStatusFromStoredSearch(asyncSearchResponse, 100, searchId); assertNotNull(statusFromStoredSearch); @@ -368,8 +373,12 @@ public void testGetStatusFromStoredSearchWithEmptyClustersSuccessfullyCompleted( ShardSearchFailure.EMPTY_ARRAY, 
SearchResponse.Clusters.EMPTY ); - - AsyncSearchResponse asyncSearchResponse = new AsyncSearchResponse(searchId, searchResponse, null, false, false, 100, 200); + AsyncSearchResponse asyncSearchResponse; + try { + asyncSearchResponse = new AsyncSearchResponse(searchId, searchResponse, null, false, false, 100, 200); + } finally { + searchResponse.decRef(); + } try { AsyncStatusResponse statusFromStoredSearch = AsyncStatusResponse.getStatusFromStoredSearch(asyncSearchResponse, 100, searchId); assertNotNull(statusFromStoredSearch); @@ -415,8 +424,12 @@ public void testGetStatusFromStoredSearchWithNonEmptyClustersSuccessfullyComplet ShardSearchFailure.EMPTY_ARRAY, clusters ); - - AsyncSearchResponse asyncSearchResponse = new AsyncSearchResponse(searchId, searchResponse, null, false, false, 100, 200); + AsyncSearchResponse asyncSearchResponse; + try { + asyncSearchResponse = new AsyncSearchResponse(searchId, searchResponse, null, false, false, 100, 200); + } finally { + searchResponse.decRef(); + } try { AsyncStatusResponse statusFromStoredSearch = AsyncStatusResponse.getStatusFromStoredSearch(asyncSearchResponse, 100, searchId); assertNotNull(statusFromStoredSearch); @@ -464,9 +477,13 @@ public void testGetStatusFromStoredSearchWithNonEmptyClustersStillRunning() { ShardSearchFailure.EMPTY_ARRAY, clusters ); - boolean isRunning = true; - AsyncSearchResponse asyncSearchResponse = new AsyncSearchResponse(searchId, searchResponse, null, false, isRunning, 100, 200); + AsyncSearchResponse asyncSearchResponse; + try { + asyncSearchResponse = new AsyncSearchResponse(searchId, searchResponse, null, false, isRunning, 100, 200); + } finally { + searchResponse.decRef(); + } try { AsyncStatusResponse statusFromStoredSearch = AsyncStatusResponse.getStatusFromStoredSearch(asyncSearchResponse, 100, searchId); assertNotNull(statusFromStoredSearch); diff --git a/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/RestSubmitAsyncSearchActionTests.java b/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/RestSubmitAsyncSearchActionTests.java index 2c9708a930186..fe6ed8b57d1e0 100644 --- a/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/RestSubmitAsyncSearchActionTests.java +++ b/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/RestSubmitAsyncSearchActionTests.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.search; -import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.util.SetOnce; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -30,7 +29,6 @@ import static org.hamcrest.Matchers.instanceOf; import static org.mockito.Mockito.mock; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/104838") public class RestSubmitAsyncSearchActionTests extends RestActionTestCase { private RestSubmitAsyncSearchAction action; From 2d4a49af539818b7244f140d37745c13e0346f8f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Wed, 21 Feb 2024 15:06:32 +0100 Subject: [PATCH 35/49] [DOCS] Fixes get settings and update settings security API docs (#105686) * [DOCS] Fixes get settings and update settings security API docs. * [DOCS] Further edits. 
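For orientation, the endpoints these two pages document can be exercised from the low-level REST client in the same style as the security tests later in this series, assuming an ESRestTestCase-style context where client() and assertOK() are available. This is a sketch only: the /_security/settings paths and the body values are inferred from the setting names in the diff, not copied from the docs source.

    Request getSettings = new Request("GET", "/_security/settings");
    Response response = client().performRequest(getSettings);   // returns the user-configurable subset of settings

    Request updateSettings = new Request("PUT", "/_security/settings");
    updateSettings.setJsonEntity("""
        {
          "security": {
            "index.auto_expand_replicas": "0-all"
          }
        }""");
    assertOK(client().performRequest(updateSettings));
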
--- docs/reference/rest-api/security.asciidoc | 2 + .../rest-api/security/get-settings.asciidoc | 9 ++++- .../security/update-settings.asciidoc | 39 ++++++++++++------- 3 files changed, 34 insertions(+), 16 deletions(-) diff --git a/docs/reference/rest-api/security.asciidoc b/docs/reference/rest-api/security.asciidoc index 94b632490ad86..e5c42a93d34b1 100644 --- a/docs/reference/rest-api/security.asciidoc +++ b/docs/reference/rest-api/security.asciidoc @@ -12,6 +12,8 @@ Use the following APIs to perform security activities. * <> * <> * <> +* <> +* <> * <> [discrete] diff --git a/docs/reference/rest-api/security/get-settings.asciidoc b/docs/reference/rest-api/security/get-settings.asciidoc index 5c38b96903cbd..46e4e0cf529bb 100644 --- a/docs/reference/rest-api/security/get-settings.asciidoc +++ b/docs/reference/rest-api/security/get-settings.asciidoc @@ -5,6 +5,8 @@ Get Security settings ++++ +Retrieves settings for the security internal indices. + [[security-api-get-settings-prereqs]] ==== {api-prereq-title} @@ -14,11 +16,16 @@ ==== {api-description-title} This API allows a user to retrieve the user-configurable settings for the Security internal index (`.security` and associated indices). Only a subset of -the index settings — those that are user-configurable—will be shown. This includes: +the index settings — those that are user-configurable—will be shown. This +includes: - `index.auto_expand_replicas` - `index.number_of_replicas` + +[[security-api-get-settings-example]] +==== {api-examples-title} + An example of retrieving the security settings: [source,console] diff --git a/docs/reference/rest-api/security/update-settings.asciidoc b/docs/reference/rest-api/security/update-settings.asciidoc index 0ea41d86e85ed..652b722b0af48 100644 --- a/docs/reference/rest-api/security/update-settings.asciidoc +++ b/docs/reference/rest-api/security/update-settings.asciidoc @@ -5,11 +5,31 @@ Update Security settings ++++ +Updates the settings of the security internal indices. + + [[security-api-update-settings-prereqs]] ==== {api-prereq-title} * To use this API, you must have at least the `manage_security` cluster privilege. + +[[security-api-update-settings-request-body]] +==== {api-request-body-title} + +`security`:: +(Optional, object) Settings to be used for the index used for most security +configuration, including Native realm users and roles configured via the API. + +`security-tokens`:: +(Optional, object) Settings to be used for the index used to store +<>. + +`security`:: +(Optional, object) Settings to be used for the index used to store +<> information. + + [[security-api-update-settings-desc]] ==== {api-description-title} This API allows a user to modify the settings for the Security internal indices @@ -19,6 +39,10 @@ be modified. This includes: - `index.auto_expand_replicas` - `index.number_of_replicas` + +[[security-api-update-settings-example]] +==== {api-examples-title} + An example of modifying the Security settings: [source,console] @@ -43,18 +67,3 @@ The configured settings can be retrieved using the is not in use on the system, but settings are provided for it, the request will be rejected - this API does not yet support configuring the settings for these indices before they are in use. - - -==== {api-request-body-title} - -`security`:: -(Optional, object) Settings to be used for the index used for most security -configuration, including Native realm users and roles configured via the API. 
- -`security-tokens`:: -(Optional, object) Settings to be used for the index used to store -<>. - -`security`:: -(Optional, object) Settings to be used for the index used to store -<> information. From edf96a5212c07e2af74608b94fcc2a3781fe18a3 Mon Sep 17 00:00:00 2001 From: Michael Peterson Date: Wed, 21 Feb 2024 09:28:20 -0500 Subject: [PATCH 36/49] Add RCS1.0 security test for the ResolveCluster API (#105524) --- ...teClusterSecurityRCS1ResolveClusterIT.java | 241 ++++++++++++++++++ ...eClusterSecurityRCS2ResolveClusterIT.java} | 19 +- 2 files changed, 246 insertions(+), 14 deletions(-) create mode 100644 x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRCS1ResolveClusterIT.java rename x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/{RemoteClusterSecurityResolveClusterIT.java => RemoteClusterSecurityRCS2ResolveClusterIT.java} (96%) diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRCS1ResolveClusterIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRCS1ResolveClusterIT.java new file mode 100644 index 0000000000000..813739a8e0d06 --- /dev/null +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRCS1ResolveClusterIT.java @@ -0,0 +1,241 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.remotecluster; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.core.Strings; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +import java.io.IOException; +import java.util.Map; + +import static org.elasticsearch.action.search.SearchResponse.LOCAL_CLUSTER_NAME_REPRESENTATION; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +/** + * Tests the _resolve/cluster API under RCS1.0 security model + */ +public class RemoteClusterSecurityRCS1ResolveClusterIT extends AbstractRemoteClusterSecurityTestCase { + + static { + fulfillingCluster = ElasticsearchCluster.local().name("fulfilling-cluster").nodes(3).apply(commonClusterConfig).build(); + + queryCluster = ElasticsearchCluster.local().name("query-cluster").apply(commonClusterConfig).build(); + } + + @ClassRule + public static TestRule clusterRule = RuleChain.outerRule(fulfillingCluster).around(queryCluster); + + @SuppressWarnings("unchecked") + public void testResolveClusterUnderRCS1() throws Exception { + // Setup RCS 1.0 (basicSecurity=true) + configureRemoteCluster("my_remote_cluster", fulfillingCluster, true, randomBoolean(), randomBoolean()); + + { + // Query cluster -> add role for test user - do not give any privileges for remote_indices + var putRoleRequest = new Request("PUT", "/_security/role/" 
+ REMOTE_SEARCH_ROLE); + putRoleRequest.setJsonEntity(""" + { + "indices": [ + { + "names": ["local_index"], + "privileges": ["read"] + } + ] + }"""); + assertOK(adminClient().performRequest(putRoleRequest)); + + // Query cluster -> create user and assign role + var putUserRequest = new Request("PUT", "/_security/user/" + REMOTE_SEARCH_USER); + putUserRequest.setJsonEntity(""" + { + "password": "x-pack-test-password", + "roles" : ["remote_search"] + }"""); + assertOK(adminClient().performRequest(putUserRequest)); + + // Query cluster -> create test index + var indexDocRequest = new Request("POST", "/local_index/_doc?refresh=true"); + indexDocRequest.setJsonEntity("{\"local_foo\": \"local_bar\"}"); + assertOK(client().performRequest(indexDocRequest)); + + // Fulfilling cluster -> create test indices + Request bulkRequest = new Request("POST", "/_bulk?refresh=true"); + bulkRequest.setJsonEntity(Strings.format(""" + { "index": { "_index": "index1" } } + { "foo": "bar" } + { "index": { "_index": "secretindex" } } + { "bar": "foo" } + """)); + assertOK(performRequestAgainstFulfillingCluster(bulkRequest)); + } + { + // TEST CASE 1: Query cluster -> try to resolve local and remote star patterns (no access to remote cluster) + Request starResolveRequest = new Request("GET", "_resolve/cluster/*,my_remote_cluster:*"); + Response response = performRequestWithRemoteSearchUser(starResolveRequest); + assertOK(response); + Map responseMap = responseAsMap(response); + assertLocalMatching(responseMap); + + Map remoteClusterResponse = (Map) responseMap.get("my_remote_cluster"); + assertThat((Boolean) remoteClusterResponse.get("connected"), equalTo(true)); + assertThat((String) remoteClusterResponse.get("error"), containsString("unauthorized for user [remote_search_user]")); + + // TEST CASE 2: Query cluster -> add user role and user on remote cluster and try resolve again + var putRoleOnRemoteClusterRequest = new Request("PUT", "/_security/role/" + REMOTE_SEARCH_ROLE); + putRoleOnRemoteClusterRequest.setJsonEntity(""" + { + "indices": [ + { + "names": ["index*"], + "privileges": ["read", "read_cross_cluster"] + } + ] + }"""); + assertOK(performRequestAgainstFulfillingCluster(putRoleOnRemoteClusterRequest)); + + var putUserOnRemoteClusterRequest = new Request("PUT", "/_security/user/" + REMOTE_SEARCH_USER); + putUserOnRemoteClusterRequest.setJsonEntity(""" + { + "password": "x-pack-test-password", + "roles" : ["remote_search"] + }"""); + assertOK(performRequestAgainstFulfillingCluster(putUserOnRemoteClusterRequest)); + + // Query cluster -> resolve local and remote with proper access + response = performRequestWithRemoteSearchUser(starResolveRequest); + assertOK(response); + responseMap = responseAsMap(response); + assertLocalMatching(responseMap); + assertRemoteMatching(responseMap); + } + { + // TEST CASE 3: Query cluster -> resolve index1 for local index without any local privilege + Request localOnly1 = new Request("GET", "_resolve/cluster/index1"); + ResponseException exc = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(localOnly1)); + assertThat(exc.getResponse().getStatusLine().getStatusCode(), is(403)); + assertThat( + exc.getMessage(), + containsString( + "action [indices:admin/resolve/cluster] is unauthorized for user " + + "[remote_search_user] with effective roles [remote_search] on indices [index1]" + ) + ); + } + { + // TEST CASE 4: Query cluster -> resolve local for local index without any local privilege using wildcard + Request localOnlyWildcard1 = new 
Request("GET", "_resolve/cluster/index1*"); + Response response = performRequestWithRemoteSearchUser(localOnlyWildcard1); + assertOK(response); + Map responseMap = responseAsMap(response); + assertMatching((Map) responseMap.get(LOCAL_CLUSTER_NAME_REPRESENTATION), false); + } + { + // TEST CASE 5: Query cluster -> resolve remote and local without permission where using wildcard 'index1*' + Request localNoPermsRemoteWithPerms = new Request("GET", "_resolve/cluster/index1*,my_remote_cluster:index1"); + Response response = performRequestWithRemoteSearchUser(localNoPermsRemoteWithPerms); + assertOK(response); + Map responseMap = responseAsMap(response); + assertMatching((Map) responseMap.get(LOCAL_CLUSTER_NAME_REPRESENTATION), false); + assertRemoteMatching(responseMap); + } + { + // TEST CASE 6: Query cluster -> resolve remote only for existing and privileged index + Request remoteOnly1 = new Request("GET", "_resolve/cluster/my_remote_cluster:index1"); + Response response = performRequestWithRemoteSearchUser(remoteOnly1); + assertOK(response); + Map responseMap = responseAsMap(response); + assertThat(responseMap.get(LOCAL_CLUSTER_NAME_REPRESENTATION), nullValue()); + assertRemoteMatching(responseMap); + } + { + // TEST CASE 7: Query cluster -> resolve remote only for existing but non-privileged index + Request remoteOnly2 = new Request("GET", "_resolve/cluster/my_remote_cluster:secretindex"); + Response response = performRequestWithRemoteSearchUser(remoteOnly2); + assertOK(response); + Map responseMap = responseAsMap(response); + assertThat(responseMap.get(LOCAL_CLUSTER_NAME_REPRESENTATION), nullValue()); + Map remoteClusterResponse = (Map) responseMap.get("my_remote_cluster"); + assertThat((Boolean) remoteClusterResponse.get("connected"), equalTo(true)); + assertThat((String) remoteClusterResponse.get("error"), containsString("unauthorized for user [remote_search_user]")); + assertThat((String) remoteClusterResponse.get("error"), containsString("on indices [secretindex]")); + } + { + // TEST CASE 8: Query cluster -> resolve remote only for non-existing and non-privileged index + Request remoteOnly3 = new Request("GET", "_resolve/cluster/my_remote_cluster:doesnotexist"); + Response response = performRequestWithRemoteSearchUser(remoteOnly3); + assertOK(response); + Map responseMap = responseAsMap(response); + assertThat(responseMap.get(LOCAL_CLUSTER_NAME_REPRESENTATION), nullValue()); + Map remoteClusterResponse = (Map) responseMap.get("my_remote_cluster"); + assertThat((Boolean) remoteClusterResponse.get("connected"), equalTo(true)); + assertThat((String) remoteClusterResponse.get("error"), containsString("unauthorized for user [remote_search_user]")); + assertThat((String) remoteClusterResponse.get("error"), containsString("on indices [doesnotexist]")); + } + { + // TEST CASE 9: Query cluster -> resolve remote only for non-existing but privileged (by index pattern) index + Request remoteOnly4 = new Request("GET", "_resolve/cluster/my_remote_cluster:index99"); + Response response = performRequestWithRemoteSearchUser(remoteOnly4); + assertOK(response); + Map responseMap = responseAsMap(response); + assertThat(responseMap.get(LOCAL_CLUSTER_NAME_REPRESENTATION), nullValue()); + Map remoteClusterResponse = (Map) responseMap.get("my_remote_cluster"); + assertThat((Boolean) remoteClusterResponse.get("connected"), equalTo(true)); + assertThat((String) remoteClusterResponse.get("error"), containsString("no such index [index99]")); + } + { + // TEST CASE 10: Query cluster -> resolve remote only for 
some existing/privileged, + // non-existing/privileged, existing/non-privileged + Request remoteOnly5 = new Request( + "GET", + "_resolve/cluster/my_remote_cluster:index1,my_remote_cluster:secretindex,my_remote_cluster:index99" + ); + Response response = performRequestWithRemoteSearchUser(remoteOnly5); + assertOK(response); + Map<String, Object> responseMap = responseAsMap(response); + assertThat(responseMap.get(LOCAL_CLUSTER_NAME_REPRESENTATION), nullValue()); + Map<String, Object> remoteClusterResponse = (Map<String, Object>) responseMap.get("my_remote_cluster"); + assertThat((Boolean) remoteClusterResponse.get("connected"), equalTo(true)); + assertThat((String) remoteClusterResponse.get("error"), containsString("unauthorized for user [remote_search_user]")); + assertThat((String) remoteClusterResponse.get("error"), containsString("on indices [secretindex]")); + } + } + + private Response performRequestWithRemoteSearchUser(final Request request) throws IOException { + request.setOptions( + RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", headerFromRandomAuthMethod(REMOTE_SEARCH_USER, PASS)) + ); + return client().performRequest(request); + } + + @SuppressWarnings("unchecked") + private void assertLocalMatching(Map<String, Object> responseMap) { + assertMatching((Map<String, Object>) responseMap.get(LOCAL_CLUSTER_NAME_REPRESENTATION), true); + } + + @SuppressWarnings("unchecked") + private void assertRemoteMatching(Map<String, Object> responseMap) { + assertMatching((Map<String, Object>) responseMap.get("my_remote_cluster"), true); + } + + private void assertMatching(Map<String, Object> perClusterResponse, boolean matching) { + assertThat((Boolean) perClusterResponse.get("connected"), equalTo(true)); + assertThat((Boolean) perClusterResponse.get("matching_indices"), equalTo(matching)); + assertThat(perClusterResponse.get("version"), notNullValue()); + } +} diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityResolveClusterIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRCS2ResolveClusterIT.java similarity index 96% rename from x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityResolveClusterIT.java rename to x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRCS2ResolveClusterIT.java index da6d930371bc9..a3bc56dafce98 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityResolveClusterIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRCS2ResolveClusterIT.java @@ -36,7 +36,10 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -public class RemoteClusterSecurityResolveClusterIT extends AbstractRemoteClusterSecurityTestCase { +/** + * Tests the _resolve/cluster API under RCS2.0 security model + */ +public class RemoteClusterSecurityRCS2ResolveClusterIT extends AbstractRemoteClusterSecurityTestCase { private static final AtomicReference<Map<String, Object>> API_KEY_MAP_REF = new AtomicReference<>(); private static final AtomicReference<Map<String, Object>> REST_API_KEY_MAP_REF = new AtomicReference<>(); @@ -168,7 +171,6 @@ public void testResolveCluster() throws Exception { """)); assertOK(performRequestAgainstFulfillingCluster(bulkRequest)); } - { // TEST CASE 1: Query cluster -> try to resolve local and remote star patterns (no access
to remote cluster) final Request starResolveRequest = new Request("GET", "_resolve/cluster/*,my_remote_cluster:*"); @@ -212,9 +214,8 @@ public void testResolveCluster() throws Exception { assertLocalMatching(responseMap); assertRemoteMatching(responseMap); } - - // TEST CASE 3: Query cluster -> resolve index1 for local index without any local privilege { + // TEST CASE 3: Query cluster -> resolve index1 for local index without any local privilege final Request localOnly1 = new Request("GET", "_resolve/cluster/index1"); ResponseException exc = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(localOnly1)); assertThat(exc.getResponse().getStatusLine().getStatusCode(), is(403)); @@ -226,7 +227,6 @@ public void testResolveCluster() throws Exception { ) ); } - { // TEST CASE 4: Query cluster -> resolve local for local index without any local privilege using wildcard final Request localOnlyWildcard1 = new Request("GET", "_resolve/cluster/index1*"); @@ -235,7 +235,6 @@ public void testResolveCluster() throws Exception { Map<String, Object> responseMap = responseAsMap(response); assertMatching((Map<String, Object>) responseMap.get(LOCAL_CLUSTER_NAME_REPRESENTATION), false); } - { // TEST CASE 5: Query cluster -> resolve remote and local without permission where using wildcard 'index1*' final Request localNoPermsRemoteWithPerms = new Request("GET", "_resolve/cluster/index1*,my_remote_cluster:index1"); @@ -245,7 +244,6 @@ public void testResolveCluster() throws Exception { assertMatching((Map<String, Object>) responseMap.get(LOCAL_CLUSTER_NAME_REPRESENTATION), false); assertRemoteMatching(responseMap); } - { // TEST CASE 6: Query cluster -> resolve remote only for existing and privileged index final Request remoteOnly1 = new Request("GET", "_resolve/cluster/my_remote_cluster:index1"); @@ -255,7 +253,6 @@ public void testResolveCluster() throws Exception { assertThat(responseMap.get(LOCAL_CLUSTER_NAME_REPRESENTATION), nullValue()); assertRemoteMatching(responseMap); } - { // TEST CASE 7: Query cluster -> resolve remote only for existing but non-privileged index final Request remoteOnly2 = new Request("GET", "_resolve/cluster/my_remote_cluster:secretindex"); @@ -266,10 +263,8 @@ public void testResolveCluster() throws Exception { Map<String, Object> remoteClusterResponse = (Map<String, Object>) responseMap.get("my_remote_cluster"); assertThat((Boolean) remoteClusterResponse.get("connected"), equalTo(true)); assertThat((String) remoteClusterResponse.get("error"), containsString("is unauthorized for user")); - assertThat((String) remoteClusterResponse.get("error"), containsString("with assigned roles [remote_search]")); assertThat((String) remoteClusterResponse.get("error"), containsString("on indices [secretindex]")); } - { // TEST CASE 8: Query cluster -> resolve remote only for non-existing and non-privileged index final Request remoteOnly3 = new Request("GET", "_resolve/cluster/my_remote_cluster:doesnotexist"); @@ -280,10 +275,8 @@ public void testResolveCluster() throws Exception { Map<String, Object> remoteClusterResponse = (Map<String, Object>) responseMap.get("my_remote_cluster"); assertThat((Boolean) remoteClusterResponse.get("connected"), equalTo(true)); assertThat((String) remoteClusterResponse.get("error"), containsString("is unauthorized for user")); - assertThat((String) remoteClusterResponse.get("error"), containsString("with assigned roles [remote_search]")); assertThat((String) remoteClusterResponse.get("error"), containsString("on indices [doesnotexist]")); } - { // TEST CASE 9: Query cluster -> resolve remote only for non-existing but privileged (by index pattern)
index final Request remoteOnly4 = new Request("GET", "_resolve/cluster/my_remote_cluster:index99"); @@ -296,7 +289,6 @@ public void testResolveCluster() throws Exception { assertThat((Boolean) remoteClusterResponse.get("skip_unavailable"), equalTo(false)); assertThat((String) remoteClusterResponse.get("error"), containsString("no such index [index99]")); } - { // TEST CASE 10: Query cluster -> resolve remote only for some existing/privileged, // non-existing/privileged, existing/non-privileged @@ -311,7 +303,6 @@ public void testResolveCluster() throws Exception { Map<String, Object> remoteClusterResponse = (Map<String, Object>) responseMap.get("my_remote_cluster"); assertThat((Boolean) remoteClusterResponse.get("connected"), equalTo(true)); assertThat((String) remoteClusterResponse.get("error"), containsString("is unauthorized for user")); - assertThat((String) remoteClusterResponse.get("error"), containsString("with assigned roles [remote_search]")); assertThat((String) remoteClusterResponse.get("error"), containsString("on indices [secretindex]")); } } From 5f508a1d16ddd1d9c3a80bd120873c8e0d704ab3 Mon Sep 17 00:00:00 2001 From: Kathleen DeRusso Date: Wed, 21 Feb 2024 10:02:56 -0500 Subject: [PATCH 37/49] Fix transport bug handling errors in rule query (#105667) --- .../test/entsearch/260_rule_query_search.yml | 38 +++++++++++++++ .../application/rules/RuleQueryBuilder.java | 47 ++++++------------- 2 files changed, 53 insertions(+), 32 deletions(-) diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/260_rule_query_search.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/260_rule_query_search.yml index f67c955126235..edd9d7c2e140d 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/260_rule_query_search.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/260_rule_query_search.yml @@ -69,6 +69,44 @@ setup: - 'doc3' +--- +"Perform a rule query specifying a ruleset that does not exist": + - skip: + version: " - 8.12.99" + reason: Bugfix that was broken in previous versions + + - do: + catch: /resource_not_found_exception/ + search: + body: + query: + rule_query: + organic: + query_string: + default_field: text + query: search + match_criteria: + foo: bar + ruleset_id: nonexistent-ruleset + +--- +"Perform a rule query with malformed rule": + - skip: + version: " - 8.12.99" + reason: Bugfix that was broken in previous versions + + - do: + catch: bad_request + search: + body: + query: + rule_query: + organic: + query_string: + default_field: text + query: search + ruleset_id: test-ruleset + --- "Perform a rule query with an ID match": diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java index 11d2945a97354..bc45b24027e0e 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java @@ -8,21 +8,17 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.SetOnce; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import
org.elasticsearch.action.get.GetRequest; -import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.client.internal.Client; -import org.elasticsearch.client.internal.OriginSettingClient; +import org.elasticsearch.action.get.TransportGetAction; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.HeaderWarning; -import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; @@ -45,6 +41,7 @@ import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.xpack.core.ClientHelper.ENT_SEARCH_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; import static org.elasticsearch.xpack.searchbusinessrules.PinnedQueryBuilder.MAX_NUM_PINNED_HITS; /** @@ -207,36 +204,22 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) { AppliedQueryRules appliedRules = new AppliedQueryRules(); queryRewriteContext.registerAsyncAction((client, listener) -> { - Client clientWithOrigin = new OriginSettingClient(client, ENT_SEARCH_ORIGIN); - clientWithOrigin.get(getRequest, new ActionListener<>() { - @Override - public void onResponse(GetResponse getResponse) { - if (getResponse.isExists() == false) { - throw new ResourceNotFoundException("query ruleset " + rulesetId + " not found"); - } - QueryRuleset queryRuleset = QueryRuleset.fromXContentBytes( - rulesetId, - getResponse.getSourceAsBytesRef(), - XContentType.JSON - ); - for (QueryRule rule : queryRuleset.rules()) { - rule.applyRule(appliedRules, matchCriteria); - } - pinnedIdsSetOnce.set(appliedRules.pinnedIds().stream().distinct().toList()); - pinnedDocsSetOnce.set(appliedRules.pinnedDocs().stream().distinct().toList()); - listener.onResponse(null); + executeAsyncWithOrigin(client, ENT_SEARCH_ORIGIN, TransportGetAction.TYPE, getRequest, ActionListener.wrap(getResponse -> { + + if (getResponse.isExists() == false) { + listener.onFailure(new ResourceNotFoundException("query ruleset " + rulesetId + " not found")); + return; } - @Override - public void onFailure(Exception e) { - Throwable cause = ExceptionsHelper.unwrapCause(e); - if (cause instanceof IndexNotFoundException) { - listener.onFailure(new ResourceNotFoundException("query ruleset " + rulesetId + " not found")); - } else { - listener.onFailure(e); - } + QueryRuleset queryRuleset = QueryRuleset.fromXContentBytes(rulesetId, getResponse.getSourceAsBytesRef(), XContentType.JSON); + for (QueryRule rule : queryRuleset.rules()) { + rule.applyRule(appliedRules, matchCriteria); } - }); + pinnedIdsSetOnce.set(appliedRules.pinnedIds().stream().distinct().toList()); + pinnedDocsSetOnce.set(appliedRules.pinnedDocs().stream().distinct().toList()); + listener.onResponse(null); + + }, listener::onFailure)); }); return new RuleQueryBuilder(organicQuery, matchCriteria, this.rulesetId, null, null, pinnedIdsSetOnce::get, pinnedDocsSetOnce::get) From 86f4b1819412d6862c1aa5b874707cc90c8460b5 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Wed, 21 Feb 2024 16:07:05 +0100 Subject: [PATCH 38/49] Reduce InternalGeoGrid in a streaming fashion (#105651) --- .../bucket/geogrid/InternalGeoGrid.java | 47 ++++++++----------- 1 file changed, 19 insertions(+), 28 
deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java index c040e50da1aa6..bbf92cbf679d0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java @@ -17,11 +17,11 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; +import org.elasticsearch.search.aggregations.bucket.MultiBucketAggregatorsReducer; import org.elasticsearch.search.aggregations.support.SamplingContext; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; -import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; @@ -81,34 +81,35 @@ public List<InternalGeoGridBucket> getBuckets() { protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceContext, int size) { return new AggregatorReducer() { - LongObjectPagedHashMap<List<InternalGeoGridBucket>> buckets = null; + final LongObjectPagedHashMap<MultiBucketAggregatorsReducer> bucketsReducer = new LongObjectPagedHashMap<>( + size, + reduceContext.bigArrays() + ); @Override public void accept(InternalAggregation aggregation) { @SuppressWarnings("unchecked") - InternalGeoGrid<B> grid = (InternalGeoGrid<B>) aggregation; - if (buckets == null) { - buckets = new LongObjectPagedHashMap<>(grid.buckets.size(), reduceContext.bigArrays()); - } - for (InternalGeoGridBucket bucket : grid.buckets) { - List<InternalGeoGridBucket> existingBuckets = buckets.get(bucket.hashAsLong()); - if (existingBuckets == null) { - existingBuckets = new ArrayList<>(size); - buckets.put(bucket.hashAsLong(), existingBuckets); + final InternalGeoGrid<B> grid = (InternalGeoGrid<B>) aggregation; + for (InternalGeoGridBucket bucket : grid.getBuckets()) { + MultiBucketAggregatorsReducer reducer = bucketsReducer.get(bucket.hashAsLong()); + if (reducer == null) { + reducer = new MultiBucketAggregatorsReducer(reduceContext, size); + bucketsReducer.put(bucket.hashAsLong(), reducer); } - existingBuckets.add(bucket); + reducer.accept(bucket); } } @Override public InternalAggregation get() { final int size = Math.toIntExact( - reduceContext.isFinalReduce() == false ? buckets.size() : Math.min(requiredSize, buckets.size()) + reduceContext.isFinalReduce() == false ?
bucketsReducer.size() : Math.min(requiredSize, bucketsReducer.size()) ); final BucketPriorityQueue<InternalGeoGridBucket> ordered = new BucketPriorityQueue<>(size); - for (LongObjectPagedHashMap.Cursor<List<InternalGeoGridBucket>> cursor : buckets) { - ordered.insertWithOverflow(reduceBucket(cursor.value, reduceContext)); - } + bucketsReducer.iterator().forEachRemaining(entry -> { + InternalGeoGridBucket bucket = createBucket(entry.key, entry.value.getDocCount(), entry.value.get()); + ordered.insertWithOverflow(bucket); + }); final InternalGeoGridBucket[] list = new InternalGeoGridBucket[ordered.size()]; for (int i = ordered.size() - 1; i >= 0; i--) { list[i] = ordered.pop(); @@ -119,7 +120,8 @@ public InternalAggregation get() { @Override public void close() { - Releasables.close(buckets); + bucketsReducer.iterator().forEachRemaining(r -> Releasables.close(r.value)); + Releasables.close(bucketsReducer); } }; } @@ -142,17 +144,6 @@ public InternalAggregation finalizeSampling(SamplingContext samplingContext) { ); } - private InternalGeoGridBucket reduceBucket(List<InternalGeoGridBucket> buckets, AggregationReduceContext context) { - assert buckets.isEmpty() == false; - long docCount = 0; - for (InternalGeoGridBucket bucket : buckets) { - docCount += bucket.docCount; - } - final List<InternalAggregations> aggregations = new BucketAggregationList<>(buckets); - final InternalAggregations aggs = InternalAggregations.reduce(aggregations, context); - return createBucket(buckets.get(0).hashAsLong, docCount, aggs); - } - protected abstract B createBucket(long hashAsLong, long docCount, InternalAggregations aggregations); @Override From 0da52203723cb75755320acd83fc0aeee8e3834d Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Wed, 21 Feb 2024 16:17:34 +0100 Subject: [PATCH 39/49] Call real memory circuit breaker in BucketsAggregator#collectBucket instead of BucketsAggregator#collectExistingBucket (#105668) --- .../aggregations/bucket/BucketsAggregator.java | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java index fc2e7f04f2c59..b55d98685ab54 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java @@ -78,13 +78,6 @@ public final void grow(long maxBucketOrd) { */ public final void collectBucket(LeafBucketCollector subCollector, int doc, long bucketOrd) throws IOException { grow(bucketOrd + 1); - collectExistingBucket(subCollector, doc, bucketOrd); - } - - /** - * Same as {@link #collectBucket(LeafBucketCollector, int, long)}, but doesn't check if the docCounts needs to be re-sized. - */ - public final void collectExistingBucket(LeafBucketCollector subCollector, int doc, long bucketOrd) throws IOException { int docCount = docCountProvider.getDocCount(doc); if (docCounts.increment(bucketOrd, docCount) == docCount) { // We call the circuit breaker the time to time in order to give it a chance to check available @@ -97,6 +90,14 @@ public final void collectExistingBucket(LeafBucketCollector subCollector, int do subCollector.collect(doc, bucketOrd); } + /** + * Same as {@link #collectBucket(LeafBucketCollector, int, long)}, but doesn't check if the docCounts needs to be re-sized.
+ */ + public final void collectExistingBucket(LeafBucketCollector subCollector, int doc, long bucketOrd) throws IOException { + docCounts.increment(bucketOrd, docCountProvider.getDocCount(doc)); + subCollector.collect(doc, bucketOrd); + } + /** * Merge doc counts. If the {@linkplain Aggregator} is delayed then you must also call * {@link BestBucketsDeferringCollector#rewriteBuckets(LongUnaryOperator)} to merge the delayed buckets. From 280fa408bedb2243670669e1d8e999a44e9dfa51 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Wed, 21 Feb 2024 17:23:10 +0100 Subject: [PATCH 40/49] Improve EarlyDeprecationindexingIT test reliability (#105696) This test exercises the bulkProcessor2's request consumer (see DeprecationIndexingComponent#getBulkProcessor) scheduling requests before startup is completed (while flushing is enabled). To verify this behaviour, the flush has to happen before the templates are loaded. To test this reliably, the flush interval in the test should be as small as possible (not the hardcoded 5s used so far). This commit introduces a setting (not meant to be exposed/documented) to allow the flush interval to be configured. It also adds additional trace logging to help with troubleshooting. Relates #104716 --- .../qa/early-deprecation-rest/build.gradle | 5 ++++- .../xpack/deprecation/Deprecation.java | 8 +++++++- .../logging/DeprecationIndexingAppender.java | 19 ++++++++++++++++--- .../logging/DeprecationIndexingComponent.java | 14 +++++++++++++- 4 files changed, 40 insertions(+), 6 deletions(-) diff --git a/x-pack/plugin/deprecation/qa/early-deprecation-rest/build.gradle b/x-pack/plugin/deprecation/qa/early-deprecation-rest/build.gradle index ab09c31d6f80c..2d8859bdcea3d 100644 --- a/x-pack/plugin/deprecation/qa/early-deprecation-rest/build.gradle +++ b/x-pack/plugin/deprecation/qa/early-deprecation-rest/build.gradle @@ -27,9 +27,12 @@ restResources { testClusters.configureEach { testDistribution = 'DEFAULT' - setting 'cluster.deprecation_indexing.enabled', 'true' setting 'xpack.security.enabled', 'false' setting 'xpack.license.self_generated.type', 'trial' + setting 'cluster.deprecation_indexing.enabled', 'true' + setting 'cluster.deprecation_indexing.flush_interval', '1ms' + setting 'logger.org.elasticsearch.xpack.deprecation','TRACE' + setting 'logger.org.elasticsearch.xpack.deprecation.logging','TRACE' } // Test clusters run with security disabled diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/Deprecation.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/Deprecation.java index 4e2c9da25e78b..85b7c89e7cb85 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/Deprecation.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/Deprecation.java @@ -34,6 +34,7 @@ import java.util.function.Supplier; import static org.elasticsearch.xpack.deprecation.DeprecationChecks.SKIP_DEPRECATIONS_SETTING; +import static org.elasticsearch.xpack.deprecation.logging.DeprecationIndexingComponent.DEPRECATION_INDEXING_FLUSH_INTERVAL; /** * The plugin class for the Deprecation API */ @@ -110,6 +111,11 @@ public Collection<?> createComponents(PluginServices services) { @Override public List<Setting<?>> getSettings() { - return List.of(USE_X_OPAQUE_ID_IN_FILTERING, WRITE_DEPRECATION_LOGS_TO_INDEX, SKIP_DEPRECATIONS_SETTING); + return List.of( + USE_X_OPAQUE_ID_IN_FILTERING, + WRITE_DEPRECATION_LOGS_TO_INDEX, + SKIP_DEPRECATIONS_SETTING, + DEPRECATION_INDEXING_FLUSH_INTERVAL
+ ); } } diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationIndexingAppender.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationIndexingAppender.java index edd9a85862b01..22637b1640b51 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationIndexingAppender.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationIndexingAppender.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.deprecation.logging; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.Appender; import org.apache.logging.log4j.core.Core; import org.apache.logging.log4j.core.Filter; @@ -16,6 +18,7 @@ import org.apache.logging.log4j.core.config.plugins.Plugin; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.core.Strings; import org.elasticsearch.xcontent.XContentType; import java.util.Objects; @@ -28,6 +31,7 @@ */ @Plugin(name = "DeprecationIndexingAppender", category = Core.CATEGORY_NAME, elementType = Appender.ELEMENT_TYPE) public class DeprecationIndexingAppender extends AbstractAppender { + private static final Logger logger = LogManager.getLogger(DeprecationIndexingAppender.class); public static final String DEPRECATION_MESSAGES_DATA_STREAM = ".logs-deprecation.elasticsearch-default"; private final Consumer requestConsumer; @@ -40,9 +44,10 @@ public class DeprecationIndexingAppender extends AbstractAppender { /** * Creates a new appender. - * @param name the appender's name - * @param filter a filter to apply directly on the appender - * @param layout the layout to use for formatting message. It must return a JSON string. + * + * @param name the appender's name + * @param filter a filter to apply directly on the appender + * @param layout the layout to use for formatting message. It must return a JSON string. * @param requestConsumer a callback to handle the actual indexing of the log message. */ public DeprecationIndexingAppender(String name, Filter filter, Layout layout, Consumer requestConsumer) { @@ -56,6 +61,13 @@ public DeprecationIndexingAppender(String name, Filter filter, Layout la */ @Override public void append(LogEvent event) { + logger.trace( + () -> Strings.format( + "Received deprecation log event. Appender is %s. message = %s", + isEnabled ? "enabled" : "disabled", + event.getMessage().getFormattedMessage() + ) + ); if (this.isEnabled == false) { return; } @@ -71,6 +83,7 @@ public void append(LogEvent event) { /** * Sets whether this appender is enabled or disabled. When disabled, the appender will * not perform indexing operations. + * * @param enabled the enabled status of the appender. 
*/ public void setEnabled(boolean enabled) { diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationIndexingComponent.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationIndexingComponent.java index 6a59a6832c91f..29041b0c58434 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationIndexingComponent.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationIndexingComponent.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.logging.ECSJsonLayout; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.logging.RateLimitingFilter; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.xpack.core.ClientHelper; @@ -46,6 +47,13 @@ * It also starts and stops the appender */ public class DeprecationIndexingComponent extends AbstractLifecycleComponent implements ClusterStateListener { + + public static final Setting<TimeValue> DEPRECATION_INDEXING_FLUSH_INTERVAL = Setting.timeSetting( + "cluster.deprecation_indexing.flush_interval", + TimeValue.timeValueSeconds(5), + Setting.Property.NodeScope + ); + private static final Logger logger = LogManager.getLogger(DeprecationIndexingComponent.class); private final DeprecationIndexingAppender appender; @@ -190,6 +198,7 @@ public void enableDeprecationLogIndexing(boolean newEnabled) { * @return an initialised bulk processor */ private BulkProcessor2 getBulkProcessor(Client client, Settings settings) { + TimeValue flushInterval = DEPRECATION_INDEXING_FLUSH_INTERVAL.get(settings); BulkProcessor2.Listener listener = new DeprecationBulkListener(); return BulkProcessor2.builder((bulkRequest, actionListener) -> { /* @@ -198,13 +207,16 @@ private BulkProcessor2 getBulkProcessor(Client client, Settings settings) { * in-flight-bytes limit has been exceeded. This means that we don't have to worry about bounding pendingRequestsBuffer. */ if (flushEnabled.get()) { + logger.trace("Flush is enabled, sending a bulk request"); client.bulk(bulkRequest, actionListener); flushBuffer(); // just in case something was missed after the first flush } else { + logger.trace("Flush is disabled, scheduling a bulk request"); + // this is an unbounded queue, so the entry will always be accepted pendingRequestsBuffer.offer(() -> client.bulk(bulkRequest, actionListener)); } - }, listener, client.threadPool()).setMaxNumberOfRetries(3).setFlushInterval(TimeValue.timeValueSeconds(5)).build(); + }, listener, client.threadPool()).setMaxNumberOfRetries(3).setFlushInterval(flushInterval).build(); } private static class DeprecationBulkListener implements BulkProcessor2.Listener { From 6b50b6ddf9c0f0401dc97781eaf5e167e2e0f543 Mon Sep 17 00:00:00 2001 From: Moritz Mack Date: Wed, 21 Feb 2024 17:45:51 +0100 Subject: [PATCH 41/49] Block updates to log level for restricted loggers if less specific than INFO (#105020) To prevent leaking sensitive information such as credentials and keys in logs, this commit prevents configuring some restricted loggers (currently `org.apache.http` and `com.amazonaws.request`) at high verbosity unless the NetworkTraceFlag (`es.insecure_network_trace_enabled`) is enabled.
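As a rough sketch of the enforcement idea (illustrative only: the method name `checkRestricted` and the Map-based input are made up for this example, while the authoritative implementation is Loggers#checkRestrictedLoggers in the diff below), a `logger.*` setting is rejected when it parses to a log4j level more verbose than INFO and targets a restricted logger or one of its descendants:

    // Sketch only, mirroring the checkRestrictedLoggers logic added below.
    // (imports assumed: java.util.*, org.apache.logging.log4j.Level)
    // In log4j2, DEBUG/TRACE/ALL have a larger intLevel() than INFO.
    static List<String> checkRestricted(Map<String, String> loggerSettings, List<String> restricted) {
        List<String> errors = new ArrayList<>();
        for (Map.Entry<String, String> entry : loggerSettings.entrySet()) {
            Level level = Level.toLevel(entry.getValue(), null);
            if (level == null || level.intLevel() <= Level.INFO.intLevel()) {
                continue; // unparsable, or not more verbose than INFO: always permitted
            }
            String logger = entry.getKey();
            if (restricted.stream().anyMatch(r -> logger.equals(r) || logger.startsWith(r + "."))) {
                errors.add("Level [" + level + "] is not permitted for logger [" + logger + "]");
            }
        }
        return errors;
    }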
--- docs/reference/setup/logging-config.asciidoc | 6 +- .../snapshot-restore/repository-s3.asciidoc | 15 ++- .../logging/EvilLoggerConfigurationTests.java | 9 +- qa/restricted-loggers/build.gradle | 18 +++ .../common/logging/LoggersTests.java | 110 +++++++++++++++++ server/build.gradle | 2 +- .../ClusterUpdateSettingsRequest.java | 8 ++ .../elasticsearch/common/logging/Loggers.java | 112 ++++++++++++++++-- .../common/logging/LoggersTests.java | 74 ++++++++++++ x-pack/plugin/security/build.gradle | 4 + .../oidc/OpenIdConnectAuthenticatorTests.java | 1 + .../testkit/S3SnapshotRepoTestKitIT.java | 2 + 12 files changed, 343 insertions(+), 18 deletions(-) create mode 100644 qa/restricted-loggers/build.gradle create mode 100644 qa/restricted-loggers/src/test/java/org/elasticsearch/common/logging/LoggersTests.java diff --git a/docs/reference/setup/logging-config.asciidoc b/docs/reference/setup/logging-config.asciidoc index 0ce2b8f1bfb59..69fa086d67673 100644 --- a/docs/reference/setup/logging-config.asciidoc +++ b/docs/reference/setup/logging-config.asciidoc @@ -150,7 +150,9 @@ update settings API>> to change the related logger's log level. Each logger accepts Log4j 2's built-in log levels, from least to most verbose: `OFF`, `FATAL`, `ERROR`, `WARN`, `INFO`, `DEBUG`, and `TRACE`. The default log level is `INFO`. Messages logged at higher verbosity levels (`DEBUG` and `TRACE`) are -only intended for expert use. +only intended for expert use. To prevent leaking sensitive information in logs, +{es} will reject setting certain loggers to higher verbosity levels unless +<> is enabled. [source,console] ---- @@ -227,7 +229,7 @@ to `OFF` in `log4j2.properties` : ---- logger.deprecation.level = OFF ---- -Alternatively, you can change the logging level dynamically: +Alternatively, you can change the logging level dynamically: [source,console] ---- diff --git a/docs/reference/snapshot-restore/repository-s3.asciidoc b/docs/reference/snapshot-restore/repository-s3.asciidoc index b01f7322f9834..0c79793ee6c5a 100644 --- a/docs/reference/snapshot-restore/repository-s3.asciidoc +++ b/docs/reference/snapshot-restore/repository-s3.asciidoc @@ -564,7 +564,15 @@ is usually simplest to collect these logs and provide them to the supplier of your storage system for further analysis. If the incompatibility is not clear from the logs emitted by the storage system, configure {es} to log every request it makes to the S3 API by <> of the `com.amazonaws.request` logger to `DEBUG`: +logging level>> of the `com.amazonaws.request` logger to `DEBUG`. + +To prevent leaking sensitive information such as credentials and keys in logs, +{es} rejects configuring this logger at high verbosity unless +<> is enabled. +To do so, you must explicitly enable it on each node by setting the system +property `es.insecure_network_trace_enabled` to `true`. + +Once enabled, you can configure the `com.amazonaws.request` logger: [source,console] ---- @@ -585,8 +593,9 @@ https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/java-dg-logging.html documentation for further information, including details about other loggers that can be used to obtain even more verbose logs. When you have finished collecting the logs needed by your supplier, set the logger settings back to -`null` to return to the default logging configuration. See <> -and <> for more information. +`null` to return to the default logging configuration and disable insecure network +trace logging again. See <> and <> for +more information. 
[[repository-s3-linearizable-registers]] ==== Linearizable register implementation diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java index c0b52c80d89a9..db531026dbad5 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java @@ -26,6 +26,7 @@ import java.util.Map; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.notNullValue; @@ -144,7 +145,13 @@ public void testLoggingLevelsFromSettings() throws IOException, UserException { final LoggerContext ctx = (LoggerContext) LogManager.getContext(false); final Configuration config = ctx.getConfiguration(); final Map<String, LoggerConfig> loggerConfigs = config.getLoggers(); - assertThat(loggerConfigs.size(), equalTo(5)); + + if (rootLevel.isMoreSpecificThan(Level.INFO)) { + assertThat(loggerConfigs.size(), equalTo(5)); + } else { + // below INFO restricted loggers will be set in addition + assertThat(loggerConfigs.size(), greaterThan(5)); + } assertThat(loggerConfigs, hasKey("")); assertThat(loggerConfigs.get("").getLevel(), equalTo(rootLevel)); assertThat(loggerConfigs, hasKey("foo")); diff --git a/qa/restricted-loggers/build.gradle b/qa/restricted-loggers/build.gradle new file mode 100644 index 0000000000000..f08f886d52888 --- /dev/null +++ b/qa/restricted-loggers/build.gradle @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +apply plugin: 'elasticsearch.standalone-test' + +dependencies { + testImplementation project(":test:framework") +} + +tasks.named("test").configure { + // do not enable TRACE_ENABLED + systemProperties.remove('es.insecure_network_trace_enabled') +} diff --git a/qa/restricted-loggers/src/test/java/org/elasticsearch/common/logging/LoggersTests.java b/qa/restricted-loggers/src/test/java/org/elasticsearch/common/logging/LoggersTests.java new file mode 100644 index 0000000000000..d8229b2401290 --- /dev/null +++ b/qa/restricted-loggers/src/test/java/org/elasticsearch/common/logging/LoggersTests.java @@ -0,0 +1,110 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1.
+ */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.common.logging.Loggers.checkRestrictedLoggers; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; + +public class LoggersTests extends ESTestCase { + + public void testClusterUpdateSettingsRequestValidationForLoggers() { + assertThat(Loggers.RESTRICTED_LOGGERS, hasSize(greaterThan(0))); + + ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest(); + for (String logger : Loggers.RESTRICTED_LOGGERS) { + var validation = request.persistentSettings(Map.of("logger." + logger, org.elasticsearch.logging.Level.DEBUG)).validate(); + assertNotNull(validation); + assertThat(validation.validationErrors(), contains("Level [DEBUG] is not permitted for logger [" + logger + "]")); + // INFO is permitted + assertNull(request.persistentSettings(Map.of("logger." + logger, org.elasticsearch.logging.Level.INFO)).validate()); + } + } + + public void testCheckRestrictedLoggers() { + assertThat(Loggers.RESTRICTED_LOGGERS, hasSize(greaterThan(0))); + + Settings settings; + for (String restricted : Loggers.RESTRICTED_LOGGERS) { + for (String suffix : List.of("", ".xyz")) { + String logger = restricted + suffix; + for (Level level : List.of(Level.ALL, Level.TRACE, Level.DEBUG)) { + settings = Settings.builder().put("logger." + logger, level).build(); + List<String> errors = checkRestrictedLoggers(settings); + assertThat(errors, contains("Level [" + level + "] is not permitted for logger [" + logger + "]")); + } + for (Level level : List.of(Level.ERROR, Level.WARN, Level.INFO)) { + settings = Settings.builder().put("logger." + logger, level).build(); + assertThat(checkRestrictedLoggers(settings), hasSize(0)); + } + + settings = Settings.builder().put("logger." + logger, "INVALID").build(); + assertThat(checkRestrictedLoggers(settings), hasSize(0)); + + settings = Settings.builder().put("logger." + logger, (String) null).build(); + assertThat(checkRestrictedLoggers(settings), hasSize(0)); + } + } + } + + public void testSetLevelWithRestrictions() { + assertThat(Loggers.RESTRICTED_LOGGERS, hasSize(greaterThan(0))); + + for (String restricted : Loggers.RESTRICTED_LOGGERS) { + + // 'org.apache.http' is an example of a restricted logger, + // a restricted component logger would be `org.apache.http.client.HttpClient` for instance, + // and the parent logger is `org.apache`.
+ Logger restrictedLogger = LogManager.getLogger(restricted); + Logger restrictedComponent = LogManager.getLogger(restricted + ".component"); + Logger parentLogger = LogManager.getLogger(restricted.substring(0, restricted.lastIndexOf('.'))); + + Loggers.setLevel(restrictedLogger, Level.INFO); + assertHasINFO(restrictedLogger, restrictedComponent); + + for (Logger log : List.of(restrictedComponent, restrictedLogger)) { + // DEBUG is rejected due to restriction + Loggers.setLevel(log, Level.DEBUG); + assertHasINFO(restrictedComponent, restrictedLogger); + } + + // OK for parent `org.apache`, but restriction is enforced for restricted descendants + Loggers.setLevel(parentLogger, Level.DEBUG); + assertEquals(Level.DEBUG, parentLogger.getLevel()); + assertHasINFO(restrictedComponent, restrictedLogger); + + // Inheriting DEBUG of parent `org.apache` is rejected + Loggers.setLevel(restrictedLogger, (Level) null); + assertHasINFO(restrictedComponent, restrictedLogger); + + // DEBUG of root logger isn't propagated to restricted loggers + Loggers.setLevel(LogManager.getRootLogger(), Level.DEBUG); + assertEquals(Level.DEBUG, LogManager.getRootLogger().getLevel()); + assertHasINFO(restrictedComponent, restrictedLogger); + } + } + + private static void assertHasINFO(Logger... loggers) { + for (Logger log : loggers) { + assertThat("Unexpected log level for [" + log.getName() + "]", log.getLevel(), is(Level.INFO)); + } + } +} diff --git a/server/build.gradle b/server/build.gradle index 9ae223de1748e..8c5c1735c13c8 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -152,7 +152,7 @@ if (BuildParams.isSnapshotBuild() == false) { } tasks.named("test").configure { - systemProperty 'es.insecure_network_trace_enabled', 'true' + systemProperty 'es.insecure_network_trace_enabled', 'true' } tasks.named("thirdPartyAudit").configure { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java index 23348716ffcca..5b49a41ed9476 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; @@ -62,6 +63,13 @@ public ActionRequestValidationException validate() { if (transientSettings.isEmpty() && persistentSettings.isEmpty()) { validationException = addValidationError("no settings to update", validationException); } + // for bwc we have to reject logger settings on the REST level instead of using a validator + for (String error : Loggers.checkRestrictedLoggers(transientSettings)) { + validationException = addValidationError(error, validationException); + } + for (String error : Loggers.checkRestrictedLoggers(persistentSettings)) { + validationException = addValidationError(error, validationException); + } return validationException; } diff --git a/server/src/main/java/org/elasticsearch/common/logging/Loggers.java b/server/src/main/java/org/elasticsearch/common/logging/Loggers.java index 
bf0f7c49c80fb..23511fcbe0f72 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/Loggers.java +++ b/server/src/main/java/org/elasticsearch/common/logging/Loggers.java @@ -16,11 +16,17 @@ import org.apache.logging.log4j.core.config.Configuration; import org.apache.logging.log4j.core.config.Configurator; import org.apache.logging.log4j.core.config.LoggerConfig; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.transport.NetworkTraceFlag; +import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.stream.Stream; @@ -29,8 +35,18 @@ */ public class Loggers { + private Loggers() {}; + public static final String SPACE = " "; + /** + * Restricted loggers can't be set to a level less specific than INFO. + * For some loggers this might be permitted if {@link NetworkTraceFlag#TRACE_ENABLED} is enabled. + */ + static final List<String> RESTRICTED_LOGGERS = NetworkTraceFlag.TRACE_ENABLED + ? Collections.emptyList() + : List.of("org.apache.http", "com.amazonaws.request"); + public static final Setting<Level> LOG_DEFAULT_LEVEL_SETTING = new Setting<>( "logger.level", Level.INFO.name(), @@ -42,6 +58,30 @@ public class Loggers { (key) -> new Setting<>(key, Level.INFO.name(), Level::valueOf, Setting.Property.Dynamic, Setting.Property.NodeScope) ); + public static List<String> checkRestrictedLoggers(Settings settings) { + return checkRestrictedLoggers(settings, RESTRICTED_LOGGERS); + } + + // visible for testing only + static List<String> checkRestrictedLoggers(Settings settings, List<String> restrictions) { + List<String> errors = null; + for (String key : settings.keySet()) { + if (LOG_LEVEL_SETTING.match(key)) { + Level level = Level.toLevel(settings.get(key), null); + if (level != null) { + String logger = key.substring("logger.".length()); + if (level.intLevel() > Level.INFO.intLevel() && restrictions.stream().anyMatch(r -> isSameOrDescendantOf(logger, r))) { + if (errors == null) { + errors = new ArrayList<>(2); + } + errors.add(Strings.format("Level [%s] is not permitted for logger [%s]", level, logger)); + } + } + } + } + return errors == null ? Collections.emptyList() : errors; + } + public static Logger getLogger(Class<?> clazz, ShardId shardId, String... prefixes) { return getLogger( clazz, @@ -100,33 +140,83 @@ private static String formatPrefix(String... prefixes) { * level. */ public static void setLevel(Logger logger, String level) { - final Level l; - if (level == null) { - l = null; - } else { - l = Level.valueOf(level); - } - setLevel(logger, l); + setLevel(logger, level == null ? null : Level.valueOf(level), RESTRICTED_LOGGERS); } + /** + * Set the level of the logger. If the new level is null, the logger will inherit it's level from its nearest ancestor with a non-null + * level. + */ public static void setLevel(Logger logger, Level level) { - if (LogManager.ROOT_LOGGER_NAME.equals(logger.getName()) == false) { - Configurator.setLevel(logger.getName(), level); - } else { + setLevel(logger, level, RESTRICTED_LOGGERS); + } + + // visible for testing only + static void setLevel(Logger logger, Level level, List<String> restrictions) { + // If configuring an ancestor / root, the restriction has to be explicitly set afterward.
+ boolean setRestriction = false; + + if (isRootLogger(logger.getName())) { + assert level != null : "Log level is required when configuring the root logger"; final LoggerContext ctx = LoggerContext.getContext(false); final Configuration config = ctx.getConfiguration(); final LoggerConfig loggerConfig = config.getLoggerConfig(logger.getName()); loggerConfig.setLevel(level); ctx.updateLoggers(); + setRestriction = level.intLevel() > Level.INFO.intLevel(); + } else { + Level actual = level != null ? level : parentLoggerLevel(logger); + if (actual.intLevel() > Level.INFO.intLevel()) { + for (String restricted : restrictions) { + if (isSameOrDescendantOf(logger.getName(), restricted)) { + LogManager.getLogger(Loggers.class) + .warn("Level [{}/{}] not permitted for logger [{}], skipping.", level, actual, logger.getName()); + return; + } + if (isDescendantOf(restricted, logger.getName())) { + setRestriction = true; + } + } + } + Configurator.setLevel(logger.getName(), level); } // we have to descend the hierarchy final LoggerContext ctx = LoggerContext.getContext(false); for (final LoggerConfig loggerConfig : ctx.getConfiguration().getLoggers().values()) { - if (LogManager.ROOT_LOGGER_NAME.equals(logger.getName()) || loggerConfig.getName().startsWith(logger.getName() + ".")) { + if (isDescendantOf(loggerConfig.getName(), logger.getName())) { Configurator.setLevel(loggerConfig.getName(), level); } } + + if (setRestriction) { + // if necessary, after setting the level of an ancestor, enforce restriction again + for (String restricted : restrictions) { + if (isDescendantOf(restricted, logger.getName())) { + setLevel(LogManager.getLogger(restricted), Level.INFO, Collections.emptyList()); + } + } + } + } + + private static Level parentLoggerLevel(Logger logger) { + int idx = logger.getName().lastIndexOf('.'); + if (idx != -1) { + return LogManager.getLogger(logger.getName().substring(0, idx)).getLevel(); + } + return LogManager.getRootLogger().getLevel(); + } + + private static boolean isRootLogger(String name) { + return LogManager.ROOT_LOGGER_NAME.equals(name); + } + + private static boolean isDescendantOf(String candidate, String ancestor) { + return isRootLogger(ancestor) || candidate.startsWith(ancestor + "."); + } + + private static boolean isSameOrDescendantOf(String candidate, String ancestor) { + return candidate.equals(ancestor) || isDescendantOf(candidate, ancestor); } public static void addAppender(final Logger logger, final Appender appender) { diff --git a/server/src/test/java/org/elasticsearch/common/logging/LoggersTests.java b/server/src/test/java/org/elasticsearch/common/logging/LoggersTests.java index 8e8f0c75fa945..77603aaae068d 100644 --- a/server/src/test/java/org/elasticsearch/common/logging/LoggersTests.java +++ b/server/src/test/java/org/elasticsearch/common/logging/LoggersTests.java @@ -11,18 +11,86 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.net.UnknownHostException; +import java.util.List; import static java.util.Arrays.asList; +import static org.elasticsearch.common.logging.Loggers.checkRestrictedLoggers; import static org.elasticsearch.core.Strings.format; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; import static 
org.hamcrest.Matchers.nullValue; public class LoggersTests extends ESTestCase { + // Loggers.RESTRICTED_LOGGERS may be disabled by NetworkTraceFlag.TRACE_ENABLED, use internal API for testing + private List<String> restrictedLoggers = List.of("org.apache.http", "com.amazonaws.request"); + + public void testCheckRestrictedLoggers() { + Settings settings; + for (String restricted : restrictedLoggers) { + for (String suffix : List.of("", ".xyz")) { + String logger = restricted + suffix; + for (Level level : List.of(Level.ALL, Level.TRACE, Level.DEBUG)) { + settings = Settings.builder().put("logger." + logger, level).build(); + List<String> errors = checkRestrictedLoggers(settings, restrictedLoggers); + assertThat(errors, contains("Level [" + level + "] is not permitted for logger [" + logger + "]")); + } + for (Level level : List.of(Level.ERROR, Level.WARN, Level.INFO)) { + settings = Settings.builder().put("logger." + logger, level).build(); + assertThat(checkRestrictedLoggers(settings, restrictedLoggers), hasSize(0)); + } + + settings = Settings.builder().put("logger." + logger, "INVALID").build(); + assertThat(checkRestrictedLoggers(settings, restrictedLoggers), hasSize(0)); + + settings = Settings.builder().put("logger." + logger, (String) null).build(); + assertThat(checkRestrictedLoggers(settings, restrictedLoggers), hasSize(0)); + } + } + } + + public void testSetLevelWithRestrictions() { + for (String restricted : restrictedLoggers) { + + // 'org.apache.http' is an example of a restricted logger, + // a restricted component logger would be `org.apache.http.client.HttpClient` for instance, + // and the parent logger is `org.apache`. + Logger restrictedLogger = LogManager.getLogger(restricted); + Logger restrictedComponent = LogManager.getLogger(restricted + ".component"); + Logger parentLogger = LogManager.getLogger(restricted.substring(0, restricted.lastIndexOf('.'))); + + Loggers.setLevel(restrictedLogger, Level.INFO, restrictedLoggers); + assertHasINFO(restrictedLogger, restrictedComponent); + + for (Logger log : List.of(restrictedComponent, restrictedLogger)) { + // DEBUG is rejected due to restriction + Loggers.setLevel(log, Level.DEBUG, restrictedLoggers); + assertHasINFO(restrictedComponent, restrictedLogger); + } + + // OK for parent `org.apache`, but restriction is enforced for restricted descendants + Loggers.setLevel(parentLogger, Level.DEBUG, restrictedLoggers); + assertEquals(Level.DEBUG, parentLogger.getLevel()); + assertHasINFO(restrictedComponent, restrictedLogger); + + // Inheriting DEBUG of parent `org.apache` is rejected + Loggers.setLevel(restrictedLogger, null, restrictedLoggers); + assertHasINFO(restrictedComponent, restrictedLogger); + + // DEBUG of root logger isn't propagated to restricted loggers + Loggers.setLevel(LogManager.getRootLogger(), Level.DEBUG, restrictedLoggers); + assertEquals(Level.DEBUG, LogManager.getRootLogger().getLevel()); + assertHasINFO(restrictedComponent, restrictedLogger); + } + } + public void testStringSupplierAndFormatting() throws Exception { // adding a random id to allow test to run multiple times. See AbstractConfiguration#addAppender final MockAppender appender = new MockAppender("trace_appender" + randomInt()); @@ -69,4 +137,10 @@ private Throwable randomException() { new IllegalArgumentException("index must be between 10 and 100") ); } + + private static void assertHasINFO(Logger...
loggers) { + for (Logger log : loggers) { + assertThat("Unexpected log level for [" + log.getName() + "]", log.getLevel(), is(Level.INFO)); + } + } } diff --git a/x-pack/plugin/security/build.gradle b/x-pack/plugin/security/build.gradle index 08496060f431b..07308d5d29a9a 100644 --- a/x-pack/plugin/security/build.gradle +++ b/x-pack/plugin/security/build.gradle @@ -153,6 +153,10 @@ dependencies { testImplementation('org.apache.directory.mavibot:mavibot:1.0.0-M8') } +tasks.named("test").configure { + systemProperty 'es.insecure_network_trace_enabled', 'true' +} + tasks.named("processInternalClusterTestResources").configure { from(project(xpackModule('core')).file('src/main/config')) from(project(xpackModule('core')).file('src/test/resources')) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java index 057a55ea4708d..c4e4d58d27178 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java @@ -1065,6 +1065,7 @@ public void testHttpClientConnectionTtlBehaviour() throws URISyntaxException, Il final MockLogAppender appender = new MockLogAppender(); appender.start(); Loggers.addAppender(logger, appender); + // Note: Setting an org.apache.http logger to DEBUG requires es.insecure_network_trace_enabled=true Loggers.setLevel(logger, Level.DEBUG); try { appender.addExpectation( diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/S3SnapshotRepoTestKitIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/S3SnapshotRepoTestKitIT.java index dcdfc24406a2b..9a57b07d74a79 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/S3SnapshotRepoTestKitIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/S3SnapshotRepoTestKitIT.java @@ -36,6 +36,8 @@ public class S3SnapshotRepoTestKitIT extends AbstractSnapshotRepoTestKitRestTest .setting("logger.org.elasticsearch.repositories.blobstore.testkit", "TRACE") .setting("logger.com.amazonaws.request", "DEBUG") .setting("logger.org.apache.http.wire", "DEBUG") + // Necessary to permit setting the above two restricted loggers to DEBUG + .jvmArg("-Des.insecure_network_trace_enabled=true") .build(); @ClassRule From 80dae50b7fc9ed372d2d844bda849e16ee7803cd Mon Sep 17 00:00:00 2001 From: Moritz Mack Date: Wed, 21 Feb 2024 18:16:07 +0100 Subject: [PATCH 42/49] Optimized readString for StreamInputs operating on arrays to minimize copying (#104692) This commit implements a readString operating directly on the byte array for BytesReferenceStreamInput, ByteArrayStreamInput and ByteBufferStreamInput to avoid unnecessary allocations. Unfortunately writeString ignores surrogate pairs and encodes these naively character by character, which isn't compatible with the UTF-8 encoding of surrogate pairs (a 4-byte sequence). In case such a surrogate pair is detected or if the byte array doesn't contain enough bytes to read the entire string, this will fall back to the default implementation of readString. 
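For illustration, a minimal sketch of the array fast path (the helper name `tryReadAscii` is made up for this example; the real logic, including the multi-byte UTF-8 decoding and the surrogate-pair check, is tryReadStringFromBytes in the diff below):

    // Sketch: build the String straight from the backing array when every
    // requested char is a single ASCII byte; null signals "take the slow path".
    static String tryReadAscii(byte[] bytes, int start, int limit, int chars) {
        int end = start + chars;
        if (limit < end) {
            return null; // not enough buffered bytes for one byte per char
        }
        for (int pos = start; pos < end; pos++) {
            if ((bytes[pos] & 0x80) != 0) {
                return null; // start of a multi-byte UTF-8 sequence
            }
        }
        // The top bit is never set, so ISO-8859-1 decodes identically to
        // US-ASCII here while skipping a second validation scan.
        return new String(bytes, start, chars, java.nio.charset.StandardCharsets.ISO_8859_1);
    }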
--- .../bytes/BytesReferenceStreamInput.java | 18 +++ .../io/stream/ByteArrayStreamInput.java | 24 ++++ .../io/stream/ByteBufferStreamInput.java | 18 +++ .../common/io/stream/StreamInput.java | 60 +++++++++ .../common/io/stream/StreamInputTests.java | 125 ++++++++++++++++++ 5 files changed, 245 insertions(+) create mode 100644 server/src/test/java/org/elasticsearch/common/io/stream/StreamInputTests.java diff --git a/server/src/main/java/org/elasticsearch/common/bytes/BytesReferenceStreamInput.java b/server/src/main/java/org/elasticsearch/common/bytes/BytesReferenceStreamInput.java index d42e1874b2d58..22bed3ea0b1e9 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/BytesReferenceStreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/BytesReferenceStreamInput.java @@ -84,6 +84,24 @@ public long readLong() throws IOException { } } + @Override + public String readString() throws IOException { + final int chars = readArraySize(); + if (slice.hasArray()) { + // attempt reading bytes directly into a string to minimize copying + final String string = tryReadStringFromBytes( + slice.array(), + slice.position() + slice.arrayOffset(), + slice.limit() + slice.arrayOffset(), + chars + ); + if (string != null) { + return string; + } + } + return doReadString(chars); + } + @Override public int readVInt() throws IOException { if (slice.remaining() >= 5) { diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java index 478ae231e16ff..52eee5af3f6f5 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java @@ -31,6 +31,16 @@ public ByteArrayStreamInput(byte[] bytes) { reset(bytes); } + @Override + public String readString() throws IOException { + final int chars = readArraySize(); + String string = tryReadStringFromBytes(bytes, pos, limit, chars); + if (string != null) { + return string; + } + return doReadString(chars); + } + @Override public int read() throws IOException { if (limit - pos <= 0) { @@ -65,6 +75,20 @@ public void skipBytes(long count) { pos += (int) count; } + @Override + public long skip(long n) throws IOException { + if (n <= 0L) { + return 0L; + } + int available = available(); + if (n < available) { + pos += (int) n; + return n; + } + pos = limit; + return available; + } + @Override public void close() { // No-op diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/ByteBufferStreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/ByteBufferStreamInput.java index f4ae17175fa2d..41d129406551f 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/ByteBufferStreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/ByteBufferStreamInput.java @@ -120,6 +120,24 @@ public static long readVLong(ByteBuffer buffer) throws IOException { return i; } + @Override + public String readString() throws IOException { + final int chars = readArraySize(); + if (buffer.hasArray()) { + // attempt reading bytes directly into a string to minimize copying + final String string = tryReadStringFromBytes( + buffer.array(), + buffer.position() + buffer.arrayOffset(), + buffer.limit() + buffer.arrayOffset(), + chars + ); + if (string != null) { + return string; + } + } + return doReadString(chars); + } + @Override public int read() throws IOException { if 
(buffer.hasRemaining() == false) { diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index 83aa7fb096693..7281616a8d25f 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -58,6 +58,9 @@ import java.util.function.Function; import java.util.function.IntFunction; +import static java.nio.charset.StandardCharsets.ISO_8859_1; +import static java.nio.charset.StandardCharsets.UTF_8; + /** * A stream from this node to another node. Technically, it can also be streamed to a byte array but that is mostly for testing. * @@ -445,7 +448,10 @@ private char[] ensureLargeSpare(int charCount) { public String readString() throws IOException { final int charCount = readArraySize(); + return doReadString(charCount); + } + protected String doReadString(final int charCount) throws IOException { final char[] charBuffer = charCount > SMALL_STRING_LIMIT ? ensureLargeSpare(charCount) : smallSpare.get(); int charsOffset = 0; @@ -531,6 +537,60 @@ public String readString() throws IOException { return new String(charBuffer, 0, charCount); } + protected String tryReadStringFromBytes(final byte[] bytes, final int start, final int limit, final int chars) throws IOException { + final int end = start + chars; + if (limit < end) { + return null; // not enough bytes to read chars + } + for (int pos = start; pos < end; pos++) { + if ((bytes[pos] & 0x80) != 0) { + // not an ASCII char, fall back to reading a UTF-8 string + return tryReadUtf8StringFromBytes(bytes, start, limit, pos, end - pos); + } + } + skip(chars); // skip the number of chars (equals bytes) on the stream input + // We already validated the top bit is never set (so there are no negatives). + // Using ISO_8859_1 over US_ASCII saves another scan to check just that and is equivalent otherwise. 
+ return new String(bytes, start, chars, ISO_8859_1); + } + + private String tryReadUtf8StringFromBytes(final byte[] bytes, final int start, final int limit, int pos, int chars) throws IOException { + while (pos < limit && chars-- > 0) { + int c = bytes[pos] & 0xff; + switch (c >> 4) { + case 0, 1, 2, 3, 4, 5, 6, 7 -> pos++; + case 12, 13 -> pos += 2; + case 14 -> { + // surrogate pairs are incorrectly encoded, these can't be directly read from bytes + if (maybeHighSurrogate(bytes, pos, limit)) return null; + pos += 3; + } + default -> throwOnBrokenChar(c); + } + } + + if (chars == 0 && pos <= limit) { + pos = pos - start; + skip(pos); // skip the number of bytes relative to start on the stream input + return new String(bytes, start, pos, UTF_8); + } + + // not enough bytes to read all chars from array + return null; + } + + private static boolean maybeHighSurrogate(final byte[] bytes, final int pos, final int limit) { + if (pos + 2 >= limit) { + return true; // beyond limit, we can't tell + } + int c1 = bytes[pos] & 0xff; + int c2 = bytes[pos + 1] & 0xff; + int c3 = bytes[pos + 2] & 0xff; + int surrogateCandidate = ((c1 & 0x0F) << 12) | ((c2 & 0x3F) << 6) | (c3 & 0x3F); + // check if in the high surrogate range + return surrogateCandidate >= 0xD800 && surrogateCandidate <= 0xDBFF; + } + private static void throwOnBrokenChar(int c) throws IOException { throw new IOException("Invalid string; unexpected character: " + c + " hex: " + Integer.toHexString(c)); } diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/StreamInputTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/StreamInputTests.java new file mode 100644 index 0000000000000..645461778f637 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/io/stream/StreamInputTests.java @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.common.io.stream; + +import org.elasticsearch.test.ESTestCase; +import org.mockito.Mockito; + +import java.io.IOException; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.clearInvocations; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; + +// Note: read* methods are tested for concrete implementations, this just covers helpers to read strings +public class StreamInputTests extends ESTestCase { + + private StreamInput in = Mockito.spy(StreamInput.class); + byte[] bytes = "0123456789".getBytes(UTF_8); + + public void testCalculateByteLengthOfAscii() throws IOException { + // not enough bytes to read all chars + assertNull(in.tryReadStringFromBytes(bytes, 1, 10, 10)); + assertNull(in.tryReadStringFromBytes(bytes, 0, 9, 10)); + verify(in, never()).skip(anyLong()); + + assertThat(in.tryReadStringFromBytes(bytes, 9, 10, 1), is("9")); + verify(in).skip(1); + clearInvocations(in); + + assertThat(in.tryReadStringFromBytes(bytes, 0, 10, 10), is("0123456789")); + verify(in).skip(10); + } + + public void testCalculateByteLengthOfNonAscii() throws IOException { + // copy a two bytes char into bytes + System.arraycopy("©".getBytes(UTF_8), 0, bytes, 0, 2); + + assertNull(in.tryReadStringFromBytes(bytes, 0, 1, 1)); + verify(in, never()).skip(anyLong()); + + assertThat(in.tryReadStringFromBytes(bytes, 0, 2, 1), is("©")); + verify(in).skip(2); + clearInvocations(in); + + assertThat(in.tryReadStringFromBytes(bytes, 0, 10, 9), is("©23456789")); + verify(in).skip(10); + clearInvocations(in); + + // copy a three bytes char into bytes + System.arraycopy("€".getBytes(UTF_8), 0, bytes, 0, 3); + + assertNull(in.tryReadStringFromBytes(bytes, 0, 2, 1)); + verify(in, never()).skip(anyLong()); + + assertThat(in.tryReadStringFromBytes(bytes, 0, 3, 1), is("€")); + verify(in).skip(3); + clearInvocations(in); + + assertThat(in.tryReadStringFromBytes(bytes, 0, 10, 8), is("€3456789")); + verify(in).skip(10); + clearInvocations(in); + + // not enough bytes to read all chars + assertNull(in.tryReadStringFromBytes(bytes, 0, 10, 9)); + verify(in, never()).skip(anyLong()); + } + + public void testCalculateByteLengthOfIncompleteNonAscii() throws IOException { + // copy first byte to the end of bytes, this way the string can't ever be read completely + System.arraycopy("©".getBytes(UTF_8), 0, bytes, 9, 1); + + assertThat(in.tryReadStringFromBytes(bytes, 8, 10, 1), is("8")); + verify(in).skip(1); + clearInvocations(in); + + assertNull(in.tryReadStringFromBytes(bytes, 9, 10, 1)); + verify(in, never()).skip(anyLong()); + + // copy first two bytes of a three bytes char into bytes (similar to above) + System.arraycopy("€".getBytes(UTF_8), 0, bytes, 8, 2); + + assertThat(in.tryReadStringFromBytes(bytes, 7, 10, 1), is("7")); + verify(in).skip(1); + clearInvocations(in); + + assertNull(in.tryReadStringFromBytes(bytes, 8, 10, 1)); + verify(in, never()).skip(anyLong()); + } + + public void testCalculateByteLengthOfSurrogate() throws IOException { + BytesStreamOutput bytesOut = new BytesStreamOutput(); + bytesOut.writeString("ab💩"); + bytes = bytesOut.bytes.array(); + + assertThat(bytes[0], is((byte) 4)); // 2+2 characters + assertThat(in.tryReadStringFromBytes(bytes, 1, bytes.length, 2), is("ab")); + verify(in).skip(2); + clearInvocations(in); + + // surrogates use a special encoding, their byte length differs to what new String 
expects + assertNull(in.tryReadStringFromBytes(bytes, 1, bytes.length, 4)); + assertNull(in.tryReadStringFromBytes(bytes, 3, bytes.length, 2)); + assertNull(in.tryReadStringFromBytes(bytes, 3, bytes.length, 1)); + verify(in, never()).skip(anyLong()); + + // set limit so tight that we cannot read the first 3 byte char + assertNull(in.tryReadStringFromBytes(bytes, 3, 5, 1)); + verify(in, never()).skip(anyLong()); + + // if using the UTF-8 encoding, the surrogate pair is encoded as 4 bytes (rather than 2x 3 bytes) + // this form of encoding isn't supported + System.arraycopy("💩".getBytes(UTF_8), 0, bytes, 0, 4); + assertThrows(IOException.class, () -> in.tryReadStringFromBytes(bytes, 0, bytes.length, 2)); + verify(in, never()).skip(anyLong()); + } +} From ce8402ff555cb159c8242df5bf17f6fdff1e288d Mon Sep 17 00:00:00 2001 From: David Roberts Date: Wed, 21 Feb 2024 17:27:29 +0000 Subject: [PATCH 43/49] [ML] Make regex more efficient (#105705) The regex that was used to detect document IDs left over from version 5.4 had a leading (.*) which can be very inefficient. It's not hard to refactor the test for version 5.4 doc IDs to use a more deterministic regex plus a simple scan for a single character. --- .../ml/job/process/autodetect/state/ModelState.java | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelState.java index de2f6d1fe7849..3c352b4b7dec7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelState.java @@ -21,7 +21,7 @@ public final class ModelState { */ public static final String TYPE = "model_state"; - private static final Pattern V_5_4_DOC_ID_REGEX = Pattern.compile("(.*)-\\d{10}#\\d+"); + private static final Pattern V_5_4_DOC_ID_SUFFIX_REGEX = Pattern.compile("^\\d{10}#\\d+$"); public static String documentId(String jobId, String snapshotId, int docNum) { return jobId + "_" + TYPE + "_" + snapshotId + "#" + docNum; @@ -43,9 +43,13 @@ public static String extractJobId(String docId) { * and ended with hash and an integer. */ private static String v54ExtractJobId(String docId) { - Matcher matcher = V_5_4_DOC_ID_REGEX.matcher(docId); + int potentialSuffixIndex = docId.lastIndexOf('-'); + if (potentialSuffixIndex <= 0 || potentialSuffixIndex >= docId.length() - 1) { + return null; + } + Matcher matcher = V_5_4_DOC_ID_SUFFIX_REGEX.matcher(docId.subSequence(potentialSuffixIndex + 1, docId.length())); if (matcher.matches()) { - return matcher.group(1); + return docId.substring(0, potentialSuffixIndex); } return null; } From a9f4b649ce348d1c1813301091df73c228f0b85b Mon Sep 17 00:00:00 2001 From: Moritz Mack Date: Wed, 21 Feb 2024 20:05:18 +0100 Subject: [PATCH 44/49] Delegate readString in FilterStreamInput to possibly use an optimized readString of the delegate. (#105712) This is necessary so that the optimized `readString` added in https://github.com/elastic/elasticsearch/pull/104692 is actually used when wrapped in a `FilterStreamInput`. 
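The pitfall here is the usual one with decorators over a class hierarchy: a method inherited from the shared base class runs against the wrapper itself, so it never reaches an optimized override on the wrapped object. A minimal standalone sketch of the problem and the fix (invented names, not the Elasticsearch classes):

    // Base provides a generic readString(); Optimized overrides it with a fast path.
    abstract class Base {
        String readString() { return "slow"; }
    }

    class Optimized extends Base {
        @Override
        String readString() { return "fast"; } // e.g. reads straight from a byte array
    }

    // A filter that forwards other calls but omits readString() silently inherits
    // Base's generic implementation, and the delegate's fast path is never used.
    class Filter extends Base {
        private final Base delegate;

        Filter(Base delegate) { this.delegate = delegate; }

        @Override
        String readString() { return delegate.readString(); } // the one-line fix
    }

    class Demo {
        public static void main(String[] args) {
            // "fast" with the override in place; "slow" without it
            System.out.println(new Filter(new Optimized()).readString());
        }
    }

Note that the second file in this patch deliberately does the opposite: the checksumming stream pins itself to the unoptimized path, presumably because it must observe every byte it consumes rather than let the fast path skip over them.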
--- .../elasticsearch/common/io/stream/FilterStreamInput.java | 5 +++++ .../index/translog/BufferedChecksumStreamInput.java | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java index 0e817e16c0b76..c0ef0e0abf39b 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java @@ -26,6 +26,11 @@ protected FilterStreamInput(StreamInput delegate) { this.delegate = delegate; } + @Override + public String readString() throws IOException { + return delegate.readString(); + } + @Override public byte readByte() throws IOException { return delegate.readByte(); diff --git a/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java b/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java index 6ff91a688c97c..6d1456040c8fa 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java +++ b/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java @@ -48,6 +48,11 @@ public long getChecksum() { return this.digest.getValue(); } + @Override + public String readString() throws IOException { + return doReadString(readArraySize()); // always use the unoptimized slow path + } + @Override public byte readByte() throws IOException { final byte b = delegate.readByte(); From b1fcedd7ae30ff232f419ccee234208eab1456cd Mon Sep 17 00:00:00 2001 From: Niels Bauman <33722607+nielsbauman@users.noreply.github.com> Date: Thu, 22 Feb 2024 09:41:11 +0100 Subject: [PATCH 45/49] Fix `uri_parts` processor behaviour for missing extensions (#105689) The `uri_parts` processor was behaving incorrectly for URIs that included a dot in the path but did not have an extension. Also includes YAML REST tests for the same. --- docs/changelog/105689.yaml | 6 +++ .../ingest/common/UriPartsProcessor.java | 13 +++-- .../ingest/common/UriPartsProcessorTests.java | 25 ++++++++++ .../test/ingest/320_uri_parts_processor.yml | 49 +++++++++++++++++++ 4 files changed, 90 insertions(+), 3 deletions(-) create mode 100644 docs/changelog/105689.yaml create mode 100644 modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/320_uri_parts_processor.yml diff --git a/docs/changelog/105689.yaml b/docs/changelog/105689.yaml new file mode 100644 index 0000000000000..e76281f1b2fc7 --- /dev/null +++ b/docs/changelog/105689.yaml @@ -0,0 +1,6 @@ +pr: 105689 +summary: Fix `uri_parts` processor behaviour for missing extensions +area: Ingest Node +type: bug +issues: + - 105612 diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/UriPartsProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/UriPartsProcessor.java index 66e6df5fde58d..c476c6a9d3b9d 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/UriPartsProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/UriPartsProcessor.java @@ -140,9 +140,16 @@ private static Map getUriParts(URI uri, URL fallbackUrl) { } if (path != null) { uriParts.put("path", path); - if (path.contains(".")) { - int periodIndex = path.lastIndexOf('.'); - uriParts.put("extension", periodIndex < path.length() ? 
path.substring(periodIndex + 1) : ""); + // To avoid any issues with extracting the extension from a path that contains a dot, we explicitly extract the extension + // from the last segment in the path. + var lastSegmentIndex = path.lastIndexOf('/'); + if (lastSegmentIndex >= 0) { + var lastSegment = path.substring(lastSegmentIndex); + int periodIndex = lastSegment.lastIndexOf('.'); + if (periodIndex >= 0) { + // Don't include the dot in the extension field. + uriParts.put("extension", lastSegment.substring(periodIndex + 1)); + } } } if (port != -1) { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UriPartsProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UriPartsProcessorTests.java index c7d3052eaa9f3..e7552d23d659a 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UriPartsProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UriPartsProcessorTests.java @@ -181,6 +181,31 @@ public void testUrlWithCharactersNotToleratedByUri() throws Exception { ); } + public void testDotPathWithoutExtension() throws Exception { + testUriParsing( + "https://www.google.com/path.withdot/filenamewithoutextension", + Map.of("scheme", "https", "domain", "www.google.com", "path", "/path.withdot/filenamewithoutextension") + ); + } + + public void testDotPathWithExtension() throws Exception { + testUriParsing( + "https://www.google.com/path.withdot/filenamewithextension.txt", + Map.of("scheme", "https", "domain", "www.google.com", "path", "/path.withdot/filenamewithextension.txt", "extension", "txt") + ); + } + + /** + * This test verifies that we return an empty extension instead of null if the URI ends with a period. This is probably + * not behaviour we necessarily want to keep forever, but this test ensures that we're conscious about changing that behaviour. 
+ */ + public void testEmptyExtension() throws Exception { + testUriParsing( + "https://www.google.com/foo/bar.", + Map.of("scheme", "https", "domain", "www.google.com", "path", "/foo/bar.", "extension", "") + ); + } + public void testRemoveIfSuccessfulDoesNotRemoveTargetField() throws Exception { String field = "field"; UriPartsProcessor processor = new UriPartsProcessor(null, null, field, field, true, false, false); diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/320_uri_parts_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/320_uri_parts_processor.yml new file mode 100644 index 0000000000000..53512a4a505f2 --- /dev/null +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/320_uri_parts_processor.yml @@ -0,0 +1,49 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "uri-parts-pipeline" + ignore: 404 + +--- +"Test URI parts Processor": + - do: + ingest.put_pipeline: + id: "uri-parts-pipeline" + body: > + { + "processors": [ + { + "uri_parts" : { + "field" : "my_uri" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: "1" + pipeline: "uri-parts-pipeline" + body: { + my_uri: "https://user:pw@testing.google.com:8080/foo/bar.txt?foo1=bar1&foo2=bar2#anchorVal" + } + + - do: + get: + index: test + id: "1" + - match: { _source.my_uri: "https://user:pw@testing.google.com:8080/foo/bar.txt?foo1=bar1&foo2=bar2#anchorVal" } + - match: { _source.url.original: "https://user:pw@testing.google.com:8080/foo/bar.txt?foo1=bar1&foo2=bar2#anchorVal" } + - match: { _source.url.scheme: "https" } + - match: { _source.url.domain: "testing.google.com" } + - match: { _source.url.fragment: "anchorVal" } + - match: { _source.url.path: "/foo/bar.txt" } + - match: { _source.url.port: 8080 } + - match: { _source.url.username: "user" } + - match: { _source.url.password: "pw" } + - match: { _source.url.user_info: "user:pw" } + - match: { _source.url.query: "foo1=bar1&foo2=bar2" } + - match: { _source.url.extension: "txt" } From 50c1dcb8c62d56077b612293e29ab80adb178e50 Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 22 Feb 2024 09:22:29 +0000 Subject: [PATCH 46/49] Small improvements to test-only `Netty4HttpClient` (#105694) - Today we flush every pipelined request, but really we should also be checking that we do the right thing if multiple requests all arrive in a single `read()` call. This commit randomly skips some of the intervening flushes to improve coverage in this area. - Today we call `shutdownGracefully` with the default timeout, which adds a couple of seconds to every single test even though there won't be any more tasks to execute. This commit specifies a zero timeout which saves a bunch of time in these tests. 
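The flushing change relies on the split between Netty's write() and flush(): write() only places a message in the channel's outbound buffer, while flush() hands the buffered bytes to the socket, so several unflushed writes can reach the peer in a single read() call. Restated as a generic, self-contained sketch (made-up names, not the test's actual Netty plumbing):

    import java.util.List;
    import java.util.Random;
    import java.util.function.Consumer;

    // Send items, flushing only sometimes, so the receiving side is occasionally
    // handed several pipelined messages in one read.
    final class RandomisedFlushSketch<T> {
        private final Random random = new Random();

        void sendAll(List<T> items, Consumer<T> write, Runnable flush) {
            boolean needsFinalFlush = false;
            for (T item : items) {
                write.accept(item);         // queue the item in the outbound buffer
                if (random.nextBoolean()) {
                    flush.run();            // usually: push to the wire straight away
                    needsFinalFlush = false;
                } else {
                    needsFinalFlush = true; // sometimes: leave it buffered to force coalescing
                }
            }
            if (needsFinalFlush) {
                flush.run();                // never strand the last item in the buffer
            }
        }
    }

The final conditional flush matters: without it, an unlucky run of coin flips would leave the tail of the request list sitting in the buffer and the test would hang waiting for responses that were never sent.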
--- .../http/netty4/Netty4HttpClient.java | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java index 56ba3ae1958f7..7ce962ff56b67 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.core.Tuple; import org.elasticsearch.tasks.Task; +import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.netty4.NettyAllocator; import java.io.Closeable; @@ -139,9 +140,20 @@ private synchronized List sendRequests(final SocketAddress rem channelFuture = clientBootstrap.connect(remoteAddress); channelFuture.sync(); + boolean needsFinalFlush = false; for (HttpRequest request : requests) { - channelFuture.channel().writeAndFlush(request); + if (ESTestCase.randomBoolean()) { + channelFuture.channel().writeAndFlush(request); + needsFinalFlush = false; + } else { + channelFuture.channel().write(request); + needsFinalFlush = true; + } + } + if (needsFinalFlush) { + channelFuture.channel().flush(); } + if (latch.await(30L, TimeUnit.SECONDS) == false) { fail("Failed to get all expected responses."); } @@ -157,7 +169,7 @@ private synchronized List sendRequests(final SocketAddress rem @Override public void close() { - clientBootstrap.config().group().shutdownGracefully().awaitUninterruptibly(); + clientBootstrap.config().group().shutdownGracefully(0L, 0L, TimeUnit.SECONDS).awaitUninterruptibly(); } /** From 09bdb16aa3b903a78655d141cf0540a0339165ea Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Thu, 22 Feb 2024 10:50:37 +0100 Subject: [PATCH 47/49] Support chunked bulk loading of larger data files in CSV tests (#105701) --- .../xpack/esql/CsvTestsDataLoader.java | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java index 302fda9b331e3..9763c362c9b4b 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java @@ -48,6 +48,7 @@ import static org.elasticsearch.xpack.esql.CsvTestUtils.multiValuesAwareCsvToStringArray; public class CsvTestsDataLoader { + private static final int BULK_DATA_SIZE = 100_000; private static final TestsDataset EMPLOYEES = new TestsDataset("employees", "mapping-default.json", "employees.csv"); private static final TestsDataset HOSTS = new TestsDataset("hosts", "mapping-hosts.json", "hosts.csv"); private static final TestsDataset APPS = new TestsDataset("apps", "mapping-apps.json", "apps.csv"); @@ -243,8 +244,6 @@ private static void loadCsvData( CheckedBiFunction p, Logger logger ) throws IOException { - // The indexName is optional for a bulk request, but we use it for routing in MultiClusterSpecIT. 
- Request request = new Request("POST", "/" + indexName + "/_bulk"); StringBuilder builder = new StringBuilder(); try (BufferedReader reader = org.elasticsearch.xpack.ql.TestUtils.reader(resource)) { String line; @@ -359,10 +358,22 @@ private static void loadCsvData( } } lineNumber++; + if (builder.length() > BULK_DATA_SIZE) { + sendBulkRequest(indexName, builder, client, logger); + builder.setLength(0); + } } - builder.append("\n"); } + if (builder.length() > 0) { + sendBulkRequest(indexName, builder, client, logger); + } + } + private static void sendBulkRequest(String indexName, StringBuilder builder, RestClient client, Logger logger) throws IOException { + // The indexName is optional for a bulk request, but we use it for routing in MultiClusterSpecIT. + builder.append("\n"); + logger.debug("Sending bulk request of [{}] bytes for [{}]", builder.length(), indexName); + Request request = new Request("POST", "/" + indexName + "/_bulk"); request.setJsonEntity(builder.toString()); request.addParameter("refresh", "false"); // will be _forcemerge'd next Response response = client.performRequest(request); @@ -373,7 +384,7 @@ private static void loadCsvData( Map result = XContentHelper.convertToMap(xContentType.xContent(), content, false); Object errors = result.get("errors"); if (Boolean.FALSE.equals(errors)) { - logger.info("Data loading of [{}] OK", indexName); + logger.info("Data loading of [{}] bytes into [{}] OK", builder.length(), indexName); } else { throw new IOException("Data loading of [" + indexName + "] failed with errors: " + errors); } From d842ce9f826cece783570f0a02189fa992fd83c2 Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 22 Feb 2024 10:13:46 +0000 Subject: [PATCH 48/49] Remove control-flow exception in `Netty4HttpPipeliningHandler#write` (#105679) There's no need for a try/catch block here when the only exception in sight is being thrown from within the same method and immediately swallowed. This commit replaces the logic with equivalent code using regular branches. 
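In miniature, the rewrite swaps an exception-as-goto for an ordinary branch (an illustrative sketch with invented names, not the handler code itself):

    final class ControlFlowSketch {
        // Before: the capacity check throws, and the catch in the same method is the
        // real handling -- the exception is only an expensive, indirect jump.
        static void acceptThrowing(int queued, int max) {
            try {
                if (queued >= max) {
                    throw new IllegalStateException("too many pipelined events");
                }
                // enqueue the response...
            } catch (IllegalStateException e) {
                // close the channel...
            }
        }

        // After: the same decision expressed directly, with no throwable allocated,
        // no stack trace filled in, and the failure path visible at a glance.
        static void acceptBranching(int queued, int max) {
            if (queued >= max) {
                // close the channel...
            } else {
                // enqueue the response...
            }
        }
    }

The patch additionally fails the promise directly on the overflow branch, which lets the try/finally bookkeeping around the old `success` flag disappear entirely.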
--- .../netty4/Netty4HttpPipeliningHandler.java | 52 +++++++------------ 1 file changed, 20 insertions(+), 32 deletions(-) diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java index 86fa635078d4f..b86e168e2e620 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java @@ -133,46 +133,34 @@ protected void handlePipelinedRequest(ChannelHandlerContext ctx, Netty4HttpReque } @Override - public void write(final ChannelHandlerContext ctx, final Object msg, final ChannelPromise promise) throws IOException { + public void write(final ChannelHandlerContext ctx, final Object msg, final ChannelPromise promise) { assert msg instanceof Netty4HttpResponse : "Invalid message type: " + msg.getClass(); - boolean success = false; - try { - final Netty4HttpResponse restResponse = (Netty4HttpResponse) msg; - if (restResponse.getSequence() != writeSequence) { - assert restResponse.getSequence() > writeSequence - : "response sequence [" + restResponse.getSequence() + "] we below write sequence [" + writeSequence + "]"; - if (outboundHoldingQueue.size() >= maxEventsHeld) { - int eventCount = outboundHoldingQueue.size() + 1; - throw new IllegalStateException( - "Too many pipelined events [" + eventCount + "]. Max events allowed [" + maxEventsHeld + "]." - ); - } - // response is not at the current sequence number so we add it to the outbound queue and return - assert outboundHoldingQueue.stream().noneMatch(t -> t.v1().getSequence() == writeSequence) : "duplicate outbound entries for seqno " + writeSequence; - outboundHoldingQueue.add(new Tuple<>(restResponse, promise)); - success = true; - return; - } - - // response is at the current sequence number and does not need to wait for any other response to be written so we write - // it out directly + final Netty4HttpResponse restResponse = (Netty4HttpResponse) msg; + if (restResponse.getSequence() != writeSequence) { + // response is not at the current sequence number so we add it to the outbound queue + enqueuePipelinedResponse(ctx, restResponse, promise); + } else { + // response is at the current sequence number and does not need to wait for any other response to be written doWrite(ctx, restResponse, promise); - success = true; // see if we have any queued up responses that became writeable due to the above write doWriteQueued(ctx); - } catch (IllegalStateException e) { + } + } + + private void enqueuePipelinedResponse(ChannelHandlerContext ctx, Netty4HttpResponse restResponse, ChannelPromise promise) { + assert restResponse.getSequence() > writeSequence + : "response sequence [" + restResponse.getSequence() + "] is below write sequence [" + writeSequence + "]"; + if (outboundHoldingQueue.size() >= maxEventsHeld) { ctx.channel().close(); - } finally { - if (success == false && promise.isDone() == false) { - // The preceding failure may already have failed the promise; use tryFailure() to avoid log noise about double-completion, but also check isDone() first to avoid even constructing another exception in most cases. 
- promise.tryFailure(new ClosedChannelException()); - } + promise.tryFailure(new ClosedChannelException()); + } else { + assert outboundHoldingQueue.stream().noneMatch(t -> t.v1().getSequence() == restResponse.getSequence()) + : "duplicate outbound entries for seqno " + restResponse.getSequence(); + outboundHoldingQueue.add(new Tuple<>(restResponse, promise)); } } - private void doWriteQueued(ChannelHandlerContext ctx) throws IOException { + private void doWriteQueued(ChannelHandlerContext ctx) { while (outboundHoldingQueue.isEmpty() == false && outboundHoldingQueue.peek().v1().getSequence() == writeSequence) { final Tuple top = outboundHoldingQueue.poll(); assert top != null : "we know the outbound holding queue to not be empty at this point"; From c8a35d349cd72682c57b42806b62e96de5975f1b Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 22 Feb 2024 10:17:39 +0000 Subject: [PATCH 49/49] `IndexShardRoutingTable` should always be nonempty (#105720) There's only one remaining test that creates an empty `IndexShardRoutingTable`. This commit fixes that, and then adds an assertion to enforce that all `IndexShardRoutingTable` instances include at least a primary shard. --- .../elasticsearch/action/get/TransportGetAction.java | 6 +++--- .../replication/TransportReplicationAction.java | 2 +- .../cluster/routing/IndexShardRoutingTable.java | 3 ++- .../org/elasticsearch/snapshots/SnapshotsService.java | 7 +++---- .../cluster/routing/IndexShardRoutingTableTests.java | 11 +++++++---- 5 files changed, 16 insertions(+), 13 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java b/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java index d3d19fe1714ba..db26da382d3e1 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java @@ -296,11 +296,11 @@ private void tryGetFromTranslog(GetRequest request, IndexShard indexShard, Disco } static DiscoveryNode getCurrentNodeOfPrimary(ClusterState clusterState, ShardId shardId) { - var shardRoutingTable = clusterState.routingTable().shardRoutingTable(shardId); - if (shardRoutingTable.primaryShard() == null || shardRoutingTable.primaryShard().active() == false) { + final var primaryShard = clusterState.routingTable().shardRoutingTable(shardId).primaryShard(); + if (primaryShard.active() == false) { throw new NoShardAvailableActionException(shardId, "primary shard is not active"); } - DiscoveryNode node = clusterState.nodes().get(shardRoutingTable.primaryShard().currentNodeId()); + DiscoveryNode node = clusterState.nodes().get(primaryShard.currentNodeId()); assert node != null; return node; } diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index a935c0e4e06bb..d7ff0359bfd27 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -848,7 +848,7 @@ protected void doRun() { : "request waitForActiveShards must be set in resolveRequest"; final ShardRouting primary = state.getRoutingTable().shardRoutingTable(request.shardId()).primaryShard(); - if (primary == null || primary.active() == false) { + if (primary.active() == false) { logger.trace( "primary shard [{}] is not yet active, 
scheduling a retry: action [{}], request [{}], " + "cluster state version [{}]", diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index 8e257ff2c7a54..1e5aaa46c1157 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -114,7 +114,8 @@ public class IndexShardRoutingTable { allShardsStarted = false; } } - assert primary != null || shards.isEmpty() : shards; + assert shards.isEmpty() == false : "cannot have an empty shard routing table"; + assert primary != null : shards; this.primary = primary; this.replicas = CollectionUtils.wrapUnmodifiableOrEmptySingleton(replicas); this.activeShards = CollectionUtils.wrapUnmodifiableOrEmptySingleton(activeShards); diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 3b2868298cf65..a0782fa8814cd 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -1189,7 +1189,7 @@ private static ImmutableOpenMap processWaitingShar IndexRoutingTable indexShardRoutingTable = routingTable.index(shardId.getIndex()); if (indexShardRoutingTable != null) { IndexShardRoutingTable shardRouting = indexShardRoutingTable.shard(shardId.id()); - if (shardRouting != null && shardRouting.primaryShard() != null) { + if (shardRouting != null) { final var primaryNodeId = shardRouting.primaryShard().currentNodeId(); if (nodeIdRemovalPredicate.test(primaryNodeId)) { if (shardStatus.state() == ShardState.PAUSED_FOR_NODE_REMOVAL) { @@ -1274,9 +1274,8 @@ private static boolean waitingShardsStartedOrUnassigned(SnapshotsInProgress snap return true; } ShardRouting shardRouting = indexShardRoutingTable.shard(shardId.shardId()).primaryShard(); - if (shardRouting != null - && (shardRouting.started() && snapshotsInProgress.isNodeIdForRemoval(shardRouting.currentNodeId()) == false - || shardRouting.unassigned())) { + if (shardRouting.started() && snapshotsInProgress.isNodeIdForRemoval(shardRouting.currentNodeId()) == false + || shardRouting.unassigned()) { return true; } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/IndexShardRoutingTableTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/IndexShardRoutingTableTests.java index 838a4268fa1cf..2ae9414711801 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/IndexShardRoutingTableTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/IndexShardRoutingTableTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; -import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -35,9 +34,13 @@ public void testEquals() { Index index = new Index("a", "b"); ShardId shardId = new ShardId(index, 1); ShardId shardId2 = new ShardId(index, 2); - IndexShardRoutingTable table1 = new IndexShardRoutingTable(shardId, new ArrayList<>()); - IndexShardRoutingTable table2 = new IndexShardRoutingTable(shardId, new ArrayList<>()); - IndexShardRoutingTable table3 = new IndexShardRoutingTable(shardId2, new ArrayList<>()); + ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, null, true, ShardRoutingState.UNASSIGNED); + 
IndexShardRoutingTable table1 = new IndexShardRoutingTable(shardId, List.of(shardRouting)); + IndexShardRoutingTable table2 = new IndexShardRoutingTable(shardId, List.of(shardRouting)); + IndexShardRoutingTable table3 = new IndexShardRoutingTable( + shardId2, + List.of(TestShardRouting.newShardRouting(shardId2, null, true, ShardRoutingState.UNASSIGNED)) + ); String s = "Some other random object"; assertEquals(table1, table1); assertEquals(table1, table2);