diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index adf33dd070a22..c34bdc95046b3 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -1,5 +1,5 @@ elasticsearch = 8.13.0 -lucene = 9.9.0 +lucene = 9.9.0-snapshot-bb4fec631e6 bundled_jdk_vendor = openjdk bundled_jdk = 21.0.1+12@415e3f918a1f4062a0074a2794853d0d diff --git a/docs/changelog/103003.yaml b/docs/changelog/103003.yaml new file mode 100644 index 0000000000000..accacc2b62416 --- /dev/null +++ b/docs/changelog/103003.yaml @@ -0,0 +1,6 @@ +pr: 103003 +summary: "Fix: Watcher REST API `GET /_watcher/settings` now includes product header" +area: "Watcher" +type: bug +issues: + - 102928 diff --git a/docs/changelog/103025.yaml b/docs/changelog/103025.yaml new file mode 100644 index 0000000000000..856a7c022d5dd --- /dev/null +++ b/docs/changelog/103025.yaml @@ -0,0 +1,5 @@ +pr: 103025 +summary: "Metrics: Allow `AsyncCounters` to switch providers" +area: Infra/Core +type: bug +issues: [] diff --git a/docs/changelog/103091.yaml b/docs/changelog/103091.yaml new file mode 100644 index 0000000000000..ae4ac11933d4e --- /dev/null +++ b/docs/changelog/103091.yaml @@ -0,0 +1,5 @@ +pr: 103091 +summary: "Metrics: Handle null observations in observers" +area: Infra/Core +type: bug +issues: [] diff --git a/docs/changelog/103130.yaml b/docs/changelog/103130.yaml new file mode 100644 index 0000000000000..3ef56ae84d123 --- /dev/null +++ b/docs/changelog/103130.yaml @@ -0,0 +1,5 @@ +pr: 103130 +summary: Create a DSL health indicator as part of the health API +area: Health +type: feature +issues: [] diff --git a/docs/changelog/103151.yaml b/docs/changelog/103151.yaml new file mode 100644 index 0000000000000..bd9eea97cac6d --- /dev/null +++ b/docs/changelog/103151.yaml @@ -0,0 +1,6 @@ +pr: 103151 +summary: Wrap painless explain error +area: Infra/Scripting +type: bug +issues: + - 103018 diff --git a/docs/changelog/103185.yaml 
b/docs/changelog/103185.yaml new file mode 100644 index 0000000000000..3a1a4960ba98c --- /dev/null +++ b/docs/changelog/103185.yaml @@ -0,0 +1,5 @@ +pr: 103185 +summary: Fix format string in `OldLuceneVersions` +area: Search +type: bug +issues: [] diff --git a/docs/changelog/103203.yaml b/docs/changelog/103203.yaml new file mode 100644 index 0000000000000..d2aa3e9961c6a --- /dev/null +++ b/docs/changelog/103203.yaml @@ -0,0 +1,5 @@ +pr: 103203 +summary: Fix NPE & empty result handling in `CountOnlyQueryPhaseResultConsumer` +area: Search +type: bug +issues: [] diff --git a/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc index 89b8bbeb880c3..53bd3c2b96f0b 100644 --- a/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc @@ -59,6 +59,16 @@ duration the document could be deleted. When empty, every document in this data If defined, it turns data streqm lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle that's disabled (`enabled: false`) will have no effect on the data stream. Defaults to `true`. + +`downsampling`:: +(Optional, array) +An optional array of downsampling configuration objects, each defining an `after` +interval representing when the backing index is meant to be downsampled (the time +frame is calculated since the index was rolled over, i.e. generation time) and +a `fixed_interval` representing the downsampling interval (the minimum `fixed_interval` +value is `5m`). A maximum number of 10 downsampling rounds can be configured. +See <> below. 
+ ==== [[data-streams-put-lifecycle-example]] @@ -84,3 +94,29 @@ When the lifecycle is successfully updated in all data streams, you receive the "acknowledged": true } -------------------------------------------------- + +[[data-streams-put-lifecycle-downsampling-example]] +==== {api-examples-title} + +The following example configures two downsampling rounds, the first one starting +one day after the backing index is rolled over (or later, if the index is still +within its write-accepting <>) with an interval +of `10m`, and a second round starting 7 days after rollover at an interval of `1d`: + +[source,console] +-------------------------------------------------------------------- +PUT _data_stream/my-weather-sensor-data-stream/_lifecycle +{ + "downsampling": [ + { + "after": "1d", + "fixed_interval": "10m" + }, + { + "after": "7d", + "fixed_interval": "1d" + } + ] +} +-------------------------------------------------------------------- +//TEST[skip:downsampling requires waiting for indices to be out of time bounds] diff --git a/docs/reference/data-streams/lifecycle/index.asciidoc b/docs/reference/data-streams/lifecycle/index.asciidoc index 6c0220ef0a80f..ef5558817885e 100644 --- a/docs/reference/data-streams/lifecycle/index.asciidoc +++ b/docs/reference/data-streams/lifecycle/index.asciidoc @@ -18,6 +18,10 @@ and backwards incompatible mapping changes. * Configurable retention, which allows you to configure the time period for which your data is guaranteed to be stored. {es} is allowed at a later time to delete data older than this time period. +A data stream lifecycle also supports downsampling the data stream backing indices. +See <> for +more details. + [discrete] [[data-streams-lifecycle-how-it-works]] === How does it work? @@ -35,7 +39,9 @@ into tiers of exponential sizes, merging the long tail of small segments is only fraction of the cost of force mergeing to a single segment. 
The small segments would usually hold the most recent data so tail mergeing will focus the merging resources on the higher-value data that is most likely to keep being queried. -4. Applies retention to the remaining backing indices. This means deleting the backing indices whose +4. If <> is configured it will execute +all the configured downsampling rounds. +5. Applies retention to the remaining backing indices. This means deleting the backing indices whose `generation_time` is longer than the configured retention period. The `generation_time` is only applicable to rolled over backing indices and it is either the time since the backing index got rolled over, or the time optionally configured in the <> setting. diff --git a/docs/reference/esql/functions/binary.asciidoc b/docs/reference/esql/functions/binary.asciidoc index ba93f57af7ad6..32e97b7316d84 100644 --- a/docs/reference/esql/functions/binary.asciidoc +++ b/docs/reference/esql/functions/binary.asciidoc @@ -9,4 +9,12 @@ These binary comparison operators are supported: * less than: `<` * less than or equal: `<=` * larger than: `>` -* larger than or equal: `>=` \ No newline at end of file +* larger than or equal: `>=` + +And these mathematical operators are supported: + +[.text-center] +image::esql/functions/signature/add.svg[Embedded,opts=inline] + +[.text-center] +image::esql/functions/signature/sub.svg[Embedded,opts=inline] diff --git a/docs/reference/esql/functions/cos.asciidoc b/docs/reference/esql/functions/cos.asciidoc index 5dcbb7bea37f4..7227f57e28120 100644 --- a/docs/reference/esql/functions/cos.asciidoc +++ b/docs/reference/esql/functions/cos.asciidoc @@ -4,7 +4,7 @@ [.text-center] image::esql/functions/signature/cos.svg[Embedded,opts=inline] -https://en.wikipedia.org/wiki/Sine_and_cosine[Cosine] trigonometric function. +https://en.wikipedia.org/wiki/Sine_and_cosine[Cosine] trigonometric function. Input expected in radians. 
[source.merge.styled,esql] ---- diff --git a/docs/reference/esql/functions/operators.asciidoc b/docs/reference/esql/functions/operators.asciidoc index c236413b5dd7e..1c88fa200ca11 100644 --- a/docs/reference/esql/functions/operators.asciidoc +++ b/docs/reference/esql/functions/operators.asciidoc @@ -9,6 +9,7 @@ Boolean operators for comparing against one or multiple expressions. // tag::op_list[] * <> +* <> * <> * <> * <> @@ -23,6 +24,7 @@ Boolean operators for comparing against one or multiple expressions. // end::op_list[] include::binary.asciidoc[] +include::unary.asciidoc[] include::logical.asciidoc[] include::predicates.asciidoc[] include::cidr_match.asciidoc[] diff --git a/docs/reference/esql/functions/signature/add.svg b/docs/reference/esql/functions/signature/add.svg new file mode 100644 index 0000000000000..10d89efc65f3b --- /dev/null +++ b/docs/reference/esql/functions/signature/add.svg @@ -0,0 +1 @@ +lhs+rhs \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/div.svg b/docs/reference/esql/functions/signature/div.svg new file mode 100644 index 0000000000000..ea061ae6ef625 --- /dev/null +++ b/docs/reference/esql/functions/signature/div.svg @@ -0,0 +1 @@ +lhs/rhs \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/equals.svg b/docs/reference/esql/functions/signature/equals.svg new file mode 100644 index 0000000000000..ade4b1260128f --- /dev/null +++ b/docs/reference/esql/functions/signature/equals.svg @@ -0,0 +1 @@ +lhs==rhs \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/greater_than.svg b/docs/reference/esql/functions/signature/greater_than.svg new file mode 100644 index 0000000000000..f5eb082d14642 --- /dev/null +++ b/docs/reference/esql/functions/signature/greater_than.svg @@ -0,0 +1 @@ +lhs>rhs \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/less_than.svg b/docs/reference/esql/functions/signature/less_than.svg new file mode 100644 
index 0000000000000..9858a17450f60 --- /dev/null +++ b/docs/reference/esql/functions/signature/less_than.svg @@ -0,0 +1 @@ +lhs<rhs \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/mod.svg b/docs/reference/esql/functions/signature/mod.svg new file mode 100644 index 0000000000000..20a134a26f232 --- /dev/null +++ b/docs/reference/esql/functions/signature/mod.svg @@ -0,0 +1 @@ +lhs%rhs \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/mul.svg b/docs/reference/esql/functions/signature/mul.svg new file mode 100644 index 0000000000000..b15c488eb874b --- /dev/null +++ b/docs/reference/esql/functions/signature/mul.svg @@ -0,0 +1 @@ +lhs*rhs \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/neg.svg b/docs/reference/esql/functions/signature/neg.svg new file mode 100644 index 0000000000000..6090a85310684 --- /dev/null +++ b/docs/reference/esql/functions/signature/neg.svg @@ -0,0 +1 @@ +-v \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/not_equals.svg b/docs/reference/esql/functions/signature/not_equals.svg new file mode 100644 index 0000000000000..d4808abbac5a5 --- /dev/null +++ b/docs/reference/esql/functions/signature/not_equals.svg @@ -0,0 +1 @@ +lhs!=rhs \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/sub.svg b/docs/reference/esql/functions/signature/sub.svg new file mode 100644 index 0000000000000..8d49ad6b0ac1e --- /dev/null +++ b/docs/reference/esql/functions/signature/sub.svg @@ -0,0 +1 @@ +lhs-rhs \ No newline at end of file diff --git a/docs/reference/esql/functions/sin.asciidoc b/docs/reference/esql/functions/sin.asciidoc index 5fa33a315392d..d948bf2ec39a3 100644 --- a/docs/reference/esql/functions/sin.asciidoc +++ b/docs/reference/esql/functions/sin.asciidoc @@ -4,7 +4,7 @@ [.text-center] image::esql/functions/signature/sin.svg[Embedded,opts=inline] -https://en.wikipedia.org/wiki/Sine_and_cosine[Sine] 
trigonometric function. +https://en.wikipedia.org/wiki/Sine_and_cosine[Sine] trigonometric function. Input expected in radians. [source.merge.styled,esql] ---- diff --git a/docs/reference/esql/functions/tan.asciidoc b/docs/reference/esql/functions/tan.asciidoc index 1c66562eada7a..03e450ff23b0e 100644 --- a/docs/reference/esql/functions/tan.asciidoc +++ b/docs/reference/esql/functions/tan.asciidoc @@ -4,7 +4,7 @@ [.text-center] image::esql/functions/signature/tan.svg[Embedded,opts=inline] -https://en.wikipedia.org/wiki/Sine_and_cosine[Tangent] trigonometric function. +https://en.wikipedia.org/wiki/Sine_and_cosine[Tangent] trigonometric function. Input expected in radians. [source.merge.styled,esql] ---- diff --git a/docs/reference/esql/functions/unary.asciidoc b/docs/reference/esql/functions/unary.asciidoc new file mode 100644 index 0000000000000..2ee35b6c6256f --- /dev/null +++ b/docs/reference/esql/functions/unary.asciidoc @@ -0,0 +1,8 @@ +[discrete] +[[esql-unary-operators]] +=== Unary operators + +These unary mathematical operators are supported: + +[.text-center] +image::esql/functions/signature/neg.svg[Embedded,opts=inline] diff --git a/docs/reference/search/search.asciidoc b/docs/reference/search/search.asciidoc index 68d286b3f267b..96e3ae43f98dc 100644 --- a/docs/reference/search/search.asciidoc +++ b/docs/reference/search/search.asciidoc @@ -111,6 +111,13 @@ By default, you cannot page through more than 10,000 hits using the `from` and include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ignore_throttled] +`include_named_queries_score`:: +(Optional, Boolean) If `true`, includes the score contribution from any named queries. +This functionality reruns each named query on every hit in a search +response. Typically, this adds a small overhead to a request. However, using +computationally expensive named queries on a large number of hits may add +significant overhead. Defaults to `false`. 
+ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=lenient] diff --git a/docs/reference/snapshot-restore/repository-s3.asciidoc b/docs/reference/snapshot-restore/repository-s3.asciidoc index 032d4f47bf678..e204061c28458 100644 --- a/docs/reference/snapshot-restore/repository-s3.asciidoc +++ b/docs/reference/snapshot-restore/repository-s3.asciidoc @@ -123,7 +123,9 @@ settings belong in the `elasticsearch.yml` file. `https`. Defaults to `https`. When using HTTPS, this repository type validates the repository's certificate chain using the JVM-wide truststore. Ensure that the root certificate authority is in this truststore using the JVM's - `keytool` tool. + `keytool` tool. If you have a custom certificate authority for your S3 repository + and you use the {es} <>, then you will need to reinstall your + CA certificate every time you upgrade {es}. `proxy.host`:: diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 7f672ece21f66..263602c9841a8 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -2664,121 +2664,241 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java index 51f008db646fa..cd6d3d209b3ed 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java @@ -48,12 +48,12 @@ */ public class APMMeterRegistry implements MeterRegistry { private final Registrar doubleCounters = new Registrar<>(); + private final Registrar 
doubleAsynchronousCounters = new Registrar<>(); private final Registrar doubleUpDownCounters = new Registrar<>(); private final Registrar doubleGauges = new Registrar<>(); private final Registrar doubleHistograms = new Registrar<>(); private final Registrar longCounters = new Registrar<>(); private final Registrar longAsynchronousCounters = new Registrar<>(); - private final Registrar doubleAsynchronousCounters = new Registrar<>(); private final Registrar longUpDownCounters = new Registrar<>(); private final Registrar longGauges = new Registrar<>(); private final Registrar longHistograms = new Registrar<>(); @@ -66,10 +66,12 @@ public APMMeterRegistry(Meter meter) { private final List> registrars = List.of( doubleCounters, + doubleAsynchronousCounters, doubleUpDownCounters, doubleGauges, doubleHistograms, longCounters, + longAsynchronousCounters, longUpDownCounters, longGauges, longHistograms @@ -81,7 +83,7 @@ public APMMeterRegistry(Meter meter) { @Override public DoubleCounter registerDoubleCounter(String name, String description, String unit) { try (ReleasableLock lock = registerLock.acquire()) { - return doubleCounters.register(new DoubleCounterAdapter(meter, name, description, unit)); + return register(doubleCounters, new DoubleCounterAdapter(meter, name, description, unit)); } } @@ -90,10 +92,27 @@ public DoubleCounter getDoubleCounter(String name) { return doubleCounters.get(name); } + @Override + public DoubleAsyncCounter registerDoubleAsyncCounter( + String name, + String description, + String unit, + Supplier observer + ) { + try (ReleasableLock lock = registerLock.acquire()) { + return register(doubleAsynchronousCounters, new DoubleAsyncCounterAdapter(meter, name, description, unit, observer)); + } + } + + @Override + public DoubleAsyncCounter getDoubleAsyncCounter(String name) { + return doubleAsynchronousCounters.get(name); + } + @Override public DoubleUpDownCounter registerDoubleUpDownCounter(String name, String description, String unit) { try 
(ReleasableLock lock = registerLock.acquire()) { - return doubleUpDownCounters.register(new DoubleUpDownCounterAdapter(meter, name, description, unit)); + return register(doubleUpDownCounters, new DoubleUpDownCounterAdapter(meter, name, description, unit)); } } @@ -105,7 +124,7 @@ public DoubleUpDownCounter getDoubleUpDownCounter(String name) { @Override public DoubleGauge registerDoubleGauge(String name, String description, String unit, Supplier observer) { try (ReleasableLock lock = registerLock.acquire()) { - return doubleGauges.register(new DoubleGaugeAdapter(meter, name, description, unit, observer)); + return register(doubleGauges, new DoubleGaugeAdapter(meter, name, description, unit, observer)); } } @@ -117,7 +136,7 @@ public DoubleGauge getDoubleGauge(String name) { @Override public DoubleHistogram registerDoubleHistogram(String name, String description, String unit) { try (ReleasableLock lock = registerLock.acquire()) { - return doubleHistograms.register(new DoubleHistogramAdapter(meter, name, description, unit)); + return register(doubleHistograms, new DoubleHistogramAdapter(meter, name, description, unit)); } } @@ -129,14 +148,14 @@ public DoubleHistogram getDoubleHistogram(String name) { @Override public LongCounter registerLongCounter(String name, String description, String unit) { try (ReleasableLock lock = registerLock.acquire()) { - return longCounters.register(new LongCounterAdapter(meter, name, description, unit)); + return register(longCounters, new LongCounterAdapter(meter, name, description, unit)); } } @Override public LongAsyncCounter registerLongAsyncCounter(String name, String description, String unit, Supplier observer) { try (ReleasableLock lock = registerLock.acquire()) { - return longAsynchronousCounters.register(new LongAsyncCounterAdapter(meter, name, description, unit, observer)); + return register(longAsynchronousCounters, new LongAsyncCounterAdapter(meter, name, description, unit, observer)); } } @@ -145,23 +164,6 @@ public 
LongAsyncCounter getLongAsyncCounter(String name) { return longAsynchronousCounters.get(name); } - @Override - public DoubleAsyncCounter registerDoubleAsyncCounter( - String name, - String description, - String unit, - Supplier observer - ) { - try (ReleasableLock lock = registerLock.acquire()) { - return doubleAsynchronousCounters.register(new DoubleAsyncCounterAdapter(meter, name, description, unit, observer)); - } - } - - @Override - public DoubleAsyncCounter getDoubleAsyncCounter(String name) { - return doubleAsynchronousCounters.get(name); - } - @Override public LongCounter getLongCounter(String name) { return longCounters.get(name); @@ -170,7 +172,7 @@ public LongCounter getLongCounter(String name) { @Override public LongUpDownCounter registerLongUpDownCounter(String name, String description, String unit) { try (ReleasableLock lock = registerLock.acquire()) { - return longUpDownCounters.register(new LongUpDownCounterAdapter(meter, name, description, unit)); + return register(longUpDownCounters, new LongUpDownCounterAdapter(meter, name, description, unit)); } } @@ -182,7 +184,7 @@ public LongUpDownCounter getLongUpDownCounter(String name) { @Override public LongGauge registerLongGauge(String name, String description, String unit, Supplier observer) { try (ReleasableLock lock = registerLock.acquire()) { - return longGauges.register(new LongGaugeAdapter(meter, name, description, unit, observer)); + return register(longGauges, new LongGaugeAdapter(meter, name, description, unit, observer)); } } @@ -194,7 +196,7 @@ public LongGauge getLongGauge(String name) { @Override public LongHistogram registerLongHistogram(String name, String description, String unit) { try (ReleasableLock lock = registerLock.acquire()) { - return longHistograms.register(new LongHistogramAdapter(meter, name, description, unit)); + return register(longHistograms, new LongHistogramAdapter(meter, name, description, unit)); } } @@ -203,6 +205,11 @@ public LongHistogram getLongHistogram(String 
name) { return longHistograms.get(name); } + private > T register(Registrar registrar, T adapter) { + assert registrars.contains(registrar) : "usage of unknown registrar"; + return registrar.register(adapter); + } + public void setProvider(Meter meter) { try (ReleasableLock lock = registerLock.acquire()) { this.meter = meter; diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java index bbeaba0f6f088..72c6ccf905873 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java @@ -17,6 +17,7 @@ import java.security.PrivilegedAction; import java.util.Objects; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; /** * An instrument that contains the name, description and unit. The delegate may be replaced when @@ -25,27 +26,14 @@ * @param delegated instrument */ public abstract class AbstractInstrument implements Instrument { - private static final int MAX_NAME_LENGTH = 255; - private final AtomicReference delegate; + private final AtomicReference delegate = new AtomicReference<>(); private final String name; - private final String description; - private final String unit; + private final Function instrumentBuilder; - @SuppressWarnings("this-escape") - public AbstractInstrument(Meter meter, String name, String description, String unit) { - this.name = Objects.requireNonNull(name); - if (name.length() > MAX_NAME_LENGTH) { - throw new IllegalArgumentException( - "Instrument name [" + name + "] with length [" + name.length() + "] exceeds maximum length [" + MAX_NAME_LENGTH + "]" - ); - } - this.description = Objects.requireNonNull(description); - this.unit = Objects.requireNonNull(unit); - this.delegate = new AtomicReference<>(doBuildInstrument(meter)); - } - - private T doBuildInstrument(Meter 
meter) { - return AccessController.doPrivileged((PrivilegedAction) () -> buildInstrument(meter)); + public AbstractInstrument(Meter meter, Builder builder) { + this.name = builder.getName(); + this.instrumentBuilder = m -> AccessController.doPrivileged((PrivilegedAction) () -> builder.build(m)); + this.delegate.set(this.instrumentBuilder.apply(meter)); } @Override @@ -53,21 +41,36 @@ public String getName() { return name; } - public String getUnit() { - return unit.toString(); - } - protected T getInstrument() { return delegate.get(); } - protected String getDescription() { - return description; - } - void setProvider(@Nullable Meter meter) { - delegate.set(doBuildInstrument(Objects.requireNonNull(meter))); + delegate.set(instrumentBuilder.apply(Objects.requireNonNull(meter))); } - protected abstract T buildInstrument(Meter meter); + protected abstract static class Builder { + private static final int MAX_NAME_LENGTH = 255; + + protected final String name; + protected final String description; + protected final String unit; + + public Builder(String name, String description, String unit) { + if (name.length() > MAX_NAME_LENGTH) { + throw new IllegalArgumentException( + "Instrument name [" + name + "] with length [" + name.length() + "] exceeds maximum length [" + MAX_NAME_LENGTH + "]" + ); + } + this.name = Objects.requireNonNull(name); + this.description = Objects.requireNonNull(description); + this.unit = Objects.requireNonNull(unit); + } + + public String getName() { + return name; + } + + public abstract T build(Meter meter); + } } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleAsyncCounterAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleAsyncCounterAdapter.java index a1ea9f33e31fb..6b17a83619ef7 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleAsyncCounterAdapter.java +++ 
b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleAsyncCounterAdapter.java @@ -11,47 +11,40 @@ import io.opentelemetry.api.metrics.Meter; import io.opentelemetry.api.metrics.ObservableDoubleCounter; -import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.telemetry.apm.AbstractInstrument; import org.elasticsearch.telemetry.metric.DoubleAsyncCounter; import org.elasticsearch.telemetry.metric.DoubleWithAttributes; import java.util.Objects; -import java.util.concurrent.locks.ReentrantLock; import java.util.function.Supplier; public class DoubleAsyncCounterAdapter extends AbstractInstrument implements DoubleAsyncCounter { - private final Supplier observer; - private final ReleasableLock closedLock = new ReleasableLock(new ReentrantLock()); - private boolean closed = false; public DoubleAsyncCounterAdapter(Meter meter, String name, String description, String unit, Supplier observer) { - super(meter, name, description, unit); - this.observer = observer; + super(meter, new Builder(name, description, unit, observer)); } @Override - protected ObservableDoubleCounter buildInstrument(Meter meter) { - var builder = Objects.requireNonNull(meter).counterBuilder(getName()); - return builder.setDescription(getDescription()).setUnit(getUnit()).ofDoubles().buildWithCallback(measurement -> { - DoubleWithAttributes observation; - try { - observation = observer.get(); - } catch (RuntimeException err) { - assert false : "observer must not throw [" + err.getMessage() + "]"; - return; - } - measurement.record(observation.value(), OtelHelper.fromMap(observation.attributes())); - }); + public void close() throws Exception { + getInstrument().close(); } - @Override - public void close() throws Exception { - try (ReleasableLock lock = closedLock.acquire()) { - if (closed == false) { - getInstrument().close(); - } - closed = true; + private static class Builder extends AbstractInstrument.Builder { + private final Supplier 
observer; + + private Builder(String name, String description, String unit, Supplier observer) { + super(name, description, unit); + this.observer = Objects.requireNonNull(observer); + } + + @Override + public ObservableDoubleCounter build(Meter meter) { + return Objects.requireNonNull(meter) + .counterBuilder(name) + .setDescription(description) + .setUnit(unit) + .ofDoubles() + .buildWithCallback(OtelHelper.doubleMeasurementCallback(observer)); } } } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleCounterAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleCounterAdapter.java index faba8c2e3e67e..398419f6c3f69 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleCounterAdapter.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleCounterAdapter.java @@ -22,16 +22,7 @@ public class DoubleCounterAdapter extends AbstractInstrument implements org.elasticsearch.telemetry.metric.DoubleCounter { public DoubleCounterAdapter(Meter meter, String name, String description, String unit) { - super(meter, name, description, unit); - } - - protected io.opentelemetry.api.metrics.DoubleCounter buildInstrument(Meter meter) { - return Objects.requireNonNull(meter) - .counterBuilder(getName()) - .ofDoubles() - .setDescription(getDescription()) - .setUnit(getUnit()) - .build(); + super(meter, new Builder(name, description, unit)); } @Override @@ -50,4 +41,15 @@ public void incrementBy(double inc, Map attributes) { assert inc >= 0; getInstrument().add(inc, OtelHelper.fromMap(attributes)); } + + private static class Builder extends AbstractInstrument.Builder { + private Builder(String name, String description, String unit) { + super(name, description, unit); + } + + @Override + public DoubleCounter build(Meter meter) { + return 
Objects.requireNonNull(meter).counterBuilder(name).ofDoubles().setDescription(description).setUnit(unit).build(); + } + } } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleGaugeAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleGaugeAdapter.java index faef8bd723fcf..ed6ecee66d696 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleGaugeAdapter.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleGaugeAdapter.java @@ -11,12 +11,10 @@ import io.opentelemetry.api.metrics.Meter; import io.opentelemetry.api.metrics.ObservableDoubleGauge; -import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.telemetry.apm.AbstractInstrument; import org.elasticsearch.telemetry.metric.DoubleWithAttributes; import java.util.Objects; -import java.util.concurrent.locks.ReentrantLock; import java.util.function.Supplier; /** @@ -26,40 +24,30 @@ public class DoubleGaugeAdapter extends AbstractInstrument observer; - private final ReleasableLock closedLock = new ReleasableLock(new ReentrantLock()); - private boolean closed = false; - public DoubleGaugeAdapter(Meter meter, String name, String description, String unit, Supplier observer) { - super(meter, name, description, unit); - this.observer = observer; + super(meter, new Builder(name, description, unit, observer)); } @Override - protected io.opentelemetry.api.metrics.ObservableDoubleGauge buildInstrument(Meter meter) { - return Objects.requireNonNull(meter) - .gaugeBuilder(getName()) - .setDescription(getDescription()) - .setUnit(getUnit()) - .buildWithCallback(measurement -> { - DoubleWithAttributes observation; - try { - observation = observer.get(); - } catch (RuntimeException err) { - assert false : "observer must not throw [" + err.getMessage() + "]"; - return; - } - measurement.record(observation.value(), 
OtelHelper.fromMap(observation.attributes())); - }); + public void close() throws Exception { + getInstrument().close(); } - @Override - public void close() throws Exception { - try (ReleasableLock lock = closedLock.acquire()) { - if (closed == false) { - getInstrument().close(); - } - closed = true; + private static class Builder extends AbstractInstrument.Builder { + private final Supplier observer; + + private Builder(String name, String description, String unit, Supplier observer) { + super(name, description, unit); + this.observer = Objects.requireNonNull(observer); + } + + @Override + public ObservableDoubleGauge build(Meter meter) { + return Objects.requireNonNull(meter) + .gaugeBuilder(name) + .setDescription(description) + .setUnit(unit) + .buildWithCallback(OtelHelper.doubleMeasurementCallback(observer)); } } } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleHistogramAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleHistogramAdapter.java index e126aa6af7cf0..47b54a84d0aa9 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleHistogramAdapter.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleHistogramAdapter.java @@ -24,13 +24,7 @@ public class DoubleHistogramAdapter extends AbstractInstrument org.elasticsearch.telemetry.metric.DoubleHistogram { public DoubleHistogramAdapter(Meter meter, String name, String description, String unit) { - super(meter, name, description, unit); - } - - @Override - protected io.opentelemetry.api.metrics.DoubleHistogram buildInstrument(Meter meter) { - var builder = Objects.requireNonNull(meter).histogramBuilder(getName()); - return builder.setDescription(getDescription()).setUnit(getUnit()).build(); + super(meter, new Builder(name, description, unit)); } @Override @@ -42,4 +36,15 @@ public void record(double value) { public void record(double value, Map 
attributes) { getInstrument().record(value, OtelHelper.fromMap(attributes)); } + + private static class Builder extends AbstractInstrument.Builder { + private Builder(String name, String description, String unit) { + super(name, description, unit); + } + + @Override + public DoubleHistogram build(Meter meter) { + return Objects.requireNonNull(meter).histogramBuilder(name).setDescription(description).setUnit(unit).build(); + } + } } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleUpDownCounterAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleUpDownCounterAdapter.java index a204627a04f1e..7cb7100ba2631 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleUpDownCounterAdapter.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleUpDownCounterAdapter.java @@ -24,17 +24,7 @@ public class DoubleUpDownCounterAdapter extends AbstractInstrument attributes) { getInstrument().add(inc, OtelHelper.fromMap(attributes)); } + + private static class Builder extends AbstractInstrument.Builder { + private Builder(String name, String description, String unit) { + super(name, description, unit); + } + + @Override + public DoubleUpDownCounter build(Meter meter) { + return Objects.requireNonNull(meter).upDownCounterBuilder(name).ofDoubles().setDescription(description).setUnit(unit).build(); + } + } } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongAsyncCounterAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongAsyncCounterAdapter.java index 126cca1964283..14c58139d03e1 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongAsyncCounterAdapter.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongAsyncCounterAdapter.java @@ -11,47 +11,39 @@ import 
io.opentelemetry.api.metrics.Meter; import io.opentelemetry.api.metrics.ObservableLongCounter; -import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.telemetry.apm.AbstractInstrument; import org.elasticsearch.telemetry.metric.LongAsyncCounter; import org.elasticsearch.telemetry.metric.LongWithAttributes; import java.util.Objects; -import java.util.concurrent.locks.ReentrantLock; import java.util.function.Supplier; public class LongAsyncCounterAdapter extends AbstractInstrument implements LongAsyncCounter { - private final Supplier observer; - private final ReleasableLock closedLock = new ReleasableLock(new ReentrantLock()); - private boolean closed = false; public LongAsyncCounterAdapter(Meter meter, String name, String description, String unit, Supplier observer) { - super(meter, name, description, unit); - this.observer = observer; + super(meter, new Builder(name, description, unit, observer)); } @Override - protected ObservableLongCounter buildInstrument(Meter meter) { - var builder = Objects.requireNonNull(meter).counterBuilder(getName()); - return builder.setDescription(getDescription()).setUnit(getUnit()).buildWithCallback(measurement -> { - LongWithAttributes observation; - try { - observation = observer.get(); - } catch (RuntimeException err) { - assert false : "observer must not throw [" + err.getMessage() + "]"; - return; - } - measurement.record(observation.value(), OtelHelper.fromMap(observation.attributes())); - }); + public void close() throws Exception { + getInstrument().close(); } - @Override - public void close() throws Exception { - try (ReleasableLock lock = closedLock.acquire()) { - if (closed == false) { - getInstrument().close(); - } - closed = true; + private static class Builder extends AbstractInstrument.Builder { + private final Supplier observer; + + private Builder(String name, String description, String unit, Supplier observer) { + super(name, description, unit); + this.observer = 
Objects.requireNonNull(observer); + } + + @Override + public ObservableLongCounter build(Meter meter) { + return Objects.requireNonNull(meter) + .counterBuilder(name) + .setDescription(description) + .setUnit(unit) + .buildWithCallback(OtelHelper.longMeasurementCallback(observer)); } } } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongCounterAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongCounterAdapter.java index 9b46b8c97994a..54a7c3e3cab29 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongCounterAdapter.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongCounterAdapter.java @@ -20,15 +20,8 @@ * LongCounterAdapter wraps an otel LongCounter */ public class LongCounterAdapter extends AbstractInstrument implements org.elasticsearch.telemetry.metric.LongCounter { - public LongCounterAdapter(Meter meter, String name, String description, String unit) { - super(meter, name, description, unit); - } - - @Override - protected io.opentelemetry.api.metrics.LongCounter buildInstrument(Meter meter) { - var builder = Objects.requireNonNull(meter).counterBuilder(getName()); - return builder.setDescription(getDescription()).setUnit(getUnit()).build(); + super(meter, new Builder(name, description, unit)); } @Override @@ -47,4 +40,15 @@ public void incrementBy(long inc, Map attributes) { assert inc >= 0; getInstrument().add(inc, OtelHelper.fromMap(attributes)); } + + private static class Builder extends AbstractInstrument.Builder { + private Builder(String name, String description, String unit) { + super(name, description, unit); + } + + @Override + public LongCounter build(Meter meter) { + return Objects.requireNonNull(meter).counterBuilder(name).setDescription(description).setUnit(unit).build(); + } + } } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongGaugeAdapter.java 
b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongGaugeAdapter.java index e297ba7ee963a..52c19c80c284f 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongGaugeAdapter.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongGaugeAdapter.java @@ -11,53 +11,41 @@ import io.opentelemetry.api.metrics.Meter; import io.opentelemetry.api.metrics.ObservableLongGauge; -import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.telemetry.apm.AbstractInstrument; import org.elasticsearch.telemetry.metric.LongWithAttributes; import java.util.Objects; -import java.util.concurrent.locks.ReentrantLock; import java.util.function.Supplier; /** * LongGaugeAdapter wraps an otel ObservableLongGauge */ public class LongGaugeAdapter extends AbstractInstrument implements org.elasticsearch.telemetry.metric.LongGauge { - private final Supplier observer; - private final ReleasableLock closedLock = new ReleasableLock(new ReentrantLock()); - private boolean closed = false; - public LongGaugeAdapter(Meter meter, String name, String description, String unit, Supplier observer) { - super(meter, name, description, unit); - this.observer = observer; + super(meter, new Builder(name, description, unit, observer)); } @Override - protected io.opentelemetry.api.metrics.ObservableLongGauge buildInstrument(Meter meter) { - return Objects.requireNonNull(meter) - .gaugeBuilder(getName()) - .ofLongs() - .setDescription(getDescription()) - .setUnit(getUnit()) - .buildWithCallback(measurement -> { - LongWithAttributes observation; - try { - observation = observer.get(); - } catch (RuntimeException err) { - assert false : "observer must not throw [" + err.getMessage() + "]"; - return; - } - measurement.record(observation.value(), OtelHelper.fromMap(observation.attributes())); - }); + public void close() throws Exception { + getInstrument().close(); } - @Override - public void 
close() throws Exception { - try (ReleasableLock lock = closedLock.acquire()) { - if (closed == false) { - getInstrument().close(); - } - closed = true; + private static class Builder extends AbstractInstrument.Builder { + private final Supplier observer; + + private Builder(String name, String description, String unit, Supplier observer) { + super(name, description, unit); + this.observer = Objects.requireNonNull(observer); + } + + @Override + public ObservableLongGauge build(Meter meter) { + return Objects.requireNonNull(meter) + .gaugeBuilder(name) + .ofLongs() + .setDescription(description) + .setUnit(unit) + .buildWithCallback(OtelHelper.longMeasurementCallback(observer)); } } } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongHistogramAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongHistogramAdapter.java index 2b8e76df0dd0e..286922027ce2a 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongHistogramAdapter.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongHistogramAdapter.java @@ -20,19 +20,8 @@ * LongHistogramAdapter wraps an otel LongHistogram */ public class LongHistogramAdapter extends AbstractInstrument implements org.elasticsearch.telemetry.metric.LongHistogram { - public LongHistogramAdapter(Meter meter, String name, String description, String unit) { - super(meter, name, description, unit); - } - - @Override - protected io.opentelemetry.api.metrics.LongHistogram buildInstrument(Meter meter) { - return Objects.requireNonNull(meter) - .histogramBuilder(getName()) - .ofLongs() - .setDescription(getDescription()) - .setUnit(getUnit()) - .build(); + super(meter, new Builder(name, description, unit)); } @Override @@ -44,4 +33,15 @@ public void record(long value) { public void record(long value, Map attributes) { getInstrument().record(value, OtelHelper.fromMap(attributes)); } + + private 
static class Builder extends AbstractInstrument.Builder { + private Builder(String name, String description, String unit) { + super(name, description, unit); + } + + @Override + public LongHistogram build(Meter meter) { + return Objects.requireNonNull(meter).histogramBuilder(name).ofLongs().setDescription(description).setUnit(unit).build(); + } + } } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongUpDownCounterAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongUpDownCounterAdapter.java index a59a114bc2264..fa09740085aa3 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongUpDownCounterAdapter.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongUpDownCounterAdapter.java @@ -24,13 +24,7 @@ public class LongUpDownCounterAdapter extends AbstractInstrument attributes) { getInstrument().add(inc, OtelHelper.fromMap(attributes)); } + + private static class Builder extends AbstractInstrument.Builder { + private Builder(String name, String description, String unit) { + super(name, description, unit); + } + + @Override + public LongUpDownCounter build(Meter meter) { + return Objects.requireNonNull(meter).upDownCounterBuilder(name).setDescription(description).setUnit(unit).build(); + } + } } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/OtelHelper.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/OtelHelper.java index 18bf66cee5391..3e8ab415bd25e 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/OtelHelper.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/OtelHelper.java @@ -9,10 +9,21 @@ package org.elasticsearch.telemetry.apm.internal.metrics; import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.ObservableDoubleMeasurement; +import 
io.opentelemetry.api.metrics.ObservableLongMeasurement; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.telemetry.metric.DoubleWithAttributes; +import org.elasticsearch.telemetry.metric.LongWithAttributes; import java.util.Map; +import java.util.function.Consumer; +import java.util.function.Supplier; class OtelHelper { + private static final Logger logger = LogManager.getLogger(OtelHelper.class); + static Attributes fromMap(Map attributes) { if (attributes == null || attributes.isEmpty()) { return Attributes.empty(); @@ -41,4 +52,38 @@ static Attributes fromMap(Map attributes) { }); return builder.build(); } + + static Consumer doubleMeasurementCallback(Supplier observer) { + return measurement -> { + DoubleWithAttributes observation; + try { + observation = observer.get(); + } catch (RuntimeException err) { + assert false : "observer must not throw [" + err.getMessage() + "]"; + logger.error("doubleMeasurementCallback observer unexpected error", err); + return; + } + if (observation == null) { + return; + } + measurement.record(observation.value(), OtelHelper.fromMap(observation.attributes())); + }; + } + + static Consumer longMeasurementCallback(Supplier observer) { + return measurement -> { + LongWithAttributes observation; + try { + observation = observer.get(); + } catch (RuntimeException err) { + assert false : "observer must not throw [" + err.getMessage() + "]"; + logger.error("longMeasurementCallback observer unexpected error", err); + return; + } + if (observation == null) { + return; + } + measurement.record(observation.value(), OtelHelper.fromMap(observation.attributes())); + }; + } } diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java index 82dd911d1b821..778ca108dc5fe 100644 --- 
a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java @@ -12,19 +12,37 @@ import io.opentelemetry.api.metrics.Meter; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.telemetry.Measurement; import org.elasticsearch.telemetry.apm.internal.APMAgentSettings; import org.elasticsearch.telemetry.apm.internal.APMMeterService; import org.elasticsearch.telemetry.apm.internal.TestAPMMeterService; +import org.elasticsearch.telemetry.metric.DoubleAsyncCounter; import org.elasticsearch.telemetry.metric.DoubleCounter; +import org.elasticsearch.telemetry.metric.DoubleGauge; +import org.elasticsearch.telemetry.metric.DoubleHistogram; +import org.elasticsearch.telemetry.metric.DoubleUpDownCounter; +import org.elasticsearch.telemetry.metric.DoubleWithAttributes; +import org.elasticsearch.telemetry.metric.Instrument; +import org.elasticsearch.telemetry.metric.LongAsyncCounter; import org.elasticsearch.telemetry.metric.LongCounter; +import org.elasticsearch.telemetry.metric.LongGauge; +import org.elasticsearch.telemetry.metric.LongHistogram; +import org.elasticsearch.telemetry.metric.LongUpDownCounter; +import org.elasticsearch.telemetry.metric.LongWithAttributes; import org.elasticsearch.test.ESTestCase; +import java.util.Collections; +import java.util.List; +import java.util.function.Supplier; + import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.sameInstance; public class APMMeterRegistryTests extends ESTestCase { - Meter testOtel = new RecordingOtelMeter(); + RecordingOtelMeter testOtel = new RecordingOtelMeter(); Meter noopOtel = OpenTelemetry.noop().getMeter("noop"); @@ -97,4 +115,61 @@ public void testMaxNameLength() { ); assertThat(iae.getMessage(), 
containsString("exceeds maximum length [255]")); } + + public void testAllInstrumentsSwitchProviders() { + TestAPMMeterService apmMeter = new TestAPMMeterService( + Settings.builder().put(APMAgentSettings.TELEMETRY_METRICS_ENABLED_SETTING.getKey(), false).build(), + () -> testOtel, + () -> noopOtel + ); + APMMeterRegistry registry = apmMeter.getMeterRegistry(); + + Supplier doubleObserver = () -> new DoubleWithAttributes(1.5, Collections.emptyMap()); + DoubleCounter dc = registry.registerDoubleCounter("dc", "", ""); + DoubleUpDownCounter dudc = registry.registerDoubleUpDownCounter("dudc", "", ""); + DoubleHistogram dh = registry.registerDoubleHistogram("dh", "", ""); + DoubleAsyncCounter dac = registry.registerDoubleAsyncCounter("dac", "", "", doubleObserver); + DoubleGauge dg = registry.registerDoubleGauge("dg", "", "", doubleObserver); + + Supplier longObserver = () -> new LongWithAttributes(100, Collections.emptyMap()); + LongCounter lc = registry.registerLongCounter("lc", "", ""); + LongUpDownCounter ludc = registry.registerLongUpDownCounter("ludc", "", ""); + LongHistogram lh = registry.registerLongHistogram("lh", "", ""); + LongAsyncCounter lac = registry.registerLongAsyncCounter("lac", "", "", longObserver); + LongGauge lg = registry.registerLongGauge("lg", "", "", longObserver); + + apmMeter.setEnabled(true); + + testOtel.collectMetrics(); + dc.increment(); + dudc.add(1.0); + dh.record(1.0); + lc.increment(); + ludc.add(1); + lh.record(1); + + hasOneDouble(dc, 1.0d); + hasOneDouble(dudc, 1.0d); + hasOneDouble(dh, 1.0d); + hasOneDouble(dac, 1.5d); + hasOneDouble(dg, 1.5d); + + hasOneLong(lc, 1L); + hasOneLong(ludc, 1L); + hasOneLong(lh, 1L); + hasOneLong(lac, 100L); + hasOneLong(lg, 100L); + } + + private void hasOneDouble(Instrument instrument, double value) { + List measurements = testOtel.getRecorder().getMeasurements(instrument); + assertThat(measurements, hasSize(1)); + assertThat(measurements.get(0).getDouble(), equalTo(value)); + } + + private void 
hasOneLong(Instrument instrument, long value) { + List measurements = testOtel.getRecorder().getMeasurements(instrument); + assertThat(measurements, hasSize(1)); + assertThat(measurements.get(0).getLong(), equalTo(value)); + } } diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/RecordingOtelMeter.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/RecordingOtelMeter.java index 94627c1e53813..793803fb8c277 100644 --- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/RecordingOtelMeter.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/RecordingOtelMeter.java @@ -355,7 +355,7 @@ public ObservableDoubleMeasurement buildObserver() { private class DoubleUpDownRecorder extends AbstractInstrument implements DoubleUpDownCounter, OtelInstrument { DoubleUpDownRecorder(String name) { - super(name, InstrumentType.LONG_UP_DOWN_COUNTER); + super(name, InstrumentType.DOUBLE_UP_DOWN_COUNTER); } protected DoubleUpDownRecorder(String name, InstrumentType instrument) { @@ -616,7 +616,9 @@ public LongHistogramBuilder ofLongs() { @Override public DoubleHistogram build() { - return new DoubleHistogramRecorder(name); + DoubleHistogramRecorder histogram = new DoubleHistogramRecorder(name); + recorder.register(histogram, histogram.getInstrument(), name, description, unit); + return histogram; } } @@ -661,7 +663,9 @@ public LongHistogramBuilder setUnit(String unit) { @Override public LongHistogram build() { - return new LongHistogramRecorder(name); + LongHistogramRecorder histogram = new LongHistogramRecorder(name); + recorder.register(histogram, histogram.getInstrument(), name, description, unit); + return histogram; } } diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/AsyncCountersAdapterTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/AsyncCountersAdapterTests.java index fa8706deee870..3e23b741e01e5 100644 --- 
a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/AsyncCountersAdapterTests.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/AsyncCountersAdapterTests.java @@ -99,4 +99,26 @@ public void testDoubleAsyncAdapter() throws Exception { metrics = otelMeter.getRecorder().getMeasurements(doubleAsyncCounter); assertThat(metrics, hasSize(0)); } + + public void testNullGaugeRecord() throws Exception { + DoubleAsyncCounter dcounter = registry.registerDoubleAsyncCounter( + "name", + "desc", + "unit", + new AtomicReference()::get + ); + otelMeter.collectMetrics(); + List metrics = otelMeter.getRecorder().getMeasurements(dcounter); + assertThat(metrics, hasSize(0)); + + LongAsyncCounter lcounter = registry.registerLongAsyncCounter( + "name", + "desc", + "unit", + new AtomicReference()::get + ); + otelMeter.collectMetrics(); + metrics = otelMeter.getRecorder().getMeasurements(lcounter); + assertThat(metrics, hasSize(0)); + } } diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/GaugeAdapterTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/GaugeAdapterTests.java index e8cd18521f842..10f2d58768d48 100644 --- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/GaugeAdapterTests.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/GaugeAdapterTests.java @@ -100,4 +100,16 @@ public void testDoubleGaugeRecord() throws Exception { metrics = otelMeter.getRecorder().getMeasurements(gauge); assertThat(metrics, hasSize(0)); } + + public void testNullGaugeRecord() throws Exception { + DoubleGauge dgauge = registry.registerDoubleGauge("name", "desc", "unit", new AtomicReference()::get); + otelMeter.collectMetrics(); + List metrics = otelMeter.getRecorder().getMeasurements(dgauge); + assertThat(metrics, hasSize(0)); + + LongGauge lgauge = registry.registerLongGauge("name", "desc", "unit", new 
AtomicReference()::get); + otelMeter.collectMetrics(); + metrics = otelMeter.getRecorder().getMeasurements(lgauge); + assertThat(metrics, hasSize(0)); + } } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java index d3eaee36f67f7..03bd753e29068 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java @@ -30,6 +30,7 @@ import org.elasticsearch.action.datastreams.lifecycle.ErrorEntry; import org.elasticsearch.action.datastreams.lifecycle.ExplainIndexDataStreamLifecycle; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.cluster.coordination.StableMasterHealthIndicatorService; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamAction; @@ -46,6 +47,11 @@ import org.elasticsearch.datastreams.DataStreamsPlugin; import org.elasticsearch.datastreams.lifecycle.action.ExplainDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.action.PutDataStreamLifecycleAction; +import org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthIndicatorService; +import org.elasticsearch.health.Diagnosis; +import org.elasticsearch.health.GetHealthAction; +import org.elasticsearch.health.HealthIndicatorResult; +import org.elasticsearch.health.HealthStatus; import org.elasticsearch.health.node.DataStreamLifecycleHealthInfo; import org.elasticsearch.health.node.DslErrorInfo; import org.elasticsearch.health.node.FetchHealthInfoCacheAction; @@ -76,9 +82,12 @@ import static 
org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService.DATA_STREAM_MERGE_POLICY_TARGET_FLOOR_SEGMENT_SETTING; import static org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService.ONE_HUNDRED_MB; import static org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService.TARGET_MERGE_FACTOR_VALUE; +import static org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthIndicatorService.STAGNATING_BACKING_INDICES_DIAGNOSIS_DEF; +import static org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthIndicatorService.STAGNATING_INDEX_IMPACT; import static org.elasticsearch.index.IndexSettings.LIFECYCLE_ORIGINATION_DATE; import static org.elasticsearch.indices.ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -447,6 +456,27 @@ public void testErrorRecordingOnRollover() throws Exception { assertThat(errorInfo.retryCount(), greaterThanOrEqualTo(3)); }); + GetHealthAction.Response healthResponse = client().execute(GetHealthAction.INSTANCE, new GetHealthAction.Request(true, 1000)) + .actionGet(); + HealthIndicatorResult masterIsStableIndicator = healthResponse.findIndicator(StableMasterHealthIndicatorService.NAME); + // if the cluster doesn't have a stable master we'll avoid asserting on the health report API as some indicators will not + // be computed + if (masterIsStableIndicator.status() == HealthStatus.GREEN) { + // the shards capacity indicator is dictating the overall status + assertThat(healthResponse.getStatus(), is(HealthStatus.RED)); + HealthIndicatorResult dslIndicator = healthResponse.findIndicator(DataStreamLifecycleHealthIndicatorService.NAME); + assertThat(dslIndicator.status(), 
is(HealthStatus.YELLOW)); + assertThat(dslIndicator.impacts(), is(STAGNATING_INDEX_IMPACT)); + assertThat( + dslIndicator.symptom(), + is("A backing index has repeatedly encountered errors whilst trying to advance in its lifecycle") + ); + + Diagnosis diagnosis = dslIndicator.diagnosisList().get(0); + assertThat(diagnosis.definition(), is(STAGNATING_BACKING_INDICES_DIAGNOSIS_DEF)); + assertThat(diagnosis.affectedResources().get(0).getValues(), containsInAnyOrder(writeIndexName)); + } + // let's reset the cluster max shards per node limit to allow rollover to proceed and check the error store is empty updateClusterSettings(Settings.builder().putNull("*")); @@ -476,6 +506,14 @@ public void testErrorRecordingOnRollover() throws Exception { DataStreamLifecycleHealthInfo dslHealthInfoOnHealthNode = healthNodeResponse.getHealthInfo().dslHealthInfo(); assertThat(dslHealthInfoOnHealthNode, is(DataStreamLifecycleHealthInfo.NO_DSL_ERRORS)); }); + + healthResponse = client().execute(GetHealthAction.INSTANCE, new GetHealthAction.Request(true, 1000)).actionGet(); + masterIsStableIndicator = healthResponse.findIndicator(StableMasterHealthIndicatorService.NAME); + // if the cluster doesn't have a stable master we'll avoid asserting on the health report API as some indicators will not + // be computed + if (masterIsStableIndicator.status() == HealthStatus.GREEN) { + assertThat(healthResponse.getStatus(), is(HealthStatus.GREEN)); + } } public void testErrorRecordingOnRetention() throws Exception { @@ -569,6 +607,30 @@ public void testErrorRecordingOnRetention() throws Exception { assertThat(List.of(firstGenerationIndex, secondGenerationIndex).contains(errorInfo.indexName()), is(true)); }); + GetHealthAction.Response healthResponse = client().execute(GetHealthAction.INSTANCE, new GetHealthAction.Request(true, 1000)) + .actionGet(); + HealthIndicatorResult masterIsStableIndicator = healthResponse.findIndicator(StableMasterHealthIndicatorService.NAME); + // if the cluster doesn't have 
a stable master we'll avoid asserting on the health report API as some indicators will not + // be computed + if (masterIsStableIndicator.status() == HealthStatus.GREEN) { + // the dsl indicator should turn the overall status yellow + assertThat(healthResponse.getStatus(), is(HealthStatus.YELLOW)); + HealthIndicatorResult dslIndicator = healthResponse.findIndicator(DataStreamLifecycleHealthIndicatorService.NAME); + assertThat(dslIndicator.status(), is(HealthStatus.YELLOW)); + assertThat(dslIndicator.impacts(), is(STAGNATING_INDEX_IMPACT)); + assertThat( + dslIndicator.symptom(), + is("2 backing indices have repeatedly encountered errors whilst trying to advance in its lifecycle") + ); + + Diagnosis diagnosis = dslIndicator.diagnosisList().get(0); + assertThat(diagnosis.definition(), is(STAGNATING_BACKING_INDICES_DIAGNOSIS_DEF)); + assertThat( + diagnosis.affectedResources().get(0).getValues(), + containsInAnyOrder(firstGenerationIndex, secondGenerationIndex) + ); + } + // let's mark the index as writeable and make sure it's deleted and the error store is empty updateIndexSettings(Settings.builder().put(READ_ONLY.settingName(), false), firstGenerationIndex); @@ -598,6 +660,20 @@ public void testErrorRecordingOnRetention() throws Exception { DataStreamLifecycleHealthInfo dslHealthInfoOnHealthNode = healthNodeResponse.getHealthInfo().dslHealthInfo(); assertThat(dslHealthInfoOnHealthNode, is(DataStreamLifecycleHealthInfo.NO_DSL_ERRORS)); }); + + healthResponse = client().execute(GetHealthAction.INSTANCE, new GetHealthAction.Request(true, 1000)).actionGet(); + masterIsStableIndicator = healthResponse.findIndicator(StableMasterHealthIndicatorService.NAME); + // if the cluster doesn't have a stable master we'll avoid asserting on the health report API as some indicators will not + // be computed + if (masterIsStableIndicator.status() == HealthStatus.GREEN) { + // the dsl indicator should turn the overall status yellow + assertThat(healthResponse.getStatus(), 
is(HealthStatus.GREEN)); + HealthIndicatorResult dslIndicator = healthResponse.findIndicator(DataStreamLifecycleHealthIndicatorService.NAME); + assertThat(dslIndicator.status(), is(HealthStatus.GREEN)); + assertThat(dslIndicator.impacts().size(), is(0)); + assertThat(dslIndicator.symptom(), is("Data streams are executing their lifecycles without issues")); + assertThat(dslIndicator.diagnosisList().size(), is(0)); + } } finally { // when the test executes successfully this will not be needed however, otherwise we need to make sure the index is // "delete-able" for test cleanup diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java index 9ac3a1afed5a5..fb93b7d688a74 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java @@ -47,6 +47,7 @@ import org.elasticsearch.datastreams.lifecycle.action.TransportGetDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.action.TransportGetDataStreamLifecycleStatsAction; import org.elasticsearch.datastreams.lifecycle.action.TransportPutDataStreamLifecycleAction; +import org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthIndicatorService; import org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthInfoPublisher; import org.elasticsearch.datastreams.lifecycle.rest.RestDataStreamLifecycleStatsAction; import org.elasticsearch.datastreams.lifecycle.rest.RestDeleteDataStreamLifecycleAction; @@ -60,8 +61,10 @@ import org.elasticsearch.datastreams.rest.RestMigrateToDataStreamAction; import org.elasticsearch.datastreams.rest.RestModifyDataStreamsAction; import org.elasticsearch.datastreams.rest.RestPromoteDataStreamAction; +import org.elasticsearch.health.HealthIndicatorService; import 
org.elasticsearch.index.IndexSettingProvider; import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.HealthPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; @@ -76,7 +79,7 @@ import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.DATA_STREAM_LIFECYCLE_ORIGIN; -public class DataStreamsPlugin extends Plugin implements ActionPlugin { +public class DataStreamsPlugin extends Plugin implements ActionPlugin, HealthPlugin { public static final Setting TIME_SERIES_POLL_INTERVAL = Setting.timeSetting( "time_series.poll_interval", @@ -112,6 +115,7 @@ public class DataStreamsPlugin extends Plugin implements ActionPlugin { private final SetOnce dataLifecycleInitialisationService = new SetOnce<>(); private final SetOnce dataStreamLifecycleErrorsPublisher = new SetOnce<>(); + private final SetOnce dataStreamLifecycleHealthIndicatorService = new SetOnce<>(); private final Settings settings; public DataStreamsPlugin(Settings settings) { @@ -184,6 +188,8 @@ public Collection createComponents(PluginServices services) { ) ); dataLifecycleInitialisationService.get().init(); + dataStreamLifecycleHealthIndicatorService.set(new DataStreamLifecycleHealthIndicatorService()); + components.add(errorStoreInitialisationService.get()); components.add(dataLifecycleInitialisationService.get()); components.add(dataStreamLifecycleErrorsPublisher.get()); @@ -251,4 +257,9 @@ public void close() throws IOException { throw new ElasticsearchException("unable to close the data stream lifecycle service", e); } } + + @Override + public Collection getHealthIndicatorServices() { + return List.of(dataStreamLifecycleHealthIndicatorService.get()); + } } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthIndicatorService.java 
b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthIndicatorService.java new file mode 100644 index 0000000000000..0628bed0f9019 --- /dev/null +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthIndicatorService.java @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.datastreams.lifecycle.health; + +import org.elasticsearch.health.Diagnosis; +import org.elasticsearch.health.HealthIndicatorDetails; +import org.elasticsearch.health.HealthIndicatorImpact; +import org.elasticsearch.health.HealthIndicatorResult; +import org.elasticsearch.health.HealthIndicatorService; +import org.elasticsearch.health.HealthStatus; +import org.elasticsearch.health.ImpactArea; +import org.elasticsearch.health.SimpleHealthIndicatorDetails; +import org.elasticsearch.health.node.DataStreamLifecycleHealthInfo; +import org.elasticsearch.health.node.DslErrorInfo; +import org.elasticsearch.health.node.HealthInfo; + +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; + +import static java.util.stream.Collectors.toList; + +public class DataStreamLifecycleHealthIndicatorService implements HealthIndicatorService { + + public static final String NAME = "data_stream_lifecycle"; + public static final String DSL_EXPLAIN_HELP_URL = "https://ela.st/explain-data-stream-lifecycle"; + + public static final String STAGNATING_BACKING_INDEX_IMPACT_ID = "stagnating_backing_index"; + + public static final List STAGNATING_INDEX_IMPACT = List.of( + new HealthIndicatorImpact( + NAME, + 
STAGNATING_BACKING_INDEX_IMPACT_ID, + 3, + "Data streams backing indices cannot make progress in their lifecycle. The performance and " + + "stability of the indices and/or the cluster could be impacted.", + List.of(ImpactArea.DEPLOYMENT_MANAGEMENT) + ) + ); + + public static final Diagnosis.Definition STAGNATING_BACKING_INDICES_DIAGNOSIS_DEF = new Diagnosis.Definition( + NAME, + "stagnating_dsl_backing_index", + "Some backing indices are repeatedly encountering errors in their lifecycle execution.", + "Check the current status of the affected indices using the [GET /<affected_index_name>/_lifecycle/explain] API. Please " + + "replace the <affected_index_name> in the API with the actual index name (or the data stream name for a wider overview).", + DSL_EXPLAIN_HELP_URL + ); + + @Override + public String name() { + return NAME; + } + + @Override + public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResourcesCount, HealthInfo healthInfo) { + DataStreamLifecycleHealthInfo dataStreamLifecycleHealthInfo = healthInfo.dslHealthInfo(); + if (dataStreamLifecycleHealthInfo == null) { + // DSL reports health information on every run, so data will eventually arrive to the health node. In the meantime, let's + // report GREEN health + return createIndicator( + HealthStatus.GREEN, + "No data stream lifecycle health data available yet. 
Health information will be reported after the first run.", + HealthIndicatorDetails.EMPTY, + List.of(), + List.of() + ); + } + + List stagnatingBackingIndices = dataStreamLifecycleHealthInfo.dslErrorsInfo(); + if (stagnatingBackingIndices.isEmpty()) { + return createIndicator( + HealthStatus.GREEN, + "Data streams are executing their lifecycles without issues", + createDetails(verbose, dataStreamLifecycleHealthInfo), + List.of(), + List.of() + ); + } else { + List affectedIndices = stagnatingBackingIndices.stream() + .map(DslErrorInfo::indexName) + .limit(Math.min(maxAffectedResourcesCount, stagnatingBackingIndices.size())) + .collect(toList()); + return createIndicator( + HealthStatus.YELLOW, + (stagnatingBackingIndices.size() > 1 ? stagnatingBackingIndices.size() + " backing indices have" : "A backing index has") + + " repeatedly encountered errors whilst trying to advance in its lifecycle", + createDetails(verbose, dataStreamLifecycleHealthInfo), + STAGNATING_INDEX_IMPACT, + List.of( + new Diagnosis( + STAGNATING_BACKING_INDICES_DIAGNOSIS_DEF, + List.of(new Diagnosis.Resource(Diagnosis.Resource.Type.INDEX, affectedIndices)) + ) + ) + ); + } + } + + private static HealthIndicatorDetails createDetails(boolean verbose, DataStreamLifecycleHealthInfo dataStreamLifecycleHealthInfo) { + if (verbose == false) { + return HealthIndicatorDetails.EMPTY; + } + + var details = new HashMap(); + details.put("total_backing_indices_in_error", dataStreamLifecycleHealthInfo.totalErrorEntriesCount()); + details.put("stagnating_backing_indices_count", dataStreamLifecycleHealthInfo.dslErrorsInfo().size()); + if (dataStreamLifecycleHealthInfo.dslErrorsInfo().isEmpty() == false) { + details.put("stagnating_backing_indices", dataStreamLifecycleHealthInfo.dslErrorsInfo().stream().map(dslError -> { + LinkedHashMap errorDetails = new LinkedHashMap<>(3, 1L); + errorDetails.put("index_name", dslError.indexName()); + errorDetails.put("first_occurrence_timestamp", dslError.firstOccurrence()); + 
errorDetails.put("retry_count", dslError.retryCount()); + return errorDetails; + }).toList()); + } + return new SimpleHealthIndicatorDetails(details); + } +} diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthIndicatorServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthIndicatorServiceTests.java new file mode 100644 index 0000000000000..877b463301311 --- /dev/null +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthIndicatorServiceTests.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.datastreams.lifecycle.health; + +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.common.Strings; +import org.elasticsearch.health.Diagnosis; +import org.elasticsearch.health.HealthIndicatorDetails; +import org.elasticsearch.health.HealthIndicatorResult; +import org.elasticsearch.health.HealthStatus; +import org.elasticsearch.health.node.DataStreamLifecycleHealthInfo; +import org.elasticsearch.health.node.DslErrorInfo; +import org.elasticsearch.health.node.HealthInfo; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthIndicatorService.STAGNATING_BACKING_INDICES_DIAGNOSIS_DEF; +import static org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthIndicatorService.STAGNATING_INDEX_IMPACT; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.core.IsNot.not; + +public class DataStreamLifecycleHealthIndicatorServiceTests extends ESTestCase { + + private DataStreamLifecycleHealthIndicatorService service; + + @Before + public void setupService() { + service = new DataStreamLifecycleHealthIndicatorService(); + } + + public void testGreenWhenNoDSLHealthData() { + HealthIndicatorResult result = service.calculate(true, new HealthInfo(Map.of(), null)); + assertThat(result.status(), is(HealthStatus.GREEN)); + assertThat( + result.symptom(), + is("No data stream lifecycle health data available yet. 
Health information will be reported after the first run.") + ); + assertThat(result.details(), is(HealthIndicatorDetails.EMPTY)); + assertThat(result.impacts(), is(List.of())); + assertThat(result.diagnosisList(), is(List.of())); + } + + public void testGreenWhenEmptyListOfStagnatingIndices() { + HealthIndicatorResult result = service.calculate(true, new HealthInfo(Map.of(), new DataStreamLifecycleHealthInfo(List.of(), 15))); + assertThat(result.status(), is(HealthStatus.GREEN)); + assertThat(result.symptom(), is("Data streams are executing their lifecycles without issues")); + assertThat(result.details(), is(not(HealthIndicatorDetails.EMPTY))); + assertThat(Strings.toString(result.details()), containsString("\"total_backing_indices_in_error\":15")); + assertThat(result.impacts(), is(List.of())); + assertThat(result.diagnosisList(), is(List.of())); + } + + public void testYellowWhenStagnatingIndicesPresent() { + String secondGenerationIndex = DataStream.getDefaultBackingIndexName("foo", 2L); + String firstGenerationIndex = DataStream.getDefaultBackingIndexName("foo", 1L); + HealthIndicatorResult result = service.calculate( + true, + new HealthInfo( + Map.of(), + new DataStreamLifecycleHealthInfo( + List.of(new DslErrorInfo(secondGenerationIndex, 1L, 200), new DslErrorInfo(firstGenerationIndex, 3L, 100)), + 15 + ) + ) + ); + assertThat(result.status(), is(HealthStatus.YELLOW)); + assertThat(result.symptom(), is("2 backing indices have repeatedly encountered errors whilst trying to advance in its lifecycle")); + assertThat(result.details(), is(not(HealthIndicatorDetails.EMPTY))); + String detailsAsString = Strings.toString(result.details()); + assertThat(detailsAsString, containsString("\"total_backing_indices_in_error\":15")); + assertThat(detailsAsString, containsString("\"stagnating_backing_indices_count\":2")); + assertThat( + detailsAsString, + containsString( + String.format( + Locale.ROOT, + "\"index_name\":\"%s\"," + + 
"\"first_occurrence_timestamp\":1,\"retry_count\":200},{\"index_name\":\"%s\"," + + "\"first_occurrence_timestamp\":3,\"retry_count\":100", + secondGenerationIndex, + firstGenerationIndex + ) + ) + ); + assertThat(result.impacts(), is(STAGNATING_INDEX_IMPACT)); + Diagnosis diagnosis = result.diagnosisList().get(0); + assertThat(diagnosis.definition(), is(STAGNATING_BACKING_INDICES_DIAGNOSIS_DEF)); + assertThat(diagnosis.affectedResources().get(0).getValues(), containsInAnyOrder(secondGenerationIndex, firstGenerationIndex)); + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/ErrorCauseWrapper.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/ErrorCauseWrapper.java index aeaf44bfd014c..308d6223c666e 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/ErrorCauseWrapper.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/ErrorCauseWrapper.java @@ -23,6 +23,7 @@ class ErrorCauseWrapper extends ElasticsearchException { private static final List> wrappedErrors = List.of( PainlessError.class, + PainlessExplainError.class, OutOfMemoryError.class, StackOverflowError.class, LinkageError.class diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java index 87b199cd1b43f..48da785e801d3 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java @@ -20,6 +20,7 @@ import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.not; @@ -30,29 +31,32 @@ public class DebugTests extends ScriptTestCase { public void testExplain() { // Debug.explain can 
explain an object Object dummy = new Object(); - PainlessExplainError e = expectScriptThrows( - PainlessExplainError.class, - () -> exec("Debug.explain(params.a)", singletonMap("a", dummy), true) - ); + var wrapper = expectScriptThrows(ErrorCauseWrapper.class, () -> exec("Debug.explain(params.a)", singletonMap("a", dummy), true)); + assertThat(wrapper.realCause.getClass(), equalTo(PainlessExplainError.class)); + var e = (PainlessExplainError) wrapper.realCause; assertSame(dummy, e.getObjectToExplain()); assertThat(e.getHeaders(painlessLookup), hasEntry("es.to_string", singletonList(dummy.toString()))); assertThat(e.getHeaders(painlessLookup), hasEntry("es.java_class", singletonList("java.lang.Object"))); assertThat(e.getHeaders(painlessLookup), hasEntry("es.painless_class", singletonList("java.lang.Object"))); // Null should be ok - e = expectScriptThrows(PainlessExplainError.class, () -> exec("Debug.explain(null)")); + wrapper = expectScriptThrows(ErrorCauseWrapper.class, () -> exec("Debug.explain(null)")); + assertThat(wrapper.realCause.getClass(), equalTo(PainlessExplainError.class)); + e = (PainlessExplainError) wrapper.realCause; assertNull(e.getObjectToExplain()); assertThat(e.getHeaders(painlessLookup), hasEntry("es.to_string", singletonList("null"))); assertThat(e.getHeaders(painlessLookup), not(hasKey("es.java_class"))); assertThat(e.getHeaders(painlessLookup), not(hasKey("es.painless_class"))); // You can't catch the explain exception - e = expectScriptThrows(PainlessExplainError.class, () -> exec(""" + wrapper = expectScriptThrows(ErrorCauseWrapper.class, () -> exec(""" try { Debug.explain(params.a) } catch (Exception e) { return 1 }""", singletonMap("a", dummy), true)); + assertThat(wrapper.realCause.getClass(), equalTo(PainlessExplainError.class)); + e = (PainlessExplainError) wrapper.realCause; assertSame(dummy, e.getObjectToExplain()); } diff --git a/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ChildQuerySearchIT.java 
b/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ChildQuerySearchIT.java index ae1adf4160c2a..02776eb277020 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ChildQuerySearchIT.java +++ b/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ChildQuerySearchIT.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.common.lucene.search.function.CombineFunction; @@ -69,6 +68,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertScrollResponsesAndHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; @@ -1403,22 +1403,15 @@ public void testParentChildQueriesViaScrollApi() throws Exception { boolQuery().must(matchAllQuery()).filter(hasParentQuery("parent", matchAllQuery(), false)) }; for (QueryBuilder query : queries) { - SearchResponse scrollResponse = prepareSearch("test").setScroll(TimeValue.timeValueSeconds(30)) - .setSize(1) - .addStoredField("_id") - .setQuery(query) - .get(); - - assertNoFailures(scrollResponse); - assertThat(scrollResponse.getHits().getTotalHits().value, equalTo(10L)); - int scannedDocs = 0; - do { - 
assertThat(scrollResponse.getHits().getTotalHits().value, equalTo(10L)); - scannedDocs += scrollResponse.getHits().getHits().length; - scrollResponse = client().prepareSearchScroll(scrollResponse.getScrollId()).setScroll(TimeValue.timeValueSeconds(30)).get(); - } while (scrollResponse.getHits().getHits().length > 0); - clearScroll(scrollResponse.getScrollId()); - assertThat(scannedDocs, equalTo(10)); + assertScrollResponsesAndHitCount( + TimeValue.timeValueSeconds(60), + prepareSearch("test").setScroll(TimeValue.timeValueSeconds(30)).setSize(1).addStoredField("_id").setQuery(query), + 10, + (respNum, response) -> { + assertNoFailures(response); + assertThat(response.getHits().getTotalHits().value, equalTo(10L)); + } + ); } } diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterHealthRestCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterHealthRestCancellationIT.java index f165f00c5cc2f..64dd20f1fdfc4 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterHealthRestCancellationIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterHealthRestCancellationIT.java @@ -33,6 +33,8 @@ public class ClusterHealthRestCancellationIT extends HttpSmokeTestCase { @TestIssueLogging( issueUrl = "https://github.com/elastic/elasticsearch/issues/100062", value = "org.elasticsearch.test.TaskAssertions:TRACE" + + ",org.elasticsearch.cluster.service.MasterService:TRACE" + + ",org.elasticsearch.tasks.TaskManager:TRACE" ) public void testClusterHealthRestCancellation() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/AggregationsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/AggregationsIntegrationIT.java index df8f3825a5ea6..a856ee36aadc2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/AggregationsIntegrationIT.java 
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/AggregationsIntegrationIT.java @@ -18,7 +18,8 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertScrollResponsesAndHitCount; @ESIntegTestCase.SuiteScopeTestCase public class AggregationsIntegrationIT extends ESIntegTestCase { @@ -38,32 +39,22 @@ public void setupSuiteScopeCluster() throws Exception { public void testScroll() { final int size = randomIntBetween(1, 4); - final String[] scroll = new String[1]; - final int[] total = new int[1]; - assertNoFailuresAndResponse( - prepareSearch("index").setSize(size).setScroll(TimeValue.timeValueMinutes(1)).addAggregation(terms("f").field("f")), - response -> { - Aggregations aggregations = response.getAggregations(); - assertNotNull(aggregations); - Terms terms = aggregations.get("f"); - assertEquals(Math.min(numDocs, 3L), terms.getBucketByKey("0").getDocCount()); - scroll[0] = response.getScrollId(); - total[0] = response.getHits().getHits().length; + assertScrollResponsesAndHitCount( + TimeValue.timeValueSeconds(60), + prepareSearch("index").setSize(size).addAggregation(terms("f").field("f")), + numDocs, + (respNum, response) -> { + assertNoFailures(response); + + if (respNum == 1) { // initial response. 
+ Aggregations aggregations = response.getAggregations(); + assertNotNull(aggregations); + Terms terms = aggregations.get("f"); + assertEquals(Math.min(numDocs, 3L), terms.getBucketByKey("0").getDocCount()); + } else { + assertNull(response.getAggregations()); + } } ); - int currentTotal = 0; - while (total[0] - currentTotal > 0) { - currentTotal = total[0]; - assertNoFailuresAndResponse( - client().prepareSearchScroll(scroll[0]).setScroll(TimeValue.timeValueMinutes(1)), - scrollResponse -> { - assertNull(scrollResponse.getAggregations()); - total[0] += scrollResponse.getHits().getHits().length; - scroll[0] = scrollResponse.getScrollId(); - } - ); - } - clearScroll(scroll[0]); - assertEquals(numDocs, total[0]); } } diff --git a/server/src/main/java/org/elasticsearch/action/ActionListener.java b/server/src/main/java/org/elasticsearch/action/ActionListener.java index 5017f0af0007c..ae5835686425d 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/ActionListener.java @@ -145,15 +145,6 @@ public String toString() { }; } - /** - * @deprecated in favour of {@link #running(Runnable)} because this implementation doesn't "wrap" exceptions from {@link #onResponse} - * into {@link #onFailure}. - */ - @Deprecated(forRemoval = true) - static ActionListener wrap(Runnable runnable) { - return running(runnable); - } - /** * Creates a listener that executes the appropriate consumer when the response (or failure) is received. This listener is "wrapped" in * the sense that an exception from the {@code onResponse} consumer is passed into the {@code onFailure} consumer. 
diff --git a/server/src/main/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumer.java b/server/src/main/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumer.java index 1e67522f6a671..13972ea2bf64a 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumer.java +++ b/server/src/main/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumer.java @@ -49,12 +49,17 @@ Stream getSuccessfulResults() { public void consumeResult(SearchPhaseResult result, Runnable next) { assert results.contains(result.getShardIndex()) == false : "shardIndex: " + result.getShardIndex() + " is already set"; results.add(result.getShardIndex()); + progressListener.notifyQueryResult(result.getShardIndex(), result.queryResult()); + // We have an empty result, track that we saw it for this shard and continue; + if (result.queryResult().isNull()) { + next.run(); + return; + } // set the relation to the first non-equal relation relationAtomicReference.compareAndSet(TotalHits.Relation.EQUAL_TO, result.queryResult().getTotalHits().relation); totalHits.add(result.queryResult().getTotalHits().value); terminatedEarly.compareAndSet(false, (result.queryResult().terminatedEarly() != null && result.queryResult().terminatedEarly())); timedOut.compareAndSet(false, result.queryResult().searchTimedOut()); - progressListener.notifyQueryResult(result.getShardIndex(), result.queryResult()); next.run(); } @@ -80,7 +85,7 @@ public SearchPhaseController.ReducedQueryPhase reduce() throws Exception { 1, 0, 0, - false + results.isEmpty() ); if (progressListener != SearchProgressListener.NOOP) { progressListener.notifyFinalReduce( diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java index fb5c9f2fea7de..229c34ecc1a14 100644 --- 
a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java @@ -68,7 +68,7 @@ * this will report GREEN. * If we have had a master within the last 30 seconds, but that master has changed more than 3 times in the last 30 minutes (and that is * confirmed by checking with the last-known master), then this will report YELLOW. - * If we have not had a master within the last 30 seconds, then this will will report RED with one exception. That exception is when: + * If we have not had a master within the last 30 seconds, then this will report RED with one exception. That exception is when: * (1) no node is elected master, (2) this node is not master eligible, (3) some node is master eligible, (4) we ask a master-eligible node * to run this service, and (5) it comes back with a result that is not RED. * Since this service needs to be able to run when there is no master at all, it does not depend on the dedicated health node (which @@ -99,7 +99,7 @@ public class CoordinationDiagnosticsService implements ClusterStateListener { /* * This is a Map of tasks that are periodically reaching out to other master eligible nodes to get their ClusterFormationStates for - * diagnosis. The key is the DisoveryNode for the master eligible node being polled, and the value is a Cancellable. + * diagnosis. The key is the DiscoveryNode for the master eligible node being polled, and the value is a Cancellable. * The field is accessed (reads/writes) from multiple threads, but the reference itself is only ever changed on the cluster change * event thread. */ @@ -121,7 +121,7 @@ public class CoordinationDiagnosticsService implements ClusterStateListener { volatile AtomicReference remoteCoordinationDiagnosisTask = null; /* * This field holds the result of the task in the remoteCoordinationDiagnosisTask field above. 
The field is accessed - * (reads/writes) from multiple threads, but is only ever reassigned on a the initialization thread and the cluster change event thread. + * (reads/writes) from multiple threads, but is only ever reassigned on the initialization thread and the cluster change event thread. */ volatile AtomicReference remoteCoordinationDiagnosisResult = null; @@ -294,7 +294,7 @@ private static CoordinationDiagnosticsDetails getDetails( /** * Returns the health result when we have detected locally that the master has changed to null repeatedly (by default more than 3 times - * in the last 30 minutes). This method attemtps to use the master history from a remote node to confirm what we are seeing locally. + * in the last 30 minutes). This method attempts to use the master history from a remote node to confirm what we are seeing locally. * If the information from the remote node confirms that the master history has been unstable, a YELLOW status is returned. If the * information from the remote node shows that the master history has been stable, then we assume that the problem is with this node * and a GREEN status is returned (the problems with this node will be covered in a separate health indicator). If there had been @@ -1133,7 +1133,7 @@ private Scheduler.Cancellable sendTransportRequest ); responseConsumer.accept(responseTransformationFunction.apply(response, null)); }, e -> { - logger.warn("Exception in remote request to master" + masterEligibleNode, e); + logger.warn("Exception in remote request to master " + masterEligibleNode, e); responseConsumer.accept(responseTransformationFunction.apply(null, e)); })); @@ -1143,7 +1143,7 @@ public void run() { if (masterEligibleNode == null) { /* * This node's PeerFinder hasn't yet discovered the master-eligible nodes. By notifying the responseConsumer with a null - * value we effectively do nothing, and allow this request to be recheduled. 
+ * value we effectively do nothing, and allow this request to be rescheduled. */ responseConsumer.accept(null); } else { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BlockDocValuesReader.java b/server/src/main/java/org/elasticsearch/index/mapper/BlockDocValuesReader.java index 2160f52cbec02..a9781aeb9e2db 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BlockDocValuesReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BlockDocValuesReader.java @@ -558,7 +558,8 @@ private static class SingletonOrdinals extends BlockDocValuesReader { private BlockLoader.Block readSingleDoc(BlockFactory factory, int docId) throws IOException { if (ordinals.advanceExact(docId)) { BytesRef v = ordinals.lookupOrd(ordinals.ordValue()); - return factory.constantBytes(v); + // the returned BytesRef can be reused + return factory.constantBytes(BytesRef.deepCopyOf(v)); } else { return factory.constantNulls(); } diff --git a/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java index 40d4e37045016..7bcafe7005047 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.core.AbstractRefCounted; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; @@ -149,6 +150,7 @@ public void terminatedEarly(boolean terminatedEarly) { this.terminatedEarly = terminatedEarly; } + @Nullable public Boolean terminatedEarly() { return this.terminatedEarly; } @@ -204,10 +206,12 @@ public void setRankShardResult(RankShardResult rankShardResult) { this.rankShardResult = 
rankShardResult; } + @Nullable public RankShardResult getRankShardResult() { return rankShardResult; } + @Nullable public DocValueFormat[] sortValueFormats() { return sortValueFormats; } @@ -252,6 +256,7 @@ public void aggregations(InternalAggregations aggregations) { hasAggs = aggregations != null; } + @Nullable public DelayableWriteable aggregations() { return aggregations; } @@ -455,6 +460,7 @@ public void writeToNoId(StreamOutput out) throws IOException { } } + @Nullable public TotalHits getTotalHits() { return totalHits; } diff --git a/server/src/test/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumerTests.java b/server/src/test/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumerTests.java new file mode 100644 index 0000000000000..33e6096bab763 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumerTests.java @@ -0,0 +1,133 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.search; + +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TotalHits; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.query.QuerySearchResult; +import org.elasticsearch.test.ESTestCase; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; + +public class CountOnlyQueryPhaseResultConsumerTests extends ESTestCase { + + public void testProgressListenerExceptionsAreCaught() throws Exception { + ThrowingSearchProgressListener searchProgressListener = new ThrowingSearchProgressListener(); + + List searchShards = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + searchShards.add(new SearchShard(null, new ShardId("index", "uuid", i))); + } + long timestamp = randomLongBetween(1000, Long.MAX_VALUE - 1000); + TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider( + timestamp, + timestamp, + () -> timestamp + 1000 + ); + searchProgressListener.notifyListShards(searchShards, Collections.emptyList(), SearchResponse.Clusters.EMPTY, false, timeProvider); + + CountOnlyQueryPhaseResultConsumer queryPhaseResultConsumer = new CountOnlyQueryPhaseResultConsumer(searchProgressListener, 10); + try { + AtomicInteger nextCounter = new AtomicInteger(0); + for (int i = 0; i < 10; i++) { + SearchShardTarget searchShardTarget = new SearchShardTarget("node", new ShardId("index", "uuid", i), null); + QuerySearchResult querySearchResult = new QuerySearchResult(); + TopDocs topDocs = new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]); + querySearchResult.topDocs(new 
TopDocsAndMaxScore(topDocs, Float.NaN), new DocValueFormat[0]); + querySearchResult.setSearchShardTarget(searchShardTarget); + querySearchResult.setShardIndex(i); + queryPhaseResultConsumer.consumeResult(querySearchResult, nextCounter::incrementAndGet); + } + + assertEquals(10, searchProgressListener.onQueryResult.get()); + queryPhaseResultConsumer.reduce(); + assertEquals(1, searchProgressListener.onFinalReduce.get()); + assertEquals(10, nextCounter.get()); + } finally { + queryPhaseResultConsumer.decRef(); + } + } + + public void testNullShardResultHandling() throws Exception { + CountOnlyQueryPhaseResultConsumer queryPhaseResultConsumer = new CountOnlyQueryPhaseResultConsumer(SearchProgressListener.NOOP, 10); + try { + AtomicInteger nextCounter = new AtomicInteger(0); + for (int i = 0; i < 10; i++) { + SearchShardTarget searchShardTarget = new SearchShardTarget("node", new ShardId("index", "uuid", i), null); + QuerySearchResult querySearchResult = QuerySearchResult.nullInstance(); + querySearchResult.setSearchShardTarget(searchShardTarget); + querySearchResult.setShardIndex(i); + queryPhaseResultConsumer.consumeResult(querySearchResult, nextCounter::incrementAndGet); + } + var reducePhase = queryPhaseResultConsumer.reduce(); + assertEquals(0, reducePhase.totalHits().value); + assertEquals(TotalHits.Relation.EQUAL_TO, reducePhase.totalHits().relation); + assertFalse(reducePhase.isEmptyResult()); + assertEquals(10, nextCounter.get()); + } finally { + queryPhaseResultConsumer.decRef(); + } + } + + public void testEmptyResults() throws Exception { + CountOnlyQueryPhaseResultConsumer queryPhaseResultConsumer = new CountOnlyQueryPhaseResultConsumer(SearchProgressListener.NOOP, 10); + try { + var reducePhase = queryPhaseResultConsumer.reduce(); + assertEquals(0, reducePhase.totalHits().value); + assertEquals(TotalHits.Relation.EQUAL_TO, reducePhase.totalHits().relation); + assertTrue(reducePhase.isEmptyResult()); + } finally { + queryPhaseResultConsumer.decRef(); + } + 
} + + private static class ThrowingSearchProgressListener extends SearchProgressListener { + private final AtomicInteger onQueryResult = new AtomicInteger(0); + private final AtomicInteger onPartialReduce = new AtomicInteger(0); + private final AtomicInteger onFinalReduce = new AtomicInteger(0); + + @Override + protected void onListShards( + List shards, + List skippedShards, + SearchResponse.Clusters clusters, + boolean fetchPhase, + TransportSearchAction.SearchTimeProvider timeProvider + ) { + throw new UnsupportedOperationException(); + } + + @Override + protected void onQueryResult(int shardIndex, QuerySearchResult queryResult) { + onQueryResult.incrementAndGet(); + throw new UnsupportedOperationException(); + } + + @Override + protected void onPartialReduce(List shards, TotalHits totalHits, InternalAggregations aggs, int reducePhase) { + onPartialReduce.incrementAndGet(); + throw new UnsupportedOperationException(); + } + + @Override + protected void onFinalReduce(List shards, TotalHits totalHits, InternalAggregations aggs, int reducePhase) { + onFinalReduce.incrementAndGet(); + throw new UnsupportedOperationException(); + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java b/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java index e61b89fcff42c..589bc76c55a3d 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java @@ -7,17 +7,22 @@ */ package org.elasticsearch.search; +import org.apache.lucene.search.TotalHits; import org.elasticsearch.action.search.SearchRequestBuilder; public enum SearchResponseUtils { ; - public static long getTotalHitsValue(SearchRequestBuilder request) { + public static TotalHits getTotalHits(SearchRequestBuilder request) { var resp = request.get(); try { - return resp.getHits().getTotalHits().value; + return resp.getHits().getTotalHits(); } 
finally { resp.decRef(); } } + + public static long getTotalHitsValue(SearchRequestBuilder request) { + return getTotalHits(request).value; + } } diff --git a/test/framework/src/main/java/org/elasticsearch/telemetry/MetricRecorder.java b/test/framework/src/main/java/org/elasticsearch/telemetry/MetricRecorder.java index cf8f23f06155d..aa14d0067b68e 100644 --- a/test/framework/src/main/java/org/elasticsearch/telemetry/MetricRecorder.java +++ b/test/framework/src/main/java/org/elasticsearch/telemetry/MetricRecorder.java @@ -39,7 +39,7 @@ private record RegisteredMetric( ) { void register(String name, String description, String unit, I instrument) { assert registered.containsKey(name) == false - : Strings.format("unexpected [{}]: [{}][{}], already registered[{}]", name, description, unit, registered.get(name)); + : Strings.format("unexpected [%s]: [%s][%s], already registered[%s]", name, description, unit, registered.get(name)); registered.put(name, new Registration(name, description, unit)); instruments.put(name, instrument); if (instrument instanceof Runnable callback) { @@ -48,7 +48,7 @@ void register(String name, String description, String unit, I instrument) { } void call(String name, Measurement call) { - assert registered.containsKey(name) : Strings.format("call for unregistered metric [{}]: [{}]", name, call); + assert registered.containsKey(name) : Strings.format("call for unregistered metric [%s]: [%s]", name, call); called.computeIfAbsent(Objects.requireNonNull(name), k -> new CopyOnWriteArrayList<>()).add(call); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index e0083d5570baa..779d846f4eac2 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -1881,7 +1881,7 @@ protected void addError(Exception e) { /** * Clears the given scroll 
Ids */ - public void clearScroll(String... scrollIds) { + public static void clearScroll(String... scrollIds) { ClearScrollResponse clearResponse = client().prepareClearScroll().setScrollIds(Arrays.asList(scrollIds)).get(); assertThat(clearResponse.isSucceeded(), equalTo(true)); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java index bba7a6e19deea..0d7ab26faecf9 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchScrollRequestBuilder; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; @@ -51,6 +52,7 @@ import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; import java.util.List; @@ -60,10 +62,13 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; import java.util.function.Consumer; import static org.apache.lucene.tests.util.LuceneTestCase.expectThrows; import static org.apache.lucene.tests.util.LuceneTestCase.expectThrowsAnyOf; +import static org.elasticsearch.test.ESIntegTestCase.clearScroll; +import static org.elasticsearch.test.ESIntegTestCase.client; import static org.elasticsearch.test.LambdaMatchers.transformedArrayItemsMatch; import static 
org.elasticsearch.test.LambdaMatchers.transformedItemsMatch; import static org.elasticsearch.test.LambdaMatchers.transformedMatch; @@ -73,6 +78,7 @@ import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.emptyArray; import static org.hamcrest.Matchers.greaterThan; @@ -369,6 +375,48 @@ public static void assertRes } } + /** + * A helper to enable the testing of scroll requests with ref-counting. + * + * @param keepAlive The TTL for the scroll context. + * @param searchRequestBuilder The initial search request. + * @param expectedTotalHitCount The number of hits that are expected to be retrieved. + * @param responseConsumer (respNum, response) -> {your assertions here}. + * respNum starts at 1, which contains the resp from the initial request. + */ + public static void assertScrollResponsesAndHitCount( + TimeValue keepAlive, + SearchRequestBuilder searchRequestBuilder, + int expectedTotalHitCount, + BiConsumer responseConsumer + ) { + searchRequestBuilder.setScroll(keepAlive); + List responses = new ArrayList<>(); + var scrollResponse = searchRequestBuilder.get(); + responses.add(scrollResponse); + int retrievedDocsCount = 0; + try { + assertThat(scrollResponse.getHits().getTotalHits().value, equalTo((long) expectedTotalHitCount)); + retrievedDocsCount += scrollResponse.getHits().getHits().length; + responseConsumer.accept(responses.size(), scrollResponse); + while (scrollResponse.getHits().getHits().length > 0) { + scrollResponse = prepareScrollSearch(scrollResponse.getScrollId(), keepAlive).get(); + responses.add(scrollResponse); + assertThat(scrollResponse.getHits().getTotalHits().value, equalTo((long) expectedTotalHitCount)); + retrievedDocsCount += scrollResponse.getHits().getHits().length; + 
responseConsumer.accept(responses.size(), scrollResponse); + } + } finally { + clearScroll(scrollResponse.getScrollId()); + responses.forEach(SearchResponse::decRef); + } + assertThat(retrievedDocsCount, equalTo(expectedTotalHitCount)); + } + + public static SearchScrollRequestBuilder prepareScrollSearch(String scrollId, TimeValue timeout) { + return client().prepareSearchScroll(scrollId).setScroll(timeout); + } + public static void assertResponse(ActionFuture responseFuture, Consumer consumer) throws ExecutionException, InterruptedException { var res = responseFuture.get(); @@ -442,6 +490,10 @@ public static void assertFailures(SearchRequestBuilder searchRequestBuilder, Res } } + public static void assertFailures(SearchRequestBuilder searchRequestBuilder, RestStatus restStatus) { + assertFailures(searchRequestBuilder, restStatus, containsString("")); + } + public static void assertNoFailures(BaseBroadcastResponse response) { if (response.getFailedShards() != 0) { final AssertionError assertionError = new AssertionError("[" + response.getFailedShards() + "] shard failures"); @@ -791,9 +843,9 @@ public static void assertToXContentEquivalent(BytesReference expected, BytesRefe * Often latches are called as assertTrue(latch.await(1, TimeUnit.SECONDS)); * In case of a failure this will just throw an assertion error without any further message * - * @param latch The latch to wait for - * @param timeout The value of the timeout - * @param unit The unit of the timeout + * @param latch The latch to wait for + * @param timeout The value of the timeout + * @param unit The unit of the timeout * @throws InterruptedException An exception if the waiting is interrupted */ public static void awaitLatch(CountDownLatch latch, long timeout, TimeUnit unit) throws InterruptedException { diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/420_connector_sync_job_check_in.yml 
b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/420_connector_sync_job_check_in.yml index 9ef37f4a9fe60..d08f7f6a51c91 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/420_connector_sync_job_check_in.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/420_connector_sync_job_check_in.yml @@ -2,6 +2,7 @@ setup: - skip: version: " - 8.11.99" reason: Introduced in 8.12.0 + features: is_after - do: connector.put: connector_id: test-connector @@ -20,13 +21,26 @@ setup: id: test-connector job_type: full trigger_method: on_demand + - set: { id: sync-job-id-to-check-in } + + - do: + connector_sync_job.get: + connector_sync_job_id: $sync-job-id-to-check-in + + - set: { last_seen: last_seen_before_check_in } + - do: connector_sync_job.check_in: connector_sync_job_id: $sync-job-id-to-check-in - match: { acknowledged: true } + - do: + connector_sync_job.get: + connector_sync_job_id: $sync-job-id-to-check-in + + - is_after: { last_seen: $last_seen_before_check_in } --- "Check in a Connector Sync Job - Connector Sync Job does not exist": diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/460_connector_sync_job_update_stats.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/460_connector_sync_job_update_stats.yml index 0e69866ce8b6c..0c7300bd2b436 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/460_connector_sync_job_update_stats.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/460_connector_sync_job_update_stats.yml @@ -2,6 +2,7 @@ setup: - skip: version: " - 8.11.99" reason: Introduced in 8.12.0 + - do: connector.put: connector_id: test-connector @@ -20,7 +21,9 @@ setup: id: test-connector job_type: full trigger_method: on_demand + - set: { 
id: id } + - do: connector_sync_job.update_stats: connector_sync_job_id: $id @@ -31,6 +34,13 @@ setup: - match: { acknowledged: true } + - do: + connector_sync_job.get: + connector_sync_job_id: $id + + - match: { deleted_document_count: 10 } + - match: { indexed_document_count: 20 } + - match: { indexed_document_volume: 1000 } --- "Update the ingestion stats for a connector sync job - negative deleted document count error": @@ -40,7 +50,9 @@ setup: id: test-connector job_type: full trigger_method: on_demand + - set: { id: id } + - do: connector_sync_job.update_stats: connector_sync_job_id: $id @@ -50,7 +62,6 @@ setup: indexed_document_volume: 1000 catch: bad_request - --- "Update the ingestion stats for a connector sync job - negative indexed document count error": - do: @@ -59,7 +70,9 @@ setup: id: test-connector job_type: full trigger_method: on_demand + - set: { id: id } + - do: connector_sync_job.update_stats: connector_sync_job_id: $id @@ -69,7 +82,6 @@ setup: indexed_document_volume: 1000 catch: bad_request - --- "Update the ingestion stats for a connector sync job - negative indexed document volume error": - do: @@ -78,7 +90,9 @@ setup: id: test-connector job_type: full trigger_method: on_demand + - set: { id: id } + - do: connector_sync_job.update_stats: connector_sync_job_id: $id @@ -96,7 +110,9 @@ setup: id: test-connector job_type: full trigger_method: on_demand + - set: { id: id } + - do: connector_sync_job.update_stats: connector_sync_job_id: $id @@ -115,7 +131,9 @@ setup: id: test-connector job_type: full trigger_method: on_demand + - set: { id: id } + - do: connector_sync_job.update_stats: connector_sync_job_id: $id @@ -127,6 +145,14 @@ setup: - match: { acknowledged: true } + - do: + connector_sync_job.get: + connector_sync_job_id: $id + + - match: { deleted_document_count: 10 } + - match: { indexed_document_count: 20 } + - match: { indexed_document_volume: 1000 } + - match: { total_document_count: 20 } --- "Update the ingestion stats for a 
connector sync job - with optional last_seen": @@ -137,6 +163,7 @@ setup: job_type: full trigger_method: on_demand - set: { id: id } + - do: connector_sync_job.update_stats: connector_sync_job_id: $id @@ -148,6 +175,15 @@ setup: - match: { acknowledged: true } + - do: + connector_sync_job.get: + connector_sync_job_id: $id + + - match: { deleted_document_count: 10 } + - match: { indexed_document_count: 20 } + - match: { indexed_document_volume: 1000 } + - match: { last_seen: 2023-12-04T08:45:50.567149Z } + --- "Update the ingestion stats for a Connector Sync Job - Connector Sync Job does not exist": - do: diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java index 424f88413af8f..03815dcdaea78 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java @@ -78,6 +78,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -111,6 +112,7 @@ public class ValuesSourceReaderOperatorTests extends OperatorTestCase { private Directory directory = newDirectory(); private IndexReader reader; + private static final Map keyToTags = new HashMap<>(); @After public void closeIndex() throws IOException { @@ -144,12 +146,14 @@ static Operator.OperatorFactory factory(IndexReader reader, String name, BlockLo @Override protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { - // The test wants more than one segment. We shoot for 10. 
- int commitEvery = Math.max(1, (int) Math.ceil((double) size / 10)); - return simpleInput(driverContext(), size, commitEvery); + return simpleInput(driverContext(), size, commitEvery(size), randomPageSize()); } - private SourceOperator simpleInput(DriverContext context, int size, int commitEvery) { + private int commitEvery(int numDocs) { + return Math.max(1, (int) Math.ceil((double) numDocs / 10)); + } + + private SourceOperator simpleInput(DriverContext context, int size, int commitEvery, int pageSize) { try { initIndex(size, commitEvery); } catch (IOException e) { @@ -160,13 +164,14 @@ private SourceOperator simpleInput(DriverContext context, int size, int commitEv ctx -> new MatchAllDocsQuery(), DataPartitioning.SHARD, randomIntBetween(1, 10), - size, + pageSize, LuceneOperator.NO_LIMIT ); return luceneFactory.get(context); } private void initIndex(int size, int commitEvery) throws IOException { + keyToTags.clear(); try ( IndexWriter writer = new IndexWriter( directory, @@ -181,9 +186,8 @@ private void initIndex(int size, int commitEvery) throws IOException { doc.add(new SortedNumericDocValuesField("short", (short) d)); doc.add(new SortedNumericDocValuesField("byte", (byte) d)); doc.add(new SortedNumericDocValuesField("long", d)); - doc.add( - new KeywordFieldMapper.KeywordField("kwd", new BytesRef(Integer.toString(d)), KeywordFieldMapper.Defaults.FIELD_TYPE) - ); + String tag = keyToTags.computeIfAbsent(d, k -> "tag-" + randomIntBetween(1, 5)); + doc.add(new KeywordFieldMapper.KeywordField("kwd", new BytesRef(tag), KeywordFieldMapper.Defaults.FIELD_TYPE)); doc.add(new StoredField("stored_kwd", new BytesRef(Integer.toString(d)))); doc.add(new StoredField("stored_text", Integer.toString(d))); doc.add(new SortedNumericDocValuesField("bool", d % 2 == 0 ? 
1 : 0)); @@ -295,6 +299,37 @@ public void testLoadAllInOnePage() { ); } + public void testManySingleDocPages() { + DriverContext driverContext = driverContext(); + int numDocs = between(10, 100); + List input = CannedSourceOperator.collectPages(simpleInput(driverContext, numDocs, between(1, numDocs), 1)); + Randomness.shuffle(input); + List operators = new ArrayList<>(); + Checks checks = new Checks(Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING); + FieldCase testCase = new FieldCase( + new KeywordFieldMapper.KeywordFieldType("kwd"), + checks::tags, + StatusChecks::keywordsFromDocValues + ); + operators.add( + new ValuesSourceReaderOperator.Factory( + List.of(testCase.info, fieldInfo(docValuesNumberField("key", NumberFieldMapper.NumberType.INTEGER))), + List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> SourceLoader.FROM_STORED_SOURCE)), + 0 + ).get(driverContext) + ); + List results = drive(operators, input.iterator(), driverContext); + assertThat(results, hasSize(input.size())); + for (Page page : results) { + assertThat(page.getBlockCount(), equalTo(3)); + IntVector keys = page.getBlock(2).asVector(); + for (int p = 0; p < page.getPositionCount(); p++) { + int key = keys.getInt(p); + testCase.checkResults.check(page.getBlock(1), p, key); + } + } + } + public void testEmpty() { DriverContext driverContext = driverContext(); loadSimpleAndAssert( @@ -440,7 +475,8 @@ public void testLoadAllStatusAllInOnePage() { private void testLoadAllStatus(boolean allInOnePage) { DriverContext driverContext = driverContext(); - List input = CannedSourceOperator.collectPages(simpleInput(driverContext.blockFactory(), between(100, 5000))); + int numDocs = between(100, 5000); + List input = CannedSourceOperator.collectPages(simpleInput(driverContext, numDocs, commitEvery(numDocs), numDocs)); assertThat(reader.leaves(), hasSize(10)); assertThat(input, hasSize(10)); List cases = infoAndChecksForEachType(Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING); @@ -585,7 
+621,7 @@ private List infoAndChecksForEachType(Block.MvOrdering docValuesMvOrd r.add(new FieldCase(new BooleanFieldMapper.BooleanFieldType("bool"), checks::bools, StatusChecks::boolFromDocValues)); r.add(new FieldCase(new BooleanFieldMapper.BooleanFieldType("mv_bool"), checks::mvBools, StatusChecks::mvBoolFromDocValues)); r.add(new FieldCase(new BooleanFieldMapper.BooleanFieldType("missing_bool"), checks::constantNulls, StatusChecks::constantNulls)); - r.add(new FieldCase(new KeywordFieldMapper.KeywordFieldType("kwd"), checks::strings, StatusChecks::keywordsFromDocValues)); + r.add(new FieldCase(new KeywordFieldMapper.KeywordFieldType("kwd"), checks::tags, StatusChecks::keywordsFromDocValues)); r.add( new FieldCase( new KeywordFieldMapper.KeywordFieldType("mv_kwd"), @@ -611,7 +647,7 @@ private List infoAndChecksForEachType(Block.MvOrdering docValuesMvOrd r.add( new FieldCase( textFieldWithDelegate("text_with_delegate", new KeywordFieldMapper.KeywordFieldType("kwd")), - checks::strings, + checks::tags, StatusChecks::textWithDelegate ) ); @@ -680,6 +716,11 @@ void strings(Block block, int position, int key) { assertThat(keywords.getBytesRef(position, new BytesRef()).utf8ToString(), equalTo(Integer.toString(key))); } + void tags(Block block, int position, int key) { + BytesRefVector keywords = ((BytesRefBlock) block).asVector(); + assertThat(keywords.getBytesRef(position, new BytesRef()).utf8ToString(), equalTo(keyToTags.get(key))); + } + void bools(Block block, int position, int key) { BooleanVector bools = ((BooleanBlock) block).asVector(); assertThat(bools.getBoolean(position), equalTo(key % 2 == 0)); @@ -1285,7 +1326,7 @@ public void testSequentialStoredFieldsBigEnough() { private void testSequentialStoredFields(boolean sequential, int docCount) { DriverContext driverContext = driverContext(); - List source = CannedSourceOperator.collectPages(simpleInput(driverContext, docCount, docCount)); + List source = 
CannedSourceOperator.collectPages(simpleInput(driverContext, docCount, docCount, docCount)); assertThat(source, hasSize(1)); // We want one page for simpler assertions, and we want them all in one segment assertTrue(source.get(0).getBlock(0).asVector().singleSegmentNonDecreasing()); Operator op = new ValuesSourceReaderOperator.Factory( diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec index ffad468790998..083bd1eaf8417 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec @@ -6,7 +6,7 @@ v:long ; # TODO: switch this test to ``&format=csv&delimiter=|` output -showFunctions#[skip:-8.11.99] +showFunctions#[skip:-8.12.99] show functions; name:keyword | synopsis:keyword | argNames:keyword | argTypes:keyword | argDescriptions:keyword |returnType:keyword | description:keyword | optionalArgs:boolean | variadic:boolean @@ -22,8 +22,8 @@ ceil |"? ceil(n:integer|long|double|unsigned_long)" |n cidr_match |? cidr_match(arg1:?, arg2...:?) |[arg1, arg2] |[?, ?] |["", ""] |? | "" | [false, false] | true coalesce |? coalesce(arg1:?, arg2...:?) |[arg1, arg2] |[?, ?] |["", ""] |? | "" | [false, false] | true concat |? concat(arg1:?, arg2...:?) |[arg1, arg2] |[?, ?] |["", ""] |? 
| "" | [false, false] | true -cos |"double cos(n:integer|long|double|unsigned_long)" |n |"integer|long|double|unsigned_long" | "" |double | "" | false | false -cosh |"double cosh(n:integer|long|double|unsigned_long)" |n |"integer|long|double|unsigned_long" | "" |double | "" | false | false +cos |"double cos(n:integer|long|double|unsigned_long)" |n |"integer|long|double|unsigned_long" | "An angle, in radians" |double | "Returns the trigonometric cosine of an angle" | false | false +cosh |"double cosh(n:integer|long|double|unsigned_long)" |n |"integer|long|double|unsigned_long" | "The number who's hyperbolic cosine is to be returned" |double | "Returns the hyperbolic cosine of a number" | false | false count |? count(arg1:?) |arg1 |? | "" |? | "" | false | false count_distinct |? count_distinct(arg1:?, arg2:?) |[arg1, arg2] |[?, ?] |["", ""] |? | "" | [false, false] | false date_extract |? date_extract(arg1:?, arg2:?) |[arg1, arg2] |[?, ?] |["", ""] |? | "" | [false, false] | false @@ -63,14 +63,14 @@ right |"? right(string:keyword, length:integer)" |[string round |? round(arg1:?, arg2:?) |[arg1, arg2] |[?, ?] |["", ""] |? | "" | [false, false] | false rtrim |"keyword|text rtrim(str:keyword|text)" |str |"keyword|text" | "" |"keyword|text" |Removes trailing whitespaces from a string.| false | false sin |"double sin(n:integer|long|double|unsigned_long)" |n |"integer|long|double|unsigned_long" |An angle, in radians |double |Returns the trigonometric sine of an angle | false | false -sinh |"double sinh(n:integer|long|double|unsigned_long)"|n |"integer|long|double|unsigned_long" | "" |double | "" | false | false +sinh |"double sinh(n:integer|long|double|unsigned_long)"|n |"integer|long|double|unsigned_long" | "The number to return the hyperbolic sine of" |double | "Returns the hyperbolic sine of a number" | false | false split |? split(arg1:?, arg2:?) |[arg1, arg2] |[?, ?] |["", ""] |? | "" | [false, false] | false sqrt |"? 
sqrt(n:integer|long|double|unsigned_long)" |n |"integer|long|double|unsigned_long" | "" |? | "" | false | false starts_with |? starts_with(arg1:?, arg2:?) |[arg1, arg2] |[?, ?] |["", ""] |? | "" | [false, false] | false substring |? substring(arg1:?, arg2:?, arg3:?) |[arg1, arg2, arg3] |[?, ?, ?] |["", "", ""] |? | "" | [false, false, false]| false sum |? sum(arg1:?) |arg1 |? | "" |? | "" | false | false -tan |"double tan(n:integer|long|double|unsigned_long)" |n |"integer|long|double|unsigned_long" | "" |double | "" | false | false -tanh |"double tanh(n:integer|long|double|unsigned_long)" |n |"integer|long|double|unsigned_long" | "" |double | "" | false | false +tan |"double tan(n:integer|long|double|unsigned_long)" |n |"integer|long|double|unsigned_long" | "An angle, in radians" |double | "Returns the trigonometric tangent of an angle" | false | false +tanh |"double tanh(n:integer|long|double|unsigned_long)" |n |"integer|long|double|unsigned_long" | "The number to return the hyperbolic tangent of" |double | "Returns the hyperbolic tangent of a number" | false | false tau |? tau() | null | null | null |? 
| "" | null | false to_bool |"boolean to_bool(v:boolean|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|keyword|text|double|long|unsigned_long|integer" | |boolean | |false |false to_boolean |"boolean to_boolean(v:boolean|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|keyword|text|double|long|unsigned_long|integer" | |boolean | |false |false diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Cos.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Cos.java index c5edc0e0fd6c7..5f8661bb0ae7d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Cos.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Cos.java @@ -21,8 +21,11 @@ * Cosine trigonometric function. */ public class Cos extends AbstractTrigonometricFunction { - @FunctionInfo(returnType = "double") - public Cos(Source source, @Param(name = "n", type = { "integer", "long", "double", "unsigned_long" }) Expression n) { + @FunctionInfo(returnType = "double", description = "Returns the trigonometric cosine of an angle") + public Cos( + Source source, + @Param(name = "n", type = { "integer", "long", "double", "unsigned_long" }, description = "An angle, in radians") Expression n + ) { super(source, n); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Cosh.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Cosh.java index cc315c3e9569c..6cc49cec0c32d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Cosh.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Cosh.java @@ -21,8 +21,15 @@ * Cosine hyperbolic function. 
*/ public class Cosh extends AbstractTrigonometricFunction { - @FunctionInfo(returnType = "double") - public Cosh(Source source, @Param(name = "n", type = { "integer", "long", "double", "unsigned_long" }) Expression n) { + @FunctionInfo(returnType = "double", description = "Returns the hyperbolic cosine of a number") + public Cosh( + Source source, + @Param( + name = "n", + type = { "integer", "long", "double", "unsigned_long" }, + description = "The number who's hyperbolic cosine is to be returned" + ) Expression n + ) { super(source, n); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Sinh.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Sinh.java index 07228dd1743dd..4b2adef5a2d6f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Sinh.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Sinh.java @@ -21,8 +21,15 @@ * Sine hyperbolic function. 
*/ public class Sinh extends AbstractTrigonometricFunction { - @FunctionInfo(returnType = "double") - public Sinh(Source source, @Param(name = "n", type = { "integer", "long", "double", "unsigned_long" }) Expression n) { + @FunctionInfo(returnType = "double", description = "Returns the hyperbolic sine of a number") + public Sinh( + Source source, + @Param( + name = "n", + type = { "integer", "long", "double", "unsigned_long" }, + description = "The number to return the hyperbolic sine of" + ) Expression n + ) { super(source, n); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Tan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Tan.java index e6c7bd8c6e530..5596c9098c034 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Tan.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Tan.java @@ -21,8 +21,11 @@ * Tangent trigonometric function. 
*/ public class Tan extends AbstractTrigonometricFunction { - @FunctionInfo(returnType = "double") - public Tan(Source source, @Param(name = "n", type = { "integer", "long", "double", "unsigned_long" }) Expression n) { + @FunctionInfo(returnType = "double", description = "Returns the trigonometric tangent of an angle") + public Tan( + Source source, + @Param(name = "n", type = { "integer", "long", "double", "unsigned_long" }, description = "An angle, in radians") Expression n + ) { super(source, n); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Tanh.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Tanh.java index a9720d6d65c11..ce59cec50bcca 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Tanh.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Tanh.java @@ -21,8 +21,15 @@ * Tangent hyperbolic function. 
*/ public class Tanh extends AbstractTrigonometricFunction { - @FunctionInfo(returnType = "double") - public Tanh(Source source, @Param(name = "n", type = { "integer", "long", "double", "unsigned_long" }) Expression n) { + @FunctionInfo(returnType = "double", description = "Returns the hyperbolic tangent of a number") + public Tanh( + Source source, + @Param( + name = "n", + type = { "integer", "long", "double", "unsigned_long" }, + description = "The number to return the hyperbolic tangent of" + ) Expression n + ) { super(source, n); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index 3bac4f1c4b5c0..d6f3b61cb19dd 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -36,6 +36,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Greatest; import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; import org.elasticsearch.xpack.esql.optimizer.FoldNull; +import org.elasticsearch.xpack.esql.parser.ExpressionBuilder; import org.elasticsearch.xpack.esql.planner.Layout; import org.elasticsearch.xpack.esql.planner.PlannerUtils; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; @@ -48,6 +49,7 @@ import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; import org.elasticsearch.xpack.ql.type.EsField; +import org.elasticsearch.xpack.ql.util.NumericUtils; import org.elasticsearch.xpack.ql.util.StringUtils; import org.elasticsearch.xpack.versionfield.Version; import org.hamcrest.Matcher; @@ -91,6 +93,7 @@ import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; import static 
org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.sameInstance; @@ -158,8 +161,9 @@ protected static Expression deepCopyOfField(String name, DataType type) { /** * Build the expression being tested, for the given source and list of arguments. Test classes need to implement this * to have something to test. + * * @param source the source - * @param args arg list from the test case, should match the length expected + * @param args arg list from the test case, should match the length expected * @return an expression for evaluating the function being tested on the given arguments */ protected abstract Expression build(Source source, List args); @@ -239,7 +243,7 @@ private void testEvaluate(boolean readFloating) { Object result; try (ExpressionEvaluator evaluator = evaluator(expression).get(driverContext())) { try (Block block = evaluator.eval(row(testCase.getDataValues()))) { - result = toJavaObject(block, 0); + result = toJavaObjectUnsignedLongAware(block, 0); } } assertThat(result, not(equalTo(Double.NaN))); @@ -253,11 +257,21 @@ private void testEvaluate(boolean readFloating) { } } + private Object toJavaObjectUnsignedLongAware(Block block, int position) { + Object result; + result = toJavaObject(block, position); + if (result != null && testCase.expectedType == DataTypes.UNSIGNED_LONG) { + assertThat(result, instanceOf(Long.class)); + result = NumericUtils.unsignedLongAsBigInteger((Long) result); + } + return result; + } + /** * Evaluates a {@link Block} of values, all copied from the input pattern, read directly from the page. *

- * Note that this'll sometimes be a {@link Vector} of values if the - * input pattern contained only a single value. + * Note that this'll sometimes be a {@link Vector} of values if the + * input pattern contained only a single value. *

*/ public final void testEvaluateBlockWithoutNulls() { @@ -267,8 +281,8 @@ public final void testEvaluateBlockWithoutNulls() { /** * Evaluates a {@link Block} of values, all copied from the input pattern, read from an intermediate operator. *

- * Note that this'll sometimes be a {@link Vector} of values if the - * input pattern contained only a single value. + * Note that this'll sometimes be a {@link Vector} of values if the + * input pattern contained only a single value. *

*/ public final void testEvaluateBlockWithoutNullsFloating() { @@ -296,8 +310,8 @@ public final void testEvaluateBlockWithNullsFloating() { * read directly from the {@link Page}, using the * {@link CrankyCircuitBreakerService} which fails randomly. *

- * Note that this'll sometimes be a {@link Vector} of values if the - * input pattern contained only a single value. + * Note that this'll sometimes be a {@link Vector} of values if the + * input pattern contained only a single value. *

*/ public final void testCrankyEvaluateBlockWithoutNulls() { @@ -314,8 +328,8 @@ public final void testCrankyEvaluateBlockWithoutNulls() { * read from an intermediate operator, using the * {@link CrankyCircuitBreakerService} which fails randomly. *

- * Note that this'll sometimes be a {@link Vector} of values if the - * input pattern contained only a single value. + * Note that this'll sometimes be a {@link Vector} of values if the + * input pattern contained only a single value. *

*/ public final void testCrankyEvaluateBlockWithoutNullsFloating() { @@ -396,7 +410,7 @@ private void testEvaluateBlock(BlockFactory inputBlockFactory, DriverContext con assertThat(toJavaObject(block, p), allNullsMatcher()); continue; } - assertThat(toJavaObject(block, p), testCase.getMatcher()); + assertThat(toJavaObjectUnsignedLongAware(block, p), testCase.getMatcher()); } assertThat( "evaluates to tracked block", @@ -470,7 +484,7 @@ public final void testEvaluateInManyThreads() throws ExecutionException, Interru try (EvalOperator.ExpressionEvaluator eval = evalSupplier.get(driverContext())) { for (int c = 0; c < count; c++) { try (Block block = eval.eval(page)) { - assertThat(toJavaObject(block, 0), testCase.getMatcher()); + assertThat(toJavaObjectUnsignedLongAware(block, 0), testCase.getMatcher()); } } } @@ -511,7 +525,12 @@ public final void testFold() { expression = new FoldNull().rule(expression); assertThat(expression.dataType(), equalTo(testCase.expectedType)); assertTrue(expression.foldable()); - assertThat(expression.fold(), testCase.getMatcher()); + Object result = expression.fold(); + // Decode unsigned longs into BigIntegers + if (testCase.expectedType == DataTypes.UNSIGNED_LONG && result != null) { + result = NumericUtils.unsignedLongAsBigInteger((Long) result); + } + assertThat(result, testCase.getMatcher()); if (testCase.getExpectedWarnings() != null) { assertWarnings(testCase.getExpectedWarnings()); } @@ -544,7 +563,7 @@ public static void testFunctionInfo() { LogManager.getLogger(getTestClass()).info("Skipping function info checks because we're running a portion of the tests"); return; } - FunctionDefinition definition = definition(); + FunctionDefinition definition = definition(functionName()); if (definition == null) { LogManager.getLogger(getTestClass()).info("Skipping function info checks because the function isn't registered"); return; @@ -588,14 +607,15 @@ public static void testFunctionInfo() { /** * Adds cases with {@code null} and 
asserts that the result is {@code null}. *

- * Note: This won't add more than a single null to any existing test case, - * just to keep the number of test cases from exploding totally. + * Note: This won't add more than a single null to any existing test case, + * just to keep the number of test cases from exploding totally. *

- * @param entirelyNullPreservesType should a test case that only contains parameters - * with the {@code null} type keep it's expected type? - * This is mostly going to be {@code true} - * except for functions that base their type entirely - * on input types like {@link Greatest} or {@link Coalesce}. + * + * @param entirelyNullPreservesType should a test case that only contains parameters + * with the {@code null} type keep it's expected type? + * This is mostly going to be {@code true} + * except for functions that base their type entirely + * on input types like {@link Greatest} or {@link Coalesce}. */ protected static List anyNullIsNull(boolean entirelyNullPreservesType, List testCaseSuppliers) { typesRequired(testCaseSuppliers); @@ -691,8 +711,7 @@ protected static List errorsForCasesWithoutExamples(List, cases will function the same as , * cases. - */ - .filter(types -> types.stream().filter(t -> t == DataTypes.NULL).count() <= 1) + */.filter(types -> types.stream().filter(t -> t == DataTypes.NULL).count() <= 1) .map(types -> typeErrorSupplier(validPerPosition.size() != 1, validPerPosition, types)) .forEach(suppliers::add); return suppliers; @@ -911,30 +930,44 @@ public static void renderSignature() throws IOException { LogManager.getLogger(getTestClass()).info("Skipping rendering signature because we're running a portion of the tests"); return; } - FunctionDefinition definition = definition(); - if (definition == null) { + String rendered = buildSignatureSvg(functionName()); + if (rendered == null) { LogManager.getLogger(getTestClass()).info("Skipping rendering signature because the function isn't registered"); - return; + } else { + LogManager.getLogger(getTestClass()).info("Writing function signature"); + writeToTempDir("signature", rendered, "svg"); } + } - String rendered = RailRoadDiagram.functionSignature(definition); - LogManager.getLogger(getTestClass()).info("Writing function signature"); - writeToTempDir("signature", rendered, "svg"); + private 
static String buildSignatureSvg(String name) throws IOException { + String binaryOperator = binaryOperator(name); + if (binaryOperator != null) { + return RailRoadDiagram.binaryOperator(binaryOperator); + } + String unaryOperator = unaryOperator(name); + if (unaryOperator != null) { + return RailRoadDiagram.unaryOperator(unaryOperator); + } + FunctionDefinition definition = definition(name); + if (definition != null) { + return RailRoadDiagram.functionSignature(definition); + } + return null; } /** * Unique signatures encountered by this test. *

- * We clear this at the beginning of the test class with - * {@link #clearSignatures} out of paranoia. It is - * shared by many tests, after all. + * We clear this at the beginning of the test class with + * {@link #clearSignatures} out of paranoia. It is + * shared by many tests, after all. *

*

- * After each test method we add the signature it operated on via - * {@link #trackSignature}. Once the test class is done we render - * all the unique signatures to a temp file with {@link #renderTypesTable}. - * We use a temp file because that's all we're allowed to write to. - * Gradle will move the files into the docs after this is done. + * After each test method we add the signature it operated on via + * {@link #trackSignature}. Once the test class is done we render + * all the unique signatures to a temp file with {@link #renderTypesTable}. + * We use a temp file because that's all we're allowed to write to. + * Gradle will move the files into the docs after this is done. *

*/ private static final Map, DataType> signatures = new HashMap<>(); @@ -960,7 +993,8 @@ public static void renderTypesTable() throws IOException { if (System.getProperty("generateDocs") == null) { return; } - FunctionDefinition definition = definition(); + String name = functionName(); // TODO types table for operators + FunctionDefinition definition = definition(name); if (definition == null) { LogManager.getLogger(getTestClass()).info("Skipping rendering types because the function isn't registered"); return; @@ -995,8 +1029,11 @@ public static void renderTypesTable() throws IOException { writeToTempDir("types", rendered, "asciidoc"); } - private static FunctionDefinition definition() { - String name = functionName(); + private static String functionName() { + return StringUtils.camelCaseToUnderscore(getTestClass().getSimpleName().replace("Tests", "")).toLowerCase(Locale.ROOT); + } + + private static FunctionDefinition definition(String name) { EsqlFunctionRegistry registry = new EsqlFunctionRegistry(); if (registry.functionExists(name)) { return registry.resolveFunction(name); @@ -1004,15 +1041,44 @@ private static FunctionDefinition definition() { return null; } - private static String functionName() { - return StringUtils.camelCaseToUnderscore(getTestClass().getSimpleName().replace("Tests", "")).toLowerCase(Locale.ROOT); + /** + * If this test is for a binary operator return its symbol, otherwise return {@code null}. + * This is functionally the reverse of the combination of + * {@link ExpressionBuilder#visitArithmeticBinary} and {@link ExpressionBuilder#visitComparison}. 
+ */ + private static String binaryOperator(String name) { + return switch (name) { + case "add" -> "+"; + case "div" -> "/"; + case "equals" -> "=="; + case "greater_than" -> ">"; + case "greater_than_or_equal_to" -> ">="; + case "less_than" -> "<"; + case "less_than_or_equal_to" -> "<="; + case "mod" -> "%"; + case "mul" -> "*"; + case "not_equals" -> "!="; + case "sub" -> "-"; + default -> null; + }; + } + + /** + * If this test is for a unary operator return its symbol, otherwise return {@code null}. + * This is functionally the reverse of {@link ExpressionBuilder#visitArithmeticUnary}. + */ + private static String unaryOperator(String name) { + return switch (name) { + case "neg" -> "-"; + default -> null; + }; + } /** * Write some text to a tempdir so we can copy it to the docs later. *

- * We need to write to a tempdir instead of the docs because the tests - * don't have write permission to the docs. + * We need to write to a tempdir instead of the docs because the tests + * don't have write permission to the docs. *

*/ private static void writeToTempDir(String subdir, String str, String extension) throws IOException { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/RailRoadDiagram.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/RailRoadDiagram.java index 65977ea6a174f..d6501568a85ec 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/RailRoadDiagram.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/RailRoadDiagram.java @@ -42,6 +42,10 @@ public class RailRoadDiagram { */ private static final LazyInitializable FONT = new LazyInitializable<>(() -> loadFont().deriveFont(20.0F)); + /** + * Generate a railroad diagram for a function. The output would look like + * {@code FOO(a, b, c)}. + */ static String functionSignature(FunctionDefinition definition) throws IOException { List expressions = new ArrayList<>(); expressions.add(new SpecialSequence(definition.name().toUpperCase(Locale.ROOT))); @@ -61,10 +65,34 @@ static String functionSignature(FunctionDefinition definition) throws IOExceptio } } expressions.add(new Syntax(")")); - net.nextencia.rrdiagram.grammar.model.Expression rr = new Sequence( - expressions.toArray(net.nextencia.rrdiagram.grammar.model.Expression[]::new) - ); - RRDiagram rrDiagram = new GrammarToRRDiagram().convert(new Rule("test", rr)); + return toSvg(new Sequence(expressions.toArray(Expression[]::new))); + } + + /** + * Generate a railroad diagram for binary operator. The output would look like + * {@code lhs + rhs}. + */ + static String binaryOperator(String operator) throws IOException { + List expressions = new ArrayList<>(); + expressions.add(new Literal("lhs")); + expressions.add(new Syntax(operator)); + expressions.add(new Literal("rhs")); + return toSvg(new Sequence(expressions.toArray(Expression[]::new))); + } + + /** + * Generate a railroad diagram for unary operator. 
The output would look like + * {@code -v}. + */ + static String unaryOperator(String operator) throws IOException { + List expressions = new ArrayList<>(); + expressions.add(new Syntax(operator)); + expressions.add(new Literal("v")); + return toSvg(new Sequence(expressions.toArray(Expression[]::new))); + } + + private static String toSvg(Expression exp) throws IOException { + RRDiagram rrDiagram = new GrammarToRRDiagram().convert(new Rule("", exp)); RRDiagramToSVG toSvg = new RRDiagramToSVG(); toSvg.setSpecialSequenceShape(RRDiagramToSVG.BoxShape.RECTANGLE); @@ -74,20 +102,29 @@ static String functionSignature(FunctionDefinition definition) throws IOExceptio toSvg.setLiteralFont(FONT.getOrCompute()); toSvg.setRuleFont(FONT.getOrCompute()); - /* - * "Tighten" the styles in the SVG so they beat the styles sitting in the - * main page. We need this because we're embedding the SVG into the page. - * We need to embed the SVG into the page so it can get fonts loaded in the - * primary stylesheet. We need to load a font so they images are consistent - * on all clients. - */ - return toSvg.convert(rrDiagram).replace(".c", "#guide .c").replace(".k", "#guide .k").replace(".s", "#guide .s"); + return tightenStyles(toSvg.convert(rrDiagram)); + } + + /** + * "Tighten" the styles in the SVG so they beat the styles sitting in the + * main page. We need this because we're embedding the SVG into the page. + * We need to embed the SVG into the page so it can get fonts loaded in the + * primary stylesheet. We need to load a font so the images are consistent + * on all clients. + */ + private static String tightenStyles(String svg) { + for (String c : new String[] { "c", "k", "s", "j", "l" }) { + svg = svg.replace("." + c, "#guide ." + c); + } + return svg; } /** * Like a literal but with light grey text for a more muted appearance for syntax. 
*/ private static class Syntax extends Literal { + private static final String LITERAL_CLASS = "l"; + private static final String SYNTAX_CLASS = "lsyn"; private static final String LITERAL_TEXT_CLASS = "j"; private static final String SYNTAX_TEXT_CLASS = "syn"; private static final String SYNTAX_GREY = "8D8D8D"; @@ -101,13 +138,20 @@ private Syntax(String text) { @Override protected RRElement toRRElement(GrammarToRRDiagram grammarToRRDiagram) { - // This performs a monumentally rude hack to replace the text color of this element. + /* + * This performs a monumentally rude hack to replace the text color of this element. + * It renders a "literal" element but intercepts the layer that defines its css class + * and replaces it with our own. + */ return new RRText(RRText.Type.LITERAL, text, null) { @Override protected void toSVG(RRDiagramToSVG rrDiagramToSVG, int xOffset, int yOffset, RRDiagram.SvgContent svgContent) { super.toSVG(rrDiagramToSVG, xOffset, yOffset, new RRDiagram.SvgContent() { @Override public String getDefinedCSSClass(String style) { + if (style.equals(LITERAL_CLASS)) { + return svgContent.getDefinedCSSClass(SYNTAX_CLASS); + } if (style.equals(LITERAL_TEXT_CLASS)) { return svgContent.getDefinedCSSClass(SYNTAX_TEXT_CLASS); } @@ -116,11 +160,18 @@ public String getDefinedCSSClass(String style) { @Override public String setCSSClass(String cssClass, String definition) { - if (false == cssClass.equals(LITERAL_TEXT_CLASS)) { - return svgContent.setCSSClass(cssClass, definition); + if (cssClass.equals(LITERAL_CLASS)) { + svgContent.setCSSClass(cssClass, definition); + return svgContent.setCSSClass(SYNTAX_CLASS, definition); + } + if (cssClass.equals(LITERAL_TEXT_CLASS)) { + svgContent.setCSSClass(cssClass, definition); + return svgContent.setCSSClass( + SYNTAX_TEXT_CLASS, + definition.replace("fill:#000000", "fill:#" + SYNTAX_GREY) + ); } - svgContent.setCSSClass(cssClass, definition); - return svgContent.setCSSClass(SYNTAX_TEXT_CLASS, 
definition.replace("fill:#000000", "fill:#" + SYNTAX_GREY)); + return svgContent.setCSSClass(cssClass, definition); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java index faf10d499127a..9eab8cbef5fed 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java @@ -10,6 +10,8 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; @@ -48,6 +50,8 @@ public record TestCaseSupplier(String name, List types, Supplier supplier) implements Supplier { + + private static Logger logger = LogManager.getLogger(TestCaseSupplier.class); /** * Build a test case without types. * @@ -530,6 +534,8 @@ public static void unary( supplier.type(), "value" ); + logger.info("Value is " + value + " of type " + value.getClass()); + logger.info("expectedValue is " + expectedValue.apply(value)); TestCase testCase = new TestCase( List.of(typed), expectedEvaluatorToString, @@ -949,6 +955,9 @@ public TypedData(Object data, String name) { @Override public String toString() { + if (type == DataTypes.UNSIGNED_LONG && data instanceof Long longData) { + return type.toString() + "(" + NumericUtils.unsignedLongAsBigInteger(longData).toString() + ")"; + } return type.toString() + "(" + (data == null ? 
"null" : data.toString()) + ")"; } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongTests.java index 080424602703d..8d5ee002a8f78 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataTypes; -import org.elasticsearch.xpack.ql.util.NumericUtils; import java.math.BigDecimal; import java.math.BigInteger; @@ -26,10 +25,7 @@ import java.util.function.Supplier; import static org.elasticsearch.xpack.ql.type.DataTypeConverter.safeToUnsignedLong; -import static org.elasticsearch.xpack.ql.util.NumericUtils.ONE_AS_UNSIGNED_LONG; import static org.elasticsearch.xpack.ql.util.NumericUtils.UNSIGNED_LONG_MAX_AS_DOUBLE; -import static org.elasticsearch.xpack.ql.util.NumericUtils.ZERO_AS_UNSIGNED_LONG; -import static org.elasticsearch.xpack.ql.util.NumericUtils.asLongUnsigned; public class ToUnsignedLongTests extends AbstractFunctionTestCase { public ToUnsignedLongTests(@Name("TestCase") Supplier testCaseSupplier) { @@ -47,7 +43,7 @@ public static Iterable parameters() { suppliers, read, DataTypes.UNSIGNED_LONG, - NumericUtils::asLongUnsigned, + n -> n, BigInteger.ZERO, UNSIGNED_LONG_MAX, List.of() @@ -57,7 +53,7 @@ public static Iterable parameters() { suppliers, evaluatorName.apply("Boolean"), DataTypes.UNSIGNED_LONG, - b -> b ? ONE_AS_UNSIGNED_LONG : ZERO_AS_UNSIGNED_LONG, + b -> b ? 
BigInteger.ONE : BigInteger.ZERO, List.of() ); @@ -66,7 +62,7 @@ public static Iterable parameters() { suppliers, evaluatorName.apply("Long"), DataTypes.UNSIGNED_LONG, - instant -> asLongUnsigned(instant.toEpochMilli()), + instant -> BigInteger.valueOf(instant.toEpochMilli()), List.of() ); // random strings that don't look like an unsigned_long @@ -85,7 +81,7 @@ public static Iterable parameters() { suppliers, evaluatorName.apply("Double"), DataTypes.UNSIGNED_LONG, - d -> asLongUnsigned(BigDecimal.valueOf(d).toBigInteger()), // note: not: new BigDecimal(d).toBigInteger + d -> BigDecimal.valueOf(d).toBigInteger(), // note: not: new BigDecimal(d).toBigInteger 0d, UNSIGNED_LONG_MAX_AS_DOUBLE, List.of() @@ -122,7 +118,7 @@ public static Iterable parameters() { suppliers, evaluatorName.apply("Long"), DataTypes.UNSIGNED_LONG, - NumericUtils::asLongUnsigned, + BigInteger::valueOf, 0L, Long.MAX_VALUE, List.of() @@ -146,7 +142,7 @@ public static Iterable parameters() { suppliers, evaluatorName.apply("Int"), DataTypes.UNSIGNED_LONG, - NumericUtils::asLongUnsigned, + BigInteger::valueOf, 0, Integer.MAX_VALUE, List.of() @@ -180,7 +176,7 @@ public static Iterable parameters() { ) .toList(), DataTypes.UNSIGNED_LONG, - bytesRef -> asLongUnsigned(safeToUnsignedLong(((BytesRef) bytesRef).utf8ToString())), + bytesRef -> safeToUnsignedLong(((BytesRef) bytesRef).utf8ToString()), List.of() ); // strings of random doubles within unsigned_long's range @@ -198,7 +194,7 @@ public static Iterable parameters() { ) .toList(), DataTypes.UNSIGNED_LONG, - bytesRef -> asLongUnsigned(safeToUnsignedLong(((BytesRef) bytesRef).utf8ToString())), + bytesRef -> safeToUnsignedLong(((BytesRef) bytesRef).utf8ToString()), List.of() ); // strings of random doubles outside unsigned_long's range, negative diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsTests.java 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsTests.java index e6621fdf78408..491680d537f37 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; +import java.math.BigInteger; import java.util.ArrayList; import java.util.List; import java.util.function.Supplier; @@ -36,15 +37,15 @@ public static Iterable parameters() { equalTo(Math.abs(arg)) ); })); - suppliers.add(new TestCaseSupplier(List.of(DataTypes.UNSIGNED_LONG), () -> { - long arg = randomLong(); - return new TestCaseSupplier.TestCase( - List.of(new TestCaseSupplier.TypedData(arg, DataTypes.UNSIGNED_LONG, "arg")), - "Attribute[channel=0]", - DataTypes.UNSIGNED_LONG, - equalTo(arg) - ); - })); + TestCaseSupplier.forUnaryUnsignedLong( + suppliers, + "Attribute[channel=0]", + DataTypes.UNSIGNED_LONG, + (n) -> n, + BigInteger.ZERO, + UNSIGNED_LONG_MAX, + List.of() + ); suppliers.add(new TestCaseSupplier(List.of(DataTypes.LONG), () -> { long arg = randomLong(); return new TestCaseSupplier.TestCase( diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilTests.java index f4e03de146c54..cbc7e99bf6c09 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilTests.java @@ -17,6 +17,8 @@ import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; +import java.math.BigInteger; +import java.util.ArrayList; 
import java.util.List; import java.util.function.Supplier; @@ -29,7 +31,8 @@ public CeilTests(@Name("TestCase") Supplier testCaseS @ParametersFactory public static Iterable parameters() { - return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("large double value", () -> { + List suppliers = new ArrayList<>(); + suppliers.addAll(List.of(new TestCaseSupplier("large double value", () -> { double arg = 1 / randomDouble(); return new TestCaseSupplier.TestCase( List.of(new TestCaseSupplier.TypedData(arg, DataTypes.DOUBLE, "arg")), @@ -53,15 +56,18 @@ public static Iterable parameters() { DataTypes.LONG, equalTo(arg) ); - }), new TestCaseSupplier("unsigned long value", () -> { - long arg = randomLong(); - return new TestCaseSupplier.TestCase( - List.of(new TestCaseSupplier.TypedData(arg, DataTypes.UNSIGNED_LONG, "arg")), - "Attribute[channel=0]", - DataTypes.UNSIGNED_LONG, - equalTo(arg) - ); }))); + + TestCaseSupplier.forUnaryUnsignedLong( + suppliers, + "Attribute[channel=0]", + DataTypes.UNSIGNED_LONG, + (n) -> n, + BigInteger.ZERO, + UNSIGNED_LONG_MAX, + List.of() + ); + return parameterSuppliersFromTypedData(suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CoshTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CoshTests.java index fe9578aea6480..be7e8e47a0754 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CoshTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CoshTests.java @@ -33,7 +33,6 @@ public static Iterable parameters() { 710d, // Hyperbolic Cosine grows extremely fast. 
Values outside this range return Double.POSITIVE_INFINITY List.of() ); - suppliers = anyNullIsNull(true, suppliers); // Out of range cases suppliers.addAll( @@ -62,7 +61,7 @@ public static Iterable parameters() { ) ) ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(suppliers)); + return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorTests.java index f41b7c5de38ad..9a185172c9972 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataTypes; -import org.elasticsearch.xpack.ql.util.NumericUtils; import java.math.BigInteger; import java.util.ArrayList; @@ -37,7 +36,7 @@ public static Iterable parameters() { suppliers, read, DataTypes.UNSIGNED_LONG, - ul -> NumericUtils.asLongUnsigned(ul), + ul -> ul, BigInteger.ZERO, UNSIGNED_LONG_MAX, List.of() diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinhTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinhTests.java index ede33838903de..465879b071542 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinhTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinhTests.java @@ -33,7 +33,6 @@ public static Iterable parameters() { 710d, // Hyperbolic sine 
grows extremely fast. Values outside this range return Double.POSITIVE_INFINITY List.of() ); - suppliers = anyNullIsNull(true, suppliers); // Out of range cases suppliers.addAll( @@ -62,7 +61,7 @@ public static Iterable parameters() { ) ) ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(suppliers)); + return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxTests.java index 25764a9029bfd..c477cad17904d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; -import org.elasticsearch.xpack.ql.util.NumericUtils; import java.math.BigInteger; import java.util.ArrayList; @@ -37,12 +36,7 @@ public static Iterable parameters() { doubles(cases, "mv_max", "MvMax", (size, values) -> equalTo(values.max().getAsDouble())); ints(cases, "mv_max", "MvMax", (size, values) -> equalTo(values.max().getAsInt())); longs(cases, "mv_max", "MvMax", (size, values) -> equalTo(values.max().getAsLong())); - unsignedLongs( - cases, - "mv_max", - "MvMax", - (size, values) -> equalTo(NumericUtils.asLongUnsigned(values.reduce(BigInteger::max).get())) - ); + unsignedLongs(cases, "mv_max", "MvMax", (size, values) -> equalTo(values.reduce(BigInteger::max).get())); dateTimes(cases, "mv_max", "MvMax", (size, values) -> equalTo(values.max().getAsLong())); return 
parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, cases))); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianTests.java index ce83c4bb8f786..43e8467147279 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; -import org.elasticsearch.xpack.ql.util.NumericUtils; import java.math.BigInteger; import java.util.ArrayList; @@ -62,12 +61,12 @@ public static Iterable parameters() { unsignedLongs(cases, "mv_median", "MvMedian", (size, values) -> { int middle = size / 2; if (size % 2 == 1) { - return equalTo(NumericUtils.asLongUnsigned(values.sorted().skip(middle).findFirst().get())); + return equalTo(values.sorted().skip(middle).findFirst().get()); } var s = values.sorted().skip(middle - 1).limit(2).iterator(); BigInteger a = s.next(); BigInteger b = s.next(); - return equalTo(NumericUtils.asLongUnsigned(a.add(b.subtract(a).divide(BigInteger.valueOf(2))))); + return equalTo(a.add(b.subtract(a).divide(BigInteger.valueOf(2)))); }); cases.add( diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinTests.java index 5556755cfe125..2e47f7c24bb54 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinTests.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; -import org.elasticsearch.xpack.ql.util.NumericUtils; import java.math.BigInteger; import java.util.ArrayList; @@ -37,12 +36,7 @@ public static Iterable parameters() { doubles(cases, "mv_min", "MvMin", (size, values) -> equalTo(values.min().getAsDouble())); ints(cases, "mv_min", "MvMin", (size, values) -> equalTo(values.min().getAsInt())); longs(cases, "mv_min", "MvMin", (size, values) -> equalTo(values.min().getAsLong())); - unsignedLongs( - cases, - "mv_min", - "MvMin", - (size, values) -> equalTo(NumericUtils.asLongUnsigned(values.reduce(BigInteger::min).get())) - ); + unsignedLongs(cases, "mv_min", "MvMin", (size, values) -> equalTo(values.reduce(BigInteger::min).get())); dateTimes(cases, "mv_min", "MvMin", (size, values) -> equalTo(values.min().getAsLong())); return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, cases))); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java index b02f6339e49c0..65ef493f664f9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java @@ -269,28 +269,20 @@ public void onFailure(Exception e) { }; ActionListener jobListener = ActionListener.wrap(jobBuilder -> { - try { - Job job = jobBuilder.build(); - validate(job, datafeedConfigHolder.get(), tasks, xContentRegistry); - auditDeprecations(datafeedConfigHolder.get(), job, auditor, xContentRegistry); - createDataExtractor.accept(job); - } catch 
(Exception e) { - listener.onFailure(e); - } + Job job = jobBuilder.build(); + validate(job, datafeedConfigHolder.get(), tasks, xContentRegistry); + auditDeprecations(datafeedConfigHolder.get(), job, auditor, xContentRegistry); + createDataExtractor.accept(job); }, listener::onFailure); ActionListener datafeedListener = ActionListener.wrap(datafeedBuilder -> { - try { - DatafeedConfig datafeedConfig = datafeedBuilder.build(); - params.setDatafeedIndices(datafeedConfig.getIndices()); - params.setJobId(datafeedConfig.getJobId()); - params.setIndicesOptions(datafeedConfig.getIndicesOptions()); - datafeedConfigHolder.set(datafeedConfig); - - jobConfigProvider.getJob(datafeedConfig.getJobId(), null, jobListener); - } catch (Exception e) { - listener.onFailure(e); - } + DatafeedConfig datafeedConfig = datafeedBuilder.build(); + params.setDatafeedIndices(datafeedConfig.getIndices()); + params.setJobId(datafeedConfig.getJobId()); + params.setIndicesOptions(datafeedConfig.getIndicesOptions()); + datafeedConfigHolder.set(datafeedConfig); + + jobConfigProvider.getJob(datafeedConfig.getJobId(), null, jobListener); }, listener::onFailure); datafeedConfigProvider.getDatafeedConfig(params.getDatafeedId(), null, datafeedListener); diff --git a/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldLuceneVersions.java b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldLuceneVersions.java index 406ea50315de0..42fe09691d249 100644 --- a/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldLuceneVersions.java +++ b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldLuceneVersions.java @@ -170,8 +170,8 @@ private static void convertToNewFormat(IndexShard indexShard) { throw new UncheckedIOException( Strings.format( """ - Elasticsearch version [{}] has limited support for indices created with version [{}] but this index could not be \ - read. 
It may be using an unsupported feature, or it may be damaged or corrupt. See {} for further information.""", + Elasticsearch version [%s] has limited support for indices created with version [%s] but this index could not be \ + read. It may be using an unsupported feature, or it may be damaged or corrupt. See %s for further information.""", Build.current().version(), IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(indexShard.indexSettings().getSettings()), ReferenceDocs.ARCHIVE_INDICES diff --git a/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/util/NumericUtilsTests.java b/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/util/NumericUtilsTests.java index 45ab0e732305c..80be578ed2647 100644 --- a/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/util/NumericUtilsTests.java +++ b/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/util/NumericUtilsTests.java @@ -13,6 +13,7 @@ import java.util.function.BiFunction; import static org.elasticsearch.xpack.ql.util.NumericUtils.asLongUnsigned; +import static org.elasticsearch.xpack.ql.util.NumericUtils.unsignedLongAsBigInteger; import static org.elasticsearch.xpack.ql.util.NumericUtils.unsignedLongAsNumber; import static org.elasticsearch.xpack.ql.util.StringUtils.parseIntegral; import static org.hamcrest.Matchers.equalTo; @@ -115,6 +116,11 @@ public void testUnsignedLongMultiplyExact() { assertThat(mulExact("0", "0"), equalTo("0")); } + public void testRoundTripConversion() { + BigInteger b = randomUnsignedLongBetween(BigInteger.ZERO, UNSIGNED_LONG_MAX); + assertThat(b, equalTo(unsignedLongAsBigInteger(asLongUnsigned(b)))); + } + private static String addExact(String x, String y) { return exactOperation(x, y, NumericUtils::unsignedLongAddExact); } diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java 
b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java index d3bb435dc03ab..b7dc212fe12ad 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java @@ -64,6 +64,7 @@ import static org.elasticsearch.license.LicenseSettings.SELF_GENERATED_LICENSE_TYPE; import static org.elasticsearch.test.NodeRoles.addRoles; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xpack.searchablesnapshots.cache.common.TestUtils.pageAligned; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -289,10 +290,10 @@ protected void assertTotalHits(String indexName, TotalHits originalAllHits, Tota } catch (InterruptedException e) { throw new RuntimeException(e); } - allHits.set(t, prepareSearch(indexName).setTrackTotalHits(true).get().getHits().getTotalHits()); - barHits.set( - t, - prepareSearch(indexName).setTrackTotalHits(true).setQuery(matchQuery("foo", "bar")).get().getHits().getTotalHits() + assertResponse(prepareSearch(indexName).setTrackTotalHits(true), resp -> allHits.set(t, resp.getHits().getTotalHits())); + assertResponse( + prepareSearch(indexName).setTrackTotalHits(true).setQuery(matchQuery("foo", "bar")), + resp -> barHits.set(t, resp.getHits().getTotalHits()) ); }); threads[i].start(); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/RetrySearchIntegTests.java 
b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/RetrySearchIntegTests.java index 0551ac3007f10..c80cf3c3d62e3 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/RetrySearchIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/RetrySearchIntegTests.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.ClosePointInTimeRequest; import org.elasticsearch.action.search.OpenPointInTimeRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.search.TransportClosePointInTimeAction; import org.elasticsearch.action.search.TransportOpenPointInTimeAction; @@ -32,7 +31,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.hamcrest.Matchers.equalTo; public class RetrySearchIntegTests extends BaseSearchableSnapshotsIntegTestCase { @@ -144,30 +143,31 @@ public void testRetryPointInTime() throws Exception { ).keepAlive(TimeValue.timeValueMinutes(2)); final String pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openRequest).actionGet().getPointInTimeId(); try { - SearchResponse resp = prepareSearch().setIndices(indexName) - .setPreference(null) - .setPointInTime(new PointInTimeBuilder(pitId)) - .get(); - assertNoFailures(resp); - assertThat(resp.pointInTimeId(), equalTo(pitId)); - assertHitCount(resp, docCount); - + assertNoFailuresAndResponse( + 
prepareSearch().setIndices(indexName).setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)), + resp -> { + assertThat(resp.pointInTimeId(), equalTo(pitId)); + assertHitCount(resp, docCount); + } + ); final Set allocatedNodes = internalCluster().nodesInclude(indexName); for (String allocatedNode : allocatedNodes) { internalCluster().restartNode(allocatedNode); } ensureGreen(indexName); - resp = prepareSearch().setIndices(indexName) - .setQuery(new RangeQueryBuilder("created_date").gte("2011-01-01").lte("2011-12-12")) - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setPreference(null) - .setPreFilterShardSize(between(1, 10)) - .setAllowPartialSearchResults(true) - .setPointInTime(new PointInTimeBuilder(pitId)) - .get(); - assertNoFailures(resp); - assertThat(resp.pointInTimeId(), equalTo(pitId)); - assertHitCount(resp, docCount); + assertNoFailuresAndResponse( + prepareSearch().setIndices(indexName) + .setQuery(new RangeQueryBuilder("created_date").gte("2011-01-01").lte("2011-12-12")) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setPreference(null) + .setPreFilterShardSize(between(1, 10)) + .setAllowPartialSearchResults(true) + .setPointInTime(new PointInTimeBuilder(pitId)), + resp -> { + assertThat(resp.pointInTimeId(), equalTo(pitId)); + assertHitCount(resp, docCount); + } + ); } finally { client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId)).actionGet(); } diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java index 844e6099460b2..a7a3b8e461604 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java +++ 
b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java @@ -52,6 +52,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING; import static org.elasticsearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -182,14 +183,14 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying .source(new SearchSourceBuilder().query(rangeQuery)); if (includeIndexCoveringSearchRangeInSearchRequest) { - SearchResponse searchResponse = client().search(request).actionGet(); - - // All the regular index searches succeeded - assertThat(searchResponse.getSuccessfulShards(), equalTo(indexWithinSearchRangeShardCount)); - // All the searchable snapshots shard search failed - assertThat(searchResponse.getFailedShards(), equalTo(indexOutsideSearchRangeShardCount)); - assertThat(searchResponse.getSkippedShards(), equalTo(0)); - assertThat(searchResponse.getTotalShards(), equalTo(totalShards)); + assertResponse(client().search(request), searchResponse -> { + // All the regular index searches succeeded + assertThat(searchResponse.getSuccessfulShards(), equalTo(indexWithinSearchRangeShardCount)); + // All the searchable snapshots shard search failed + assertThat(searchResponse.getFailedShards(), equalTo(indexOutsideSearchRangeShardCount)); + assertThat(searchResponse.getSkippedShards(), equalTo(0)); + assertThat(searchResponse.getTotalShards(), equalTo(totalShards)); + }); } else { // All shards failed, since all shards are unassigned and the IndexMetadata min/max timestamp // is not 
available yet @@ -271,13 +272,13 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying waitUntilAllShardsAreUnassigned(updatedIndexMetadata.getIndex()); if (includeIndexCoveringSearchRangeInSearchRequest) { - SearchResponse newSearchResponse = client().search(request).actionGet(); - - assertThat(newSearchResponse.getSkippedShards(), equalTo(indexOutsideSearchRangeShardCount)); - assertThat(newSearchResponse.getSuccessfulShards(), equalTo(totalShards)); - assertThat(newSearchResponse.getFailedShards(), equalTo(0)); - assertThat(newSearchResponse.getTotalShards(), equalTo(totalShards)); - assertThat(newSearchResponse.getHits().getTotalHits().value, equalTo((long) numDocsWithinRange)); + assertResponse(client().search(request), newSearchResponse -> { + assertThat(newSearchResponse.getSkippedShards(), equalTo(indexOutsideSearchRangeShardCount)); + assertThat(newSearchResponse.getSuccessfulShards(), equalTo(totalShards)); + assertThat(newSearchResponse.getFailedShards(), equalTo(0)); + assertThat(newSearchResponse.getTotalShards(), equalTo(totalShards)); + assertThat(newSearchResponse.getHits().getTotalHits().value, equalTo((long) numDocsWithinRange)); + }); // test with SearchShardsAPI { @@ -338,13 +339,14 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying } } } else { - SearchResponse newSearchResponse = client().search(request).actionGet(); - // When all shards are skipped, at least one of them should be queried in order to - // provide a proper search response. 
- assertThat(newSearchResponse.getSkippedShards(), equalTo(indexOutsideSearchRangeShardCount - 1)); - assertThat(newSearchResponse.getSuccessfulShards(), equalTo(indexOutsideSearchRangeShardCount - 1)); - assertThat(newSearchResponse.getFailedShards(), equalTo(1)); - assertThat(newSearchResponse.getTotalShards(), equalTo(indexOutsideSearchRangeShardCount)); + assertResponse(client().search(request), newSearchResponse -> { + // When all shards are skipped, at least one of them should be queried in order to + // provide a proper search response. + assertThat(newSearchResponse.getSkippedShards(), equalTo(indexOutsideSearchRangeShardCount - 1)); + assertThat(newSearchResponse.getSuccessfulShards(), equalTo(indexOutsideSearchRangeShardCount - 1)); + assertThat(newSearchResponse.getFailedShards(), equalTo(1)); + assertThat(newSearchResponse.getTotalShards(), equalTo(indexOutsideSearchRangeShardCount)); + }); // test with SearchShardsAPI { @@ -449,14 +451,15 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() // test with Search API { - SearchResponse searchResponse = client().search(request).actionGet(); - // All the regular index searches succeeded - assertThat(searchResponse.getSuccessfulShards(), equalTo(indexOutsideSearchRangeShardCount)); - // All the searchable snapshots shard search failed - assertThat(searchResponse.getFailedShards(), equalTo(indexOutsideSearchRangeShardCount)); - assertThat(searchResponse.getSkippedShards(), equalTo(searchableSnapshotShardCount)); - assertThat(searchResponse.getTotalShards(), equalTo(totalShards)); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); + assertResponse(client().search(request), searchResponse -> { + // All the regular index searches succeeded + assertThat(searchResponse.getSuccessfulShards(), equalTo(indexOutsideSearchRangeShardCount)); + // All the searchable snapshots shard search failed + assertThat(searchResponse.getFailedShards(), 
equalTo(indexOutsideSearchRangeShardCount)); + assertThat(searchResponse.getSkippedShards(), equalTo(searchableSnapshotShardCount)); + assertThat(searchResponse.getTotalShards(), equalTo(totalShards)); + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); + }); } // test with SearchShards API @@ -513,16 +516,16 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() // busy assert since computing the time stamp field from the cluster state happens off of the CS applier thread and thus can be // slightly delayed assertBusy(() -> { - SearchResponse newSearchResponse = client().search(request).actionGet(); - - // All the regular index searches succeeded - assertThat(newSearchResponse.getSuccessfulShards(), equalTo(totalShards)); - assertThat(newSearchResponse.getFailedShards(), equalTo(0)); - // We have to query at least one node to construct a valid response, and we pick - // a shard that's available in order to construct the search response - assertThat(newSearchResponse.getSkippedShards(), equalTo(totalShards - 1)); - assertThat(newSearchResponse.getTotalShards(), equalTo(totalShards)); - assertThat(newSearchResponse.getHits().getTotalHits().value, equalTo(0L)); + assertResponse(client().search(request), newSearchResponse -> { + // All the regular index searches succeeded + assertThat(newSearchResponse.getSuccessfulShards(), equalTo(totalShards)); + assertThat(newSearchResponse.getFailedShards(), equalTo(0)); + // We have to query at least one node to construct a valid response, and we pick + // a shard that's available in order to construct the search response + assertThat(newSearchResponse.getSkippedShards(), equalTo(totalShards - 1)); + assertThat(newSearchResponse.getTotalShards(), equalTo(totalShards)); + assertThat(newSearchResponse.getHits().getTotalHits().value, equalTo(0L)); + }); }); // test with SearchShards API diff --git 
a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java index c3f5e44ae32a0..876ff9ebdb86f 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java @@ -46,6 +46,7 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotsService; @@ -117,19 +118,12 @@ public void testCreateAndRestoreSearchableSnapshot() throws Exception { populateIndex(indexName, 10_000); - final TotalHits originalAllHits = internalCluster().client() - .prepareSearch(indexName) - .setTrackTotalHits(true) - .get() - .getHits() - .getTotalHits(); - final TotalHits originalBarHits = internalCluster().client() - .prepareSearch(indexName) - .setTrackTotalHits(true) - .setQuery(matchQuery("foo", "bar")) - .get() - .getHits() - .getTotalHits(); + final TotalHits originalAllHits = SearchResponseUtils.getTotalHits( + internalCluster().client().prepareSearch(indexName).setTrackTotalHits(true) + ); + final TotalHits originalBarHits = SearchResponseUtils.getTotalHits( + internalCluster().client().prepareSearch(indexName).setTrackTotalHits(true).setQuery(matchQuery("foo", "bar")) + ); logger.info("--> [{}] in total, of which [{}] match the query", originalAllHits, originalBarHits); expectThrows( @@ -765,19 +759,12 @@ public void 
testSnapshotOfSearchableSnapshotIncludesNoDataButCanBeRestored() thr Settings.builder().put(INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1).put(INDEX_SOFT_DELETES_SETTING.getKey(), true) ); - final TotalHits originalAllHits = internalCluster().client() - .prepareSearch(indexName) - .setTrackTotalHits(true) - .get() - .getHits() - .getTotalHits(); - final TotalHits originalBarHits = internalCluster().client() - .prepareSearch(indexName) - .setTrackTotalHits(true) - .setQuery(matchQuery("foo", "bar")) - .get() - .getHits() - .getTotalHits(); + final TotalHits originalAllHits = SearchResponseUtils.getTotalHits( + internalCluster().client().prepareSearch(indexName).setTrackTotalHits(true) + ); + final TotalHits originalBarHits = SearchResponseUtils.getTotalHits( + internalCluster().client().prepareSearch(indexName).setTrackTotalHits(true).setQuery(matchQuery("foo", "bar")) + ); logger.info("--> [{}] in total, of which [{}] match the query", originalAllHits, originalBarHits); // The repository that contains the actual data @@ -936,19 +923,12 @@ public void testSnapshotOfSearchableSnapshotCanBeRestoredBeforeRepositoryRegiste Settings.builder().put(INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1).put(INDEX_SOFT_DELETES_SETTING.getKey(), true) ); - final TotalHits originalAllHits = internalCluster().client() - .prepareSearch(indexName) - .setTrackTotalHits(true) - .get() - .getHits() - .getTotalHits(); - final TotalHits originalBarHits = internalCluster().client() - .prepareSearch(indexName) - .setTrackTotalHits(true) - .setQuery(matchQuery("foo", "bar")) - .get() - .getHits() - .getTotalHits(); + final TotalHits originalAllHits = SearchResponseUtils.getTotalHits( + internalCluster().client().prepareSearch(indexName).setTrackTotalHits(true) + ); + final TotalHits originalBarHits = SearchResponseUtils.getTotalHits( + internalCluster().client().prepareSearch(indexName).setTrackTotalHits(true).setQuery(matchQuery("foo", "bar")) + ); logger.info("--> [{}] in total, of which [{}] match 
the query", originalAllHits, originalBarHits); // Take snapshot containing the actual data to one repository diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsRecoverFromSnapshotIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsRecoverFromSnapshotIntegTests.java index 6f71f7c33bf06..894d3af8d75b8 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsRecoverFromSnapshotIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsRecoverFromSnapshotIntegTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.indices.recovery.plan.ShardSnapshotsService; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotRequest; @@ -43,12 +44,9 @@ public void testSearchableSnapshotRelocationDoNotUseSnapshotBasedRecoveries() th .put(INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) ); - final TotalHits totalHits = internalCluster().client() - .prepareSearch(indexName) - .setTrackTotalHits(true) - .get() - .getHits() - .getTotalHits(); + final TotalHits totalHits = SearchResponseUtils.getTotalHits( + internalCluster().client().prepareSearch(indexName).setTrackTotalHits(true) + ); final var snapshotName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); createSnapshot(repositoryName, snapshotName, List.of(indexName)); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsRepositoryIntegTests.java 
b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsRepositoryIntegTests.java index cb6cf45b641c6..f97151f9ae330 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsRepositoryIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsRepositoryIntegTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.repositories.RepositoryConflictException; import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.snapshots.SnapshotRestoreException; import java.util.Arrays; @@ -49,12 +50,9 @@ public void testRepositoryUsedBySearchableSnapshotCanBeUpdatedButNotUnregistered Settings.builder().put(INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1).put(INDEX_SOFT_DELETES_SETTING.getKey(), true) ); - final TotalHits totalHits = internalCluster().client() - .prepareSearch(indexName) - .setTrackTotalHits(true) - .get() - .getHits() - .getTotalHits(); + final TotalHits totalHits = SearchResponseUtils.getTotalHits( + internalCluster().client().prepareSearch(indexName).setTrackTotalHits(true) + ); final String snapshotName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); createSnapshot(repositoryName, snapshotName, List.of(indexName)); @@ -164,7 +162,9 @@ public void testMountIndexWithDifferentDeletionOfSnapshot() throws Exception { final String index = "index"; createAndPopulateIndex(index, Settings.builder().put(INDEX_SOFT_DELETES_SETTING.getKey(), true)); - final TotalHits totalHits = internalCluster().client().prepareSearch(index).setTrackTotalHits(true).get().getHits().getTotalHits(); + final TotalHits totalHits = SearchResponseUtils.getTotalHits( + internalCluster().client().prepareSearch(index).setTrackTotalHits(true) + ); 
final String snapshot = "snapshot"; createSnapshot(repository, snapshot, List.of(index)); @@ -220,7 +220,9 @@ public void testDeletionOfSnapshotSettingCannotBeUpdated() throws Exception { final String index = "index"; createAndPopulateIndex(index, Settings.builder().put(INDEX_SOFT_DELETES_SETTING.getKey(), true)); - final TotalHits totalHits = internalCluster().client().prepareSearch(index).setTrackTotalHits(true).get().getHits().getTotalHits(); + final TotalHits totalHits = SearchResponseUtils.getTotalHits( + internalCluster().client().prepareSearch(index).setTrackTotalHits(true) + ); final String snapshot = "snapshot"; createSnapshot(repository, snapshot, List.of(index)); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheIntegTests.java index b5ebf1104a195..37b3ecfd36959 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheIntegTests.java @@ -37,6 +37,7 @@ import org.elasticsearch.plugins.ClusterPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.reindex.ReindexPlugin; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.xcontent.XContentBuilder; @@ -207,11 +208,9 @@ public void testBlobStoreCache() throws Exception { refreshSystemIndex(); - final long numberOfCachedBlobs = systemClient().prepareSearch(SNAPSHOT_BLOB_CACHE_INDEX) - 
.setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN) - .get() - .getHits() - .getTotalHits().value; + final long numberOfCachedBlobs = SearchResponseUtils.getTotalHitsValue( + systemClient().prepareSearch(SNAPSHOT_BLOB_CACHE_INDEX).setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN) + ); IndexingStats indexingStats = systemClient().admin() .indices() diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheMaintenanceIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheMaintenanceIntegTests.java index 04233e47b7bcc..981ffe2832e66 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheMaintenanceIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheMaintenanceIntegTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.reindex.ReindexPlugin; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; @@ -318,16 +319,12 @@ private Client systemClient() { } private long numberOfEntriesInCache() { - var res = systemClient().prepareSearch(SNAPSHOT_BLOB_CACHE_INDEX) - .setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN) - .setTrackTotalHits(true) - .setSize(0) - .get(); - try { - return res.getHits().getTotalHits().value; - } finally { - res.decRef(); - } + return SearchResponseUtils.getTotalHitsValue( + systemClient().prepareSearch(SNAPSHOT_BLOB_CACHE_INDEX) + 
.setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN) + .setTrackTotalHits(true) + .setSize(0) + ); } private void refreshSystemIndex(boolean failIfNotExist) { diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/NodesCachesStatsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/NodesCachesStatsIntegTests.java index 3858b087f4d3a..42ac63579b6c6 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/NodesCachesStatsIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/NodesCachesStatsIntegTests.java @@ -112,7 +112,7 @@ public void testNodesCachesStats() throws Exception { randomBoolean() ? QueryBuilders.rangeQuery("id").gte(randomIntBetween(0, 1000)) : QueryBuilders.termQuery("test", "value" + randomIntBetween(0, 1000)) - ).setSize(randomIntBetween(0, 1000)).get(); + ).setSize(randomIntBetween(0, 1000)).get().decRef(); } assertExecutorIsIdle(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME); diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheMaintenanceService.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheMaintenanceService.java index 64508e1d49959..89cab65765bf9 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheMaintenanceService.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheMaintenanceService.java @@ -417,7 +417,7 @@ private class PeriodicMaintenanceTask implements Runnable, Releasable { private volatile Map> 
existingSnapshots; private volatile Set existingRepositories; - private volatile SearchResponse searchResponse; + private final AtomicReference searchResponse = new AtomicReference<>(); private volatile Instant expirationTime; private volatile String pointIntTimeId; private volatile Object[] searchAfter; @@ -458,146 +458,155 @@ public void onFailure(Exception e) { final String pitId = pointIntTimeId; assert Strings.hasLength(pitId); - if (searchResponse == null) { - final SearchSourceBuilder searchSource = new SearchSourceBuilder(); - searchSource.fetchField(new FieldAndFormat(CachedBlob.CREATION_TIME_FIELD, "epoch_millis")); - searchSource.fetchSource(false); - searchSource.trackScores(false); - searchSource.sort(ShardDocSortField.NAME); - searchSource.size(batchSize); - if (searchAfter != null) { - searchSource.searchAfter(searchAfter); - searchSource.trackTotalHits(false); - } else { - searchSource.trackTotalHits(true); + SearchResponse searchResponseRef; + do { + searchResponseRef = searchResponse.get(); + if (searchResponseRef == null) { + handleMissingSearchResponse(pitId); + return; } - final PointInTimeBuilder pointInTime = new PointInTimeBuilder(pitId); - pointInTime.setKeepAlive(keepAlive); - searchSource.pointInTimeBuilder(pointInTime); - final SearchRequest searchRequest = new SearchRequest(); - searchRequest.source(searchSource); - clientWithOrigin.execute(TransportSearchAction.TYPE, searchRequest, new ActionListener<>() { - @Override - public void onResponse(SearchResponse response) { - if (searchAfter == null) { - assert PeriodicMaintenanceTask.this.total.get() == 0L; - PeriodicMaintenanceTask.this.total.set(response.getHits().getTotalHits().value); - } - PeriodicMaintenanceTask.this.searchResponse = response; - PeriodicMaintenanceTask.this.searchAfter = null; - executeNext(PeriodicMaintenanceTask.this); - } - - @Override - public void onFailure(Exception e) { - complete(e); - } - }); - return; + } while (searchResponseRef.tryIncRef() == false); + 
try { + var searchHits = searchResponseRef.getHits().getHits(); + if (searchHits != null && searchHits.length > 0) { + updateWithSearchHits(searchHits); + return; + } + } finally { + searchResponseRef.decRef(); } + // we're done, complete the task + complete(null); + } catch (Exception e) { + complete(e); + } + } - final SearchHit[] searchHits = searchResponse.getHits().getHits(); - if (searchHits != null && searchHits.length > 0) { - if (expirationTime == null) { - final TimeValue retention = periodicTaskRetention; - expirationTime = Instant.ofEpochMilli(threadPool.absoluteTimeInMillis()) - .minus(retention.duration(), retention.timeUnit().toChronoUnit()); - - final ClusterState state = clusterService.state(); - // compute the list of existing searchable snapshots and repositories once - existingSnapshots = listSearchableSnapshots(state); - existingRepositories = RepositoriesMetadata.get(state) - .repositories() - .stream() - .map(RepositoryMetadata::name) - .collect(Collectors.toSet()); + private void handleMissingSearchResponse(String pitId) { + final SearchSourceBuilder searchSource = new SearchSourceBuilder(); + searchSource.fetchField(new FieldAndFormat(CachedBlob.CREATION_TIME_FIELD, "epoch_millis")); + searchSource.fetchSource(false); + searchSource.trackScores(false); + searchSource.sort(ShardDocSortField.NAME); + searchSource.size(batchSize); + if (searchAfter != null) { + searchSource.searchAfter(searchAfter); + searchSource.trackTotalHits(false); + } else { + searchSource.trackTotalHits(true); + } + final PointInTimeBuilder pointInTime = new PointInTimeBuilder(pitId); + pointInTime.setKeepAlive(keepAlive); + searchSource.pointInTimeBuilder(pointInTime); + final SearchRequest searchRequest = new SearchRequest(); + searchRequest.source(searchSource); + clientWithOrigin.execute(TransportSearchAction.TYPE, searchRequest, new ActionListener<>() { + @Override + public void onResponse(SearchResponse response) { + if (searchAfter == null) { + assert 
PeriodicMaintenanceTask.this.total.get() == 0L; + PeriodicMaintenanceTask.this.total.set(response.getHits().getTotalHits().value); } + PeriodicMaintenanceTask.this.setCurrentResponse(response); + PeriodicMaintenanceTask.this.searchAfter = null; + executeNext(PeriodicMaintenanceTask.this); + } - final BulkRequest bulkRequest = new BulkRequest(); - final Map> knownSnapshots = existingSnapshots; - assert knownSnapshots != null; - final Set knownRepositories = existingRepositories; - assert knownRepositories != null; - final Instant expirationTimeCopy = this.expirationTime; - assert expirationTimeCopy != null; - - Object[] lastSortValues = null; - for (SearchHit searchHit : searchHits) { - lastSortValues = searchHit.getSortValues(); - assert searchHit.getId() != null; - try { - boolean delete = false; - - // See {@link BlobStoreCacheService#generateId} - // doc id = {repository name}/{snapshot id}/{snapshot index id}/{shard id}/{file name}/@{file offset} - final String[] parts = Objects.requireNonNull(searchHit.getId()).split("/"); - assert parts.length == 6 : Arrays.toString(parts) + " vs " + searchHit.getId(); - - final String repositoryName = parts[0]; - if (knownRepositories.contains(repositoryName) == false) { - logger.trace("deleting blob store cache entry with id [{}]: repository does not exist", searchHit.getId()); - delete = true; - } else { - final Set knownIndexIds = knownSnapshots.get(parts[1]); - if (knownIndexIds == null || knownIndexIds.contains(parts[2]) == false) { - logger.trace("deleting blob store cache entry with id [{}]: not used", searchHit.getId()); - delete = true; - } - } - if (delete) { - final Instant creationTime = getCreationTime(searchHit); - if (creationTime.isAfter(expirationTimeCopy)) { - logger.trace( - "blob store cache entry with id [{}] was created recently, skipping deletion", - searchHit.getId() - ); - continue; - } - bulkRequest.add(new DeleteRequest().index(searchHit.getIndex()).id(searchHit.getId())); - } - } catch (Exception 
e) { - logger.warn( - () -> format("exception when parsing blob store cache entry with id [%s], skipping", searchHit.getId()), - e - ); + @Override + public void onFailure(Exception e) { + complete(e); + } + }); + } + + private void updateWithSearchHits(SearchHit[] searchHits) { + if (expirationTime == null) { + final TimeValue retention = periodicTaskRetention; + expirationTime = Instant.ofEpochMilli(threadPool.absoluteTimeInMillis()) + .minus(retention.duration(), retention.timeUnit().toChronoUnit()); + + final ClusterState state = clusterService.state(); + // compute the list of existing searchable snapshots and repositories once + existingSnapshots = listSearchableSnapshots(state); + existingRepositories = RepositoriesMetadata.get(state) + .repositories() + .stream() + .map(RepositoryMetadata::name) + .collect(Collectors.toSet()); + } + + final BulkRequest bulkRequest = new BulkRequest(); + final Map> knownSnapshots = existingSnapshots; + assert knownSnapshots != null; + final Set knownRepositories = existingRepositories; + assert knownRepositories != null; + final Instant expirationTimeCopy = this.expirationTime; + assert expirationTimeCopy != null; + + Object[] lastSortValues = null; + for (SearchHit searchHit : searchHits) { + lastSortValues = searchHit.getSortValues(); + assert searchHit.getId() != null; + try { + boolean delete = false; + + // See {@link BlobStoreCacheService#generateId} + // doc id = {repository name}/{snapshot id}/{snapshot index id}/{shard id}/{file name}/@{file offset} + final String[] parts = Objects.requireNonNull(searchHit.getId()).split("/"); + assert parts.length == 6 : Arrays.toString(parts) + " vs " + searchHit.getId(); + + final String repositoryName = parts[0]; + if (knownRepositories.contains(repositoryName) == false) { + logger.trace("deleting blob store cache entry with id [{}]: repository does not exist", searchHit.getId()); + delete = true; + } else { + final Set knownIndexIds = knownSnapshots.get(parts[1]); + if 
(knownIndexIds == null || knownIndexIds.contains(parts[2]) == false) { + logger.trace("deleting blob store cache entry with id [{}]: not used", searchHit.getId()); + delete = true; } } - - assert lastSortValues != null; - if (bulkRequest.numberOfActions() == 0) { - this.searchResponse = null; - this.searchAfter = lastSortValues; - executeNext(this); - return; + if (delete) { + final Instant creationTime = getCreationTime(searchHit); + if (creationTime.isAfter(expirationTimeCopy)) { + logger.trace("blob store cache entry with id [{}] was created recently, skipping deletion", searchHit.getId()); + continue; + } + bulkRequest.add(new DeleteRequest().index(searchHit.getIndex()).id(searchHit.getId())); } + } catch (Exception e) { + logger.warn(() -> format("exception when parsing blob store cache entry with id [%s], skipping", searchHit.getId()), e); + } + } - final Object[] finalSearchAfter = lastSortValues; - clientWithOrigin.execute(BulkAction.INSTANCE, bulkRequest, new ActionListener<>() { - @Override - public void onResponse(BulkResponse response) { - for (BulkItemResponse itemResponse : response.getItems()) { - if (itemResponse.isFailed() == false) { - assert itemResponse.getResponse() instanceof DeleteResponse; - PeriodicMaintenanceTask.this.deletes.incrementAndGet(); - } - } - PeriodicMaintenanceTask.this.searchResponse = null; - PeriodicMaintenanceTask.this.searchAfter = finalSearchAfter; - executeNext(PeriodicMaintenanceTask.this); - } + assert lastSortValues != null; + if (bulkRequest.numberOfActions() == 0) { + setCurrentResponse(null); + this.searchAfter = lastSortValues; + executeNext(this); + return; + } - @Override - public void onFailure(Exception e) { - complete(e); + final Object[] finalSearchAfter = lastSortValues; + clientWithOrigin.execute(BulkAction.INSTANCE, bulkRequest, new ActionListener<>() { + @Override + public void onResponse(BulkResponse response) { + for (BulkItemResponse itemResponse : response.getItems()) { + if (itemResponse.isFailed() 
== false) { + assert itemResponse.getResponse() instanceof DeleteResponse; + deletes.incrementAndGet(); } - }); - return; + } + PeriodicMaintenanceTask.this.setCurrentResponse(null); + PeriodicMaintenanceTask.this.searchAfter = finalSearchAfter; + executeNext(PeriodicMaintenanceTask.this); } - // we're done, complete the task - complete(null); - } catch (Exception e) { - complete(e); - } + + @Override + public void onFailure(Exception e) { + complete(e); + } + }); } public boolean isClosed() { @@ -614,6 +623,7 @@ private void ensureOpen() { @Override public void close() { if (closed.compareAndSet(false, true)) { + setCurrentResponse(null); final Exception e = error.get(); if (e != null) { logger.warn( @@ -679,6 +689,16 @@ public void onFailure(Exception e) { } } } + + private void setCurrentResponse(SearchResponse response) { + if (response != null) { + response.mustIncRef(); + } + var previous = searchResponse.getAndSet(response); + if (previous != null) { + previous.decRef(); + } + } } private void executeNext(PeriodicMaintenanceTask maintenanceTask) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/ApiKeyUserRoleDescriptorResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/ApiKeyUserRoleDescriptorResolver.java index 17c35ecca5f13..9c54f6decb0c8 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/ApiKeyUserRoleDescriptorResolver.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/ApiKeyUserRoleDescriptorResolver.java @@ -15,7 +15,6 @@ import org.elasticsearch.xpack.core.security.authz.support.DLSRoleQueryValidator; import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; -import java.util.Collection; import java.util.Set; public class ApiKeyUserRoleDescriptorResolver { @@ -37,15 +36,10 @@ public void resolveUserRoleDescriptors(final Authentication authentication, 
fina return; } - rolesStore.getRoleDescriptorsList(effectiveSubject, listener.delegateFailureAndWrap(this::handleRoleDescriptorsList)); + rolesStore.getRoleDescriptors(effectiveSubject, listener.delegateFailureAndWrap(this::handleRoleDescriptors)); } - private void handleRoleDescriptorsList( - ActionListener> listener, - Collection> roleDescriptorsList - ) { - assert roleDescriptorsList.size() == 1; - final var roleDescriptors = roleDescriptorsList.iterator().next(); + private void handleRoleDescriptors(ActionListener> listener, Set roleDescriptors) { for (RoleDescriptor rd : roleDescriptors) { DLSRoleQueryValidator.validateQueryField(rd.getIndicesPrivileges(), xContentRegistry); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java index ab9ea8f772054..631a988b0f325 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.cache.Cache; @@ -349,30 +348,18 @@ private void buildThenMaybeCacheRole( ); } - public void getRoleDescriptorsList(Subject subject, ActionListener>> listener) { - tryGetRoleDescriptorForInternalUser(subject).ifPresentOrElse( - roleDescriptor -> listener.onResponse(List.of(Set.of(roleDescriptor))), - () -> { - final List roleReferences = subject.getRoleReferenceIntersection(anonymousUser).getRoleReferences(); - final GroupedActionListener> groupedActionListener 
= new GroupedActionListener<>( - roleReferences.size(), - listener - ); + public void getRoleDescriptors(Subject subject, ActionListener> listener) { + tryGetRoleDescriptorForInternalUser(subject).ifPresentOrElse(roleDescriptor -> listener.onResponse(Set.of(roleDescriptor)), () -> { + final List roleReferences = subject.getRoleReferenceIntersection(anonymousUser).getRoleReferences(); + assert roleReferences.size() == 1; // we only handle the singleton case today, but that may change with derived API keys - roleReferences.forEach( - roleReference -> roleReference.resolve( - roleReferenceResolver, - groupedActionListener.delegateFailureAndWrap((delegate, rolesRetrievalResult) -> { - if (rolesRetrievalResult.isSuccess()) { - delegate.onResponse(rolesRetrievalResult.getRoleDescriptors()); - } else { - delegate.onFailure(new ElasticsearchException("role retrieval had one or more failures")); - } - }) - ) - ); - } - ); + ActionListener.run(listener.map(rolesRetrievalResult -> { + if (rolesRetrievalResult.isSuccess() == false) { + throw new ElasticsearchException("role retrieval had one or more failures"); + } + return rolesRetrievalResult.getRoleDescriptors(); + }), l -> roleReferences.iterator().next().resolve(roleReferenceResolver, l)); + }); } // Package private for testing diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/ApiKeyUserRoleDescriptorResolverTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/ApiKeyUserRoleDescriptorResolverTests.java index 2af18f5dca7dc..502d2b7ce0dda 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/ApiKeyUserRoleDescriptorResolverTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/ApiKeyUserRoleDescriptorResolverTests.java @@ -19,8 +19,6 @@ import org.elasticsearch.xpack.core.security.user.User; import 
org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; -import java.util.Collection; -import java.util.List; import java.util.Set; import java.util.stream.Collectors; @@ -56,11 +54,10 @@ public void testGetRoleDescriptors() { assertThat(subject.getType(), is(Subject.Type.USER)); assertThat(Set.of(subject.getUser().roles()), equalTo(userRoleNames)); - ActionListener>> listener = (ActionListener>>) args[args.length - - 1]; - listener.onResponse(List.of(roleDescriptors)); + ActionListener> listener = (ActionListener>) args[args.length - 1]; + listener.onResponse(roleDescriptors); return null; - }).when(rolesStore).getRoleDescriptorsList(any(Subject.class), any(ActionListener.class)); + }).when(rolesStore).getRoleDescriptors(any(Subject.class), any(ActionListener.class)); final PlainActionFuture> future = new PlainActionFuture<>(); resolver.resolveUserRoleDescriptors(authentication, future); @@ -77,6 +74,6 @@ public void testGetRoleDescriptorsEmptyForApiKey() { resolver.resolveUserRoleDescriptors(authentication, future); assertThat(future.actionGet(), equalTo(Set.of())); - verify(rolesStore, never()).getRoleDescriptorsList(any(), any()); + verify(rolesStore, never()).getRoleDescriptors(any(), any()); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java index c0df3a0947c71..4ad7c61d45d63 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java @@ -2760,15 +2760,15 @@ public void testGetRoleDescriptorsListForInternalUsers() { when(subject.getUser()).thenReturn(InternalUsers.SYSTEM_USER); final IllegalArgumentException e1 = expectThrows( IllegalArgumentException.class, - () -> 
compositeRolesStore.getRoleDescriptorsList(subject, new PlainActionFuture<>()) + () -> compositeRolesStore.getRoleDescriptors(subject, ActionListener.noop()) ); assertThat(e1.getMessage(), equalTo("should never try to get the roles for internal user [" + SystemUser.NAME + "]")); for (var internalUser : AuthenticationTestHelper.internalUsersWithLocalRoleDescriptor()) { when(subject.getUser()).thenReturn(internalUser); - final PlainActionFuture>> future = new PlainActionFuture<>(); - compositeRolesStore.getRoleDescriptorsList(subject, future); - assertThat(future.actionGet(), equalTo(List.of(Set.of(internalUser.getLocalClusterRoleDescriptor().get())))); + final PlainActionFuture> future = new PlainActionFuture<>(); + compositeRolesStore.getRoleDescriptors(subject, future); + assertThat(future.actionGet(), equalTo(Set.of(internalUser.getLocalClusterRoleDescriptor().get()))); } } @@ -2787,9 +2787,9 @@ public void testGetRoleDescriptorsListUsesRoleStoreToResolveRoleWithInternalRole when(subject.getRoleReferenceIntersection(any())).thenReturn( new RoleReferenceIntersection(new RoleReference.NamedRoleReference(new String[] { roleName })) ); - final PlainActionFuture>> future = new PlainActionFuture<>(); - compositeRolesStore.getRoleDescriptorsList(subject, future); - assertThat(future.actionGet(), equalTo(List.of(Set.of(expectedRoleDescriptor)))); + final PlainActionFuture> future = new PlainActionFuture<>(); + compositeRolesStore.getRoleDescriptors(subject, future); + assertThat(future.actionGet(), equalTo(Set.of(expectedRoleDescriptor))); } private void getRoleForRoleNames(CompositeRolesStore rolesStore, Collection roleNames, ActionListener listener) { diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformSchedulerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformSchedulerTests.java index c6c6c2febfdad..7125b4074bc4a 100644 --- 
a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformSchedulerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformSchedulerTests.java @@ -327,7 +327,6 @@ public void testSchedulingWithSystemClock() throws Exception { transformScheduler.stop(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/95445") public void testScheduleNowWithSystemClock() throws Exception { String transformId = "test-schedule-now-with-system-clock"; TimeValue frequency = TimeValue.timeValueHours(1); // Very long pause between checkpoints @@ -343,7 +342,11 @@ public void testScheduleNowWithSystemClock() throws Exception { Thread.sleep(5 * 1000L); transformScheduler.scheduleNow(transformId); - assertThat(events, hasSize(2)); + // If we are unlucky, the scheduleNow call will trigger processing **exactly** in this small window of time in which processing is + // temporarily disallowed (as two concurrent invocations of the "TransformScheduler.processScheduledTasks" method are not allowed). + // Hence, we need to wait for the next processing cycle to happen (it will be TEST_SCHEDULER_FREQUENCY from now). 
+ assertBusy(() -> assertThat(events, hasSize(2)), TEST_SCHEDULER_FREQUENCY.seconds() + 1, TimeUnit.SECONDS); + assertThat(events.get(0).transformId(), is(equalTo(transformId))); assertThat(events.get(1).transformId(), is(equalTo(transformId))); assertThat(events.get(1).scheduledTime() - events.get(0).triggeredTime(), is(allOf(greaterThan(4 * 1000L), lessThan(6 * 1000L)))); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportGetWatcherSettingsAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportGetWatcherSettingsAction.java index eecbc21ad0475..0c52057100860 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportGetWatcherSettingsAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportGetWatcherSettingsAction.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -20,7 +19,6 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -28,6 +26,7 @@ import org.elasticsearch.xpack.core.watcher.transport.actions.put.UpdateWatcherSettingsAction; import static org.elasticsearch.xpack.watcher.transport.actions.TransportUpdateWatcherSettingsAction.WATCHER_INDEX_NAME; +import static 
org.elasticsearch.xpack.watcher.transport.actions.TransportUpdateWatcherSettingsAction.WATCHER_INDEX_REQUEST; public class TransportGetWatcherSettingsAction extends TransportMasterNodeAction< GetWatcherSettingsAction.Request, @@ -61,15 +60,11 @@ protected void masterOperation( ClusterState state, ActionListener listener ) { - final ThreadContext threadContext = threadPool.getThreadContext(); - // Stashing and un-stashing the context allows warning headers about accessing a system index to be ignored. - try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - IndexMetadata metadata = state.metadata().index(WATCHER_INDEX_NAME); - if (metadata == null) { - listener.onResponse(new GetWatcherSettingsAction.Response(Settings.EMPTY)); - } else { - listener.onResponse(new GetWatcherSettingsAction.Response(filterSettableSettings(metadata.getSettings()))); - } + IndexMetadata metadata = state.metadata().index(WATCHER_INDEX_NAME); + if (metadata == null) { + listener.onResponse(new GetWatcherSettingsAction.Response(Settings.EMPTY)); + } else { + listener.onResponse(new GetWatcherSettingsAction.Response(filterSettableSettings(metadata.getSettings()))); } } @@ -95,7 +90,7 @@ protected ClusterBlockException checkBlock(GetWatcherSettingsAction.Request requ return state.blocks() .indicesBlockedException( ClusterBlockLevel.METADATA_READ, - indexNameExpressionResolver.concreteIndexNames(state, IndicesOptions.LENIENT_EXPAND_OPEN, WATCHER_INDEX_NAME) + indexNameExpressionResolver.concreteIndexNamesWithSystemIndexAccess(state, WATCHER_INDEX_REQUEST) ); } } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportUpdateWatcherSettingsAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportUpdateWatcherSettingsAction.java index 124cb6de5fd40..b9895b2319599 100644 --- 
a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportUpdateWatcherSettingsAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportUpdateWatcherSettingsAction.java @@ -9,6 +9,7 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsClusterStateUpdateRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.IndicesOptions; @@ -24,7 +25,6 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.index.Index; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; @@ -37,7 +37,19 @@ public class TransportUpdateWatcherSettingsAction extends TransportMasterNodeAct UpdateWatcherSettingsAction.Request, AcknowledgedResponse> { - public static final String WATCHER_INDEX_NAME = ".watches"; + static final String WATCHER_INDEX_NAME = ".watches"; + + static final IndicesRequest WATCHER_INDEX_REQUEST = new IndicesRequest() { + @Override + public String[] indices() { + return new String[] { WATCHER_INDEX_NAME }; + } + + @Override + public IndicesOptions indicesOptions() { + return IndicesOptions.LENIENT_EXPAND_OPEN; + } + }; private static final Logger logger = LogManager.getLogger(TransportUpdateWatcherSettingsAction.class); private final MetadataUpdateSettingsService updateSettingsService; @@ -83,27 +95,23 @@ protected void masterOperation( new Index[] { watcherIndexMd.getIndex() } ).settings(newSettings).ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()); - final ThreadContext threadContext = threadPool.getThreadContext(); 
- // Stashing and un-stashing the context allows warning headers about accessing a system index to be ignored. - try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - updateSettingsService.updateSettings(clusterStateUpdateRequest, new ActionListener<>() { - @Override - public void onResponse(AcknowledgedResponse acknowledgedResponse) { - if (acknowledgedResponse.isAcknowledged()) { - logger.info("successfully updated Watcher service settings to {}", request.settings()); - } else { - logger.warn("updating Watcher service settings to {} was not acknowledged", request.settings()); - } - listener.onResponse(acknowledgedResponse); + updateSettingsService.updateSettings(clusterStateUpdateRequest, new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse acknowledgedResponse) { + if (acknowledgedResponse.isAcknowledged()) { + logger.info("successfully updated Watcher service settings to {}", request.settings()); + } else { + logger.warn("updating Watcher service settings to {} was not acknowledged", request.settings()); } + listener.onResponse(acknowledgedResponse); + } - @Override - public void onFailure(Exception e) { - logger.debug(() -> "failed to update settings for Watcher service", e); - listener.onFailure(e); - } - }); - } + @Override + public void onFailure(Exception e) { + logger.debug(() -> "failed to update settings for Watcher service", e); + listener.onFailure(e); + } + }); } @Override @@ -115,7 +123,7 @@ protected ClusterBlockException checkBlock(UpdateWatcherSettingsAction.Request r return state.blocks() .indicesBlockedException( ClusterBlockLevel.METADATA_WRITE, - indexNameExpressionResolver.concreteIndexNames(state, IndicesOptions.LENIENT_EXPAND_OPEN, WATCHER_INDEX_NAME) + indexNameExpressionResolver.concreteIndexNamesWithSystemIndexAccess(state, WATCHER_INDEX_REQUEST) ); } }