Skip to content

Commit

Permalink
Merge branch 'main' into fix-operator-privilege-IT
Browse files Browse the repository at this point in the history
  • Loading branch information
n1v0lg authored Nov 26, 2024
2 parents 8a60067 + ed33bea commit 6f36b31
Show file tree
Hide file tree
Showing 103 changed files with 1,440 additions and 397 deletions.
15 changes: 14 additions & 1 deletion .buildkite/scripts/dra-workflow.sh
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,7 @@ find "$WORKSPACE" -type d -path "*/build/distributions" -exec chmod a+w {} \;

echo --- Running release-manager

set +e
# Artifacts should be generated
docker run --rm \
--name release-manager \
Expand All @@ -91,4 +92,16 @@ docker run --rm \
--version "$ES_VERSION" \
--artifact-set main \
--dependency "beats:https://artifacts-${WORKFLOW}.elastic.co/beats/${BEATS_BUILD_ID}/manifest-${ES_VERSION}${VERSION_SUFFIX}.json" \
--dependency "ml-cpp:https://artifacts-${WORKFLOW}.elastic.co/ml-cpp/${ML_CPP_BUILD_ID}/manifest-${ES_VERSION}${VERSION_SUFFIX}.json"
--dependency "ml-cpp:https://artifacts-${WORKFLOW}.elastic.co/ml-cpp/${ML_CPP_BUILD_ID}/manifest-${ES_VERSION}${VERSION_SUFFIX}.json" \
2>&1 | tee release-manager.log
EXIT_CODE=$?
set -e

# The missing-UBI-artifact failure is just generating a ton of noise right now, so ignore it.
# This workaround should be removed once the underlying missing-artifact issue has been fixed.
if grep "elasticsearch-ubi-9.0.0-SNAPSHOT-docker-image.tar.gz" release-manager.log; then
echo "Ignoring error about missing ubi artifact"
exit 0
fi

exit "$EXIT_CODE"
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ public enum DockerBase {
// Chainguard based wolfi image with latest jdk
// This is usually updated via renovatebot
// spotless:off
WOLFI("docker.elastic.co/wolfi/chainguard-base:latest@sha256:973431347ad45f40e01afbbd010bf9de929c088a63382239b90dd84f39618bc8",
WOLFI("docker.elastic.co/wolfi/chainguard-base:latest@sha256:55b297da5151d2a2997e8ab9729fe1304e4869389d7090ab7031cc29530f69f8",
"-wolfi",
"apk"
),
Expand Down
5 changes: 5 additions & 0 deletions docs/changelog/117404.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
pr: 117404
summary: Correct bit * byte and bit * float script comparisons
area: Vector Search
type: bug
issues: []
2 changes: 1 addition & 1 deletion docs/reference/data-streams/tsds-reindex.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -202,7 +202,7 @@ POST /_component_template/destination_template
POST /_index_template/2
{
"index_patterns": [
"k8s*"
"k9s*"
],
"composed_of": [
"destination_template"
Expand Down
16 changes: 8 additions & 8 deletions docs/reference/esql/functions/spatial-functions.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -8,19 +8,19 @@
{esql} supports these spatial functions:

// tag::spatial_list[]
* experimental:[] <<esql-st_intersects>>
* experimental:[] <<esql-st_disjoint>>
* experimental:[] <<esql-st_contains>>
* experimental:[] <<esql-st_within>>
* experimental:[] <<esql-st_x>>
* experimental:[] <<esql-st_y>>
* experimental:[] <<esql-st_distance>>
* <<esql-st_distance>>
* <<esql-st_intersects>>
* <<esql-st_disjoint>>
* <<esql-st_contains>>
* <<esql-st_within>>
* <<esql-st_x>>
* <<esql-st_y>>
// end::spatial_list[]

include::layout/st_distance.asciidoc[]
include::layout/st_intersects.asciidoc[]
include::layout/st_disjoint.asciidoc[]
include::layout/st_contains.asciidoc[]
include::layout/st_within.asciidoc[]
include::layout/st_x.asciidoc[]
include::layout/st_y.asciidoc[]
include::layout/st_distance.asciidoc[]
10 changes: 5 additions & 5 deletions docs/reference/geospatial-analysis.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -38,11 +38,11 @@ Data is often messy and incomplete. <<ingest,Ingest pipelines>> lets you clean,

<<esql,ES|QL>> has support for <<esql-spatial-functions, Geospatial Search>> functions, enabling efficient index searching for documents that intersect with, are within, are contained by, or are disjoint from a query geometry. In addition, the `ST_DISTANCE` function calculates the distance between two points.

* experimental:[] <<esql-st_intersects>>
* experimental:[] <<esql-st_disjoint>>
* experimental:[] <<esql-st_contains>>
* experimental:[] <<esql-st_within>>
* experimental:[] <<esql-st_distance>>
* <<esql-st_intersects>>
* <<esql-st_disjoint>>
* <<esql-st_contains>>
* <<esql-st_within>>
* <<esql-st_distance>>

[discrete]
[[geospatial-aggregate]]
Expand Down
4 changes: 4 additions & 0 deletions docs/reference/vectors/vector-functions.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -336,6 +336,10 @@ When using `bit` vectors, not all the vector functions are available. The suppor
this is the sum of the bitwise AND of the two vectors. If providing `float[]` or `byte[]`, who has `dims` number of elements, as a query vector, the `dotProduct` is
the sum of the floating point values using the stored `bit` vector as a mask.

NOTE: When comparing `floats` and `bytes` with `bit` vectors, the `bit` vector is treated as a mask in big-endian order.
For example, if the `bit` vector is `10100001` (e.g. the single byte value `161`) and it is compared
with the array of values `[1, 2, 3, 4, 5, 6, 7, 8]`, the `dotProduct` will be `1 + 3 + 8 = 12`.

Here is an example of using dot-product with bit vectors.

[source,console]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,8 @@ public static long ipByteBinByte(byte[] q, byte[] d) {
/**
* Compute the inner product of two vectors, where the query vector is a byte vector and the document vector is a bit vector.
* This will return the sum of the query vector values using the document vector as a mask.
* When comparing the bits with the bytes, the comparison is done in "big endian" order. For example, if the byte vector
* is [1, 2, 3, 4, 5, 6, 7, 8] and the bit vector is [0b10000000], the inner product will be 1.
* @param q the query vector
* @param d the document vector
* @return the inner product of the two vectors
Expand All @@ -63,9 +65,9 @@ public static int ipByteBit(byte[] q, byte[] d) {
// now combine the two vectors, summing the byte dimensions where the bit in d is `1`
for (int i = 0; i < d.length; i++) {
byte mask = d[i];
for (int j = 0; j < Byte.SIZE; j++) {
for (int j = Byte.SIZE - 1; j >= 0; j--) {
if ((mask & (1 << j)) != 0) {
result += q[i * Byte.SIZE + j];
result += q[i * Byte.SIZE + Byte.SIZE - 1 - j];
}
}
}
Expand All @@ -75,6 +77,8 @@ public static int ipByteBit(byte[] q, byte[] d) {
/**
* Compute the inner product of two vectors, where the query vector is a float vector and the document vector is a bit vector.
* This will return the sum of the query vector values using the document vector as a mask.
* When comparing the bits with the floats, the comparison is done in "big endian" order. For example, if the float vector
* is [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0] and the bit vector is [0b10000000], the inner product will be 1.0.
* @param q the query vector
* @param d the document vector
* @return the inner product of the two vectors
Expand All @@ -86,9 +90,9 @@ public static float ipFloatBit(float[] q, byte[] d) {
float result = 0;
for (int i = 0; i < d.length; i++) {
byte mask = d[i];
for (int j = 0; j < Byte.SIZE; j++) {
for (int j = Byte.SIZE - 1; j >= 0; j--) {
if ((mask & (1 << j)) != 0) {
result += q[i * Byte.SIZE + j];
result += q[i * Byte.SIZE + Byte.SIZE - 1 - j];
}
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,22 @@ public class ESVectorUtilTests extends BaseVectorizationTests {
static final ESVectorizationProvider defaultedProvider = BaseVectorizationTests.defaultProvider();
static final ESVectorizationProvider defOrPanamaProvider = BaseVectorizationTests.maybePanamaProvider();

public void testIpByteBit() {
    // Two mask bytes: 0b01100010 selects dims 1, 2, 6; 0b10100111 selects dims 8, 10, 13, 14, 15
    // (bits are read big-endian, MSB first).
    byte[] query = new byte[16];
    random().nextBytes(query);
    byte[] mask = new byte[] { (byte) Integer.parseInt("01100010", 2), (byte) Integer.parseInt("10100111", 2) };
    int maskedSum = query[1] + query[2] + query[6] + query[8] + query[10] + query[13] + query[14] + query[15];
    assertEquals(maskedSum, ESVectorUtil.ipByteBit(query, mask));
}

public void testIpFloatBit() {
    // Two mask bytes: 0b01100010 selects dims 1, 2, 6; 0b10100111 selects dims 8, 10, 13, 14, 15
    // (bits are read big-endian, MSB first). Expected result is the sum of the selected floats.
    float[] q = new float[16];
    byte[] d = new byte[] { (byte) Integer.parseInt("01100010", 2), (byte) Integer.parseInt("10100111", 2) };
    // Fill the query vector with random values. The original code called random().nextFloat()
    // once and discarded the result, leaving q all zeros and making the assertion vacuous (0 == 0).
    for (int i = 0; i < q.length; i++) {
        q[i] = random().nextFloat();
    }
    float expected = q[1] + q[2] + q[6] + q[8] + q[10] + q[13] + q[14] + q[15];
    assertEquals(expected, ESVectorUtil.ipFloatBit(q, d), 1e-6);
}

public void testBitAndCount() {
    // Runs the shared bit-AND/popcount test cases against ESVectorUtil.andBitCountLong.
    testBasicBitAndImpl(ESVectorUtil::andBitCountLong);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@
public class InternalAdjacencyMatrix extends InternalMultiBucketAggregation<InternalAdjacencyMatrix, InternalAdjacencyMatrix.InternalBucket>
implements
AdjacencyMatrix {
public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucket implements AdjacencyMatrix.Bucket {
public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucketWritable implements AdjacencyMatrix.Bucket {

private final String key;
private final long docCount;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ public class InternalTimeSeries extends InternalMultiBucketAggregation<InternalT
/**
* A bucket associated with a specific time series (identified by its key)
*/
public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucket {
public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucketWritable {
protected long bucketOrd;
protected final BytesRef key;
// TODO: make computing docCount optional
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,8 @@ protected void registerParameters(ParameterChecker checker) throws IOException {
checker.registerConflictCheck(
"enabled",
timestampMapping(true, b -> b.startObject("@timestamp").field("type", "date").endObject()),
timestampMapping(false, b -> b.startObject("@timestamp").field("type", "date").endObject())
timestampMapping(false, b -> b.startObject("@timestamp").field("type", "date").endObject()),
dm -> {}
);
checker.registerUpdateCheck(
timestampMapping(false, b -> b.startObject("@timestamp").field("type", "date").endObject()),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ setup:
capabilities:
- method: POST
path: /_search
capabilities: [ multi_dense_vector_script_max_sim ]
capabilities: [ multi_dense_vector_script_max_sim_with_bugfix ]
test_runner_features: capabilities
reason: "Support for multi dense vector max-sim functions capability required"
- skip:
Expand Down Expand Up @@ -136,10 +136,10 @@ setup:
- match: {hits.total: 2}

- match: {hits.hits.0._id: "1"}
- close_to: {hits.hits.0._score: {value: 190, error: 0.01}}
- close_to: {hits.hits.0._score: {value: 220, error: 0.01}}

- match: {hits.hits.1._id: "3"}
- close_to: {hits.hits.1._score: {value: 125, error: 0.01}}
- close_to: {hits.hits.1._score: {value: 147, error: 0.01}}
---
"Test max-sim inv hamming scoring":
- skip:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -108,7 +108,7 @@ setup:
capabilities:
- method: POST
path: /_search
capabilities: [ byte_float_bit_dot_product ]
capabilities: [ byte_float_bit_dot_product_with_bugfix ]
reason: Capability required to run test
- do:
catch: bad_request
Expand Down Expand Up @@ -399,7 +399,7 @@ setup:
capabilities:
- method: POST
path: /_search
capabilities: [ byte_float_bit_dot_product ]
capabilities: [ byte_float_bit_dot_product_with_bugfix ]
test_runner_features: [capabilities, close_to]
reason: Capability required to run test
- do:
Expand All @@ -419,13 +419,13 @@ setup:
- match: { hits.total: 3 }

- match: {hits.hits.0._id: "2"}
- close_to: {hits.hits.0._score: {value: 35.999, error: 0.01}}
- close_to: {hits.hits.0._score: {value: 33.78, error: 0.01}}

- match: {hits.hits.1._id: "3"}
- close_to: {hits.hits.1._score:{value: 27.23, error: 0.01}}
- close_to: {hits.hits.1._score:{value: 22.579, error: 0.01}}

- match: {hits.hits.2._id: "1"}
- close_to: {hits.hits.2._score: {value: 16.57, error: 0.01}}
- close_to: {hits.hits.2._score: {value: 11.919, error: 0.01}}

- do:
headers:
Expand All @@ -444,20 +444,20 @@ setup:
- match: { hits.total: 3 }

- match: {hits.hits.0._id: "2"}
- close_to: {hits.hits.0._score: {value: 35.999, error: 0.01}}
- close_to: {hits.hits.0._score: {value: 33.78, error: 0.01}}

- match: {hits.hits.1._id: "3"}
- close_to: {hits.hits.1._score:{value: 27.23, error: 0.01}}
- close_to: {hits.hits.1._score:{value: 22.579, error: 0.01}}

- match: {hits.hits.2._id: "1"}
- close_to: {hits.hits.2._score: {value: 16.57, error: 0.01}}
- close_to: {hits.hits.2._score: {value: 11.919, error: 0.01}}
---
"Dot product with byte":
- requires:
capabilities:
- method: POST
path: /_search
capabilities: [ byte_float_bit_dot_product ]
capabilities: [ byte_float_bit_dot_product_with_bugfix ]
test_runner_features: capabilities
reason: Capability required to run test
- do:
Expand All @@ -476,14 +476,14 @@ setup:

- match: { hits.total: 3 }

- match: {hits.hits.0._id: "1"}
- match: {hits.hits.0._score: 248}
- match: {hits.hits.0._id: "3"}
- match: {hits.hits.0._score: 415}

- match: {hits.hits.1._id: "2"}
- match: {hits.hits.1._score: 136}
- match: {hits.hits.1._id: "1"}
- match: {hits.hits.1._score: 168}

- match: {hits.hits.2._id: "3"}
- match: {hits.hits.2._score: 20}
- match: {hits.hits.2._id: "2"}
- match: {hits.hits.2._score: 126}

- do:
headers:
Expand All @@ -501,11 +501,11 @@ setup:

- match: { hits.total: 3 }

- match: {hits.hits.0._id: "1"}
- match: {hits.hits.0._score: 248}
- match: {hits.hits.0._id: "3"}
- match: {hits.hits.0._score: 415}

- match: {hits.hits.1._id: "2"}
- match: {hits.hits.1._score: 136}
- match: {hits.hits.1._id: "1"}
- match: {hits.hits.1._score: 168}

- match: {hits.hits.2._id: "3"}
- match: {hits.hits.2._score: 20}
- match: {hits.hits.2._id: "2"}
- match: {hits.hits.2._score: 126}
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@
import io.netty.handler.timeout.ReadTimeoutException;
import io.netty.handler.timeout.ReadTimeoutHandler;
import io.netty.util.AttributeKey;
import io.netty.util.ResourceLeakDetector;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
Expand Down Expand Up @@ -410,6 +411,9 @@ protected Result beginEncode(HttpResponse httpResponse, String acceptEncoding) t
}
});
}
if (ResourceLeakDetector.isEnabled()) {
ch.pipeline().addLast(new Netty4LeakDetectionHandler());
}
ch.pipeline()
.addLast(
"pipelining",
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/

package org.elasticsearch.http.netty4;

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.handler.codec.http.HttpContent;
import io.netty.handler.codec.http.HttpRequest;

import org.elasticsearch.tasks.Task;

/**
 * Inbound channel handler that enriches buffer-leak reports with information taken from the
 * HTTP request being processed. This helps identify which handler is leaking buffers,
 * especially in integration tests that run with the paranoid leak detector, which samples all
 * buffers for leaks. Supplying an informative opaque-id in integration tests helps narrow down
 * the problem (for example, to a specific test name).
 */
public class Netty4LeakDetectionHandler extends ChannelInboundHandlerAdapter {

    // Description of the most recent request seen on this channel; attached to subsequent
    // content buffers so a leak report can identify the originating request.
    private String info;

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) {
        // Record the request description first; the content check below is deliberately a
        // separate `if` — NOTE(review): a single msg may implement both interfaces (e.g. a
        // full request that carries its body) — confirm against Netty's FullHttpRequest.
        if (msg instanceof HttpRequest request) {
            var opaqueId = request.headers().get(Task.X_OPAQUE_ID_HTTP_HEADER);
            info = "method: " + request.method() + "; uri: " + request.uri() + "; x-opaque-id: " + opaqueId;
        }
        if (msg instanceof HttpContent content) {
            // Attach the request description as a touch hint so it shows up in leak reports.
            content.touch(info);
        }
        ctx.fireChannelRead(msg);
    }
}
Loading

0 comments on commit 6f36b31

Please sign in to comment.