From a9d19fce29d06b7bbd9b7d816d3292f377dcc590 Mon Sep 17 00:00:00 2001
From: Benedikt Bongartz
Date: Fri, 2 Aug 2024 16:09:31 +0200
Subject: [PATCH] update vendor

Signed-off-by: Benedikt Bongartz
---
 go.mod | 161 +-
 go.sum | 354 +-
 pkg/.patched-proto/collector/README.md | 10 +
 .../collector/logs/v1/logs_service.proto | 79 +
 .../collector/logs/v1/logs_service_http.yaml | 9 +
 .../metrics/v1/metrics_service.proto | 79 +
 .../metrics/v1/metrics_service_http.yaml | 9 +
 .../v1experimental/profiles_service.proto | 78 +
 .../v1experimental/profiles_service_http.yaml | 9 +
 .../collector/trace/v1/trace_service.proto | 79 +
 .../trace/v1/trace_service_http.yaml | 9 +
 pkg/.patched-proto/common/v1/common.proto | 81 +
 pkg/.patched-proto/logs/v1/logs.proto | 211 +
 pkg/.patched-proto/metrics/v1/metrics.proto | 691 ++
 .../v1experimental/pprofextended.proto | 386 +
 .../profiles/v1experimental/profiles.proto | 191 +
 pkg/.patched-proto/resource/v1/resource.proto | 37 +
 pkg/.patched-proto/trace/v1/trace.proto | 355 +
 .../VividCortex/gohistogram/.gitignore | 2 -
 .../VividCortex/gohistogram/LICENSE | 19 -
 .../VividCortex/gohistogram/README.md | 80 -
 .../VividCortex/gohistogram/histogram.go | 23 -
 .../gohistogram/numerichistogram.go | 160 -
 .../gohistogram/weightedhistogram.go | 190 -
 vendor/github.com/go-kit/kit/LICENSE | 22 -
 .../github.com/go-kit/kit/metrics/README.md | 98 -
 vendor/github.com/go-kit/kit/metrics/doc.go | 97 -
 .../go-kit/kit/metrics/expvar/expvar.go | 94 -
 .../go-kit/kit/metrics/generic/generic.go | 247 -
 .../kit/metrics/internal/lv/labelvalues.go | 14 -
 .../go-kit/kit/metrics/internal/lv/space.go | 145 -
 .../github.com/go-kit/kit/metrics/metrics.go | 25 -
 vendor/github.com/go-kit/kit/metrics/timer.go | 36 -
 vendor/github.com/go-logr/logr/README.md | 1 +
 vendor/github.com/go-logr/logr/funcr/funcr.go | 169 +-
 .../gax-go/v2/.release-please-manifest.json | 2 +-
 .../googleapis/gax-go/v2/CHANGES.md | 7 +
 .../googleapis/gax-go/v2/internal/version.go | 2 +-
 .../github.com/gorilla/handlers/.editorconfig | 20 +
 vendor/github.com/gorilla/handlers/.gitignore | 2 +
 vendor/github.com/gorilla/handlers/LICENSE | 39 +-
 vendor/github.com/gorilla/handlers/Makefile | 34 +
 vendor/github.com/gorilla/handlers/README.md | 8 +-
 .../github.com/gorilla/handlers/canonical.go | 9 +-
 .../github.com/gorilla/handlers/compress.go | 8 +-
 vendor/github.com/gorilla/handlers/cors.go | 41 +-
 .../github.com/gorilla/handlers/handlers.go | 15 +-
 vendor/github.com/gorilla/handlers/logging.go | 32 +-
 .../gorilla/handlers/proxy_headers.go | 16 +-
 .../github.com/gorilla/handlers/recovery.go | 22 +-
 .../go-grpc-middleware/logging/common.go | 43 -
 .../go-grpc-middleware/logging/doc.go | 32 -
 .../logging/settable/doc.go | 16 -
 .../logging/settable/logsettable.go | 99 -
 .../logging/zap/client_interceptors.go | 64 -
 .../go-grpc-middleware/logging/zap/context.go | 21 -
 .../logging/zap/ctxzap/context.go | 88 -
 .../logging/zap/ctxzap/doc.go | 14 -
 .../go-grpc-middleware/logging/zap/doc.go | 75 -
 .../logging/zap/grpclogger.go | 141 -
 .../go-grpc-middleware/logging/zap/options.go | 217 -
 .../logging/zap/payload_interceptors.go | 150 -
 .../logging/zap/server_interceptors.go | 85 -
 .../go-grpc-middleware/retry/backoff.go | 44 -
 .../go-grpc-middleware/tags/context.go | 78 -
 .../go-grpc-middleware/tags/doc.go | 22 -
 .../go-grpc-middleware/tags/fieldextractor.go | 85 -
 .../go-grpc-middleware/tags/interceptors.go | 85 -
 .../go-grpc-middleware/tags/options.go | 44 -
 .../util/backoffutils/backoff.go | 28 -
 .../go-grpc-middleware/util/metautils/doc.go | 19 -
 .../util/metautils/nicemd.go | 126 -
 .../go-grpc-middleware/v2/COPYRIGHT | 2 +
 .../go-grpc-middleware/v2}/LICENSE | 5 +-
 .../v2/interceptors/retry/backoff.go | 55 +
 .../{ => v2/interceptors}/retry/doc.go | 14 +-
 .../{ => v2/interceptors}/retry/options.go | 79 +-
 .../{ => v2/interceptors}/retry/retry.go | 81 +-
 .../go-grpc-middleware/v2/metadata/doc.go | 19 +
 .../v2/metadata/metadata.go | 126 +
 .../v2/metadata/single_key.go | 21 +
 .../hashicorp/consul/api/.copywrite.hcl | 8 +
 .../github.com/hashicorp/consul/api/LICENSE | 303 +-
 vendor/github.com/hashicorp/consul/api/acl.go | 119 +
 .../github.com/hashicorp/consul/api/agent.go | 18 +
 .../hashicorp/consul/api/config_entry.go | 54 +-
 .../consul/api/config_entry_discoverychain.go | 29 +-
 .../consul/api/config_entry_exports.go | 4 +-
 .../consul/api/config_entry_gateways.go | 40 +
 .../consul/api/config_entry_rate_limit_ip.go | 8 +-
 .../consul/api/config_entry_routes.go | 40 +-
 .../consul/api/config_entry_status.go | 19 +
 .../hashicorp/consul/api/exported_services.go | 49 +
 .../hashicorp/consul/api/internal.go | 3 +
 .../hashicorp/consul/api/operator_raft.go | 7 +-
 .../agent/app/configmanager/grpc/manager.go | 2 +-
 .../buffered_read_transport.go | 8 +-
 .../agent/app/processors/thrift_processor.go | 2 +-
 .../cmd/agent/app/reporter/client_metrics.go | 2 +-
 .../jaeger/cmd/agent/app/reporter/flags.go | 2 +-
 .../cmd/agent/app/reporter/grpc/builder.go | 4 +-
 .../cmd/agent/app/reporter/grpc/flags.go | 12 +-
 .../cmd/agent/app/servers/tbuffered_server.go | 2 +-
 .../agent/app/servers/thriftudp/transport.go | 4 +-
 .../factory.go | 11 +-
 .../interface.go | 12 +-
 .../app/sanitizer/zipkin/span_sanitizer.go | 8 +-
 .../jaeger/cmd/internal/flags/.nocover | 2 -
 .../jaeger/cmd/internal/flags/admin.go | 6 +-
 .../jaeger/cmd/internal/flags/flags.go | 14 +-
 .../jaeger/cmd/internal/flags/service.go | 20 +-
 .../jaeger/internal/metrics/expvar/cache.go | 83 -
 .../jaeger/internal/metrics/expvar/factory.go | 134 -
 .../jaeger/internal/metrics/expvar/metrics.go | 82 -
 .../metrics/metricsbuilder/builder.go | 13 +-
 .../internal/metrics/prometheus/cache.go | 2 +-
 .../internal/metrics/prometheus/factory.go | 4 +-
 .../model/converter/json/from_domain.go | 8 +-
 .../converter/thrift/jaeger/from_domain.go | 4 +-
 .../converter/thrift/jaeger/to_domain.go | 4 +-
 .../converter/thrift/zipkin/to_domain.go | 24 +-
 .../jaeger/model/dependencies.go | 7 +-
 .../jaegertracing/jaeger/model/ids.go | 12 +-
 .../jaegertracing/jaeger/model/json/model.go | 6 +-
 .../jaegertracing/jaeger/model/keyvalue.go | 4 +-
 .../pkg/clientcfg/clientcfghttp/cfgmgr.go | 8 +-
 .../pkg/clientcfg/clientcfghttp/handler.go | 2 +-
 .../jaeger/pkg/config/tlscfg/options.go | 68 +-
 .../discovery/grpcresolver/grpc_resolver.go | 4 +-
 .../jaeger/pkg/fswatcher/fswatcher.go | 6 +-
 .../jaeger/pkg/gogocodec/codec.go | 6 +-
 .../jaeger/pkg/healthcheck/handler.go | 2 +-
 .../jaeger/pkg/metrics/factory.go | 10 +-
 .../jaeger/pkg/metrics/metrics.go | 6 +-
 .../jaeger/pkg/recoveryhandler/zap.go | 2 +-
 .../jaeger/pkg/tenancy/context.go | 43 -
 .../jaegertracing/jaeger/pkg/tenancy/flags.go | 54 -
 .../jaegertracing/jaeger/pkg/tenancy/grpc.go | 149 -
 .../jaegertracing/jaeger/pkg/tenancy/http.go | 65 -
 .../jaeger/pkg/tenancy/manager.go | 91 -
 .../jaeger/pkg/version/command.go | 2 +-
 .../jaegertracing/jaeger/plugin/README.md | 3 -
 .../jaeger/plugin/configurable.go | 33 -
 .../jaegertracing/jaeger/plugin/doc.go | 19 -
 .../jaeger/plugin/storage/grpc/README.md | 243 -
 .../plugin/storage/grpc/config/config.go | 230 -
 .../jaeger/plugin/storage/grpc/factory.go | 177 -
 .../jaeger/plugin/storage/grpc/grpc.go | 46 -
 .../jaeger/plugin/storage/grpc/options.go | 83 -
 .../plugin/storage/grpc/shared/archive.go | 8 +-
 .../plugin/storage/grpc/shared/grpc_client.go | 45 +-
 .../storage/grpc/shared/grpc_handler.go | 26 +-
 .../plugin/storage/grpc/shared/interface.go | 16 -
 .../plugin/storage/grpc/shared/plugin.go | 53 -
 .../jaegertracing/jaeger/storage/factory.go | 3 +-
 .../storage/spanstore/downsampling_writer.go | 2 +-
 .../klauspost/compress/flate/matchlen_amd64.s | 10 +-
 .../klauspost/compress/s2/decode_arm64.s | 2 +-
 .../github.com/klauspost/compress/s2/index.go | 10 +-
 vendor/github.com/klauspost/compress/s2/s2.go | 6 +-
 .../klauspost/compress/s2/writer.go | 26 +-
 .../klauspost/compress/zstd/dict.go | 31 +
 .../zstd/internal/xxhash/xxhash_arm64.s | 4 +-
 .../klauspost/compress/zstd/matchlen_amd64.s | 10 +-
 .../go-grpc-compression/internal/zstd/zstd.go | 146 +
 .../nonclobbering/zstd/zstd.go | 52 +
 .../exporter/kafkaexporter/README.md | 1 +
 .../exporter/kafkaexporter/factory.go | 6 +-
 .../exporter/kafkaexporter/kafka_exporter.go | 6 +-
 .../common/localhostgate/featuregate.go | 6 +-
 .../internal/coreinternal/parseutils/uri.go | 164 +
 .../internal/kafka/authentication.go | 18 +-
 .../receiver/jaegerreceiver/README.md | 11 +-
 .../receiver/jaegerreceiver/factory.go | 2 +-
 .../receiver/jaegerreceiver/trace_receiver.go | 4 +-
 .../receiver/kafkareceiver/README.md | 4 +-
 .../receiver/kafkareceiver/documentation.md | 71 +
 .../receiver/kafkareceiver/factory.go | 9 +-
 .../internal/metadata/generated_telemetry.go | 96 +-
 .../receiver/kafkareceiver/kafka_receiver.go | 137 +-
 .../receiver/kafkareceiver/metadata.yaml | 62 +-
 .../receiver/kafkareceiver/metrics.go | 107 -
 .../receiver/zipkinreceiver/README.md | 2 +-
 .../receiver/zipkinreceiver/factory.go | 4 +-
 .../receiver/zipkinreceiver/trace_receiver.go | 4 +-
 .../pelletier/go-toml/v2/.gitignore | 3 +-
 .../pelletier/go-toml/v2/CONTRIBUTING.md | 31 +-
 .../github.com/pelletier/go-toml/v2/README.md | 117 +-
 .../pelletier/go-toml/v2/SECURITY.md | 3 -
 vendor/github.com/pelletier/go-toml/v2/ci.sh | 22 +-
 .../go-toml/v2/internal/tracker/seen.go | 74 +-
 .../pelletier/go-toml/v2/marshaler.go | 39 +-
 .../pelletier/go-toml/v2/unmarshaler.go | 57 +-
 .../go-toml/v2/unstable/unmarshaler.go | 7 +
 .../prometheus/common/config/config.go | 4 +-
 .../prometheus/common/config/headers.go | 140 +
 .../prometheus/common/config/http_config.go | 724 +-
 .../prometheus/common/expfmt/encode.go | 3 +-
 .../bitbucket.org/ww/goautoneg/README.txt | 67 -
 .../bitbucket.org/ww/goautoneg/autoneg.go | 160 -
 .../prometheus/procfs/.golangci.yml | 7 +
 .../prometheus/procfs/Makefile.common | 2 +-
 .../shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go | 92 -
 .../shirou/gopsutil/v3/net/net_linux_111.go | 12 -
 .../shirou/gopsutil/v3/net/net_linux_116.go | 12 -
 .../shirou/gopsutil/{v3 => v4}/LICENSE | 0
 .../shirou/gopsutil/{v3 => v4}/common/env.go | 1 +
 .../shirou/gopsutil/{v3 => v4}/cpu/cpu.go | 3 +-
 .../shirou/gopsutil/{v3 => v4}/cpu/cpu_aix.go | 2 +-
 .../gopsutil/{v3 => v4}/cpu/cpu_aix_cgo.go | 2 +-
 .../shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go | 157 +
 .../gopsutil/{v3 => v4}/cpu/cpu_darwin.go | 2 +-
 .../gopsutil/{v3 => v4}/cpu/cpu_darwin_cgo.go | 2 +-
 .../{v3 => v4}/cpu/cpu_darwin_nocgo.go | 4 +-
 .../gopsutil/{v3 => v4}/cpu/cpu_dragonfly.go | 3 +-
 .../{v3 => v4}/cpu/cpu_dragonfly_amd64.go | 1 +
 .../gopsutil/{v3 => v4}/cpu/cpu_fallback.go | 4 +-
 .../gopsutil/{v3 => v4}/cpu/cpu_freebsd.go | 3 +-
 .../{v3 => v4}/cpu/cpu_freebsd_386.go | 1 +
 .../{v3 => v4}/cpu/cpu_freebsd_amd64.go | 1 +
 .../{v3 => v4}/cpu/cpu_freebsd_arm.go | 1 +
 .../{v3 => v4}/cpu/cpu_freebsd_arm64.go | 1 +
 .../gopsutil/{v3 => v4}/cpu/cpu_linux.go | 4 +-
 .../gopsutil/{v3 => v4}/cpu/cpu_netbsd.go | 4 +-
 .../{v3 => v4}/cpu/cpu_netbsd_amd64.go | 1 +
 .../{v3 => v4}/cpu/cpu_netbsd_arm64.go | 1 +
 .../gopsutil/{v3 => v4}/cpu/cpu_openbsd.go | 4 +-
 .../{v3 => v4}/cpu/cpu_openbsd_386.go | 1 +
 .../{v3 => v4}/cpu/cpu_openbsd_amd64.go | 1 +
 .../{v3 => v4}/cpu/cpu_openbsd_arm.go | 1 +
 .../{v3 => v4}/cpu/cpu_openbsd_arm64.go | 1 +
 .../{v3 => v4}/cpu/cpu_openbsd_riscv64.go | 1 +
 .../gopsutil/{v3 => v4}/cpu/cpu_plan9.go | 4 +-
 .../gopsutil/{v3 => v4}/cpu/cpu_solaris.go | 1 +
 .../gopsutil/{v3 => v4}/cpu/cpu_windows.go | 8 +-
 .../{v3 => v4}/internal/common/binary.go | 1 +
 .../{v3 => v4}/internal/common/common.go | 3 +-
 .../internal/common/common_darwin.go | 2 +-
 .../internal/common/common_freebsd.go | 2 +-
 .../internal/common/common_linux.go | 2 +-
 .../internal/common/common_netbsd.go | 2 +-
 .../internal/common/common_openbsd.go | 2 +-
 .../{v3 => v4}/internal/common/common_unix.go | 2 +-
 .../internal/common/common_windows.go | 2 +-
 .../{v3 => v4}/internal/common/endian.go | 1 +
 .../{v3 => v4}/internal/common/sleep.go | 1 +
 .../{v3 => v4}/internal/common/warnings.go | 1 +
 .../shirou/gopsutil/v4/mem/ex_linux.go | 40 +
 .../shirou/gopsutil/v4/mem/ex_windows.go | 39 +
 .../shirou/gopsutil/{v3 => v4}/mem/mem.go | 3 +-
 .../shirou/gopsutil/{v3 => v4}/mem/mem_aix.go | 2 +-
 .../gopsutil/{v3 => v4}/mem/mem_aix_cgo.go | 2 +-
 .../gopsutil/{v3 => v4}/mem/mem_aix_nocgo.go | 12 +-
 .../shirou/gopsutil/{v3 => v4}/mem/mem_bsd.go | 2 +-
 .../gopsutil/{v3 => v4}/mem/mem_darwin.go | 4 +-
 .../gopsutil/{v3 => v4}/mem/mem_darwin_cgo.go | 2 +-
 .../{v3 => v4}/mem/mem_darwin_nocgo.go | 2 +-
 .../gopsutil/{v3 => v4}/mem/mem_fallback.go | 4 +-
 .../gopsutil/{v3 => v4}/mem/mem_freebsd.go | 4 +-
 .../gopsutil/{v3 => v4}/mem/mem_linux.go | 36 +-
 .../gopsutil/{v3 => v4}/mem/mem_netbsd.go | 2 +-
 .../gopsutil/{v3 => v4}/mem/mem_openbsd.go | 4 +-
 .../{v3 => v4}/mem/mem_openbsd_386.go | 2 +-
 .../{v3 => v4}/mem/mem_openbsd_amd64.go | 1 +
 .../{v3 => v4}/mem/mem_openbsd_arm.go | 2 +-
 .../{v3 => v4}/mem/mem_openbsd_arm64.go | 2 +-
 .../{v3 => v4}/mem/mem_openbsd_riscv64.go | 2 +-
 .../gopsutil/{v3 => v4}/mem/mem_plan9.go | 4 +-
 .../gopsutil/{v3 => v4}/mem/mem_solaris.go | 4 +-
 .../gopsutil/{v3 => v4}/mem/mem_windows.go | 4 +-
 .../shirou/gopsutil/{v3 => v4}/net/net.go | 3 +-
 .../shirou/gopsutil/{v3 => v4}/net/net_aix.go | 4 +-
 .../gopsutil/{v3 => v4}/net/net_aix_cgo.go | 2 +-
 .../gopsutil/{v3 => v4}/net/net_aix_nocgo.go | 4 +-
 .../gopsutil/{v3 => v4}/net/net_darwin.go | 4 +-
 .../gopsutil/{v3 => v4}/net/net_fallback.go | 4 +-
 .../gopsutil/{v3 => v4}/net/net_freebsd.go | 4 +-
 .../gopsutil/{v3 => v4}/net/net_linux.go | 6 +-
 .../gopsutil/{v3 => v4}/net/net_openbsd.go | 7 +-
 .../gopsutil/{v3 => v4}/net/net_solaris.go | 4 +-
 .../gopsutil/{v3 => v4}/net/net_unix.go | 4 +-
 .../gopsutil/{v3 => v4}/net/net_windows.go | 4 +-
 .../gopsutil/{v3 => v4}/process/process.go | 33 +-
 .../{v3 => v4}/process/process_bsd.go | 6 +-
 .../{v3 => v4}/process/process_darwin.go | 20 +-
 .../process/process_darwin_amd64.go | 1 +
 .../process/process_darwin_arm64.go | 2 +-
 .../{v3 => v4}/process/process_darwin_cgo.go | 4 +-
 .../process/process_darwin_nocgo.go | 13 +-
 .../{v3 => v4}/process/process_fallback.go | 14 +-
 .../{v3 => v4}/process/process_freebsd.go | 26 +-
 .../{v3 => v4}/process/process_freebsd_386.go | 1 +
 .../process/process_freebsd_amd64.go | 1 +
 .../{v3 => v4}/process/process_freebsd_arm.go | 1 +
 .../process/process_freebsd_arm64.go | 1 +
 .../{v3 => v4}/process/process_linux.go | 40 +-
 .../{v3 => v4}/process/process_openbsd.go | 35 +-
 .../{v3 => v4}/process/process_openbsd_386.go | 3 +-
 .../process/process_openbsd_amd64.go | 2 +
 .../{v3 => v4}/process/process_openbsd_arm.go | 3 +-
 .../process/process_openbsd_arm64.go | 3 +-
 .../process/process_openbsd_riscv64.go | 3 +-
 .../{v3 => v4}/process/process_plan9.go | 14 +-
 .../{v3 => v4}/process/process_posix.go | 4 +-
 .../{v3 => v4}/process/process_solaris.go | 13 +-
 .../{v3 => v4}/process/process_windows.go | 14 +-
 .../process/process_windows_32bit.go | 4 +-
 .../process/process_windows_64bit.go | 4 +-
 vendor/github.com/spf13/cobra/.golangci.yml | 21 +-
 vendor/github.com/spf13/cobra/active_help.go | 13 +-
 vendor/github.com/spf13/cobra/args.go | 4 +-
 .../spf13/cobra/bash_completions.go | 25 +-
 vendor/github.com/spf13/cobra/cobra.go | 2 -
 vendor/github.com/spf13/cobra/command.go | 51 +-
 vendor/github.com/spf13/cobra/completions.go | 62 +-
 vendor/github.com/spf13/cobra/flag_groups.go | 34 +-
 .../spf13/cobra/powershell_completions.go | 4 +-
 vendor/github.com/spf13/viper/README.md | 8 +-
 .../github.com/spf13/viper/TROUBLESHOOTING.md | 4 +-
 vendor/github.com/spf13/viper/flake.lock | 62 +-
 vendor/github.com/spf13/viper/flake.nix | 1 +
 vendor/github.com/spf13/viper/viper.go | 8 +-
 .../go.etcd.io/etcd/api/v3/version/version.go | 2 +-
 .../etcd/client/pkg/v3/fileutil/purge.go | 33 +-
 .../collector/component/component.go | 10 +-
 .../component/componenttest/obsreporttest.go | 12 +-
 .../componenttest/otelprometheuschecker.go | 18 +-
 .../collector/component/config.go | 10 +-
 .../collector/component/host.go | 2 +-
 .../collector/component/telemetry.go | 7 +-
 .../collector/config/configauth/configauth.go | 18 +-
 .../collector/config/configgrpc/configgrpc.go | 16 +-
 .../config/configgrpc/internal/zstd.go | 83 -
 .../collector/config/confighttp/README.md | 6 +
 .../config/confighttp/compression.go | 106 +-
 .../collector/config/confighttp/confighttp.go | 38 +-
 .../collector/config/configtls/README.md | 1 +
 .../collector/confmap/README.md | 57 +
 .../collector/confmap/confmap.go | 4 +-
 .../confmap/converter/expandconverter/LICENSE | 202 -
 .../converter/expandconverter/Makefile | 1 -
 .../converter/expandconverter/expand.go | 114 -
 .../collector/confmap/expand.go | 43 +-
 .../confmap/internal/envvar/pattern.go | 10 -
 .../collector/confmap/internal/featuregate.go | 14 +
 .../confmap/internal/mapstructure/encoder.go | 33 +
 .../collector/confmap/provider.go | 55 +-
 .../confmap/provider/envprovider/Makefile | 1 -
 .../confmap/provider/envprovider/provider.go | 66 -
 .../confmap/provider/fileprovider/LICENSE | 202 -
 .../confmap/provider/fileprovider/Makefile | 1 -
 .../confmap/provider/fileprovider/provider.go | 64 -
 .../confmap/provider/httpprovider/LICENSE | 202 -
 .../confmap/provider/httpprovider/Makefile | 1 -
 .../confmap/provider/httpprovider/README.md | 13 -
 .../confmap/provider/httpprovider/provider.go | 22 -
 .../confmap/provider/httpsprovider/LICENSE | 202 -
 .../confmap/provider/httpsprovider/Makefile | 1 -
 .../confmap/provider/httpsprovider/README.md | 18 -
 .../provider/httpsprovider/provider.go | 23 -
 .../configurablehttpprovider/provider.go | 121 -
 .../confmap/provider/internal/provider.go | 21 -
 .../confmap/provider/yamlprovider/LICENSE | 202 -
 .../confmap/provider/yamlprovider/Makefile | 1 -
 .../confmap/provider/yamlprovider/provider.go | 50 -
 .../collector/connector/connector.go | 79 +-
 .../collector/exporter/exporter.go | 33 +-
 .../exporter/exporterhelper/batch_sender.go | 67 +-
 .../exporter/exporterhelper/common.go | 10 +-
 .../exporter/exporterhelper/documentation.md | 16 +
 .../internal/metadata/generated_telemetry.go | 70 +-
 .../collector/exporter/exporterhelper/logs.go | 4 +-
 .../exporter/exporterhelper/metadata.yaml | 18 +
 .../exporter/exporterhelper/metrics.go | 4 +-
 .../exporter/exporterhelper/obsexporter.go | 15 +-
 .../exporter/exporterhelper/queue_sender.go | 73 +-
 .../exporter/exporterhelper/retry_sender.go | 2 +-
 .../exporter/exporterhelper/traces.go | 4 +-
 .../collector/exporter/exporterqueue/queue.go | 2 +-
 .../exporter/exportertest/contract_checker.go | 12 +-
 .../exporter/exportertest/nop_exporter.go | 17 +-
 .../internal/queue/persistent_queue.go | 2 +-
 .../collector/exporter/otlpexporter/README.md | 3 +-
 .../collector/exporter/otlpexporter/config.go | 6 +-
 .../exporter/otlpexporter/factory.go | 6 +-
 .../exporter/otlpexporter/metadata.yaml | 2 +-
 .../collector/exporter/otlpexporter/otlp.go | 6 +-
 .../collector/extension/extension.go | 15 +-
 .../internal/featuregates/featuregates.go | 11 +
 .../internal/localhostgate/featuregate.go | 6 +-
 .../collector/otelcol/command.go | 25 +-
 .../collector/otelcol/configprovider.go | 22 -
 .../v1experimental/profiles_service.pb.go | 845 ++
 .../v1experimental/pprofextended.pb.go | 4919 +++++++++
 .../profiles/v1experimental/profiles.pb.go | 1482 +++
 .../pdata/internal/wrapper_profiles.go | 46 +
 .../collector/processor/processor.go | 31 +-
 .../processorhelper/documentation.md | 24 +
 .../internal/metadata/generated_telemetry.go | 49 +-
 .../processor/processorhelper/logs.go | 2 +-
 .../processor/processorhelper/metadata.yaml | 26 +-
 .../processor/processorhelper/metrics.go | 2 +-
 .../processor/processorhelper/obsreport.go | 47 +-
 .../processor/processorhelper/traces.go | 2 +-
 .../collector/receiver/otlpreceiver/README.md | 3 +-
 .../receiver/otlpreceiver/factory.go | 6 +-
 .../receiver/otlpreceiver/metadata.yaml | 2 +-
 .../collector/receiver/otlpreceiver/otlp.go | 4 +-
 .../collector/receiver/receiver.go | 31 +-
 .../internal/metadata/generated_telemetry.go | 22 +-
 .../receiver/receiverhelper/obsreport.go | 5 +-
 .../receiver/receivertest/contract_checker.go | 6 +-
 .../receiver/receivertest/nop_receiver.go | 17 +-
 .../collector/semconv/v1.25.0/doc.go | 9 +
 .../v1.25.0/generated_attribute_group.go | 4796 +++++++++
 .../semconv/v1.25.0/generated_event.go | 105 +
 .../semconv/v1.25.0/generated_resource.go | 242 +
 .../semconv/v1.25.0/generated_trace.go | 245 +
 .../collector/semconv/v1.25.0/schema.go | 9 +
 .../collector/service/documentation.md | 55 +
 .../service/extensions/extensions.go | 2 +-
 .../collector/service/internal/graph/graph.go | 18 +-
 .../collector/service/internal/graph/nodes.go | 8 +-
 .../internal/metadata/generated_telemetry.go | 179 +
 .../service/internal/proctelemetry/config.go | 3 +-
 .../proctelemetry/process_telemetry.go | 98 +-
 .../servicetelemetry/telemetry_settings.go | 2 +-
 .../service/internal/status/status.go | 20 +-
 .../collector/service/metadata.yaml | 68 +
 .../collector/service/service.go | 46 +-
 .../service/telemetry/internal/factory.go | 16 +-
 .../collector/service/telemetry/telemetry.go | 2 +-
 .../google.golang.org/grpc/otelgrpc/config.go | 2 +-
 .../grpc/otelgrpc/interceptor.go | 5 +-
 .../grpc/otelgrpc/stats_handler.go | 13 +-
 .../grpc/otelgrpc/version.go | 2 +-
 .../net/http/otelhttp/client.go | 2 +-
 .../net/http/otelhttp/config.go | 2 +-
 .../net/http/otelhttp/handler.go | 17 +-
 .../net/http/otelhttp/internal/semconv/env.go | 69 +-
 .../http/otelhttp/internal/semconv/util.go | 42 +
 .../http/otelhttp/internal/semconv/v1.20.0.go | 7 +-
 .../http/otelhttp/internal/semconv/v1.24.0.go | 197 +
 .../otelhttp/internal/semconvutil/netconv.go | 4 +-
 .../net/http/otelhttp/labeler.go | 8 +-
 .../net/http/otelhttp/transport.go | 20 +-
 .../net/http/otelhttp/version.go | 2 +-
 .../instrumentation/net/http/otelhttp/wrap.go | 10 +
 .../go.opentelemetry.io/otel/.codespellignore | 2 +
 vendor/go.opentelemetry.io/otel/.codespellrc | 2 +-
 vendor/go.opentelemetry.io/otel/.gitmodules | 3 -
 vendor/go.opentelemetry.io/otel/.golangci.yml | 6 +
 vendor/go.opentelemetry.io/otel/CHANGELOG.md | 57 +-
 vendor/go.opentelemetry.io/otel/CODEOWNERS | 4 +-
 .../go.opentelemetry.io/otel/CONTRIBUTING.md | 8 +-
 vendor/go.opentelemetry.io/otel/Makefile | 19 +-
 .../otel/baggage/baggage.go | 6 +-
 .../otlptracegrpc/internal/retry/retry.go | 2 +-
 .../otlp/otlptrace/otlptracegrpc/options.go | 10 +-
 .../otlp/otlptrace/otlptracehttp/client.go | 5 +-
 .../otlptracehttp/internal/retry/retry.go | 2 +-
 .../otel/exporters/otlp/otlptrace/version.go | 2 +-
 .../otel/exporters/prometheus/exporter.go | 2 +-
 .../exporters/stdout/stdouttrace/config.go | 4 +-
 .../exporters/stdout/stdouttrace/trace.go | 5 +-
 .../otel/internal/attribute/attribute.go | 24 +-
 .../otel/internal/global/meter.go | 1 +
 .../otel/internal/global/trace.go | 6 +-
 vendor/go.opentelemetry.io/otel/metric/doc.go | 18 +
 .../go.opentelemetry.io/otel/metric/meter.go | 56 +
 .../go.opentelemetry.io/otel/requirements.txt | 2 +-
 .../otel/sdk/internal/gen.go | 18 -
 .../otel/sdk/internal/internal.go | 17 -
 .../otel/sdk/internal/x/README.md | 46 +
 .../otel/sdk/internal/x/x.go | 66 +
 .../otel/sdk/metric/config.go | 2 +-
 .../otel/sdk/metric/exemplar.go | 92 +-
 .../otel/sdk/metric/instrument.go | 13 +-
 .../metric/internal/aggregate/aggregate.go | 4 +-
 .../aggregate/exponential_histogram.go | 10 +-
 .../metric/internal/aggregate/histogram.go | 12 +-
 .../metric/internal/aggregate/lastvalue.go | 38 +-
 .../otel/sdk/metric/internal/aggregate/sum.go | 14 +-
 .../otel/sdk/metric/internal/exemplar/drop.go | 11 +-
 .../sdk/metric/internal/exemplar/filter.go | 28 +-
 .../internal/exemplar/filtered_reservoir.go | 49 +
 .../otel/sdk/metric/internal/exemplar/rand.go | 18 +-
 .../otel/sdk/metric/periodic_reader.go | 2 +-
 .../otel/sdk/metric/pipeline.go | 2 +-
 .../otel/sdk/metric/version.go | 2 +-
 .../otel/sdk/metric/view.go | 2 +-
 .../otel/sdk/resource/builtin.go | 23 +-
 .../otel/sdk/resource/container.go | 2 +-
 .../otel/sdk/resource/env.go | 2 +-
 .../otel/sdk/resource/host_id.go | 2 +-
 .../otel/sdk/resource/os.go | 2 +-
 .../otel/sdk/resource/process.go | 2 +-
 .../otel/sdk/resource/resource.go | 11 +-
 .../otel/sdk/trace/batch_span_processor.go | 2 +-
 .../otel/sdk/trace/evictedqueue.go | 36 +-
 .../otel/sdk/trace/id_generator.go | 21 +-
 .../otel/sdk/trace/provider.go | 2 +-
 .../otel/sdk/trace/span.go | 69 +-
 .../otel/sdk/trace/tracer.go | 4 +-
 .../go.opentelemetry.io/otel/sdk/version.go | 2 +-
 .../otel/semconv/v1.24.0/README.md | 3 +
 .../otel/semconv/v1.24.0/attribute_group.go | 4387 ++++++++
 .../otel/semconv/v1.24.0/doc.go | 9 +
 .../otel/semconv/v1.24.0/event.go | 200 +
 .../otel/semconv/v1.24.0/exception.go | 9 +
 .../otel/semconv/v1.24.0/metric.go | 1071 ++
 .../otel/semconv/v1.24.0/resource.go | 2545 +++++
 .../otel/semconv/v1.24.0/schema.go | 9 +
 .../otel/semconv/v1.24.0/trace.go | 1323 +++
 .../otel/semconv/v1.26.0/README.md | 3 +
 .../otel/semconv/v1.26.0/attribute_group.go | 8996 +++++++++++++++++
 .../otel/semconv/v1.26.0/doc.go | 9 +
 .../otel/semconv/v1.26.0/exception.go | 9 +
 .../otel/semconv/v1.26.0/metric.go | 1307 +++
 .../otel/semconv/v1.26.0/schema.go | 9 +
 .../otel/trace/noop/noop.go | 4 +-
 vendor/go.opentelemetry.io/otel/version.go | 2 +-
 vendor/go.opentelemetry.io/otel/versions.yaml | 6 +-
 vendor/golang.org/x/crypto/bcrypt/bcrypt.go | 2 +-
 vendor/golang.org/x/crypto/blowfish/cipher.go | 2 +-
 .../chacha20poly1305/chacha20poly1305.go | 2 +-
 .../x/crypto/cryptobyte/asn1/asn1.go | 2 +-
 .../golang.org/x/crypto/cryptobyte/string.go | 2 +-
 vendor/golang.org/x/crypto/hkdf/hkdf.go | 2 +-
 vendor/golang.org/x/crypto/md4/md4.go | 2 +-
 vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go | 2 +-
 vendor/golang.org/x/net/http2/transport.go | 4 -
 vendor/golang.org/x/oauth2/google/google.go | 5 +-
 vendor/golang.org/x/sys/unix/mremap.go | 5 +
 .../golang.org/x/sys/unix/syscall_darwin.go | 12 +
 vendor/golang.org/x/sys/unix/syscall_unix.go | 9 +
 .../x/sys/unix/zsyscall_darwin_amd64.go | 33 +
 .../x/sys/unix/zsyscall_darwin_amd64.s | 10 +
 .../x/sys/unix/zsyscall_darwin_arm64.go | 33 +
 .../x/sys/unix/zsyscall_darwin_arm64.s | 10 +
 .../x/sys/windows/security_windows.go | 24 +-
 .../golang.org/x/sys/windows/svc/service.go | 5 +-
 .../x/sys/windows/zsyscall_windows.go | 9 +
 .../iamcredentials/v1/iamcredentials-api.json | 4 +-
 .../api/internal/settings.go | 9 +-
 .../google.golang.org/api/internal/version.go | 2 +-
 .../api/storage/v1/storage-api.json | 12 +-
 .../api/storage/v1/storage-gen.go | 27 +-
 vendor/google.golang.org/grpc/README.md | 2 +-
 .../grpclb/grpc_lb_v1/load_balancer.pb.go | 2 +-
 .../grpc_lb_v1/load_balancer_grpc.pb.go | 2 +-
 .../grpc/balancer/grpclb/grpclb_config.go | 4 +-
 .../grpc/balancer/grpclb/grpclb_picker.go | 6 +-
 .../{ => balancer/pickfirst}/pickfirst.go | 32 +-
 .../grpc/balancer/roundrobin/roundrobin.go | 4 +-
 .../grpc/balancer_wrapper.go | 4 +
 .../grpc_binarylog_v1/binarylog.pb.go | 2 +-
 vendor/google.golang.org/grpc/clientconn.go | 66 +-
 .../internal/proto/grpc_gcp/altscontext.pb.go | 2 +-
 .../internal/proto/grpc_gcp/handshaker.pb.go | 2 +-
 .../proto/grpc_gcp/handshaker_grpc.pb.go | 2 +-
 .../grpc_gcp/transport_security_common.pb.go | 2 +-
 .../google.golang.org/grpc/credentials/tls.go | 34 +-
 vendor/google.golang.org/grpc/dialoptions.go | 46 +
 .../grpc/health/grpc_health_v1/health.pb.go | 2 +-
 .../health/grpc_health_v1/health_grpc.pb.go | 10 +-
 .../grpc/internal/backoff/backoff.go | 4 +-
 .../grpc/internal/envconfig/envconfig.go | 6 +
 .../grpc/internal/grpcrand/grpcrand.go | 100 -
 .../grpc/internal/grpcrand/grpcrand_go1.21.go | 73 -
 .../grpc/internal/internal.go | 37 +-
 .../internal/resolver/dns/dns_resolver.go | 14 +-
 .../resolver/dns/internal/internal.go | 13 +-
 .../grpc/internal/transport/http2_server.go | 4 +-
 .../grpc/metadata/metadata.go | 15 -
 .../google.golang.org/grpc/picker_wrapper.go | 81 +-
 .../grpc_reflection_v1/reflection.pb.go | 2 +-
 .../grpc_reflection_v1/reflection_grpc.pb.go | 2 +-
 .../grpc_reflection_v1alpha/reflection.pb.go | 2 +-
 .../reflection_grpc.pb.go | 2 +-
 .../grpc/resolver_wrapper.go | 2 +-
 .../google.golang.org/grpc/service_config.go | 24 +-
 vendor/google.golang.org/grpc/stream.go | 4 +-
 vendor/google.golang.org/grpc/version.go | 2 +-
 .../protobuf/encoding/protojson/decode.go | 4 +-
 .../protobuf/encoding/prototext/decode.go | 4 +-
 .../protobuf/internal/encoding/json/decode.go | 2 +-
 .../protobuf/internal/encoding/text/decode.go | 2 +-
 .../protobuf/internal/errors/errors.go | 6 +-
 .../protobuf/internal/filedesc/desc.go | 4 +
 .../protobuf/internal/filedesc/desc_init.go | 2 +-
 .../protobuf/internal/filedesc/desc_lazy.go | 5 +
 .../protobuf/internal/filetype/build.go | 4 +-
 .../protobuf/internal/genid/descriptor_gen.go | 3 +
 .../protobuf/internal/impl/api_export.go | 6 +-
 .../protobuf/internal/impl/checkinit.go | 2 +-
 .../protobuf/internal/impl/codec_extension.go | 22 +
 .../internal/impl/codec_messageset.go | 22 +
 .../protobuf/internal/impl/convert.go | 2 +-
 .../protobuf/internal/impl/convert_list.go | 2 +-
 .../protobuf/internal/impl/convert_map.go | 2 +-
 .../protobuf/internal/impl/encode.go | 48 +-
 .../protobuf/internal/impl/extension.go | 8 +-
 .../protobuf/internal/impl/legacy_enum.go | 2 +-
 .../protobuf/internal/impl/legacy_message.go | 4 +-
 .../protobuf/internal/impl/message.go | 8 +-
 .../protobuf/internal/impl/message_reflect.go | 14 +-
 .../internal/impl/message_reflect_gen.go | 4 +-
 .../protobuf/internal/impl/pointer_reflect.go | 6 +-
 .../protobuf/internal/impl/pointer_unsafe.go | 4 +-
 .../protobuf/internal/order/range.go | 4 +-
 .../protobuf/internal/version/version.go | 2 +-
 .../protobuf/proto/extension.go | 6 +-
 .../reflect/protodesc/desc_resolve.go | 5 +
 .../reflect/protodesc/desc_validate.go | 12 -
 .../reflect/protoreflect/source_gen.go | 2 +
 .../protobuf/reflect/protoreflect/type.go | 6 +-
 .../reflect/protoreflect/value_pure.go | 14 +-
 .../reflect/protoreflect/value_union.go | 14 +-
 .../protoreflect/value_unsafe_go120.go | 6 +-
 .../protoreflect/value_unsafe_go121.go | 8 +-
 .../reflect/protoregistry/registry.go | 14 +-
 .../types/descriptorpb/descriptor.pb.go | 995 +-
 .../types/gofeaturespb/go_features.pb.go | 44 +-
 .../protobuf/types/known/anypb/any.pb.go | 4 +-
 .../types/known/durationpb/duration.pb.go | 4 +-
 .../protobuf/types/known/emptypb/empty.pb.go | 4 +-
 .../types/known/fieldmaskpb/field_mask.pb.go | 4 +-
 .../types/known/structpb/struct.pb.go | 50 +-
 .../types/known/timestamppb/timestamp.pb.go | 4 +-
 .../types/known/wrapperspb/wrappers.pb.go | 20 +-
 vendor/modules.txt | 245 +-
 632 files changed, 42259 insertions(+), 10355 deletions(-)

create mode 100644 pkg/.patched-proto/collector/README.md
create mode 100644 pkg/.patched-proto/collector/logs/v1/logs_service.proto
create mode 100644 pkg/.patched-proto/collector/logs/v1/logs_service_http.yaml
create mode 100644 pkg/.patched-proto/collector/metrics/v1/metrics_service.proto
create mode 100644 pkg/.patched-proto/collector/metrics/v1/metrics_service_http.yaml
create mode 100644 pkg/.patched-proto/collector/profiles/v1experimental/profiles_service.proto
create mode 100644 pkg/.patched-proto/collector/profiles/v1experimental/profiles_service_http.yaml
create mode 100644 pkg/.patched-proto/collector/trace/v1/trace_service.proto
create mode 100644 pkg/.patched-proto/collector/trace/v1/trace_service_http.yaml
create mode 100644 pkg/.patched-proto/common/v1/common.proto
create mode 100644 pkg/.patched-proto/logs/v1/logs.proto
create mode 100644 pkg/.patched-proto/metrics/v1/metrics.proto
create mode 100644 pkg/.patched-proto/profiles/v1experimental/pprofextended.proto
create mode 100644 pkg/.patched-proto/profiles/v1experimental/profiles.proto
create mode 100644 pkg/.patched-proto/resource/v1/resource.proto
create mode 100644 pkg/.patched-proto/trace/v1/trace.proto
delete mode 100644 vendor/github.com/VividCortex/gohistogram/.gitignore
delete mode 100644 vendor/github.com/VividCortex/gohistogram/LICENSE
delete mode 100644 vendor/github.com/VividCortex/gohistogram/README.md
delete mode 100644 vendor/github.com/VividCortex/gohistogram/histogram.go
delete mode 100644 vendor/github.com/VividCortex/gohistogram/numerichistogram.go
delete mode 100644 vendor/github.com/VividCortex/gohistogram/weightedhistogram.go
delete mode 100644 vendor/github.com/go-kit/kit/LICENSE
delete mode 100644 vendor/github.com/go-kit/kit/metrics/README.md
delete mode 100644 vendor/github.com/go-kit/kit/metrics/doc.go
delete mode 100644 vendor/github.com/go-kit/kit/metrics/expvar/expvar.go
delete mode 100644 vendor/github.com/go-kit/kit/metrics/generic/generic.go
delete mode 100644 vendor/github.com/go-kit/kit/metrics/internal/lv/labelvalues.go
delete mode 100644 vendor/github.com/go-kit/kit/metrics/internal/lv/space.go
delete mode 100644 vendor/github.com/go-kit/kit/metrics/metrics.go
delete mode 100644 vendor/github.com/go-kit/kit/metrics/timer.go
create mode 100644 vendor/github.com/gorilla/handlers/.editorconfig
create mode 100644 vendor/github.com/gorilla/handlers/.gitignore
create mode 100644 vendor/github.com/gorilla/handlers/Makefile
delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/common.go
delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/doc.go
delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/settable/doc.go
delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/settable/logsettable.go
delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/client_interceptors.go
delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/context.go
delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap/context.go
delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap/doc.go
delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/doc.go
delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/grpclogger.go
delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/options.go
delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/payload_interceptors.go
delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/server_interceptors.go
delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go
delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/context.go
delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/doc.go
delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/fieldextractor.go
delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/interceptors.go
delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/options.go
delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go
delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go
delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go
create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/COPYRIGHT
rename vendor/{go.opentelemetry.io/collector/confmap/provider/envprovider => github.com/grpc-ecosystem/go-grpc-middleware/v2}/LICENSE (99%)
create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/backoff.go
rename vendor/github.com/grpc-ecosystem/go-grpc-middleware/{ => v2/interceptors}/retry/doc.go (68%)
rename vendor/github.com/grpc-ecosystem/go-grpc-middleware/{ => v2/interceptors}/retry/options.go (66%)
rename vendor/github.com/grpc-ecosystem/go-grpc-middleware/{ => v2/interceptors}/retry/retry.go (80%)
create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/doc.go
create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/metadata.go
create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/single_key.go
create mode 100644 vendor/github.com/hashicorp/consul/api/.copywrite.hcl
create mode 100644 vendor/github.com/hashicorp/consul/api/exported_services.go
rename vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/{strategystore => samplingstrategy}/factory.go (80%)
rename vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/{strategystore => samplingstrategy}/interface.go (80%)
delete mode 100644 vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/.nocover
delete mode 100644 vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/cache.go
delete mode 100644 vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/factory.go
delete mode 100644 vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/metrics.go
delete mode 100644 vendor/github.com/jaegertracing/jaeger/pkg/tenancy/context.go
delete mode 100644 vendor/github.com/jaegertracing/jaeger/pkg/tenancy/flags.go
delete mode 100644 vendor/github.com/jaegertracing/jaeger/pkg/tenancy/grpc.go
delete mode 100644 vendor/github.com/jaegertracing/jaeger/pkg/tenancy/http.go
delete mode 100644 vendor/github.com/jaegertracing/jaeger/pkg/tenancy/manager.go
delete mode 100644 vendor/github.com/jaegertracing/jaeger/plugin/README.md
delete mode 100644 vendor/github.com/jaegertracing/jaeger/plugin/configurable.go
delete mode 100644 vendor/github.com/jaegertracing/jaeger/plugin/doc.go
delete mode 100644 vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/README.md
delete mode 100644 vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/config/config.go
delete mode 100644 vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/factory.go
delete mode 100644 vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/grpc.go
delete mode 100644 vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/options.go
delete mode 100644 vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/plugin.go
create mode 100644 vendor/github.com/mostynb/go-grpc-compression/internal/zstd/zstd.go
create mode 100644 vendor/github.com/mostynb/go-grpc-compression/nonclobbering/zstd/zstd.go
create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils/uri.go
create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/documentation.md
delete mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/metrics.go
create mode 100644 vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go
create mode 100644 vendor/github.com/prometheus/common/config/headers.go
delete mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
delete mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
delete mode 100644 vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go
delete mode 100644 vendor/github.com/shirou/gopsutil/v3/net/net_linux_111.go
delete mode 100644 vendor/github.com/shirou/gopsutil/v3/net/net_linux_116.go
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/LICENSE (100%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/common/env.go (94%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_aix.go (85%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_aix_cgo.go (96%)
create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_darwin.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_darwin_cgo.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_darwin_nocgo.go (71%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_dragonfly.go (97%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_dragonfly_amd64.go (71%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_fallback.go (83%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_freebsd.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_freebsd_386.go (71%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_freebsd_amd64.go (71%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_freebsd_arm.go (71%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_freebsd_arm64.go (71%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_linux.go (99%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_netbsd.go (96%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_netbsd_amd64.go (71%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_netbsd_arm64.go (71%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_openbsd.go (96%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_openbsd_386.go (73%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_openbsd_amd64.go (73%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_openbsd_arm.go (73%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_openbsd_arm64.go (73%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_openbsd_riscv64.go (73%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_plan9.go (91%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_solaris.go (99%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_windows.go (97%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/binary.go (99%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common.go (99%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common_darwin.go (97%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common_freebsd.go (97%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common_linux.go (99%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common_netbsd.go (96%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common_openbsd.go (96%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common_unix.go (97%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common_windows.go (99%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/endian.go (88%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/sleep.go (89%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/warnings.go (92%)
create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go
create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/ex_windows.go
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem.go (97%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_aix.go (86%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_aix_cgo.go (97%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_aix_nocgo.go (83%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_bsd.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_darwin.go (94%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_darwin_cgo.go (97%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_darwin_nocgo.go (97%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_fallback.go (86%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_freebsd.go (97%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_linux.go (93%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_netbsd.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_openbsd.go (96%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_openbsd_386.go (93%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_openbsd_amd64.go (92%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_openbsd_arm.go (93%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_openbsd_arm64.go (93%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_openbsd_riscv64.go (94%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_plan9.go (95%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_solaris.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_windows.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_aix.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_aix_cgo.go (95%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_aix_nocgo.go (95%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_darwin.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_fallback.go (96%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_freebsd.go (97%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_linux.go (99%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_openbsd.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_solaris.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_unix.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_windows.go (99%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process.go (94%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_bsd.go (94%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_darwin.go (92%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_darwin_amd64.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_darwin_arm64.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_darwin_cgo.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_darwin_nocgo.go (92%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_fallback.go (93%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_freebsd.go (90%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_freebsd_386.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_freebsd_amd64.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_freebsd_arm.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_freebsd_arm64.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_linux.go (97%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_openbsd.go (90%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_openbsd_386.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_openbsd_amd64.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_openbsd_arm.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_openbsd_arm64.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_openbsd_riscv64.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_plan9.go (93%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_posix.go (97%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_solaris.go (95%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_windows.go (98%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_windows_32bit.go (97%)
rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_windows_64bit.go (95%)
delete mode 100644 vendor/go.opentelemetry.io/collector/config/configgrpc/internal/zstd.go
delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/LICENSE
delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/Makefile
delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/expand.go
delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/internal/envvar/pattern.go
create mode 100644 vendor/go.opentelemetry.io/collector/confmap/internal/featuregate.go
delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/envprovider/Makefile
delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/envprovider/provider.go
delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/LICENSE
delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/Makefile
delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/provider.go
delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/LICENSE
delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/Makefile
delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/README.md
delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/provider.go
delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/LICENSE
delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/Makefile
delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/README.md
delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/provider.go
delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/internal/configurablehttpprovider/provider.go
delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/internal/provider.go
delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/LICENSE
delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/Makefile
delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/provider.go
create mode 100644 vendor/go.opentelemetry.io/collector/internal/featuregates/featuregates.go
create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1experimental/profiles_service.pb.go
create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental/pprofextended.pb.go
create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental/profiles.pb.go
create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_profiles.go
create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.25.0/doc.go
create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_attribute_group.go
create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_event.go
create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_resource.go
create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_trace.go
create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.25.0/schema.go
create mode 100644 vendor/go.opentelemetry.io/collector/service/documentation.md
create mode 100644 vendor/go.opentelemetry.io/collector/service/internal/metadata/generated_telemetry.go
create mode 100644 vendor/go.opentelemetry.io/collector/service/metadata.yaml
create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go
delete mode 100644 vendor/go.opentelemetry.io/otel/.gitmodules
delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/gen.go
delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/internal.go
create mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md
create mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
rename vendor/google.golang.org/grpc/{ => balancer/pickfirst}/pickfirst.go (89%)
delete mode 100644 vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
delete mode 100644 vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go

diff --git a/go.mod b/go.mod
index 6f493b083f5..6fde9721ef7 100644
--- a/go.mod
+++ b/go.mod
@@ -30,54 +30,54 @@ require (
 	github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645
 	github.com/hashicorp/go-hclog v1.6.3
 	github.com/hashicorp/go-plugin v1.6.0
-	github.com/jaegertracing/jaeger v1.57.0
+	github.com/jaegertracing/jaeger v1.59.0
 	github.com/jedib0t/go-pretty/v6 v6.2.4
 	github.com/json-iterator/go v1.1.12
 	github.com/jsternberg/zap-logfmt v1.2.0
-	github.com/klauspost/compress v1.17.8
+	github.com/klauspost/compress v1.17.9
 	github.com/minio/minio-go/v7 v7.0.70
 	github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c
 	github.com/olekukonko/tablewriter v0.0.5
-	github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0
+	github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.104.0
 	github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e
 	github.com/opentracing/opentracing-go v1.2.0
 	github.com/pierrec/lz4/v4 v4.1.21
 	github.com/pkg/errors v0.9.1
 	github.com/prometheus/client_golang v1.19.1
 	github.com/prometheus/client_model v0.6.1
-	github.com/prometheus/common v0.53.0
+	github.com/prometheus/common v0.55.0
 	github.com/prometheus/prometheus v0.48.1
 	github.com/prometheus/statsd_exporter v0.26.0
 	github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e
 	github.com/sony/gobreaker v0.4.1
-	github.com/spf13/viper v1.18.2
+	github.com/spf13/viper v1.19.0
 	github.com/stretchr/testify v1.9.0
 	github.com/uber-go/atomic v1.4.0
 	github.com/uber/jaeger-client-go v2.30.0+incompatible
 	github.com/willf/bloom v2.0.3+incompatible
 	go.opencensus.io v0.24.0 // indirect
-	go.opentelemetry.io/collector v0.102.1
-	go.opentelemetry.io/collector/component v0.102.1
-	go.opentelemetry.io/collector/confmap v0.102.1
-	go.opentelemetry.io/collector/consumer v0.102.1
-	go.opentelemetry.io/collector/pdata v1.9.0
-	go.opentelemetry.io/collector/semconv v0.102.1 // indirect
-	go.opentelemetry.io/otel v1.27.0
+	go.opentelemetry.io/collector v0.104.0
+	go.opentelemetry.io/collector/component v0.104.0
+	go.opentelemetry.io/collector/confmap v0.104.0
+	go.opentelemetry.io/collector/consumer v0.104.0
+	go.opentelemetry.io/collector/pdata v1.11.0
+	go.opentelemetry.io/collector/semconv v0.104.0 // indirect
+	go.opentelemetry.io/otel v1.28.0
 	go.opentelemetry.io/otel/bridge/opencensus v1.27.0
 	go.opentelemetry.io/otel/bridge/opentracing v1.26.0
 	go.opentelemetry.io/otel/exporters/jaeger v1.17.0
-	go.opentelemetry.io/otel/metric v1.27.0
-	go.opentelemetry.io/otel/sdk v1.27.0
-	go.opentelemetry.io/otel/trace v1.27.0
+	go.opentelemetry.io/otel/metric v1.28.0
+	go.opentelemetry.io/otel/sdk v1.28.0
+	go.opentelemetry.io/otel/trace v1.28.0
 	go.uber.org/atomic v1.11.0
 	go.uber.org/goleak v1.3.0
 	go.uber.org/multierr v1.11.0
 	go.uber.org/zap v1.27.0
 	golang.org/x/sync v0.7.0
 	golang.org/x/time v0.5.0
-	google.golang.org/api v0.169.0
-	google.golang.org/grpc v1.64.0
-	google.golang.org/protobuf v1.34.1
+	google.golang.org/api v0.171.0
+	google.golang.org/grpc v1.65.0
+	google.golang.org/protobuf v1.34.2
 	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.1
 )
@@ -89,35 +89,35 @@ require (
 	github.com/brianvoe/gofakeit/v6 v6.25.0
 	github.com/evanphx/json-patch v5.6.0+incompatible
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
-	github.com/googleapis/gax-go/v2 v2.12.2
+	github.com/googleapis/gax-go/v2 v2.12.3
 	github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56
 	github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
 	github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.102.0
 	github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.97.0
 	github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.97.0
-	github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.102.0
-	github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.102.0
+	github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.104.0
+	github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.104.0
 	github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.102.0
-	github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.102.0
+	github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.104.0
 	github.com/parquet-go/parquet-go v0.20.2-0.20240416173845-962b3c5827c3
 	github.com/stoewer/parquet-cli v0.0.7
-	go.opentelemetry.io/collector/config/configgrpc v0.102.1
-	go.opentelemetry.io/collector/config/confighttp v0.102.1
-	go.opentelemetry.io/collector/config/configtls v0.102.1
-	go.opentelemetry.io/collector/exporter v0.102.1
-	go.opentelemetry.io/collector/exporter/otlpexporter v0.102.1
-	go.opentelemetry.io/collector/extension v0.102.1
-	go.opentelemetry.io/collector/otelcol v0.102.1
-	go.opentelemetry.io/collector/processor v0.102.1
-	go.opentelemetry.io/collector/receiver v0.102.1
-	go.opentelemetry.io/collector/receiver/otlpreceiver v0.102.1
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0
-	go.opentelemetry.io/proto/otlp v1.2.0
+	go.opentelemetry.io/collector/config/configgrpc v0.104.0
+	go.opentelemetry.io/collector/config/confighttp v0.104.0
+	go.opentelemetry.io/collector/config/configtls v0.104.0
+	go.opentelemetry.io/collector/exporter v0.104.0
+	go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0
+	go.opentelemetry.io/collector/extension v0.104.0
+	go.opentelemetry.io/collector/otelcol v0.104.0
+	go.opentelemetry.io/collector/processor v0.104.0
+	go.opentelemetry.io/collector/receiver v0.104.0
+	go.opentelemetry.io/collector/receiver/otlpreceiver v0.104.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0
+	go.opentelemetry.io/proto/otlp v1.3.1
 	golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
-	golang.org/x/net v0.26.0
-	golang.org/x/oauth2 v0.20.0
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5
+	golang.org/x/net v0.27.0
+	golang.org/x/oauth2 v0.21.0
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094
 )
 
 require (
@@ -127,7 +127,6 @@ require (
 	github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
 	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
 	github.com/IBM/sarama v1.43.2 // indirect
-	github.com/VividCortex/gohistogram v1.0.0 // indirect
 	github.com/alecthomas/participle/v2 v2.1.1 // indirect
 	github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
 	github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
@@ -153,8 +152,7 @@ require (
 	github.com/fatih/color v1.15.0 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/fsnotify/fsnotify v1.7.0 // indirect
-	github.com/go-kit/kit v0.13.0 // indirect
-	github.com/go-logr/logr v1.4.1 // indirect
+	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-openapi/analysis v0.21.4 // indirect @@ -174,11 +172,12 @@ require ( github.com/google/btree v1.1.2 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/gorilla/handlers v1.5.1 // indirect + github.com/gorilla/handlers v1.5.2 // indirect github.com/grafana/pyroscope-go/godeltaprof v0.1.6 // indirect github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db // indirect + github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect - github.com/hashicorp/consul/api v1.25.1 // indirect + github.com/hashicorp/consul/api v1.28.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect @@ -228,20 +227,20 @@ require ( github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/oklog/run v1.1.0 // indirect github.com/oklog/ulid v1.3.1 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.104.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.104.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.104.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.97.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.102.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.104.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.102.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.104.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.104.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.104.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.102.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.104.0 // indirect github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect github.com/openzipkin/zipkin-go v0.4.3 // indirect - github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/pires/go-proxyproto v0.7.0 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect @@ -249,7 +248,7 @@ require ( github.com/prometheus/alertmanager v0.26.0 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect 
github.com/prometheus/exporter-toolkit v0.10.1-0.20230714054209-2f4150c63f97 // indirect - github.com/prometheus/procfs v0.15.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/relvacode/iso8601 v1.4.0 // indirect github.com/rivo/uniseg v0.4.4 // indirect @@ -260,14 +259,14 @@ require ( github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/segmentio/encoding v0.3.6 // indirect github.com/sercand/kuberesolver/v5 v5.1.1 // indirect - github.com/shirou/gopsutil/v3 v3.24.4 // indirect + github.com/shirou/gopsutil/v4 v4.24.5 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/soheilhy/cmux v0.1.5 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect - github.com/spf13/cobra v1.8.0 // indirect + github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.6.0 // indirect @@ -280,46 +279,40 @@ require ( github.com/xdg-go/stringprep v1.0.4 // indirect github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect - go.etcd.io/etcd/api/v3 v3.5.10 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.10 // indirect - go.etcd.io/etcd/client/v3 v3.5.10 // indirect + go.etcd.io/etcd/api/v3 v3.5.12 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.12 // indirect + go.etcd.io/etcd/client/v3 v3.5.12 // indirect go.mongodb.org/mongo-driver v1.15.0 // indirect - go.opentelemetry.io/collector/config/configauth v0.102.1 // indirect - go.opentelemetry.io/collector/config/configcompression v1.9.0 // indirect - go.opentelemetry.io/collector/config/confignet v0.102.1 // indirect - go.opentelemetry.io/collector/config/configopaque v1.9.0 // indirect - go.opentelemetry.io/collector/config/configretry v0.102.1 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.102.1 // indirect - go.opentelemetry.io/collector/config/internal v0.102.1 // indirect - go.opentelemetry.io/collector/confmap/converter/expandconverter v0.102.1 // indirect - go.opentelemetry.io/collector/confmap/provider/envprovider v0.102.1 // indirect - go.opentelemetry.io/collector/confmap/provider/fileprovider v0.102.1 // indirect - go.opentelemetry.io/collector/confmap/provider/httpprovider v0.102.1 // indirect - go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.102.1 // indirect - go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.102.1 // indirect - go.opentelemetry.io/collector/connector v0.102.1 // indirect - go.opentelemetry.io/collector/extension/auth v0.102.1 // indirect - go.opentelemetry.io/collector/featuregate v1.9.0 // indirect - go.opentelemetry.io/collector/service v0.102.1 // indirect + go.opentelemetry.io/collector/config/configauth v0.104.0 // indirect + go.opentelemetry.io/collector/config/configcompression v1.11.0 // indirect + go.opentelemetry.io/collector/config/confignet v0.104.0 // indirect + go.opentelemetry.io/collector/config/configopaque v1.11.0 // indirect + go.opentelemetry.io/collector/config/configretry v1.11.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.104.0 // indirect + go.opentelemetry.io/collector/config/internal v0.104.0 // indirect + go.opentelemetry.io/collector/connector v0.104.0 // indirect + 
go.opentelemetry.io/collector/extension/auth v0.104.0 // indirect + go.opentelemetry.io/collector/featuregate v1.11.0 // indirect + go.opentelemetry.io/collector/service v0.104.0 // indirect go.opentelemetry.io/contrib/config v0.7.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.27.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/prometheus v0.49.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.50.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.27.0 // indirect - golang.org/x/crypto v0.24.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.28.0 // indirect + golang.org/x/crypto v0.25.0 // indirect golang.org/x/mod v0.17.0 // indirect - golang.org/x/sys v0.21.0 // indirect + golang.org/x/sys v0.22.0 // indirect golang.org/x/text v0.16.0 // indirect golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect gonum.org/v1/gonum v0.15.0 // indirect google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect gopkg.in/ini.v1 v1.67.0 // indirect k8s.io/api v0.28.3 // indirect k8s.io/apimachinery v0.28.3 // indirect diff --git a/go.sum b/go.sum index 064898ef484..a7a86049c74 100644 --- a/go.sum +++ b/go.sum @@ -69,8 +69,6 @@ github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/alecthomas/assert/v2 v2.3.0 h1:mAsH2wmvjsuvyBvAmCtm7zFsBlb8mIHx5ySLVdDZXL0= github.com/alecthomas/assert/v2 v2.3.0/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= github.com/alecthomas/kong v0.8.0 h1:ryDCzutfIqJPnNn0omnrgHLbAggDQM2VWHikE1xqK7s= @@ -169,14 +167,14 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= 
-github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc= -github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cristalhq/hedgedhttp v0.9.1 h1:g68L9cf8uUyQKQJwciD0A1Vgbsz+QgCjuB1I8FAsCDs= github.com/cristalhq/hedgedhttp v0.9.1/go.mod h1:XkqWU6qVMutbhW68NnzjWrGtH8NUx1UfYqGYtHVKIsI= @@ -236,7 +234,6 @@ github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYF github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= @@ -253,8 +250,6 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU= -github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= @@ -265,8 +260,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= @@ -445,12 +440,12 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA= -github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= +github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= +github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= github.com/gophercloud/gophercloud v1.7.0 h1:fyJGKh0LBvIZKLvBWvQdIgkaV5yTM3Jh9EYUh+UNCAs= github.com/gophercloud/gophercloud v1.7.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= -github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= -github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= +github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= @@ -471,6 +466,8 @@ github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db h1:7aN5cccjIqCLTzed github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwnKaMyD8uC+34TLdndZMAKk= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= @@ -478,11 +475,11 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737 github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ= -github.com/hashicorp/consul/api v1.25.1 h1:CqrdhYzc8XZuPnhIYZWH45toM0LB9ZeYr/gvpLVI3PE= -github.com/hashicorp/consul/api v1.25.1/go.mod h1:iiLVwR/htV7mas/sy0O+XSuEnrdBUUydemjxcUrAt4g= +github.com/hashicorp/consul/api v1.28.2 h1:mXfkRHrpHN4YY3RqL09nXU1eHKLNiuAN4kHvDQ16k/8= +github.com/hashicorp/consul/api v1.28.2/go.mod 
h1:KyzqzgMEya+IZPcD65YFoOVAgPpbfERu4I/tzG6/ueE= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= -github.com/hashicorp/consul/sdk v0.14.1 h1:ZiwE2bKb+zro68sWzZ1SgHF3kRMBZ94TwOCFRF4ylPs= -github.com/hashicorp/consul/sdk v0.14.1/go.mod h1:vFt03juSzocLRFo59NkeQHHmQa6+g7oU0pfzdI1mUhg= +github.com/hashicorp/consul/sdk v0.16.0 h1:SE9m0W6DEfgIVCJX7xU+iv/hUl4m/nxqMTnCdMxDpJ8= +github.com/hashicorp/consul/sdk v0.16.0/go.mod h1:7pxqqhqoaPqnBnzXD1StKed62LqJeClzVsUEy85Zr0A= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -565,8 +562,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/ionos-cloud/sdk-go/v6 v6.1.9 h1:Iq3VIXzeEbc8EbButuACgfLMiY5TPVWUPNrF+Vsddo4= github.com/ionos-cloud/sdk-go/v6 v6.1.9/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= -github.com/jaegertracing/jaeger v1.57.0 h1:3wDtUUPs6NRYH7+d+y8MilDkLHdpPrVlQ2wbcsA62bs= -github.com/jaegertracing/jaeger v1.57.0/go.mod h1:p/1fxIU9hKHl7qEhKC72p2ZYVhvvZvNB73y6V7YyuTs= +github.com/jaegertracing/jaeger v1.59.0 h1:p9/nJxdoCxq4NSgVN8P0aDqlGSfxFaggpNfLwhqQZRc= +github.com/jaegertracing/jaeger v1.59.0/go.mod h1:IZeUGtxNIYWGD3PVI4mqAn2IWVrfGdfswB8XK0mzZ0w= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= @@ -611,8 +608,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= -github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= @@ -639,7 +636,6 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/linode/linodego v1.23.0 h1:s0ReCZtuN9Z1IoUN9w1RLeYO1dMZUGPwOQ/IBFsBHtU= github.com/linode/linodego v1.23.0/go.mod h1:0U7wj/UQOqBNbKv1FYTXiBUXueR8DY4HvIotwE0ENgg= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= github.com/magiconair/properties 
v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= @@ -730,46 +726,46 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.102.0 h1:R70PpK14trQfL/Vj5oAiGRqX09s2gOWuf6t1Ae5fevQ= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.102.0/go.mod h1:xmy/yFFmB1Epy+czrYMbA+4xeOKvhFqNqYWU6qINeis= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.104.0 h1:xDA/V4cZ4LXj70la1uzvGaX/xfzeGIEq3txhF7W6TAg= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.104.0/go.mod h1:BxDMJl5xzSbCwcuVFs9jIrBwc1sflBVdihNJxcEKkOE= github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.102.0 h1:N3vWsp3xealy4AX8TovfHG5EKi/k7z+F/8LFP4SVAgo= github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.102.0/go.mod h1:/Ijok2yF1qYoHuRHvyLS04ZuW91Pue2VkqZ/nZxpkvk= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.102.0 h1:PNLVcz8kJLE9V5kGnbBh277Bvl4WwiVZ+NbFbOB80WY= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.102.0/go.mod h1:cBbjwd8m4rBVgCQksUbAVQX1EoM5IuCyNQw2mzvibEM= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0 h1:qsM5HhWpAfIMg8LdO4u+CHofu4UuCuJwg/M+ySO9uZA= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0/go.mod h1:wBJlGy9Wx6s7AxIMcSne2sGw73e5ZUy1AQ/duYwpFf8= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.104.0 h1:4ke4j/y7AQnRAyYveB+KGcdjVYEKVrwTxc3BDHagdd0= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.104.0/go.mod h1:I2zX9YBggIum9LAHXN1DqqbYOENrHXbXdkXouhwVCHw= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.104.0 h1:/koTWTWCFF7tBYkDX5UzCaEc/ceTU8jij/Yzuj0So3M= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.104.0/go.mod h1:KWVekIHTPScOrLKVYOiijxfEdGK5OBhD4EFNBh96ESg= github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.97.0 h1:f3HVDcjUVUbOpKWiebD9v8+9YdDdNvzPyKh3IVb0ORY= github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.97.0/go.mod h1:110wLws4lB2Jpv58rK7YoaMIhIEmLlzw5/viC0XJhbM= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.102.0 h1:xBd9EXG9qvWwa2d7qDRVv/D/2gAQqn1zGbPqdjkd+O8= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.102.0/go.mod h1:e4pc6nkNyzBi5g2RgIRjJ1slRsOY5qHIbPu0E4oM3cE= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.104.0 h1:BGKkg/wBfDeKSMqj8tMC6v03XtMuL9lNltfZiMViBAU= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.104.0/go.mod h1:SqzW5R0kOIs6HAk18+zH+Ej7CehtLVKtyBifT1wqAt0= github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.102.0 h1:/J1Q2tylp8ID+AIpCmfaArUyCPoSjY3nyZXdkpTw9J8= github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.102.0/go.mod h1:lbNQBpvs40lInohZrqAbRZ+8r29GzfMfkbLV4fBPrzE= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.102.0 
h1:pVJ792+Nzcv8nLlg18XOLOWEZ/dCK+Wo3Iak5TU8rz8= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.102.0/go.mod h1:DmkGhNL9nuSTg8fMhYNopMuF1Y3LFqu/FQHrvhBzME0= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.104.0 h1:pH5vUaC85j8DcgfgFXlQW3QRoED9DCAGJ5lGw4J/UvE= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.104.0/go.mod h1:BsappX8t6TNyk+M0UZ3xMwEtn2IIHy7hqXlThdBoKjI= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.97.0 h1:bVeo7BahYY4rWdaEuzJX2Tn20MbvYcEHXbFjV2IwnPQ= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.97.0/go.mod h1:lj29zRdEZdvbQvZ6g7Pb+tSPw4dQE/jahe39fRgO+08= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.102.0 h1:vJL6lDaeI3pVA7ADnWKD3HMpI80BSrZ2UnGc+qkwqoY= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.102.0/go.mod h1:xtE7tds5j8PtI/wMuGb+Em5K9rJH8hm6t28Qe4QrpoU= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.102.0 h1:TvJYcU/DLRFCgHr7nT98k5D+qkZ4syKVxc8OJjv+K4c= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.102.0/go.mod h1:WzD3Ox7tywAQHknxAFpAC1oZJGItMp5mbvgUGjvzNY8= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.102.0 h1:IgLMHSuraJzxLqVeM7xU7aZPcXS5/eoVnX+HBuFGQ6E= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.102.0/go.mod h1:hG8EmxUvgXIiKTG6+UVcMhFeIN6UD/bswP7WYpQ2lCc= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0 h1:4VQidhCgkJiBvBDMOukr5ixrf5uP66iW5Hb+CFsb+4E= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0/go.mod h1:nMto9zkv0vD8YI3oGZFZS2Uu7k2oHt1d+xUHN/ofUYo= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.104.0 h1:bgS1X1UnUuYfKXsQPq3U50jsMNpN5lI+BcDeklPJOW8= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.104.0/go.mod h1:tImy4FWNu1qpaXRVaNi2BU+TmZHtYgLO6LbB6mspZio= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.104.0 h1:Pl4rXXpRG/xJuNWUS3I/w1jViHcrssMf47bGX/Ug/KY= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.104.0/go.mod h1:tP4dyc5+g/qoXYb8lmNj+y+Nhphn4MkL23/np0Zhx2g= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.104.0 h1:V58XfkZBVuOcNL/+US/ZnG8mVy5AbVHV49mGDIgIMo8= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.104.0/go.mod h1:HpEqZW6TOsujFvvOSkcCFj7N2NEW4LP/Q6X7/ryfSnA= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.104.0 h1:3joQv7QiLvsVfrpqYAxHvq3bKUaUEpgg93fMLW+TU2w= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.104.0/go.mod h1:aUAgxXQQPiSajwMXQv4LobDTc16ezeF9S1Xh53yHbOg= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.102.0 h1:Mh5MHf0PrUQMTM2S8HwEuPt3Fyz0Xnt0IG7GUc6Fmbs= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.102.0/go.mod h1:6fc8qnIayeGwAF41LyLR+/FRbyJf4+FikbmaO0GGq/Y= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.102.0 h1:5M7I78lyGsH+Xyy4NoXKM/UUCa52aZQiPcSX6so6x94= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.102.0/go.mod h1:BEQy0zEel5uIOTEFBBmvQJ4A32R6nKLtSMtC6ylLI8k= 
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.104.0 h1:oIYYjBh2kNi7APSArA/9YCQzLwOlMdUhqMxUoMoTChY= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.104.0/go.mod h1:hRayDDoU1e1POXuDO7RpwcqIirFJoCdSgHgICY0hNNI= github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.97.0 h1:IfJ9EkykXHBYdwuvZd0qfFcCHAAOuTEaiIlTRw6R9g4= github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.97.0/go.mod h1:uTs2ukYAG9tCkoUhW39J9bNQAqwpqHhE85eeRVm6zCM= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.102.0 h1:HTGSfx2HzfudY1Uczw9yTBJnGBmTVFYzpGH1z+oD0nU= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.102.0/go.mod h1:Hlz24+Ah6Ojk0FUKNb1watRmTbLEru35+feroKA7dvQ= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.102.0 h1:2D3niNAKkr+NRVmAJW0bquSjzHUL6Pf1qQRLRPwA13M= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.102.0/go.mod h1:h0uqwH7b+NGDfFFWTjoGErMdYRdCqP1Az1/G+tfG024= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.104.0 h1:9HJ3ejNoiMFWxTRy9gobdurEocf79QlxwlYrOY9tMIQ= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.104.0/go.mod h1:Ax4DroNn/xKyjWoJCd3FQE9xOZqHSTdDEj1I3HLNOeQ= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.104.0 h1:sE+B+i3m9sMecnJZkljnUrykzkRkEFvXJWPckwbQOVc= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.104.0/go.mod h1:cyzp/19NsVmEz7mTS/LHf2m8e26MnlGK8x1kga3rX9I= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.102.0 h1:dBhFe/29ODIbxg4+JRaHwYAHMFFeh6/+izVtjceXwew= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.102.0/go.mod h1:WNFjuquVqyi+WEoa6L0J3DzPLRsP24ZlbZYwKv49VwY= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.102.0 h1:Pemo9pZa3VMYdrM/bss3f0qqVyBzPSulOBQL8VQcgN8= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.102.0/go.mod h1:fvjAM+jOQdiXCmAENKH/eWxBBqTaImbq3lpoBI4X5Ek= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.104.0 h1:U04Ezl3Keb1j6bcVktvgvAbbMEyPDkM5sNboQgPYI1w= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.104.0/go.mod h1:GbpsurP8UERCcHyIB/gUMKcAK3kIypKGE0O+aqbNa/c= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= @@ -791,8 +787,8 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= -github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= -github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod 
h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= @@ -810,7 +806,6 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/alertmanager v0.26.0 h1:uOMJWfIwJguc3NaM3appWNbbrh6G/OjvaHMk22aBBYc= @@ -834,8 +829,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= -github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/exporter-toolkit v0.10.1-0.20230714054209-2f4150c63f97 h1:oHcfzdJnM/SFppy2aUlvomk37GI33x9vgJULihE5Dt8= @@ -845,8 +840,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.15.0 h1:A82kmvXJq2jTu5YUhSGNlYoxh85zLnKgPz4bMZgI5Ek= -github.com/prometheus/procfs v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4VQSw2sQJLHk= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/prometheus v0.48.1 h1:CTszphSNTXkuCG6O0IfpKdHcJkvvnAAE1GbELKS+NFk= github.com/prometheus/prometheus v0.48.1/go.mod h1:SRw624aMAxTfryAcP8rOjg4S/sHHaetx2lyJJ2nM83g= github.com/prometheus/statsd_exporter v0.26.0 h1:SQl3M6suC6NWQYEzOvIv+EF6dAMYEqIuZy+o4H9F5Ig= @@ -888,8 +883,8 @@ github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e h1:uO75wNGioszj github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e/go.mod h1:tm/wZFQ8e24NYaBGIlnO2WGCAi67re4HHuOm0sftE/M= github.com/sercand/kuberesolver/v5 v5.1.1 h1:CYH+d67G0sGBj7q5wLK61yzqJJ8gLLC8aeprPTHb6yY= 
github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYMWZJ294T3BtmVCpQ= -github.com/shirou/gopsutil/v3 v3.24.4 h1:dEHgzZXt4LMNm+oYELpzl9YCqV65Yr/6SfrvgRBtXeU= -github.com/shirou/gopsutil/v3 v3.24.4/go.mod h1:lTd2mdiOspcqLgAnr9/nGi71NkeMpWKdmhuxm9GusH8= +github.com/shirou/gopsutil/v4 v4.24.5 h1:gGsArG5K6vmsh5hcFOHaPm87UD003CaDMkAOweSQjhM= +github.com/shirou/gopsutil/v4 v4.24.5/go.mod h1:aoebb2vxetJ/yIDZISmduFvVNPHqXQ9SEJwRXxkf0RA= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= @@ -913,13 +908,13 @@ github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNo github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= -github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/stoewer/parquet-cli v0.0.7 h1:rhdZODIbyMS3twr4OM3am8BPPT5pbfMcHLH93whDM5o= github.com/stoewer/parquet-cli v0.0.7/go.mod h1:bskxHdj8q3H1EmfuCqjViFoeO3NEvs5lzZAQvI8Nfjk= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -986,14 +981,14 @@ github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64/go.mod h1:GBR0iDaN github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= -go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k= -go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI= +go.etcd.io/etcd/api/v3 v3.5.12 h1:W4sw5ZoU2Juc9gBWuLk5U6fHfNVyY1WC5g9uiXZio/c= +go.etcd.io/etcd/api/v3 v3.5.12/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4= go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0= -go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U= +go.etcd.io/etcd/client/pkg/v3 v3.5.12 h1:EYDL6pWwyOsylrQyLp2w+HkQ46ATiOvoEdMarindU2A= +go.etcd.io/etcd/client/pkg/v3 v3.5.12/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4= go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= -go.etcd.io/etcd/client/v3 v3.5.10 
h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao= -go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc= +go.etcd.io/etcd/client/v3 v3.5.12 h1:v5lCPXn1pf1Uu3M4laUE2hp/geOTc5uPcYYsNe1lDxg= +go.etcd.io/etcd/client/v3 v3.5.12/go.mod h1:tSbBCakoWmmddL+BKVAJHa9km+O/E+bumDe9mSbPiqw= go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= @@ -1006,88 +1001,78 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector v0.102.1 h1:M/ciCcReQsSDYG9bJ2Qwqk7pQILDJ2bM/l0MdeCAvJE= -go.opentelemetry.io/collector v0.102.1/go.mod h1:yF1lDRgL/Eksb4/LUnkMjvLvHHpi6wqBVlzp+dACnPM= -go.opentelemetry.io/collector/component v0.102.1 h1:66z+LN5dVCXhvuVKD1b56/3cYLK+mtYSLIwlskYA9IQ= -go.opentelemetry.io/collector/component v0.102.1/go.mod h1:XfkiSeImKYaewT2DavA80l0VZ3JjvGndZ8ayPXfp8d0= -go.opentelemetry.io/collector/config/configauth v0.102.1 h1:LuzijaZulMu4xmAUG8WA00ZKDlampH+ERjxclb40Q9g= -go.opentelemetry.io/collector/config/configauth v0.102.1/go.mod h1:kTzfI5fnbMJpm2wycVtQeWxFAtb7ns4HksSb66NIhX8= -go.opentelemetry.io/collector/config/configcompression v1.9.0 h1:B2q6XMO6xiF2s+14XjqAQHGY5UefR+PtkZ0WAlmSqpU= -go.opentelemetry.io/collector/config/configcompression v1.9.0/go.mod h1:6+m0GKCv7JKzaumn7u80A2dLNCuYf5wdR87HWreoBO0= -go.opentelemetry.io/collector/config/configgrpc v0.102.1 h1:6Plnfx+xw/JH8k11MkljGoysPfn1u7hHbO2evteOTeE= -go.opentelemetry.io/collector/config/configgrpc v0.102.1/go.mod h1:Kk3XOSar3QTzGDS8N8M38DVlOzUD7STS2obczO9q43I= -go.opentelemetry.io/collector/config/confighttp v0.102.1 h1:tPw1Xf2PfDdrXoBKLY5Sd4Dh8FNm5i+6DKuky9XraIM= -go.opentelemetry.io/collector/config/confighttp v0.102.1/go.mod h1:k4qscfjxuaDQmcAzioxmPujui9VSgW6oal3WLxp9CzI= -go.opentelemetry.io/collector/config/confignet v0.102.1 h1:nSiAFQMzNCO4sDBztUxY73qFw4Vh0hVePq8+3wXUHtU= -go.opentelemetry.io/collector/config/confignet v0.102.1/go.mod h1:pfOrCTfSZEB6H2rKtx41/3RN4dKs+X2EKQbw3MGRh0E= -go.opentelemetry.io/collector/config/configopaque v1.9.0 h1:jocenLdK/rVG9UoGlnpiBxXLXgH5NhIXCrVSTyKVYuA= -go.opentelemetry.io/collector/config/configopaque v1.9.0/go.mod h1:8v1yaH4iYjcigbbyEaP/tzVXeFm4AaAsKBF9SBeqaG4= -go.opentelemetry.io/collector/config/configretry v0.102.1 h1:J5/tXBL8P7d7HT5dxsp2H+//SkwDXR66Z9UTgRgtAzk= -go.opentelemetry.io/collector/config/configretry v0.102.1/go.mod h1:P+RA0IA+QoxnDn4072uyeAk1RIoYiCbxYsjpKX5eFC4= -go.opentelemetry.io/collector/config/configtelemetry v0.102.1 h1:f/CYcrOkaHd+COIJ2lWnEgBCHfhEycpbow4ZhrGwAlA= -go.opentelemetry.io/collector/config/configtelemetry v0.102.1/go.mod h1:WxWKNVAQJg/Io1nA3xLgn/DWLE/W1QOB2+/Js3ACi40= -go.opentelemetry.io/collector/config/configtls v0.102.1 h1:7fr+PU9BRg0HRc1Pn3WmDW/4WBHRjuo7o1CdG2vQKoA= -go.opentelemetry.io/collector/config/configtls v0.102.1/go.mod h1:KHdrvo3cwosgDxclyiLWmtbovIwqvaIGeTXr3p5721A= -go.opentelemetry.io/collector/config/internal v0.102.1 h1:HFsFD3xpHUuNHb8/UTz5crJw1cMHzsJQf/86sgD44hw= -go.opentelemetry.io/collector/config/internal v0.102.1/go.mod h1:Vig3dfeJJnuRe1kBNpszBzPoj5eYnR51wXbeq36Zfpg= -go.opentelemetry.io/collector/confmap 
v0.102.1 h1:wZuH+d/P11Suz8wbp+xQCJ0BPE9m5pybtUe74c+rU7E= -go.opentelemetry.io/collector/confmap v0.102.1/go.mod h1:KgpS7UxH5rkd69CzAzlY2I1heH8Z7eNCZlHmwQBMxNg= -go.opentelemetry.io/collector/confmap/converter/expandconverter v0.102.1 h1:s0RxnaABoRxtfvUeimZ0OOsF83wD/EK1tR2N5GZyst0= -go.opentelemetry.io/collector/confmap/converter/expandconverter v0.102.1/go.mod h1:ZwSMlOSIzmrrSSVNoMPDr21SQx7E52bZFMQJSOZ+EhY= -go.opentelemetry.io/collector/confmap/provider/envprovider v0.102.1 h1:4KLw0pTChIqDfw0ckZ411aQDw98pu2dDOqgBHXfJm8M= -go.opentelemetry.io/collector/confmap/provider/envprovider v0.102.1/go.mod h1:f+IJBW0Sc96T79qj3GQtE1wQ0uWEwpslD785efKBl+c= -go.opentelemetry.io/collector/confmap/provider/fileprovider v0.102.1 h1:nPhOtUbJHfTDqZqtvU76HmEz9iV4O/4/DSCZdnm0mpY= -go.opentelemetry.io/collector/confmap/provider/fileprovider v0.102.1/go.mod h1:eJnr6YDQiocmoRBvsKj33bIc4wysq5hy/jmOApv1dSM= -go.opentelemetry.io/collector/confmap/provider/httpprovider v0.102.1 h1:VsaGXqEUFost0mf2svhds6loYzPavkyY37nMQcqoTkc= -go.opentelemetry.io/collector/confmap/provider/httpprovider v0.102.1/go.mod h1:lQocxKI32Zj1F3PR9UZfzykq50/mOI1mbyZ0729dphI= -go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.102.1 h1:rEhPTqkGAezaFxJ8y/BL5m4vKTK3ZSpn+VcVLKnZo7Q= -go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.102.1/go.mod h1:GxUZM23m3u4vURw/At2zEKW+5GwcuCNsHJNT/Wq/cFI= -go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.102.1 h1:qmdaBIz0UnUKVitZzq+4HtO9zvRTwgNc/Q3b7kyf1NQ= -go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.102.1/go.mod h1:nAckG/FkzAaPuwtEN2Na2+ij+2hdTjtXUtFBnlUqpFk= -go.opentelemetry.io/collector/connector v0.102.1 h1:7lEwXmhzqtyZwz2bBUHzwV/CZqA8bhPPVJOi0cm9+Fk= -go.opentelemetry.io/collector/connector v0.102.1/go.mod h1:DRlDYJXsFx1FKKxkdM2Ja52/xe+0bgmy0hA+wgKRUVI= -go.opentelemetry.io/collector/consumer v0.102.1 h1:0CkgHhxwx4lI/m+hWjh607xyjooW5CObZ8hFQy5vvo0= -go.opentelemetry.io/collector/consumer v0.102.1/go.mod h1:HoXqmrRV13jLnP3/Gg3fYNdRkDPoO7UW58hKiLyFF60= -go.opentelemetry.io/collector/exporter v0.102.1 h1:4VURYgBNJscxfMhZWitzcwA1cig5a6pH0xZSpdECDnM= -go.opentelemetry.io/collector/exporter v0.102.1/go.mod h1:1pmNxvrvvbWDW6PiGObICdj0eOSGV4Fzwpm5QA1GU54= -go.opentelemetry.io/collector/exporter/otlpexporter v0.102.1 h1:bOXE7u1iy0SKwH2mnVyIMKkvFIR9bn9iIm1Cf/CJlZU= -go.opentelemetry.io/collector/exporter/otlpexporter v0.102.1/go.mod h1:4ya6xaUYvcXq9MQW0TbsR4QWkOJI02d/2Vt8plwdozA= -go.opentelemetry.io/collector/extension v0.102.1 h1:gAvE3w15q+Vv0Tj100jzcDpeMTyc8dAiemHRtJbspLg= -go.opentelemetry.io/collector/extension v0.102.1/go.mod h1:XBxUOXjZpwYLZYOK5u3GWlbBTOKmzStY5eU1R/aXkIo= -go.opentelemetry.io/collector/extension/auth v0.102.1 h1:GP6oBmpFJjxuVruPb9X40bdf6PNu9779i8anxa+wW6U= -go.opentelemetry.io/collector/extension/auth v0.102.1/go.mod h1:U2JWz8AW1QXX2Ap3ofzo5Dn2fZU/Lglld97Vbh8BZS0= -go.opentelemetry.io/collector/extension/zpagesextension v0.102.1 h1:YV+ejCgOBJjACOi/l3ULeivOhh85FPE8T4UcFdWviyg= -go.opentelemetry.io/collector/extension/zpagesextension v0.102.1/go.mod h1:/CZXg9/C64k85/k4bc7NFbCNP/MiPUZucbxPUN04ny4= -go.opentelemetry.io/collector/featuregate v1.9.0 h1:mC4/HnR5cx/kkG1RKOQAvHxxg5Ktmd9gpFdttPEXQtA= -go.opentelemetry.io/collector/featuregate v1.9.0/go.mod h1:PsOINaGgTiFc+Tzu2K/X2jP+Ngmlp7YKGV1XrnBkH7U= -go.opentelemetry.io/collector/otelcol v0.102.1 h1:JdRG3ven+c5k703QpZG5bxJi4JJOnWaNP/EJvN+oYnI= -go.opentelemetry.io/collector/otelcol v0.102.1/go.mod h1:kHf9KBXOLZXajR1On8XJbBBGcgh2I2+/mVVroPzOLJU= -go.opentelemetry.io/collector/pdata v1.9.0 
h1:qyXe3HEVYYxerIYu0rzgo1Tx2d1Zs6iF+TCckbHLFOw= -go.opentelemetry.io/collector/pdata v1.9.0/go.mod h1:vk7LrfpyVpGZrRWcpjyy0DDZzL3SZiYMQxfap25551w= -go.opentelemetry.io/collector/pdata/testdata v0.102.1 h1:S3idZaJxy8M7mCC4PG4EegmtiSaOuh6wXWatKIui8xU= -go.opentelemetry.io/collector/pdata/testdata v0.102.1/go.mod h1:JEoSJTMgeTKyGxoMRy48RMYyhkA5vCCq/abJq9B6vXs= -go.opentelemetry.io/collector/processor v0.102.1 h1:79NWs7kTgmgxOIQacuZyDf+mYWuoJZS07SHwZT7sZ4Y= -go.opentelemetry.io/collector/processor v0.102.1/go.mod h1:sNM41tEHgv3YA/Dz9/6F8oCeObrqnKCGOMs7wS6Ldus= -go.opentelemetry.io/collector/receiver v0.102.1 h1:353t4U3o0RdU007JcQ4sRRzl72GHCJZwXDr8cCOcEbI= -go.opentelemetry.io/collector/receiver v0.102.1/go.mod h1:pYjMzUkvUlxJ8xt+VbI1to8HMtVlv8AW/K/2GQQOTB0= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.102.1 h1:65/8lkVmOu6gwBw99W+QUQBeDC2qVTwlaiqy7/SpauY= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.102.1/go.mod h1:0hmxfFSSqKJjRGvgYjp/XvptbAgLhLguwNgJqMp7zd0= -go.opentelemetry.io/collector/semconv v0.102.1 h1:zLhz2Gu//j7HHESFTGTrfKIaoS4r+lZFQDnGCOThggo= -go.opentelemetry.io/collector/semconv v0.102.1/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= -go.opentelemetry.io/collector/service v0.102.1 h1:Lg7qrC4Zctd/OAlkpdsaZaUY+jLEGLLnOigfBLP2GW8= -go.opentelemetry.io/collector/service v0.102.1/go.mod h1:L5Sh3461B1Zij7vpMMbi6M/SZicgrLB3UgbG0oUK0pA= +go.opentelemetry.io/collector v0.104.0 h1:R3zjM4O3K3+ttzsjPV75P80xalxRbwYTURlK0ys7uyo= +go.opentelemetry.io/collector v0.104.0/go.mod h1:Tm6F3na9ajnOm6I5goU9dURKxq1fSBK1yA94nvUix3k= +go.opentelemetry.io/collector/component v0.104.0 h1:jqu/X9rnv8ha0RNZ1a9+x7OU49KwSMsPbOuIEykHuQE= +go.opentelemetry.io/collector/component v0.104.0/go.mod h1:1C7C0hMVSbXyY1ycCmaMUAR9fVwpgyiNQqxXtEWhVpw= +go.opentelemetry.io/collector/config/configauth v0.104.0 h1:ULtjugImijpKuLgGVt0E0HwiZT7+uDUEtMquh1ODB24= +go.opentelemetry.io/collector/config/configauth v0.104.0/go.mod h1:Til+nLLrQwwhgmfcGTX4ZRcNuMhdaWhBW1jH9DLTabQ= +go.opentelemetry.io/collector/config/configcompression v1.11.0 h1:oTwbcLh7mWHSDUIZXkRJVdNAMoBGS39XF68goTMOQq8= +go.opentelemetry.io/collector/config/configcompression v1.11.0/go.mod h1:6+m0GKCv7JKzaumn7u80A2dLNCuYf5wdR87HWreoBO0= +go.opentelemetry.io/collector/config/configgrpc v0.104.0 h1:E3RtqryQPOm/trJmhlJZj6cCqJNKgv9fOEQvSEpzsFM= +go.opentelemetry.io/collector/config/configgrpc v0.104.0/go.mod h1:tu3ifnJ5pv+4rZcaqNWfvVLjNKb8icSPoClN3THN8PU= +go.opentelemetry.io/collector/config/confighttp v0.104.0 h1:KSY0FSHSjuPyrR6iA2g5oFTozYFpYcy0ssJny8gTNTQ= +go.opentelemetry.io/collector/config/confighttp v0.104.0/go.mod h1:YgSXwuMYHANzzv+IBjHXaBMG/4G2mrseIpICHj+LB3U= +go.opentelemetry.io/collector/config/confignet v0.104.0 h1:i7AOTJf4EQox3SEt1YtQFQR+BwXr3v5D9x3Ai9/ovy8= +go.opentelemetry.io/collector/config/confignet v0.104.0/go.mod h1:pfOrCTfSZEB6H2rKtx41/3RN4dKs+X2EKQbw3MGRh0E= +go.opentelemetry.io/collector/config/configopaque v1.11.0 h1:Pt06PXWVmRaiSX63mzwT8Z9SV/hOc6VHNZbfZ10YY4o= +go.opentelemetry.io/collector/config/configopaque v1.11.0/go.mod h1:0xURn2sOy5j4fbaocpEYfM97HPGsiffkkVudSPyTJlM= +go.opentelemetry.io/collector/config/configretry v1.11.0 h1:UdEDD0ThxPU7+n2EiKJxVTvDCGygXu9hTfT6LOQv9DY= +go.opentelemetry.io/collector/config/configretry v1.11.0/go.mod h1:P+RA0IA+QoxnDn4072uyeAk1RIoYiCbxYsjpKX5eFC4= +go.opentelemetry.io/collector/config/configtelemetry v0.104.0 h1:eHv98XIhapZA8MgTiipvi+FDOXoFhCYOwyKReOt+E4E= +go.opentelemetry.io/collector/config/configtelemetry v0.104.0/go.mod h1:WxWKNVAQJg/Io1nA3xLgn/DWLE/W1QOB2+/Js3ACi40= 
+go.opentelemetry.io/collector/config/configtls v0.104.0 h1:bMmLz2+r+REpO7cDOR+srOJHfitqTZfSZCffDpKfwWk= +go.opentelemetry.io/collector/config/configtls v0.104.0/go.mod h1:e33o7TWcKfe4ToLFyGISEPGMgp6ezf3yHRGY4gs9nKk= +go.opentelemetry.io/collector/config/internal v0.104.0 h1:h3OkxTfXWWrHRyPEGMpJb4fH+54puSBuzm6GQbuEZ2o= +go.opentelemetry.io/collector/config/internal v0.104.0/go.mod h1:KjH43jsAUFyZPeTOz7GrPORMQCK13wRMCyQpWk99gMo= +go.opentelemetry.io/collector/confmap v0.104.0 h1:d3yuwX+CHpoyCh0iMv3rqb/vwAekjSm4ZDL6UK1nZSA= +go.opentelemetry.io/collector/confmap v0.104.0/go.mod h1:F8Lue+tPPn2oldXcfqI75PPMJoyzgUsKVtM/uHZLA4w= +go.opentelemetry.io/collector/connector v0.104.0 h1:Y82ytwZZ+EruWafEebO0dgWMH+TdkcSONEqZ5bm9JYA= +go.opentelemetry.io/collector/connector v0.104.0/go.mod h1:78SEHel3B3taFnSBg/syW4OV9aU1Ec9KjgbgHf/L8JA= +go.opentelemetry.io/collector/consumer v0.104.0 h1:Z1ZjapFp5mUcbkGEL96ljpqLIUMhRgQQpYKkDRtxy+4= +go.opentelemetry.io/collector/consumer v0.104.0/go.mod h1:60zcIb0W9GW0z9uJCv6NmjpSbCfBOeRUyrtEwqK6Hzo= +go.opentelemetry.io/collector/exporter v0.104.0 h1:C2HmnfBa05IQ2T+p9T7K7gXVxjrBLd+JxEtAWo7JNbg= +go.opentelemetry.io/collector/exporter v0.104.0/go.mod h1:Rx0oB0E4Ccg1JuAnEWwhtrq1ygRBkfx4mco1DpR3WaQ= +go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 h1:EFOdhnc2yGhqou0Tud1HsM7fsgWo/H3tdQhYYytDprQ= +go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0/go.mod h1:fAF7Q3Xh0OkxYWUycdrNNDXkyz3nhHIRKDkez0aQ6zg= +go.opentelemetry.io/collector/extension v0.104.0 h1:bftkgFMKya/QIwK+bOxEAPVs/TvTez+s1mlaiUznJkA= +go.opentelemetry.io/collector/extension v0.104.0/go.mod h1:x7K0KyM1JGrtLbafEbRoVp0VpGBHpyx9hu87bsja6S4= +go.opentelemetry.io/collector/extension/auth v0.104.0 h1:SelhccGCrqLThPlkbv6lbAowHsjgOTAWcAPz085IEC4= +go.opentelemetry.io/collector/extension/auth v0.104.0/go.mod h1:s3/C7LTSfa91QK0JPMTRIvH/gCv+a4DGiiNeTAX9OhI= +go.opentelemetry.io/collector/extension/zpagesextension v0.104.0 h1:rJ9Sw6DR27s6bW7lWBjJhjth5CXpltAHBKIgUFgVwFs= +go.opentelemetry.io/collector/extension/zpagesextension v0.104.0/go.mod h1:85Exj8r237PIvaXL1a/S0KeVNnm3kQNpVXtu0O2Zk5k= +go.opentelemetry.io/collector/featuregate v1.11.0 h1:Z7puIymKoQRm3oNM/NH8reWc2zRPz2PNaJvuokh0lQY= +go.opentelemetry.io/collector/featuregate v1.11.0/go.mod h1:PsOINaGgTiFc+Tzu2K/X2jP+Ngmlp7YKGV1XrnBkH7U= +go.opentelemetry.io/collector/otelcol v0.104.0 h1:RnMx7RaSFmX4dq/l3wbXWwcUnFK7RU19AM/0FbMr0Ig= +go.opentelemetry.io/collector/otelcol v0.104.0/go.mod h1:hWFRiHIKT3zbUx6SRevusPRa6mfm+70bPG5CK0glqSU= +go.opentelemetry.io/collector/pdata v1.11.0 h1:rzYyV1zfTQQz1DI9hCiaKyyaczqawN75XO9mdXmR/hE= +go.opentelemetry.io/collector/pdata v1.11.0/go.mod h1:IHxHsp+Jq/xfjORQMDJjSH6jvedOSTOyu3nbxqhWSYE= +go.opentelemetry.io/collector/pdata/pprofile v0.104.0 h1:MYOIHvPlKEJbWLiBKFQWGD0xd2u22xGVLt4jPbdxP4Y= +go.opentelemetry.io/collector/pdata/pprofile v0.104.0/go.mod h1:7WpyHk2wJZRx70CGkBio8klrYTTXASbyIhf+rH4FKnA= +go.opentelemetry.io/collector/pdata/testdata v0.104.0 h1:BKTZ7hIyAX5DMPecrXkVB2e86HwWtJyOlXn/5vSVXNw= +go.opentelemetry.io/collector/pdata/testdata v0.104.0/go.mod h1:3SnYKu8gLfxURJMWS/cFEUFs+jEKS6jvfqKXnOZsdkQ= +go.opentelemetry.io/collector/processor v0.104.0 h1:KSvMDu4DWmK1/k2z2rOzMtTvAa00jnTabtPEK9WOSYI= +go.opentelemetry.io/collector/processor v0.104.0/go.mod h1:qU2/xCCYdvVORkN6aq0H/WUWkvo505VGYg2eOwPvaTg= +go.opentelemetry.io/collector/receiver v0.104.0 h1:URL1ExkYYd+qbndm7CdGvI2mxzsv/pNfmwJ+1QSQ9/o= +go.opentelemetry.io/collector/receiver v0.104.0/go.mod h1:+enTCZQLf6dRRANWvykXEzrlRw2JDppXJtoYWd/Dd54= 
+go.opentelemetry.io/collector/receiver/otlpreceiver v0.104.0 h1:t9cACuSc7kY09guws7VyB/z9QnG7/zWLC1NQ29WH4+o= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.104.0/go.mod h1:sPIIO4F6uit1i/XQgfe2WryvdO5Hr16bQgZTaXcR8mM= +go.opentelemetry.io/collector/semconv v0.104.0 h1:dUvajnh+AYJLEW/XOPk0T0BlwltSdi3vrjO7nSOos3k= +go.opentelemetry.io/collector/semconv v0.104.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= +go.opentelemetry.io/collector/service v0.104.0 h1:DTpkoX4C6qiA3v3cfB2cHv/cH705o5JI9J3P77SFUrE= +go.opentelemetry.io/collector/service v0.104.0/go.mod h1:eq68zgpqRDYaVp60NeRu973J0rA5vZJkezfw/EzxLXc= go.opentelemetry.io/contrib/config v0.7.0 h1:b1rK5tGTuhhPirJiMxOcyQfZs76j2VapY6ODn3b2Dbs= go.opentelemetry.io/contrib/config v0.7.0/go.mod h1:8tdiFd8N5etOi3XzBmAoMxplEzI3TcL8dU5rM5/xcOQ= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= go.opentelemetry.io/contrib/propagators/b3 v1.27.0 h1:IjgxbomVrV9za6bRi8fWCNXENs0co37SZedQilP2hm0= go.opentelemetry.io/contrib/propagators/b3 v1.27.0/go.mod h1:Dv9obQz25lCisDvvs4dy28UPh974CxkahRDUPsY7y9E= go.opentelemetry.io/contrib/zpages v0.52.0 h1:MPgkMy0Cp3O5EdfVXP0ss3ujhEibysTM4eszx7E7d+E= go.opentelemetry.io/contrib/zpages v0.52.0/go.mod h1:fqG5AFdoYru3A3DnhibVuaaEfQV2WKxE7fYE1jgDRwk= -go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= -go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= go.opentelemetry.io/otel/bridge/opencensus v1.27.0 h1:ao9aGGHd+G4YfjBpGs6vbkvt5hoC67STlJA9fCnOAcs= go.opentelemetry.io/otel/bridge/opencensus v1.27.0/go.mod h1:uRvWtAAXzyVOST0WMPX5JHGBaAvBws+2F8PcC5gMnTk= go.opentelemetry.io/otel/bridge/opentracing v1.26.0 h1:Q/dHj0DOhfLMAs5u5ucAbC7gy66x9xxsZRLpHCJ4XhI= @@ -1098,28 +1083,28 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 h1:bFg go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0/go.mod h1:xJntEd2KL6Qdg5lwp97HMLQDVeAhrYxmzFseAMDPQ8I= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 h1:CIHWikMsN3wO+wq1Tp5VGdVRTcON+DmOJSfDjXypKOc= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0/go.mod h1:TNupZ6cxqyFEpLXAZW7On+mLFL0/g0TE3unIYL91xWc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod 
h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= -go.opentelemetry.io/otel/exporters/prometheus v0.49.0 h1:Er5I1g/YhfYv9Affk9nJLfH/+qCCVVg1f2R9AbJfqDQ= -go.opentelemetry.io/otel/exporters/prometheus v0.49.0/go.mod h1:KfQ1wpjf3zsHjzP149P4LyAwWRupc6c7t1ZJ9eXpKQM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk= +go.opentelemetry.io/otel/exporters/prometheus v0.50.0 h1:2Ewsda6hejmbhGFyUvWZjUThC98Cf8Zy6g0zkIimOng= +go.opentelemetry.io/otel/exporters/prometheus v0.50.0/go.mod h1:pMm5PkUo5YwbLiuEf7t2xg4wbP0/eSJrMxIMxKosynY= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 h1:/jlt1Y8gXWiHG9FBx6cJaIC5hYx5Fe64nC8w5Cylt/0= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0/go.mod h1:bmToOGOBZ4hA9ghphIc1PAf66VA8KOtsuy3+ScStG20= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 h1:/0YaXu3755A/cFbtXp+21lkXgI0QE5avTWA2HjU9/WE= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0/go.mod h1:m7SFxp0/7IxmJPLIY3JhOcU9CoFzDaCPL6xxQIxhA+o= -go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= -go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= -go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= -go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= -go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= -go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= -go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= -go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= -go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= -go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.28.0 
h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/sdk/metric v1.28.0 h1:OkuaKgKrgAbYrrY0t92c+cC+2F6hsFNnCQArXCKlg08= +go.opentelemetry.io/otel/sdk/metric v1.28.0/go.mod h1:cWPjykihLAPvXKi4iZc1dpER3Jdq2Z0YLse3moQUCpg= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -1148,8 +1133,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1229,16 +1214,16 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= -golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.21.0 
h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1326,14 +1311,13 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1428,8 +1412,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.169.0 h1:QwWPy71FgMWqJN/l6jVlFHUa29a7dcUy02I8o799nPY= -google.golang.org/api v0.169.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg= +google.golang.org/api v0.171.0 h1:w174hnBPqut76FzW5Qaupt7zY8Kql6fiVjgys4f58sU= +google.golang.org/api v0.171.0/go.mod h1:Hnq5AHm4OTMt2BUVjael2CWZFD6vksJdWCWiUAmjC9o= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1472,10 +1456,10 @@ google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= -google.golang.org/genproto/googleapis/api 
v0.0.0-20240520151616-dc85e6b867a5 h1:P8OJ/WCl/Xo4E4zoe4/bifHpSmmKwARqyqE4nW6J2GQ= -google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 h1:Q2RxlXqh1cgzzUgV261vBO2jI5R/3DD1J2pM0nI4NhU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0= +google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -1495,8 +1479,8 @@ google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1509,8 +1493,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= -google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/.patched-proto/collector/README.md b/pkg/.patched-proto/collector/README.md new file mode 100644 index 00000000000..f82dbb0278b --- /dev/null +++ b/pkg/.patched-proto/collector/README.md @@ -0,0 +1,10 
@@ +# OpenTelemetry Collector Proto + +This package describes the OpenTelemetry collector protocol. + +## Packages + +1. `common` package contains the common messages shared between different services. +2. `trace` package contains the Trace Service protos. +3. `metrics` package contains the Metrics Service protos. +4. `logs` package contains the Logs Service protos. diff --git a/pkg/.patched-proto/collector/logs/v1/logs_service.proto b/pkg/.patched-proto/collector/logs/v1/logs_service.proto new file mode 100644 index 00000000000..9d29e8e29ef --- /dev/null +++ b/pkg/.patched-proto/collector/logs/v1/logs_service.proto @@ -0,0 +1,79 @@ +// Copyright 2020, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package tempopb.collector.logs.v1; + +import "logs/v1/logs.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Collector.Logs.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.collector.logs.v1"; +option java_outer_classname = "LogsServiceProto"; +option go_package = "github.com/grafana/tempo/pkg/tempopb/collector/logs/v1"; + +// Service that can be used to push logs between one Application instrumented with +// OpenTelemetry and a collector, or between a collector and a central collector (in this +// case logs are sent/received to/from multiple Applications). +service LogsService { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + rpc Export(ExportLogsServiceRequest) returns (ExportLogsServiceResponse) {} +} + +message ExportLogsServiceRequest { + // An array of ResourceLogs. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. + repeated tempopb.logs.v1.ResourceLogs resource_logs = 1; +} + +message ExportLogsServiceResponse { + // The details of a partially successful export request. + // + // If the request is only partially accepted + // (i.e. when the server accepts only parts of the data and rejects the rest) + // the server MUST initialize the `partial_success` field and MUST + // set the `rejected_<signal>` with the number of items it rejected. + // + // Servers MAY also make use of the `partial_success` field to convey + // warnings/suggestions to senders even when the request was fully accepted. + // In such cases, the `rejected_<signal>` MUST have a value of `0` and + // the `error_message` MUST be non-empty. + // + // A `partial_success` message with an empty value (rejected_<signal> = 0 and + // `error_message` = "") is equivalent to it not being set/present. Senders + // SHOULD interpret it the same way as in the full success case. + ExportLogsPartialSuccess partial_success = 1; +} + +message ExportLogsPartialSuccess { + // The number of rejected log records.
+ // + // A `rejected_<signal>` field holding a `0` value indicates that the + // request was fully accepted. + int64 rejected_log_records = 1; + + // A developer-facing human-readable message in English. It should be used + // either to explain why the server rejected parts of the data during a partial + // success or to convey warnings/suggestions during a full success. The message + // should offer guidance on how users can address such issues. + // + // error_message is an optional field. An error_message with an empty value + // is equivalent to it not being set. + string error_message = 2; +} diff --git a/pkg/.patched-proto/collector/logs/v1/logs_service_http.yaml b/pkg/.patched-proto/collector/logs/v1/logs_service_http.yaml new file mode 100644 index 00000000000..507473b9b3b --- /dev/null +++ b/pkg/.patched-proto/collector/logs/v1/logs_service_http.yaml @@ -0,0 +1,9 @@ +# This is an API configuration to generate an HTTP/JSON -> gRPC gateway for the +# OpenTelemetry service using github.com/grpc-ecosystem/grpc-gateway. +type: google.api.Service +config_version: 3 +http: + rules: + - selector: opentelemetry.proto.collector.logs.v1.LogsService.Export + post: /v1/logs + body: "*" \ No newline at end of file diff --git a/pkg/.patched-proto/collector/metrics/v1/metrics_service.proto b/pkg/.patched-proto/collector/metrics/v1/metrics_service.proto new file mode 100644 index 00000000000..a69e2ac50d8 --- /dev/null +++ b/pkg/.patched-proto/collector/metrics/v1/metrics_service.proto @@ -0,0 +1,79 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package tempopb.collector.metrics.v1; + +import "metrics/v1/metrics.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Collector.Metrics.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.collector.metrics.v1"; +option java_outer_classname = "MetricsServiceProto"; +option go_package = "github.com/grafana/tempo/pkg/tempopb/collector/metrics/v1"; + +// Service that can be used to push metrics between one Application +// instrumented with OpenTelemetry and a collector, or between a collector and a +// central collector. +service MetricsService { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + rpc Export(ExportMetricsServiceRequest) returns (ExportMetricsServiceResponse) {} +} + +message ExportMetricsServiceRequest { + // An array of ResourceMetrics. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. + repeated tempopb.metrics.v1.ResourceMetrics resource_metrics = 1; +} + +message ExportMetricsServiceResponse { + // The details of a partially successful export request.
+ // + // If the request is only partially accepted + // (i.e. when the server accepts only parts of the data and rejects the rest) + // the server MUST initialize the `partial_success` field and MUST + // set the `rejected_<signal>` with the number of items it rejected. + // + // Servers MAY also make use of the `partial_success` field to convey + // warnings/suggestions to senders even when the request was fully accepted. + // In such cases, the `rejected_<signal>` MUST have a value of `0` and + // the `error_message` MUST be non-empty. + // + // A `partial_success` message with an empty value (rejected_<signal> = 0 and + // `error_message` = "") is equivalent to it not being set/present. Senders + // SHOULD interpret it the same way as in the full success case. + ExportMetricsPartialSuccess partial_success = 1; +} + +message ExportMetricsPartialSuccess { + // The number of rejected data points. + // + // A `rejected_<signal>` field holding a `0` value indicates that the + // request was fully accepted. + int64 rejected_data_points = 1; + + // A developer-facing human-readable message in English. It should be used + // either to explain why the server rejected parts of the data during a partial + // success or to convey warnings/suggestions during a full success. The message + // should offer guidance on how users can address such issues. + // + // error_message is an optional field. An error_message with an empty value + // is equivalent to it not being set. + string error_message = 2; +} diff --git a/pkg/.patched-proto/collector/metrics/v1/metrics_service_http.yaml b/pkg/.patched-proto/collector/metrics/v1/metrics_service_http.yaml new file mode 100644 index 00000000000..a5456502607 --- /dev/null +++ b/pkg/.patched-proto/collector/metrics/v1/metrics_service_http.yaml @@ -0,0 +1,9 @@ +# This is an API configuration to generate an HTTP/JSON -> gRPC gateway for the +# OpenTelemetry service using github.com/grpc-ecosystem/grpc-gateway. +type: google.api.Service +config_version: 3 +http: + rules: + - selector: opentelemetry.proto.collector.metrics.v1.MetricsService.Export + post: /v1/metrics + body: "*" \ No newline at end of file diff --git a/pkg/.patched-proto/collector/profiles/v1experimental/profiles_service.proto b/pkg/.patched-proto/collector/profiles/v1experimental/profiles_service.proto new file mode 100644 index 00000000000..5730f189c97 --- /dev/null +++ b/pkg/.patched-proto/collector/profiles/v1experimental/profiles_service.proto @@ -0,0 +1,78 @@ +// Copyright 2023, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
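The partial-success contract spelled out above is identical for every signal, so a client can handle it generically. Below is a minimal Go sketch that checks `partial_success` after an OTLP/gRPC log export; it assumes a receiver on localhost:4317 and uses the stock go.opentelemetry.io/proto/otlp bindings rather than the patched tempopb packages vendored here.

package main

import (
	"context"
	"log"

	collogspb "go.opentelemetry.io/proto/otlp/collector/logs/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Placeholder endpoint; any OTLP/gRPC receiver behaves the same way.
	conn, err := grpc.NewClient("localhost:4317", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := collogspb.NewLogsServiceClient(conn)
	resp, err := client.Export(context.Background(), &collogspb.ExportLogsServiceRequest{})
	if err != nil {
		log.Fatal(err) // transport-level failure: nothing was accepted
	}

	// Per the comments above: an unset partial_success (or rejected == 0 with an
	// empty error_message) means full success; rejected > 0 means partial success.
	if ps := resp.GetPartialSuccess(); ps != nil {
		if ps.GetRejectedLogRecords() > 0 {
			log.Printf("server rejected %d log records: %s", ps.GetRejectedLogRecords(), ps.GetErrorMessage())
		} else if ps.GetErrorMessage() != "" {
			log.Printf("server warning: %s", ps.GetErrorMessage())
		}
	}
}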
+ +syntax = "proto3"; + +package tempopb.collector.profiles.v1experimental; + +import "profiles/v1experimental/profiles.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Collector.Profiles.V1Experimental"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.collector.profiles.v1experimental"; +option java_outer_classname = "ProfilesServiceProto"; +option go_package = "github.com/grafana/tempo/pkg/tempopb/collector/profiles/v1experimental"; + +// Service that can be used to push profiles between one Application instrumented with +// OpenTelemetry and a collector, or between a collector and a central collector. +service ProfilesService { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + rpc Export(ExportProfilesServiceRequest) returns (ExportProfilesServiceResponse) {} +} + +message ExportProfilesServiceRequest { + // An array of ResourceProfiles. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. + repeated tempopb.profiles.v1experimental.ResourceProfiles resource_profiles = 1; +} + +message ExportProfilesServiceResponse { + // The details of a partially successful export request. + // + // If the request is only partially accepted + // (i.e. when the server accepts only parts of the data and rejects the rest) + // the server MUST initialize the `partial_success` field and MUST + // set the `rejected_<signal>` with the number of items it rejected. + // + // Servers MAY also make use of the `partial_success` field to convey + // warnings/suggestions to senders even when the request was fully accepted. + // In such cases, the `rejected_<signal>` MUST have a value of `0` and + // the `error_message` MUST be non-empty. + // + // A `partial_success` message with an empty value (rejected_<signal> = 0 and + // `error_message` = "") is equivalent to it not being set/present. Senders + // SHOULD interpret it the same way as in the full success case. + ExportProfilesPartialSuccess partial_success = 1; +} + +message ExportProfilesPartialSuccess { + // The number of rejected profiles. + // + // A `rejected_<signal>` field holding a `0` value indicates that the + // request was fully accepted. + int64 rejected_profiles = 1; + + // A developer-facing human-readable message in English. It should be used + // either to explain why the server rejected parts of the data during a partial + // success or to convey warnings/suggestions during a full success. The message + // should offer guidance on how users can address such issues. + // + // error_message is an optional field. An error_message with an empty value + // is equivalent to it not being set. + string error_message = 2; +} diff --git a/pkg/.patched-proto/collector/profiles/v1experimental/profiles_service_http.yaml b/pkg/.patched-proto/collector/profiles/v1experimental/profiles_service_http.yaml new file mode 100644 index 00000000000..6bb7cf740b9 --- /dev/null +++ b/pkg/.patched-proto/collector/profiles/v1experimental/profiles_service_http.yaml @@ -0,0 +1,9 @@ +# This is an API configuration to generate an HTTP/JSON -> gRPC gateway for the +# OpenTelemetry service using github.com/grpc-ecosystem/grpc-gateway.
+type: google.api.Service +config_version: 3 +http: + rules: + - selector: opentelemetry.proto.collector.profiles.v1.ProfilesService.Export + post: /v1experimental/profiles + body: "*" diff --git a/pkg/.patched-proto/collector/trace/v1/trace_service.proto b/pkg/.patched-proto/collector/trace/v1/trace_service.proto new file mode 100644 index 00000000000..1a923ce2c6f --- /dev/null +++ b/pkg/.patched-proto/collector/trace/v1/trace_service.proto @@ -0,0 +1,79 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package tempopb.collector.trace.v1; + +import "trace/v1/trace.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Collector.Trace.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.collector.trace.v1"; +option java_outer_classname = "TraceServiceProto"; +option go_package = "github.com/grafana/tempo/pkg/tempopb/collector/trace/v1"; + +// Service that can be used to push spans between one Application instrumented with +// OpenTelemetry and a collector, or between a collector and a central collector (in this +// case spans are sent/received to/from multiple Applications). +service TraceService { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + rpc Export(ExportTraceServiceRequest) returns (ExportTraceServiceResponse) {} +} + +message ExportTraceServiceRequest { + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. + repeated tempopb.trace.v1.ResourceSpans resource_spans = 1; +} + +message ExportTraceServiceResponse { + // The details of a partially successful export request. + // + // If the request is only partially accepted + // (i.e. when the server accepts only parts of the data and rejects the rest) + // the server MUST initialize the `partial_success` field and MUST + // set the `rejected_<signal>` with the number of items it rejected. + // + // Servers MAY also make use of the `partial_success` field to convey + // warnings/suggestions to senders even when the request was fully accepted. + // In such cases, the `rejected_<signal>` MUST have a value of `0` and + // the `error_message` MUST be non-empty. + // + // A `partial_success` message with an empty value (rejected_<signal> = 0 and + // `error_message` = "") is equivalent to it not being set/present. Senders + // SHOULD interpret it the same way as in the full success case. + ExportTracePartialSuccess partial_success = 1; +} + +message ExportTracePartialSuccess { + // The number of rejected spans. + // + // A `rejected_<signal>` field holding a `0` value indicates that the + // request was fully accepted. + int64 rejected_spans = 1; + + // A developer-facing human-readable message in English.
It should be used + // either to explain why the server rejected parts of the data during a partial + // success or to convey warnings/suggestions during a full success. The message + // should offer guidance on how users can address such issues. + // + // error_message is an optional field. An error_message with an empty value + // is equivalent to it not being set. + string error_message = 2; +} diff --git a/pkg/.patched-proto/collector/trace/v1/trace_service_http.yaml b/pkg/.patched-proto/collector/trace/v1/trace_service_http.yaml new file mode 100644 index 00000000000..d091b3a8d53 --- /dev/null +++ b/pkg/.patched-proto/collector/trace/v1/trace_service_http.yaml @@ -0,0 +1,9 @@ +# This is an API configuration to generate an HTTP/JSON -> gRPC gateway for the +# OpenTelemetry service using github.com/grpc-ecosystem/grpc-gateway. +type: google.api.Service +config_version: 3 +http: + rules: + - selector: opentelemetry.proto.collector.trace.v1.TraceService.Export + post: /v1/traces + body: "*" diff --git a/pkg/.patched-proto/common/v1/common.proto b/pkg/.patched-proto/common/v1/common.proto new file mode 100644 index 00000000000..e94c7e87042 --- /dev/null +++ b/pkg/.patched-proto/common/v1/common.proto @@ -0,0 +1,81 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package tempopb.common.v1; + +option csharp_namespace = "OpenTelemetry.Proto.Common.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.common.v1"; +option java_outer_classname = "CommonProto"; +option go_package = "github.com/grafana/tempo/pkg/tempopb/common/v1"; + +// AnyValue is used to represent any type of attribute value. AnyValue may contain a +// primitive value such as a string or integer or it may contain an arbitrary nested +// object containing arrays, key-value lists and primitives. +message AnyValue { + // The value is one of the listed fields. It is valid for all values to be unspecified + // in which case this AnyValue is considered to be "empty". + oneof value { + string string_value = 1; + bool bool_value = 2; + int64 int_value = 3; + double double_value = 4; + ArrayValue array_value = 5; + KeyValueList kvlist_value = 6; + bytes bytes_value = 7; + } +} + +// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message +// since oneof in AnyValue does not allow repeated fields. +message ArrayValue { + // Array of values. The array may be empty (contain 0 elements). + repeated AnyValue values = 1; +} + +// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message +// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need +// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to +// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches +// are semantically equivalent. +message KeyValueList { + // A collection of key-value pairs.
The list may be empty (may + // contain 0 elements). + // The keys MUST be unique (it is not allowed to have more than one + // value with the same key). + repeated KeyValue values = 1; +} + +// KeyValue is a key-value pair that is used to store Span attributes, Link +// attributes, etc. +message KeyValue { + string key = 1; + AnyValue value = 2; +} + +// InstrumentationScope is a message representing the instrumentation scope information +// such as the fully qualified name and version. +message InstrumentationScope { + // An empty instrumentation scope name means the name is unknown. + string name = 1; + string version = 2; + + // Additional attributes that describe the scope. [Optional]. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated KeyValue attributes = 3; + uint32 dropped_attributes_count = 4; +} diff --git a/pkg/.patched-proto/logs/v1/logs.proto b/pkg/.patched-proto/logs/v1/logs.proto new file mode 100644 index 00000000000..2659d8b6c27 --- /dev/null +++ b/pkg/.patched-proto/logs/v1/logs.proto @@ -0,0 +1,211 @@ +// Copyright 2020, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package tempopb.logs.v1; + +import "common/v1/common.proto"; +import "resource/v1/resource.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Logs.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.logs.v1"; +option java_outer_classname = "LogsProto"; +option go_package = "github.com/grafana/tempo/pkg/tempopb/logs/v1"; + +// LogsData represents the logs data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP logs data but do not +// implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +message LogsData { + // An array of ResourceLogs. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + repeated ResourceLogs resource_logs = 1; +} + +// A collection of ScopeLogs from a Resource. +message ResourceLogs { + reserved 1000; + + // The resource for the logs in this message. + // If this field is not set then resource info is unknown. + tempopb.resource.v1.Resource resource = 1; + + // A list of ScopeLogs that originate from a resource. + repeated ScopeLogs scope_logs = 2; + + // The Schema URL, if known. This is the identifier of the Schema that the resource data + // is recorded in. 
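To make the AnyValue/KeyValue shapes from common.proto above concrete, here is a small Go sketch that builds one flat and one nested attribute. It uses the upstream go.opentelemetry.io/proto/otlp/common/v1 bindings (the tempopb copies generate the same Go types under a different import path), and the keys and values are made up.

package main

import (
	"fmt"

	commonpb "go.opentelemetry.io/proto/otlp/common/v1"
)

func main() {
	// A flat string attribute: a key plus an AnyValue holding the string_value arm of the oneof.
	host := &commonpb.KeyValue{
		Key:   "host.name",
		Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: "web-1"}},
	}

	// A nested attribute: kvlist_value wraps a KeyValueList, whose keys must be unique.
	owner := &commonpb.KeyValue{
		Key: "service.owner",
		Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_KvlistValue{
			KvlistValue: &commonpb.KeyValueList{Values: []*commonpb.KeyValue{
				{Key: "team", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: "obs"}}},
			}},
		}},
	}

	fmt.Println(host.String(), owner.String())
}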
To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_logs" field which have their own schema_url field. + string schema_url = 3; +} + +// A collection of Logs produced by a Scope. +message ScopeLogs { + // The instrumentation scope information for the logs in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent to + // an empty instrumentation scope name (unknown). + tempopb.common.v1.InstrumentationScope scope = 1; + + // A list of log records. + repeated LogRecord log_records = 2; + + // The Schema URL, if known. This is the identifier of the Schema that the log data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all logs in the "log_records" field. + string schema_url = 3; +} + +// Possible values for LogRecord.SeverityNumber. +enum SeverityNumber { + // UNSPECIFIED is the default SeverityNumber; it MUST NOT be used. + SEVERITY_NUMBER_UNSPECIFIED = 0; + SEVERITY_NUMBER_TRACE = 1; + SEVERITY_NUMBER_TRACE2 = 2; + SEVERITY_NUMBER_TRACE3 = 3; + SEVERITY_NUMBER_TRACE4 = 4; + SEVERITY_NUMBER_DEBUG = 5; + SEVERITY_NUMBER_DEBUG2 = 6; + SEVERITY_NUMBER_DEBUG3 = 7; + SEVERITY_NUMBER_DEBUG4 = 8; + SEVERITY_NUMBER_INFO = 9; + SEVERITY_NUMBER_INFO2 = 10; + SEVERITY_NUMBER_INFO3 = 11; + SEVERITY_NUMBER_INFO4 = 12; + SEVERITY_NUMBER_WARN = 13; + SEVERITY_NUMBER_WARN2 = 14; + SEVERITY_NUMBER_WARN3 = 15; + SEVERITY_NUMBER_WARN4 = 16; + SEVERITY_NUMBER_ERROR = 17; + SEVERITY_NUMBER_ERROR2 = 18; + SEVERITY_NUMBER_ERROR3 = 19; + SEVERITY_NUMBER_ERROR4 = 20; + SEVERITY_NUMBER_FATAL = 21; + SEVERITY_NUMBER_FATAL2 = 22; + SEVERITY_NUMBER_FATAL3 = 23; + SEVERITY_NUMBER_FATAL4 = 24; +} + +// LogRecordFlags represents constants used to interpret the +// LogRecord.flags field, which is protobuf 'fixed32' type and is to +// be used as bit-fields. Each non-zero value defined in this enum is +// a bit-mask. To extract the bit-field, for example, use an +// expression like: +// +// (logRecord.flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK) +// +enum LogRecordFlags { + // The zero value for the enum. Should not be used for comparisons. + // Instead use bitwise "and" with the appropriate mask as shown above. + LOG_RECORD_FLAGS_DO_NOT_USE = 0; + + // Bits 0-7 are used for trace flags. + LOG_RECORD_FLAGS_TRACE_FLAGS_MASK = 0x000000FF; + + // Bits 8-31 are reserved for future use. +} + +// A log record according to OpenTelemetry Log Data Model: +// https://github.com/open-telemetry/oteps/blob/main/text/logs/0097-log-data-model.md +message LogRecord { + reserved 4; + + // time_unix_nano is the time when the event occurred. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // Value of 0 indicates unknown or missing timestamp. + fixed64 time_unix_nano = 1; + + // Time when the event was observed by the collection system. + // For events that originate in OpenTelemetry (e.g. using OpenTelemetry Logging SDK) + // this timestamp is typically set at the generation time and is equal to Timestamp. + // For events originating externally and collected by OpenTelemetry (e.g. using + // Collector) this is the time when OpenTelemetry's code observed the event measured + // by the clock of the OpenTelemetry code. This field MUST be set once the event is + // observed by OpenTelemetry.
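The LogRecordFlags mask above is easiest to see in code. A short Go sketch with a hypothetical flags value; only the mask constant mirrors the enum.

package main

import "fmt"

// Mirrors LOG_RECORD_FLAGS_TRACE_FLAGS_MASK from the enum above (bits 0-7).
const logRecordFlagsTraceFlagsMask uint32 = 0x000000FF

func main() {
	// Hypothetical LogRecord.flags value: reserved high bits happen to be set,
	// plus W3C trace flags 0x01 (sampled) in the low byte.
	flags := uint32(0xABCD0001)

	// Readers must mask rather than assume the reserved bits are zero.
	traceFlags := flags & logRecordFlagsTraceFlagsMask
	fmt.Printf("trace flags: 0x%02X, sampled: %v\n", traceFlags, traceFlags&0x01 != 0)
}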
+ // + // For converting OpenTelemetry log data to formats that support only one timestamp or + // when receiving OpenTelemetry log data by recipients that support only one timestamp + // internally the following logic is recommended: + // - Use time_unix_nano if it is present, otherwise use observed_time_unix_nano. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // Value of 0 indicates unknown or missing timestamp. + fixed64 observed_time_unix_nano = 11; + + // Numerical value of the severity, normalized to values described in Log Data Model. + // [Optional]. + SeverityNumber severity_number = 2; + + // The severity text (also known as log level). The original string representation as + // it is known at the source. [Optional]. + string severity_text = 3; + + // A value containing the body of the log record. Can be for example a human-readable + // string message (including multi-line) describing the event in a free form or it can + // be a structured data composed of arrays and maps of other values. [Optional]. + tempopb.common.v1.AnyValue body = 5; + + // Additional attributes that describe the specific event occurrence. [Optional]. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated tempopb.common.v1.KeyValue attributes = 6; + uint32 dropped_attributes_count = 7; + + // Flags, a bit field. 8 least significant bits are the trace flags as + // defined in W3C Trace Context specification. 24 most significant bits are reserved + // and must be set to 0. Readers must not assume that 24 most significant bits + // will be zero and must correctly mask the bits when reading 8-bit trace flag (use + // flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK). [Optional]. + fixed32 flags = 8; + + // A unique identifier for a trace. All logs from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is optional. + // + // The receivers SHOULD assume that the log record is not associated with a + // trace if any of the following is true: + // - the field is not present, + // - the field contains an invalid value. + bytes trace_id = 9; + + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is optional. If the sender specifies a valid span_id then it SHOULD also + // specify a valid trace_id. + // + // The receivers SHOULD assume that the log record is not associated with a + // span if any of the following is true: + // - the field is not present, + // - the field contains an invalid value. + bytes span_id = 10; +} diff --git a/pkg/.patched-proto/metrics/v1/metrics.proto b/pkg/.patched-proto/metrics/v1/metrics.proto new file mode 100644 index 00000000000..d2621b5b2dd --- /dev/null +++ b/pkg/.patched-proto/metrics/v1/metrics.proto @@ -0,0 +1,691 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package tempopb.metrics.v1; + +import "common/v1/common.proto"; +import "resource/v1/resource.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Metrics.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.metrics.v1"; +option java_outer_classname = "MetricsProto"; +option go_package = "github.com/grafana/tempo/pkg/tempopb/metrics/v1"; + +// MetricsData represents the metrics data that can be stored in a persistent +// storage, OR can be embedded by other protocols that transfer OTLP metrics +// data but do not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +message MetricsData { + // An array of ResourceMetrics. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + repeated ResourceMetrics resource_metrics = 1; +} + +// A collection of ScopeMetrics from a Resource. +message ResourceMetrics { + reserved 1000; + + // The resource for the metrics in this message. + // If this field is not set then no resource info is known. + tempopb.resource.v1.Resource resource = 1; + + // A list of metrics that originate from a resource. + repeated ScopeMetrics scope_metrics = 2; + + // The Schema URL, if known. This is the identifier of the Schema that the resource data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_metrics" field which have their own schema_url field. + string schema_url = 3; +} + +// A collection of Metrics produced by a Scope. +message ScopeMetrics { + // The instrumentation scope information for the metrics in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent to + // an empty instrumentation scope name (unknown). + tempopb.common.v1.InstrumentationScope scope = 1; + + // A list of metrics that originate from an instrumentation library. + repeated Metric metrics = 2; + + // The Schema URL, if known. This is the identifier of the Schema that the metric data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all metrics in the "metrics" field. + string schema_url = 3; +} + +// Defines a Metric which has one or more timeseries. The following is a +// brief summary of the Metric data model.
For more details, see: +// +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md +// +// +// The data model and relation between entities is shown in the +// diagram below. Here, "DataPoint" is the term used to refer to any +// one of the specific data point value types, and "points" is the term used +// to refer to any one of the lists of points contained in the Metric. +// +// - Metric is composed of metadata and data. +// - Metadata part contains a name, description, unit. +// - Data is one of the possible types (Sum, Gauge, Histogram, Summary). +// - DataPoint contains timestamps, attributes, and one of the possible value type +// fields. +// +// Metric +// +------------+ +// |name | +// |description | +// |unit | +------------------------------------+ +// |data |---> |Gauge, Sum, Histogram, Summary, ... | +// +------------+ +------------------------------------+ +// +// Data [One of Gauge, Sum, Histogram, Summary, ...] +// +-----------+ +// |... | // Metadata about the Data. +// |points |--+ +// +-----------+ | +// | +---------------------------+ +// | |DataPoint 1 | +// v |+------+------+ +------+ | +// +-----+ ||label |label |...|label | | +// | 1 |-->||value1|value2|...|valueN| | +// +-----+ |+------+------+ +------+ | +// | . | |+-----+ | +// | . | ||value| | +// | . | |+-----+ | +// | . | +---------------------------+ +// | . | . +// | . | . +// | . | . +// | . | +---------------------------+ +// | . | |DataPoint M | +// +-----+ |+------+------+ +------+ | +// | M |-->||label |label |...|label | | +// +-----+ ||value1|value2|...|valueN| | +// |+------+------+ +------+ | +// |+-----+ | +// ||value| | +// |+-----+ | +// +---------------------------+ +// +// Each distinct type of DataPoint represents the output of a specific +// aggregation function, the result of applying the DataPoint's +// associated function to one or more measurements. +// +// All DataPoint types have three common fields: +// - Attributes includes key-value pairs associated with the data point +// - TimeUnixNano is required, set to the end time of the aggregation +// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints +// having an AggregationTemporality field, as discussed below. +// +// Both TimeUnixNano and StartTimeUnixNano values are expressed as +// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. +// +// # TimeUnixNano +// +// This field is required, having consistent interpretation across +// DataPoint types. TimeUnixNano is the moment corresponding to when +// the data point's aggregate value was captured. +// +// Data points with the 0 value for TimeUnixNano SHOULD be rejected +// by consumers. +// +// # StartTimeUnixNano +// +// StartTimeUnixNano in general allows detecting when a sequence of +// observations is unbroken. This field indicates to consumers the +// start time for points with cumulative and delta +// AggregationTemporality, and it should be included whenever possible +// to support correct rate calculation. Although it may be omitted +// when the start time is truly unknown, setting StartTimeUnixNano is +// strongly encouraged. +message Metric { + reserved 4, 6, 8; + + // name of the metric. + string name = 1; + + // description of the metric, which can be used in documentation. + string description = 2; + + // unit in which the metric value is reported. Follows the format + // described by http://unitsofmeasure.org/ucum.html.
+ string unit = 3; + + // Data determines the aggregation type (if any) of the metric, the reported + // value type for the data points, as well as the relationship to + // the time interval over which they are reported. + oneof data { + Gauge gauge = 5; + Sum sum = 7; + Histogram histogram = 9; + ExponentialHistogram exponential_histogram = 10; + Summary summary = 11; + } + + // Additional metadata attributes that describe the metric. [Optional]. + // Attributes are non-identifying. + // Consumers SHOULD NOT need to be aware of these attributes. + // These attributes MAY be used to encode information allowing + // for lossless roundtrip translation to / from another data model. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated tempopb.common.v1.KeyValue metadata = 12; +} + +// Gauge represents the type of a scalar metric that always exports the +// "current value" for every data point. It should be used for an "unknown" +// aggregation. +// +// A Gauge does not support different aggregation temporalities. Given the +// aggregation is unknown, points cannot be combined using the same +// aggregation, regardless of aggregation temporalities. Therefore, +// AggregationTemporality is not included. Consequently, this also means +// "StartTimeUnixNano" is ignored for all data points. +message Gauge { + repeated NumberDataPoint data_points = 1; +} + +// Sum represents the type of a scalar metric that is calculated as a sum of all +// reported measurements over a time interval. +message Sum { + repeated NumberDataPoint data_points = 1; + + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. + AggregationTemporality aggregation_temporality = 2; + + // If "true", it means that the sum is monotonic. + bool is_monotonic = 3; +} + +// Histogram represents the type of a metric that is calculated by aggregating +// as a Histogram of all reported measurements over a time interval. +message Histogram { + repeated HistogramDataPoint data_points = 1; + + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. + AggregationTemporality aggregation_temporality = 2; +} + +// ExponentialHistogram represents the type of a metric that is calculated by aggregating +// as an ExponentialHistogram of all reported double measurements over a time interval. +message ExponentialHistogram { + repeated ExponentialHistogramDataPoint data_points = 1; + + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. + AggregationTemporality aggregation_temporality = 2; +} + +// Summary metric data are used to convey quantile summaries, +// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary) +// and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45) +// data type. These data points cannot always be merged in a meaningful way. +// While they can be useful in some applications, histogram data points are +// recommended for new applications. +message Summary { + repeated SummaryDataPoint data_points = 1; +} + +// AggregationTemporality defines how a metric aggregator reports aggregated +// values.
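Because the data oneof above carries the aggregation semantics (temporality and monotonicity live on the Sum, not on individual points; see the AggregationTemporality enum that follows), a concrete construction may help. A minimal Go sketch using the upstream go.opentelemetry.io/proto/otlp/metrics/v1 bindings; the metric name, unit, and value are illustrative, and NumberDataPoint is defined later in this file.

package main

import (
	"fmt"
	"time"

	metricspb "go.opentelemetry.io/proto/otlp/metrics/v1"
)

func main() {
	now := uint64(time.Now().UnixNano())

	m := &metricspb.Metric{
		Name:        "http.server.request.count", // illustrative name
		Description: "Total HTTP requests served.",
		Unit:        "{request}",
		// The data oneof selects Sum; temporality and monotonicity are set on the
		// Sum itself, while timestamps and the value live on the data point.
		Data: &metricspb.Metric_Sum{Sum: &metricspb.Sum{
			AggregationTemporality: metricspb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
			IsMonotonic:            true,
			DataPoints: []*metricspb.NumberDataPoint{{
				StartTimeUnixNano: now - uint64(time.Minute), // strongly encouraged for cumulative points
				TimeUnixNano:      now,                       // required
				Value:             &metricspb.NumberDataPoint_AsInt{AsInt: 42},
			}},
		}},
	}
	fmt.Println(m.String())
}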
It describes how those values relate to the time interval over +// which they are aggregated. +enum AggregationTemporality { + // UNSPECIFIED is the default AggregationTemporality, it MUST not be used. + AGGREGATION_TEMPORALITY_UNSPECIFIED = 0; + + // DELTA is an AggregationTemporality for a metric aggregator which reports + // changes since last report time. Successive metrics contain aggregation of + // values from continuous and non-overlapping intervals. + // + // The values for a DELTA metric are based only on the time interval + // associated with one measurement cycle. There is no dependency on + // previous measurements like is the case for CUMULATIVE metrics. + // + // For example, consider a system measuring the number of requests that + // it receives and reports the sum of these requests every second as a + // DELTA metric: + // + // 1. The system starts receiving at time=t_0. + // 2. A request is received, the system measures 1 request. + // 3. A request is received, the system measures 1 request. + // 4. A request is received, the system measures 1 request. + // 5. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+1 with a value of 3. + // 6. A request is received, the system measures 1 request. + // 7. A request is received, the system measures 1 request. + // 8. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0+1 to + // t_0+2 with a value of 2. + AGGREGATION_TEMPORALITY_DELTA = 1; + + // CUMULATIVE is an AggregationTemporality for a metric aggregator which + // reports changes since a fixed start time. This means that current values + // of a CUMULATIVE metric depend on all previous measurements since the + // start time. Because of this, the sender is required to retain this state + // in some form. If this state is lost or invalidated, the CUMULATIVE metric + // values MUST be reset and a new fixed start time following the last + // reported measurement time sent MUST be used. + // + // For example, consider a system measuring the number of requests that + // it receives and reports the sum of these requests every second as a + // CUMULATIVE metric: + // + // 1. The system starts receiving at time=t_0. + // 2. A request is received, the system measures 1 request. + // 3. A request is received, the system measures 1 request. + // 4. A request is received, the system measures 1 request. + // 5. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+1 with a value of 3. + // 6. A request is received, the system measures 1 request. + // 7. A request is received, the system measures 1 request. + // 8. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+2 with a value of 5. + // 9. The system experiences a fault and loses state. + // 10. The system recovers and resumes receiving at time=t_1. + // 11. A request is received, the system measures 1 request. + // 12. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_1 to + // t_0+1 with a value of 1. + // + // Note: Even though, when reporting changes since last report time, using + // CUMULATIVE is valid, it is not recommended. 
This may cause problems for + // systems that do not use start_time to determine when the aggregation + // value was reset (e.g. Prometheus). + AGGREGATION_TEMPORALITY_CUMULATIVE = 2; +} + +// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a +// bit-field representing 32 distinct boolean flags. Each flag defined in this +// enum is a bit-mask. To test the presence of a single flag in the flags of +// a data point, for example, use an expression like: +// +// (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK +// +enum DataPointFlags { + // The zero value for the enum. Should not be used for comparisons. + // Instead use bitwise "and" with the appropriate mask as shown above. + DATA_POINT_FLAGS_DO_NOT_USE = 0; + + // This DataPoint is valid but has no recorded value. This value + // SHOULD be used to reflect explicitly missing data in a series, as + // for an equivalent to the Prometheus "staleness marker". + DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK = 1; + + // Bits 2-31 are reserved for future use. +} + +// NumberDataPoint is a single data point in a timeseries that describes the +// time-varying scalar value of a metric. +message NumberDataPoint { + reserved 1; + + // The set of key/value pairs that uniquely identify the timeseries from + // where this point belongs. The list may be empty (may contain 0 elements). + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated tempopb.common.v1.KeyValue attributes = 7; + + // StartTimeUnixNano is optional but strongly encouraged, see the + // the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 start_time_unix_nano = 2; + + // TimeUnixNano is required, see the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 time_unix_nano = 3; + + // The value itself. A point is considered invalid when one of the recognized + // value fields is not present inside this oneof. + oneof value { + double as_double = 4; + sfixed64 as_int = 6; + } + + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + repeated Exemplar exemplars = 5; + + // Flags that apply to this specific data point. See DataPointFlags + // for the available flags and their meaning. + uint32 flags = 8; +} + +// HistogramDataPoint is a single data point in a timeseries that describes the +// time-varying values of a Histogram. A Histogram contains summary statistics +// for a population of values, it may optionally contain the distribution of +// those values across a set of buckets. +// +// If the histogram contains the distribution of values, then both +// "explicit_bounds" and "bucket counts" fields must be defined. +// If the histogram does not contain the distribution of values, then both +// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and +// "sum" are known. +message HistogramDataPoint { + reserved 1; + + // The set of key/value pairs that uniquely identify the timeseries from + // where this point belongs. The list may be empty (may contain 0 elements). + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). 
+  repeated tempopb.common.v1.KeyValue attributes = 9;
+
+  // StartTimeUnixNano is optional but strongly encouraged, see the
+  // detailed comments above Metric.
+  //
+  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+  // 1970.
+  fixed64 start_time_unix_nano = 2;
+
+  // TimeUnixNano is required, see the detailed comments above Metric.
+  //
+  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+  // 1970.
+  fixed64 time_unix_nano = 3;
+
+  // count is the number of values in the population. Must be non-negative. This
+  // value must be equal to the sum of the "count" fields in buckets if a
+  // histogram is provided.
+  fixed64 count = 4;
+
+  // sum of the values in the population. If count is zero then this field
+  // must be zero.
+  //
+  // Note: Sum should only be filled out when measuring non-negative discrete
+  // events, and is assumed to be monotonic over the values of these events.
+  // Negative events *can* be recorded, but sum should not be filled out when
+  // doing so. This is specifically to enforce compatibility w/ OpenMetrics,
+  // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
+  optional double sum = 5;
+
+  // bucket_counts is an optional field containing the count values of the
+  // histogram for each bucket.
+  //
+  // The sum of the bucket_counts must equal the value in the count field.
+  //
+  // The number of elements in the bucket_counts array must be one greater than
+  // the number of elements in the explicit_bounds array.
+  repeated fixed64 bucket_counts = 6;
+
+  // explicit_bounds specifies buckets with explicitly defined bounds for values.
+  //
+  // The boundaries for bucket at index i are:
+  //
+  // (-infinity, explicit_bounds[i]] for i == 0
+  // (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds)
+  // (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)
+  //
+  // The values in the explicit_bounds array must be strictly increasing.
+  //
+  // Histogram buckets are inclusive of their upper boundary, except the last
+  // bucket where the boundary is at infinity. This format is intentionally
+  // compatible with the OpenMetrics histogram definition.
+  repeated double explicit_bounds = 7;
+
+  // (Optional) List of exemplars collected from
+  // measurements that were used to form the data point.
+  repeated Exemplar exemplars = 8;
+
+  // Flags that apply to this specific data point. See DataPointFlags
+  // for the available flags and their meaning.
+  uint32 flags = 10;
+
+  // min is the minimum value over (start_time, end_time].
+  optional double min = 11;
+
+  // max is the maximum value over (start_time, end_time].
+  optional double max = 12;
+}
+
+// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
+// time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram contains
+// summary statistics for a population of values; it may optionally contain the
+// distribution of those values across a set of buckets.
+//
+message ExponentialHistogramDataPoint {
+  // The set of key/value pairs that uniquely identify the timeseries from
+  // where this point belongs. The list may be empty (may contain 0 elements).
+  // Attribute keys MUST be unique (it is not allowed to have more than one
+  // attribute with the same key).
+  repeated tempopb.common.v1.KeyValue attributes = 1;
+
+  // StartTimeUnixNano is optional but strongly encouraged, see the
+  // detailed comments above Metric.
+ // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 start_time_unix_nano = 2; + + // TimeUnixNano is required, see the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 time_unix_nano = 3; + + // count is the number of values in the population. Must be + // non-negative. This value must be equal to the sum of the "bucket_counts" + // values in the positive and negative Buckets plus the "zero_count" field. + fixed64 count = 4; + + // sum of the values in the population. If count is zero then this field + // must be zero. + // + // Note: Sum should only be filled out when measuring non-negative discrete + // events, and is assumed to be monotonic over the values of these events. + // Negative events *can* be recorded, but sum should not be filled out when + // doing so. This is specifically to enforce compatibility w/ OpenMetrics, + // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram + optional double sum = 5; + + // scale describes the resolution of the histogram. Boundaries are + // located at powers of the base, where: + // + // base = (2^(2^-scale)) + // + // The histogram bucket identified by `index`, a signed integer, + // contains values that are greater than (base^index) and + // less than or equal to (base^(index+1)). + // + // The positive and negative ranges of the histogram are expressed + // separately. Negative values are mapped by their absolute value + // into the negative range using the same scale as the positive range. + // + // scale is not restricted by the protocol, as the permissible + // values depend on the range of the data. + sint32 scale = 6; + + // zero_count is the count of values that are either exactly zero or + // within the region considered zero by the instrumentation at the + // tolerated degree of precision. This bucket stores values that + // cannot be expressed using the standard exponential formula as + // well as values that have been rounded to zero. + // + // Implementations MAY consider the zero bucket to have probability + // mass equal to (zero_count / count). + fixed64 zero_count = 7; + + // positive carries the positive range of exponential bucket counts. + Buckets positive = 8; + + // negative carries the negative range of exponential bucket counts. + Buckets negative = 9; + + // Buckets are a set of bucket counts, encoded in a contiguous array + // of counts. + message Buckets { + // Offset is the bucket index of the first entry in the bucket_counts array. + // + // Note: This uses a varint encoding as a simple form of compression. + sint32 offset = 1; + + // bucket_counts is an array of count values, where bucket_counts[i] carries + // the count of the bucket at index (offset+i). bucket_counts[i] is the count + // of values greater than base^(offset+i) and less than or equal to + // base^(offset+i+1). + // + // Note: By contrast, the explicit HistogramDataPoint uses + // fixed64. This field is expected to have many buckets, + // especially zeros, so uint64 has been selected to ensure + // varint encoding. + repeated uint64 bucket_counts = 2; + } + + // Flags that apply to this specific data point. See DataPointFlags + // for the available flags and their meaning. 
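+  //
+  // For example (an illustrative sketch, not part of the upstream comment),
+  // a consumer could detect a staleness marker with:
+  //
+  //   (flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) != 0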
+  uint32 flags = 10;
+
+  // (Optional) List of exemplars collected from
+  // measurements that were used to form the data point.
+  repeated Exemplar exemplars = 11;
+
+  // min is the minimum value over (start_time, end_time].
+  optional double min = 12;
+
+  // max is the maximum value over (start_time, end_time].
+  optional double max = 13;
+
+  // ZeroThreshold may be optionally set to convey the width of the zero
+  // region, where the zero region is defined as the closed interval
+  // [-ZeroThreshold, ZeroThreshold].
+  // When ZeroThreshold is 0, the zero_count bucket stores values that cannot be
+  // expressed using the standard exponential formula as well as values that
+  // have been rounded to zero.
+  double zero_threshold = 14;
+}
+
+// SummaryDataPoint is a single data point in a timeseries that describes the
+// time-varying values of a Summary metric.
+message SummaryDataPoint {
+  reserved 1;
+
+  // The set of key/value pairs that uniquely identify the timeseries from
+  // where this point belongs. The list may be empty (may contain 0 elements).
+  // Attribute keys MUST be unique (it is not allowed to have more than one
+  // attribute with the same key).
+  repeated tempopb.common.v1.KeyValue attributes = 7;
+
+  // StartTimeUnixNano is optional but strongly encouraged, see the
+  // detailed comments above Metric.
+  //
+  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+  // 1970.
+  fixed64 start_time_unix_nano = 2;
+
+  // TimeUnixNano is required, see the detailed comments above Metric.
+  //
+  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+  // 1970.
+  fixed64 time_unix_nano = 3;
+
+  // count is the number of values in the population. Must be non-negative.
+  fixed64 count = 4;
+
+  // sum of the values in the population. If count is zero then this field
+  // must be zero.
+  //
+  // Note: Sum should only be filled out when measuring non-negative discrete
+  // events, and is assumed to be monotonic over the values of these events.
+  // Negative events *can* be recorded, but sum should not be filled out when
+  // doing so. This is specifically to enforce compatibility w/ OpenMetrics,
+  // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary
+  double sum = 5;
+
+  // Represents the value at a given quantile of a distribution.
+  //
+  // To record Min and Max values, the following conventions are used:
+  // - The 1.0 quantile is equivalent to the maximum value observed.
+  // - The 0.0 quantile is equivalent to the minimum value observed.
+  //
+  // See the following issue for more context:
+  // https://github.com/open-telemetry/opentelemetry-proto/issues/125
+  message ValueAtQuantile {
+    // The quantile of a distribution. Must be in the interval
+    // [0.0, 1.0].
+    double quantile = 1;
+
+    // The value at the given quantile of a distribution.
+    //
+    // Quantile values must NOT be negative.
+    double value = 2;
+  }
+
+  // (Optional) list of values at different quantiles of the distribution calculated
+  // from the current snapshot. The quantiles must be strictly increasing.
+  repeated ValueAtQuantile quantile_values = 6;
+
+  // Flags that apply to this specific data point. See DataPointFlags
+  // for the available flags and their meaning.
+  uint32 flags = 8;
+}
+
+// A representation of an exemplar, which is a sample input measurement.
+// Exemplars also hold information about the environment when the measurement +// was recorded, for example the span and trace ID of the active span when the +// exemplar was recorded. +message Exemplar { + reserved 1; + + // The set of key/value pairs that were filtered out by the aggregator, but + // recorded alongside the original measurement. Only key/value pairs that were + // filtered out by the aggregator should be included + repeated tempopb.common.v1.KeyValue filtered_attributes = 7; + + // time_unix_nano is the exact time when this exemplar was recorded + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 time_unix_nano = 2; + + // The value of the measurement that was recorded. An exemplar is + // considered invalid when one of the recognized value fields is not present + // inside this oneof. + oneof value { + double as_double = 3; + sfixed64 as_int = 6; + } + + // (Optional) Span ID of the exemplar trace. + // span_id may be missing if the measurement is not recorded inside a trace + // or if the trace is not sampled. + bytes span_id = 4; + + // (Optional) Trace ID of the exemplar trace. + // trace_id may be missing if the measurement is not recorded inside a trace + // or if the trace is not sampled. + bytes trace_id = 5; +} diff --git a/pkg/.patched-proto/profiles/v1experimental/pprofextended.proto b/pkg/.patched-proto/profiles/v1experimental/pprofextended.proto new file mode 100644 index 00000000000..da58d95d41a --- /dev/null +++ b/pkg/.patched-proto/profiles/v1experimental/pprofextended.proto @@ -0,0 +1,386 @@ +// Copyright 2023, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file includes work covered by the following copyright and permission notices: +// +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Profile is a common stacktrace profile format. +// +// Measurements represented with this format should follow the +// following conventions: +// +// - Consumers should treat unset optional fields as if they had been +// set with their default value. +// +// - When possible, measurements should be stored in "unsampled" form +// that is most useful to humans. There should be enough +// information present to determine the original sampled values. +// +// - On-disk, the serialized proto must be gzip-compressed. 
+//
+// - The profile is represented as a set of samples, where each sample
+//   references a sequence of locations, and where each location belongs
+//   to a mapping.
+// - There is an N->1 relationship from sample.location_id entries to
+//   locations. For every sample.location_id entry there must be a
+//   unique Location with that index.
+// - There is an optional N->1 relationship from locations to
+//   mappings. For every nonzero Location.mapping_id there must be a
+//   unique Mapping with that index.
+
+syntax = "proto3";
+
+package tempopb.profiles.v1experimental;
+
+import "common/v1/common.proto";
+
+option csharp_namespace = "OpenTelemetry.Proto.Profiles.V1Experimental";
+option go_package = "github.com/grafana/tempo/pkg/tempopb/profiles/v1experimental";
+
+// Represents a complete profile, including sample types, samples,
+// mappings to binaries, locations, functions, string table, and additional metadata.
+message Profile {
+  // A description of the samples associated with each Sample.value.
+  // For a cpu profile this might be:
+  //   [["cpu","nanoseconds"]] or [["wall","seconds"]] or [["syscall","count"]]
+  // For a heap profile, this might be:
+  //   [["allocations","count"], ["space","bytes"]],
+  // If one of the values represents the number of events represented
+  // by the sample, by convention it should be at index 0 and use
+  // sample_type.unit == "count".
+  repeated ValueType sample_type = 1;
+  // The set of samples recorded in this profile.
+  repeated Sample sample = 2;
+  // Mapping from address ranges to the image/binary/library mapped
+  // into that address range. mapping[0] will be the main binary.
+  repeated Mapping mapping = 3;
+  // Locations referenced by samples via location_indices.
+  repeated Location location = 4;
+  // Array of locations referenced by samples.
+  repeated int64 location_indices = 15;
+  // Functions referenced by locations.
+  repeated Function function = 5;
+  // Lookup table for attributes.
+  repeated tempopb.common.v1.KeyValue attribute_table = 16;
+  // Represents a mapping between Attribute Keys and Units.
+  repeated AttributeUnit attribute_units = 17;
+  // Lookup table for links.
+  repeated Link link_table = 18;
+  // A common table for strings referenced by various messages.
+  // string_table[0] must always be "".
+  repeated string string_table = 6;
+  // frames with Function.function_name fully matching the following
+  // regexp will be dropped from the samples, along with their successors.
+  int64 drop_frames = 7; // Index into string table.
+  // frames with Function.function_name fully matching the following
+  // regexp will be kept, even if it matches drop_frames.
+  int64 keep_frames = 8; // Index into string table.
+
+  // The following fields are informational, do not affect
+  // interpretation of results.
+
+  // Time of collection (UTC) represented as nanoseconds past the epoch.
+  int64 time_nanos = 9;
+  // Duration of the profile, if a duration makes sense.
+  int64 duration_nanos = 10;
+  // The kind of events between sampled occurrences.
+  // e.g. [ "cpu","cycles" ] or [ "heap","bytes" ]
+  ValueType period_type = 11;
+  // The number of events between sampled occurrences.
+  int64 period = 12;
+  // Free-form text associated with the profile. The text is displayed as is
+  // to the user by the tools that read profiles (e.g. by pprof). This field
+  // should not be used to store any machine-readable information; it is only
+  // for human-friendly content. The profile must stay functional if this field
+  // is cleaned.
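+  //
+  // For example (an illustrative note, not part of the upstream comment),
+  // an entry c in this list resolves to the string string_table[c], the
+  // same index-into-string-table convention used throughout this message.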
+ repeated int64 comment = 13; // Indices into string table. + // Index into the string table of the type of the preferred sample + // value. If unset, clients should default to the last sample value. + int64 default_sample_type = 14; +} + +// Represents a mapping between Attribute Keys and Units. +message AttributeUnit { + // Index into string table. + int64 attribute_key = 1; + // Index into string table. + int64 unit = 2; +} + +// A pointer from a profile Sample to a trace Span. +// Connects a profile sample to a trace span, identified by unique trace and span IDs. +message Link { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + bytes trace_id = 1; + + // A unique identifier for the linked span. The ID is an 8-byte array. + bytes span_id = 2; +} + +// Specifies the method of aggregating metric values, either DELTA (change since last report) +// or CUMULATIVE (total since a fixed start time). +enum AggregationTemporality { + /* UNSPECIFIED is the default AggregationTemporality, it MUST not be used. */ + AGGREGATION_TEMPORALITY_UNSPECIFIED = 0; + + /** DELTA is an AggregationTemporality for a profiler which reports + changes since last report time. Successive metrics contain aggregation of + values from continuous and non-overlapping intervals. + + The values for a DELTA metric are based only on the time interval + associated with one measurement cycle. There is no dependency on + previous measurements like is the case for CUMULATIVE metrics. + + For example, consider a system measuring the number of requests that + it receives and reports the sum of these requests every second as a + DELTA metric: + + 1. The system starts receiving at time=t_0. + 2. A request is received, the system measures 1 request. + 3. A request is received, the system measures 1 request. + 4. A request is received, the system measures 1 request. + 5. The 1 second collection cycle ends. A metric is exported for the + number of requests received over the interval of time t_0 to + t_0+1 with a value of 3. + 6. A request is received, the system measures 1 request. + 7. A request is received, the system measures 1 request. + 8. The 1 second collection cycle ends. A metric is exported for the + number of requests received over the interval of time t_0+1 to + t_0+2 with a value of 2. */ + AGGREGATION_TEMPORALITY_DELTA = 1; + + /** CUMULATIVE is an AggregationTemporality for a profiler which + reports changes since a fixed start time. This means that current values + of a CUMULATIVE metric depend on all previous measurements since the + start time. Because of this, the sender is required to retain this state + in some form. If this state is lost or invalidated, the CUMULATIVE metric + values MUST be reset and a new fixed start time following the last + reported measurement time sent MUST be used. + + For example, consider a system measuring the number of requests that + it receives and reports the sum of these requests every second as a + CUMULATIVE metric: + + 1. The system starts receiving at time=t_0. + 2. A request is received, the system measures 1 request. + 3. A request is received, the system measures 1 request. + 4. A request is received, the system measures 1 request. + 5. The 1 second collection cycle ends. A metric is exported for the + number of requests received over the interval of time t_0 to + t_0+1 with a value of 3. + 6. A request is received, the system measures 1 request. + 7. A request is received, the system measures 1 request. + 8. 
The 1 second collection cycle ends. A metric is exported for the
+     number of requests received over the interval of time t_0 to
+     t_0+2 with a value of 5.
+  9. The system experiences a fault and loses state.
+  10. The system recovers and resumes receiving at time=t_1.
+  11. A request is received, the system measures 1 request.
+  12. The 1 second collection cycle ends. A metric is exported for the
+      number of requests received over the interval of time t_1 to
+      t_1+1 with a value of 1.
+
+  Note: Even though it is valid to use CUMULATIVE when reporting changes
+  since the last report time, it is not recommended. */
+  AGGREGATION_TEMPORALITY_CUMULATIVE = 2;
+}
+
+// ValueType describes the type and units of a value, with an optional aggregation temporality.
+message ValueType {
+  int64 type = 1; // Index into string table.
+  int64 unit = 2; // Index into string table.
+
+  AggregationTemporality aggregation_temporality = 3;
+}
+
+// Each Sample records values encountered in some program
+// context. The program context is typically a stack trace, perhaps
+// augmented with auxiliary information like the thread-id, some
+// indicator of a higher level request being handled etc.
+message Sample {
+  // The indices recorded here correspond to locations in Profile.location.
+  // The leaf is at location_index[0]. [deprecated, superseded by locations_start_index / locations_length]
+  repeated uint64 location_index = 1;
+  // locations_start_index along with locations_length refers to a slice of locations in Profile.location.
+  // Supersedes location_index.
+  uint64 locations_start_index = 7;
+  // locations_length along with locations_start_index refers to a slice of locations in Profile.location.
+  // Supersedes location_index.
+  uint64 locations_length = 8;
+  // A 128-bit id that uniquely identifies this stacktrace, globally. Index into string table. [optional]
+  uint32 stacktrace_id_index = 9;
+  // The type and unit of each value is defined by the corresponding
+  // entry in Profile.sample_type. All samples must have the same
+  // number of values, the same as the length of Profile.sample_type.
+  // When aggregating multiple samples into a single sample, the
+  // result has a list of values that is the element-wise sum of the
+  // lists of the originals.
+  repeated int64 value = 2;
+  // label includes additional context for this sample. It can include
+  // things like a thread id, allocation size, etc.
+  //
+  // NOTE: While possible, having multiple values for the same label key is
+  // strongly discouraged and should never be used. Most tools (e.g. pprof) do
+  // not have good (or any) support for multi-value labels. And an even more
+  // discouraged case is having a string label and a numeric label of the same
+  // name on a sample. Again, possible to express, but should not be used.
+  // [deprecated, superseded by attributes]
+  repeated Label label = 3;
+  // References to attributes in Profile.attribute_table. [optional]
+  repeated uint64 attributes = 10;
+
+  // Reference to link in Profile.link_table. [optional]
+  uint64 link = 12;
+
+  // Timestamps associated with Sample represented in nanoseconds. These timestamps are expected
+  // to fall within the Profile's time range. [optional]
+  repeated uint64 timestamps_unix_nano = 13;
+}
+
+// Provides additional context for a sample,
+// such as thread ID or allocation size, with optional units.
[deprecated] +message Label { + int64 key = 1; // Index into string table + + // At most one of the following must be present + int64 str = 2; // Index into string table + int64 num = 3; + + // Should only be present when num is present. + // Specifies the units of num. + // Use arbitrary string (for example, "requests") as a custom count unit. + // If no unit is specified, consumer may apply heuristic to deduce the unit. + // Consumers may also interpret units like "bytes" and "kilobytes" as memory + // units and units like "seconds" and "nanoseconds" as time units, + // and apply appropriate unit conversions to these. + int64 num_unit = 4; // Index into string table +} + +// Indicates the semantics of the build_id field. +enum BuildIdKind { + // Linker-generated build ID, stored in the ELF binary notes. + BUILD_ID_LINKER = 0; + // Build ID based on the content hash of the binary. Currently no particular + // hashing approach is standardized, so a given producer needs to define it + // themselves and thus unlike BUILD_ID_LINKER this kind of hash is producer-specific. + // We may choose to provide a standardized stable hash recommendation later. + BUILD_ID_BINARY_HASH = 1; +} + +// Describes the mapping of a binary in memory, including its address range, +// file offset, and metadata like build ID +message Mapping { + // Unique nonzero id for the mapping. [deprecated] + uint64 id = 1; + // Address at which the binary (or DLL) is loaded into memory. + uint64 memory_start = 2; + // The limit of the address range occupied by this mapping. + uint64 memory_limit = 3; + // Offset in the binary that corresponds to the first mapped address. + uint64 file_offset = 4; + // The object this entry is loaded from. This can be a filename on + // disk for the main binary and shared libraries, or virtual + // abstractions like "[vdso]". + int64 filename = 5; // Index into string table + // A string that uniquely identifies a particular program version + // with high probability. E.g., for binaries generated by GNU tools, + // it could be the contents of the .note.gnu.build-id field. + int64 build_id = 6; // Index into string table + // Specifies the kind of build id. See BuildIdKind enum for more details [optional] + BuildIdKind build_id_kind = 11; + // References to attributes in Profile.attribute_table. [optional] + repeated uint64 attributes = 12; + // The following fields indicate the resolution of symbolic info. + bool has_functions = 7; + bool has_filenames = 8; + bool has_line_numbers = 9; + bool has_inline_frames = 10; +} + +// Describes function and line table debug information. +message Location { + // Unique nonzero id for the location. A profile could use + // instruction addresses or any integer sequence as ids. [deprecated] + uint64 id = 1; + // The index of the corresponding profile.Mapping for this location. + // It can be unset if the mapping is unknown or not applicable for + // this profile type. + uint64 mapping_index = 2; + // The instruction address for this location, if available. It + // should be within [Mapping.memory_start...Mapping.memory_limit] + // for the corresponding mapping. A non-leaf address may be in the + // middle of a call instruction. It is up to display tools to find + // the beginning of the instruction if necessary. + uint64 address = 3; + // Multiple line indicates this location has inlined functions, + // where the last entry represents the caller into which the + // preceding entries were inlined. 
+ // + // E.g., if memcpy() is inlined into printf: + // line[0].function_name == "memcpy" + // line[1].function_name == "printf" + repeated Line line = 4; + // Provides an indication that multiple symbols map to this location's + // address, for example due to identical code folding by the linker. In that + // case the line information above represents one of the multiple + // symbols. This field must be recomputed when the symbolization state of the + // profile changes. + bool is_folded = 5; + + // Type of frame (e.g. kernel, native, python, hotspot, php). Index into string table. + uint32 type_index = 6; + + // References to attributes in Profile.attribute_table. [optional] + repeated uint64 attributes = 7; +} + +// Details a specific line in a source code, linked to a function. +message Line { + // The index of the corresponding profile.Function for this line. + uint64 function_index = 1; + // Line number in source code. + int64 line = 2; + // Column number in source code. + int64 column = 3; +} + +// Describes a function, including its human-readable name, system name, +// source file, and starting line number in the source. +message Function { + // Unique nonzero id for the function. [deprecated] + uint64 id = 1; + // Name of the function, in human-readable form if available. + int64 name = 2; // Index into string table + // Name of the function, as identified by the system. + // For instance, it can be a C++ mangled name. + int64 system_name = 3; // Index into string table + // Source file containing the function. + int64 filename = 4; // Index into string table + // Line number in source file. + int64 start_line = 5; +} diff --git a/pkg/.patched-proto/profiles/v1experimental/profiles.proto b/pkg/.patched-proto/profiles/v1experimental/profiles.proto new file mode 100644 index 00000000000..818f44adb2f --- /dev/null +++ b/pkg/.patched-proto/profiles/v1experimental/profiles.proto @@ -0,0 +1,191 @@ +// Copyright 2023, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package tempopb.profiles.v1experimental; + +import "common/v1/common.proto"; +import "resource/v1/resource.proto"; +import "profiles/v1experimental/pprofextended.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Profiles.V1Experimental"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.profiles.v1experimental"; +option java_outer_classname = "ProfilesProto"; +option go_package = "github.com/grafana/tempo/pkg/tempopb/profiles/v1experimental"; + +// Relationships Diagram +// +// ┌──────────────────┐ LEGEND +// │ ProfilesData │ +// └──────────────────┘ ─────▶ embedded +// │ +// │ 1-n ─────▷ referenced by index +// ▼ +// ┌──────────────────┐ +// │ ResourceProfiles │ +// └──────────────────┘ +// │ +// │ 1-n +// ▼ +// ┌──────────────────┐ +// │ ScopeProfiles │ +// └──────────────────┘ +// │ +// │ 1-n +// ▼ +// ┌──────────────────┐ +// │ ProfileContainer │ +// └──────────────────┘ +// │ +// │ 1-1 +// ▼ +// ┌──────────────────┐ +// │ Profile │ +// └──────────────────┘ +// │ 1-n +// │ 1-n ┌───────────────────────────────────────┐ +// ▼ │ ▽ +// ┌──────────────────┐ 1-n ┌──────────────┐ ┌──────────┐ +// │ Sample │ ──────▷ │ KeyValue │ │ Link │ +// └──────────────────┘ └──────────────┘ └──────────┘ +// │ 1-n △ △ +// │ 1-n ┌─────────────────┘ │ 1-n +// ▽ │ │ +// ┌──────────────────┐ n-1 ┌──────────────┐ +// │ Location │ ──────▷ │ Mapping │ +// └──────────────────┘ └──────────────┘ +// │ +// │ 1-n +// ▼ +// ┌──────────────────┐ +// │ Line │ +// └──────────────────┘ +// │ +// │ 1-1 +// ▽ +// ┌──────────────────┐ +// │ Function │ +// └──────────────────┘ +// + +// ProfilesData represents the profiles data that can be stored in persistent storage, +// OR can be embedded by other protocols that transfer OTLP profiles data but do not +// implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +message ProfilesData { + // An array of ResourceProfiles. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + repeated ResourceProfiles resource_profiles = 1; +} + + +// A collection of ScopeProfiles from a Resource. +message ResourceProfiles { + reserved 1000; + + // The resource for the profiles in this message. + // If this field is not set then no resource info is known. + tempopb.resource.v1.Resource resource = 1; + + // A list of ScopeProfiles that originate from a resource. + repeated ScopeProfiles scope_profiles = 2; + + // The Schema URL, if known. This is the identifier of the Schema that the resource data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_profiles" field which have their own schema_url field. + string schema_url = 3; +} + +// A collection of ProfileContainers produced by an InstrumentationScope. +message ScopeProfiles { + // The instrumentation scope information for the profiles in this message. 
+  // Semantically when InstrumentationScope isn't set, it is equivalent to
+  // an empty instrumentation scope name (unknown).
+  tempopb.common.v1.InstrumentationScope scope = 1;
+
+  // A list of ProfileContainers that originate from an instrumentation scope.
+  repeated ProfileContainer profiles = 2;
+
+  // The Schema URL, if known. This is the identifier of the Schema that the profile data
+  // is recorded in. To learn more about Schema URL see
+  // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
+  // This schema_url applies to all profiles in the "profiles" field.
+  string schema_url = 3;
+}
+
+// A ProfileContainer represents a single profile. It wraps a pprof profile with OpenTelemetry-specific metadata.
+message ProfileContainer {
+  // A globally unique identifier for a profile. The ID is a 16-byte array. An ID with
+  // all zeroes is considered invalid.
+  //
+  // This field is required.
+  bytes profile_id = 1;
+
+  // start_time_unix_nano is the start time of the profile.
+  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+  //
+  // This field is semantically required and it is expected that end_time >= start_time.
+  fixed64 start_time_unix_nano = 2;
+
+  // end_time_unix_nano is the end time of the profile.
+  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+  //
+  // This field is semantically required and it is expected that end_time >= start_time.
+  fixed64 end_time_unix_nano = 3;
+
+  // attributes is a collection of key/value pairs. Note, global attributes
+  // like server name can be set using the resource API. Examples of attributes:
+  //
+  //     "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
+  //     "/http/server_latency": 300
+  //     "abc.com/myattribute": true
+  //     "abc.com/score": 10.239
+  //
+  // The OpenTelemetry API specification further restricts the allowed value types:
+  // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute
+  // Attribute keys MUST be unique (it is not allowed to have more than one
+  // attribute with the same key).
+  repeated tempopb.common.v1.KeyValue attributes = 4;
+
+  // dropped_attributes_count is the number of attributes that were discarded. Attributes
+  // can be discarded because their keys are too long or because there are too many
+  // attributes. If this value is 0, then no attributes were dropped.
+  uint32 dropped_attributes_count = 5;
+
+  // Specifies format of the original payload. Common values are defined in semantic conventions. [required if original_payload is present]
+  string original_payload_format = 6;
+
+  // Original payload can be stored in this field. This can be useful for users who want to get the original payload.
+  // Formats such as JFR are highly extensible and can contain more information than what is defined in this spec.
+  // Inclusion of original payload should be configurable by the user. Default behavior should be to not include the original payload.
+  // If the original payload is in pprof format, it SHOULD not be included in this field.
+  // The field is optional; however, if it is present, `profile` MUST be present and contain the same profiling information.
+  bytes original_payload = 7;
+
+  // This is a reference to a pprof profile. Required, even when original_payload is present.
+ tempopb.profiles.v1experimental.Profile profile = 8; +} diff --git a/pkg/.patched-proto/resource/v1/resource.proto b/pkg/.patched-proto/resource/v1/resource.proto new file mode 100644 index 00000000000..dd91f48a185 --- /dev/null +++ b/pkg/.patched-proto/resource/v1/resource.proto @@ -0,0 +1,37 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package tempopb.resource.v1; + +import "common/v1/common.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Resource.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.resource.v1"; +option java_outer_classname = "ResourceProto"; +option go_package = "github.com/grafana/tempo/pkg/tempopb/resource/v1"; + +// Resource information. +message Resource { + // Set of attributes that describe the resource. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated tempopb.common.v1.KeyValue attributes = 1; + + // dropped_attributes_count is the number of dropped attributes. If the value is 0, then + // no attributes were dropped. + uint32 dropped_attributes_count = 2; +} diff --git a/pkg/.patched-proto/trace/v1/trace.proto b/pkg/.patched-proto/trace/v1/trace.proto new file mode 100644 index 00000000000..1274443c1f0 --- /dev/null +++ b/pkg/.patched-proto/trace/v1/trace.proto @@ -0,0 +1,355 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package tempopb.trace.v1; + +import "common/v1/common.proto"; +import "resource/v1/resource.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Trace.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.trace.v1"; +option java_outer_classname = "TraceProto"; +option go_package = "github.com/grafana/tempo/pkg/tempopb/trace/v1"; + +// TracesData represents the traces data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP traces data but do +// not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +message TracesData { + // An array of ResourceSpans. 
+  // For data coming from a single resource this array will typically contain
+  // one element. Intermediary nodes that receive data from multiple origins
+  // typically batch the data before forwarding further and in that case this
+  // array will contain multiple elements.
+  repeated ResourceSpans resource_spans = 1;
+}
+
+// A collection of ScopeSpans from a Resource.
+message ResourceSpans {
+  reserved 1000;
+
+  // The resource for the spans in this message.
+  // If this field is not set then no resource info is known.
+  tempopb.resource.v1.Resource resource = 1;
+
+  // A list of ScopeSpans that originate from a resource.
+  repeated ScopeSpans scope_spans = 2;
+
+  // The Schema URL, if known. This is the identifier of the Schema that the resource data
+  // is recorded in. To learn more about Schema URL see
+  // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
+  // This schema_url applies to the data in the "resource" field. It does not apply
+  // to the data in the "scope_spans" field which have their own schema_url field.
+  string schema_url = 3;
+}
+
+// A collection of Spans produced by an InstrumentationScope.
+message ScopeSpans {
+  // The instrumentation scope information for the spans in this message.
+  // Semantically when InstrumentationScope isn't set, it is equivalent to
+  // an empty instrumentation scope name (unknown).
+  tempopb.common.v1.InstrumentationScope scope = 1;
+
+  // A list of Spans that originate from an instrumentation scope.
+  repeated Span spans = 2;
+
+  // The Schema URL, if known. This is the identifier of the Schema that the span data
+  // is recorded in. To learn more about Schema URL see
+  // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
+  // This schema_url applies to all spans and span events in the "spans" field.
+  string schema_url = 3;
+}
+
+// A Span represents a single operation performed by a single component of the system.
+//
+// The next available field id is 17.
+message Span {
+  // A unique identifier for a trace. All spans from the same trace share
+  // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
+  // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
+  // is zero-length and thus is also invalid).
+  //
+  // This field is required.
+  bytes trace_id = 1;
+
+  // A unique identifier for a span within a trace, assigned when the span
+  // is created. The ID is an 8-byte array. An ID with all zeroes OR of length
+  // other than 8 bytes is considered invalid (empty string in OTLP/JSON
+  // is zero-length and thus is also invalid).
+  //
+  // This field is required.
+  bytes span_id = 2;
+
+  // trace_state conveys information about request position in multiple distributed tracing graphs.
+  // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header
+  // See also https://github.com/w3c/distributed-tracing for more details about this field.
+  string trace_state = 3;
+
+  // The `span_id` of this span's parent span. If this is a root span, then this
+  // field must be empty. The ID is an 8-byte array.
+  bytes parent_span_id = 4;
+
+  // Flags, a bit field.
+  //
+  // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
+  // Context specification. To read the 8-bit W3C trace flag, use
+  // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+  //
+  // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
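+  //
+  // For example (an illustrative Go-style sketch, not part of the
+  // specification text), the W3C "sampled" flag is bit 0 of the trace
+  // flags and could be read as:
+  //
+  //   sampled := (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) & 0x01 != 0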
+ // + // Bits 8 and 9 represent the 3 states of whether a span's parent + // is remote. The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // When creating span messages, if the message is logically forwarded from another source + // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD + // be copied as-is. If creating from a source that does not have an equivalent flags field + // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST + // be set to zero. + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // + // [Optional]. + fixed32 flags = 16; + + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // Empty value is equivalent to an unknown span name. + // + // This field is required. + string name = 5; + + // SpanKind is the type of span. Can be used to specify additional relationships between spans + // in addition to a parent/child relationship. + enum SpanKind { + // Unspecified. Do NOT use as default. + // Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. + SPAN_KIND_UNSPECIFIED = 0; + + // Indicates that the span represents an internal operation within an application, + // as opposed to an operation happening at the boundaries. Default value. + SPAN_KIND_INTERNAL = 1; + + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + SPAN_KIND_SERVER = 2; + + // Indicates that the span describes a request to some remote service. + SPAN_KIND_CLIENT = 3; + + // Indicates that the span describes a producer sending a message to a broker. + // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship + // between producer and consumer spans. A PRODUCER span ends when the message was accepted + // by the broker while the logical processing of the message might span a much longer time. + SPAN_KIND_PRODUCER = 4; + + // Indicates that the span describes consumer receiving a message from a broker. + // Like the PRODUCER kind, there is often no direct critical path latency relationship + // between producer and consumer spans. + SPAN_KIND_CONSUMER = 5; + } + + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + SpanKind kind = 6; + + // start_time_unix_nano is the start time of the span. On the client side, this is the time + // kept by the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + fixed64 start_time_unix_nano = 7; + + // end_time_unix_nano is the end time of the span. 
On the client side, this is the time + // kept by the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + fixed64 end_time_unix_nano = 8; + + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "example.com/myattribute": true + // "example.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated tempopb.common.v1.KeyValue attributes = 9; + + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + uint32 dropped_attributes_count = 10; + + // Event is a time-stamped annotation of the span, consisting of user-supplied + // text description and key-value pairs. + message Event { + // time_unix_nano is the time the event occurred. + fixed64 time_unix_nano = 1; + + // name of the event. + // This field is semantically required to be set to non-empty string. + string name = 2; + + // attributes is a collection of attribute key/value pairs on the event. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated tempopb.common.v1.KeyValue attributes = 3; + + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + uint32 dropped_attributes_count = 4; + } + + // events is a collection of Event items. + repeated Event events = 11; + + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. + uint32 dropped_events_count = 12; + + // A pointer from the current span to another span in the same trace or in a + // different trace. For example, this can be used in batching operations, + // where a single batch handler processes multiple requests from different + // traces or when the handler receives a request from a different project. + message Link { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + bytes trace_id = 1; + + // A unique identifier for the linked span. The ID is an 8-byte array. + bytes span_id = 2; + + // The trace_state associated with the link. + string trace_state = 3; + + // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated tempopb.common.v1.KeyValue attributes = 4; + + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + uint32 dropped_attributes_count = 5; + + // Flags, a bit field. 
+ // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether the link is remote. + // The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. + // + // [Optional]. + fixed32 flags = 6; + } + + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + repeated Link links = 13; + + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. If this value is 0, then no links were dropped. + uint32 dropped_links_count = 14; + + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + Status status = 15; +} + +// The Status type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +message Status { + reserved 1; + + // A developer-facing human readable error message. + string message = 2; + + // For the semantics of status codes see + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status + enum StatusCode { + // The default status. + STATUS_CODE_UNSET = 0; + // The Span has been validated by an Application developer or Operator to + // have completed successfully. + STATUS_CODE_OK = 1; + // The Span contains an error. + STATUS_CODE_ERROR = 2; + }; + + // The status code. + StatusCode code = 3; +} + +// SpanFlags represents constants used to interpret the +// Span.flags field, which is protobuf 'fixed32' type and is to +// be used as bit-fields. Each non-zero value defined in this enum is +// a bit-mask. To extract the bit-field, for example, use an +// expression like: +// +// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) +// +// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. +// +// Note that Span flags were introduced in version 1.1 of the +// OpenTelemetry protocol. Older Span producers do not set this +// field, consequently consumers should not rely on the absence of a +// particular flag bit to indicate the presence of a particular feature. +enum SpanFlags { + // The zero value for the enum. Should not be used for comparisons. + // Instead use bitwise "and" with the appropriate mask as shown above. + SPAN_FLAGS_DO_NOT_USE = 0; + + // Bits 0-7 are used for trace flags. + SPAN_FLAGS_TRACE_FLAGS_MASK = 0x000000FF; + + // Bits 8 and 9 are used to indicate that the parent span or link span is remote. + // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. + // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK = 0x00000100; + SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK = 0x00000200; + + // Bits 10-31 are reserved for future use. 
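+
+  // For example (an illustrative Go-style sketch, not part of the
+  // specification text), a consumer could decide whether a span's parent
+  // is known to be remote by combining the two masks:
+  //
+  //   isKnown  := (span.flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0
+  //   isRemote := isKnown && (span.flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0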
+} diff --git a/vendor/github.com/VividCortex/gohistogram/.gitignore b/vendor/github.com/VividCortex/gohistogram/.gitignore deleted file mode 100644 index 4c51178c9a2..00000000000 --- a/vendor/github.com/VividCortex/gohistogram/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -\#* -.\#* \ No newline at end of file diff --git a/vendor/github.com/VividCortex/gohistogram/LICENSE b/vendor/github.com/VividCortex/gohistogram/LICENSE deleted file mode 100644 index d23fea36567..00000000000 --- a/vendor/github.com/VividCortex/gohistogram/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2013 VividCortex - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/VividCortex/gohistogram/README.md b/vendor/github.com/VividCortex/gohistogram/README.md deleted file mode 100644 index eeb14d36664..00000000000 --- a/vendor/github.com/VividCortex/gohistogram/README.md +++ /dev/null @@ -1,80 +0,0 @@ -# gohistogram - Histograms in Go - -![build status](https://circleci.com/gh/VividCortex/gohistogram.png?circle-token=d37ec652ea117165cd1b342400a801438f575209) - -This package provides [Streaming Approximate Histograms](https://vividcortex.com/blog/2013/07/08/streaming-approximate-histograms/) -for efficient quantile approximations. - -The histograms in this package are based on the algorithms found in -Ben-Haim & Yom-Tov's *A Streaming Parallel Decision Tree Algorithm* -([PDF](http://jmlr.org/papers/volume11/ben-haim10a/ben-haim10a.pdf)). -Histogram bins do not have a preset size. As values stream into -the histogram, bins are dynamically added and merged. - -Another implementation can be found in the Apache Hive project (see -[NumericHistogram](http://hive.apache.org/docs/r0.11.0/api/org/apache/hadoop/hive/ql/udf/generic/NumericHistogram.html)). - -An example: - -![histogram](http://i.imgur.com/5OplaRs.png) - -The accurate method of calculating quantiles (like percentiles) requires -data to be sorted. Streaming histograms make it possible to approximate -quantiles without sorting (or even individually storing) values. - -NumericHistogram is the more basic implementation of a streaming -histogram. WeightedHistogram implements bin values as exponentially-weighted -moving averages. - -A maximum bin size is passed as an argument to the constructor methods. A -larger bin size yields more accurate approximations at the cost of increased -memory utilization and performance. 
- -A picture of kittens: - -![stack of kittens](http://i.imgur.com/QxRTWAE.jpg) - -## Getting started - -### Using in your own code - - $ go get github.com/VividCortex/gohistogram - -```go -import "github.com/VividCortex/gohistogram" -``` - -### Running tests and making modifications - -Get the code into your workspace: - - $ cd $GOPATH - $ git clone git@github.com:VividCortex/gohistogram.git ./src/github.com/VividCortex/gohistogram - -You can run the tests now: - - $ cd src/github.com/VividCortex/gohistogram - $ go test . - -## API Documentation - -Full source documentation can be found [here][godoc]. - -[godoc]: http://godoc.org/github.com/VividCortex/gohistogram - -## Contributing - -We only accept pull requests for minor fixes or improvements. This includes: - -* Small bug fixes -* Typos -* Documentation or comments - -Please open issues to discuss new features. Pull requests for new features will be rejected, -so we recommend forking the repository and making changes in your fork for your use case. - -## License - -Copyright (c) 2013 VividCortex - -Released under MIT License. Check `LICENSE` file for details. diff --git a/vendor/github.com/VividCortex/gohistogram/histogram.go b/vendor/github.com/VividCortex/gohistogram/histogram.go deleted file mode 100644 index ede21fd311b..00000000000 --- a/vendor/github.com/VividCortex/gohistogram/histogram.go +++ /dev/null @@ -1,23 +0,0 @@ -package gohistogram - -// Copyright (c) 2013 VividCortex, Inc. All rights reserved. -// Please see the LICENSE file for applicable license terms. - -// Histogram is the interface that wraps the Add and Quantile methods. -type Histogram interface { - // Add adds a new value, n, to the histogram. Trimming is done - // automatically. - Add(n float64) - - // Quantile returns an approximation. - Quantile(n float64) (q float64) - - // String returns a string reprentation of the histogram, - // which is useful for printing to a terminal. - String() (str string) -} - -type bin struct { - value float64 - count float64 -} diff --git a/vendor/github.com/VividCortex/gohistogram/numerichistogram.go b/vendor/github.com/VividCortex/gohistogram/numerichistogram.go deleted file mode 100644 index 20dea740d19..00000000000 --- a/vendor/github.com/VividCortex/gohistogram/numerichistogram.go +++ /dev/null @@ -1,160 +0,0 @@ -package gohistogram - -// Copyright (c) 2013 VividCortex, Inc. All rights reserved. -// Please see the LICENSE file for applicable license terms. - -import ( - "fmt" -) - -type NumericHistogram struct { - bins []bin - maxbins int - total uint64 -} - -// NewHistogram returns a new NumericHistogram with a maximum of n bins. -// -// There is no "optimal" bin count, but somewhere between 20 and 80 bins -// should be sufficient. -func NewHistogram(n int) *NumericHistogram { - return &NumericHistogram{ - bins: make([]bin, 0), - maxbins: n, - total: 0, - } -} - -func (h *NumericHistogram) Add(n float64) { - defer h.trim() - h.total++ - for i := range h.bins { - if h.bins[i].value == n { - h.bins[i].count++ - return - } - - if h.bins[i].value > n { - - newbin := bin{value: n, count: 1} - head := append(make([]bin, 0), h.bins[0:i]...) - - head = append(head, newbin) - tail := h.bins[i:] - h.bins = append(head, tail...) 
- return - } - } - - h.bins = append(h.bins, bin{count: 1, value: n}) -} - -func (h *NumericHistogram) Quantile(q float64) float64 { - count := q * float64(h.total) - for i := range h.bins { - count -= float64(h.bins[i].count) - - if count <= 0 { - return h.bins[i].value - } - } - - return -1 -} - -// CDF returns the value of the cumulative distribution function -// at x -func (h *NumericHistogram) CDF(x float64) float64 { - count := 0.0 - for i := range h.bins { - if h.bins[i].value <= x { - count += float64(h.bins[i].count) - } - } - - return count / float64(h.total) -} - -// Mean returns the sample mean of the distribution -func (h *NumericHistogram) Mean() float64 { - if h.total == 0 { - return 0 - } - - sum := 0.0 - - for i := range h.bins { - sum += h.bins[i].value * h.bins[i].count - } - - return sum / float64(h.total) -} - -// Variance returns the variance of the distribution -func (h *NumericHistogram) Variance() float64 { - if h.total == 0 { - return 0 - } - - sum := 0.0 - mean := h.Mean() - - for i := range h.bins { - sum += (h.bins[i].count * (h.bins[i].value - mean) * (h.bins[i].value - mean)) - } - - return sum / float64(h.total) -} - -func (h *NumericHistogram) Count() float64 { - return float64(h.total) -} - -// trim merges adjacent bins to decrease the bin count to the maximum value -func (h *NumericHistogram) trim() { - for len(h.bins) > h.maxbins { - // Find closest bins in terms of value - minDelta := 1e99 - minDeltaIndex := 0 - for i := range h.bins { - if i == 0 { - continue - } - - if delta := h.bins[i].value - h.bins[i-1].value; delta < minDelta { - minDelta = delta - minDeltaIndex = i - } - } - - // We need to merge bins minDeltaIndex-1 and minDeltaIndex - totalCount := h.bins[minDeltaIndex-1].count + h.bins[minDeltaIndex].count - mergedbin := bin{ - value: (h.bins[minDeltaIndex-1].value* - h.bins[minDeltaIndex-1].count + - h.bins[minDeltaIndex].value* - h.bins[minDeltaIndex].count) / - totalCount, // weighted average - count: totalCount, // summed heights - } - head := append(make([]bin, 0), h.bins[0:minDeltaIndex-1]...) - tail := append([]bin{mergedbin}, h.bins[minDeltaIndex+1:]...) - h.bins = append(head, tail...) - } -} - -// String returns a string reprentation of the histogram, -// which is useful for printing to a terminal. -func (h *NumericHistogram) String() (str string) { - str += fmt.Sprintln("Total:", h.total) - - for i := range h.bins { - var bar string - for j := 0; j < int(float64(h.bins[i].count)/float64(h.total)*200); j++ { - bar += "." - } - str += fmt.Sprintln(h.bins[i].value, "\t", bar) - } - - return -} diff --git a/vendor/github.com/VividCortex/gohistogram/weightedhistogram.go b/vendor/github.com/VividCortex/gohistogram/weightedhistogram.go deleted file mode 100644 index 16eed37193b..00000000000 --- a/vendor/github.com/VividCortex/gohistogram/weightedhistogram.go +++ /dev/null @@ -1,190 +0,0 @@ -// Package gohistogram contains implementations of weighted and exponential histograms. -package gohistogram - -// Copyright (c) 2013 VividCortex, Inc. All rights reserved. -// Please see the LICENSE file for applicable license terms. - -import "fmt" - -// A WeightedHistogram implements Histogram. A WeightedHistogram has bins that have values -// which are exponentially weighted moving averages. This allows you keep inserting large -// amounts of data into the histogram and approximate quantiles with recency factored in. 
-type WeightedHistogram struct { - bins []bin - maxbins int - total float64 - alpha float64 -} - -// NewWeightedHistogram returns a new WeightedHistogram with a maximum of n bins with a decay factor -// of alpha. -// -// There is no "optimal" bin count, but somewhere between 20 and 80 bins should be -// sufficient. -// -// Alpha should be set to 2 / (N+1), where N represents the average age of the moving window. -// For example, a 60-second window with an average age of 30 seconds would yield an -// alpha of 0.064516129. -func NewWeightedHistogram(n int, alpha float64) *WeightedHistogram { - return &WeightedHistogram{ - bins: make([]bin, 0), - maxbins: n, - total: 0, - alpha: alpha, - } -} - -func ewma(existingVal float64, newVal float64, alpha float64) (result float64) { - result = newVal*(1-alpha) + existingVal*alpha - return -} - -func (h *WeightedHistogram) scaleDown(except int) { - for i := range h.bins { - if i != except { - h.bins[i].count = ewma(h.bins[i].count, 0, h.alpha) - } - } -} - -func (h *WeightedHistogram) Add(n float64) { - defer h.trim() - for i := range h.bins { - if h.bins[i].value == n { - h.bins[i].count++ - - defer h.scaleDown(i) - return - } - - if h.bins[i].value > n { - - newbin := bin{value: n, count: 1} - head := append(make([]bin, 0), h.bins[0:i]...) - - head = append(head, newbin) - tail := h.bins[i:] - h.bins = append(head, tail...) - - defer h.scaleDown(i) - return - } - } - - h.bins = append(h.bins, bin{count: 1, value: n}) -} - -func (h *WeightedHistogram) Quantile(q float64) float64 { - count := q * h.total - for i := range h.bins { - count -= float64(h.bins[i].count) - - if count <= 0 { - return h.bins[i].value - } - } - - return -1 -} - -// CDF returns the value of the cumulative distribution function -// at x -func (h *WeightedHistogram) CDF(x float64) float64 { - count := 0.0 - for i := range h.bins { - if h.bins[i].value <= x { - count += float64(h.bins[i].count) - } - } - - return count / h.total -} - -// Mean returns the sample mean of the distribution -func (h *WeightedHistogram) Mean() float64 { - if h.total == 0 { - return 0 - } - - sum := 0.0 - - for i := range h.bins { - sum += h.bins[i].value * h.bins[i].count - } - - return sum / h.total -} - -// Variance returns the variance of the distribution -func (h *WeightedHistogram) Variance() float64 { - if h.total == 0 { - return 0 - } - - sum := 0.0 - mean := h.Mean() - - for i := range h.bins { - sum += (h.bins[i].count * (h.bins[i].value - mean) * (h.bins[i].value - mean)) - } - - return sum / h.total -} - -func (h *WeightedHistogram) Count() float64 { - return h.total -} - -func (h *WeightedHistogram) trim() { - total := 0.0 - for i := range h.bins { - total += h.bins[i].count - } - h.total = total - for len(h.bins) > h.maxbins { - - // Find closest bins in terms of value - minDelta := 1e99 - minDeltaIndex := 0 - for i := range h.bins { - if i == 0 { - continue - } - - if delta := h.bins[i].value - h.bins[i-1].value; delta < minDelta { - minDelta = delta - minDeltaIndex = i - } - } - - // We need to merge bins minDeltaIndex-1 and minDeltaIndex - totalCount := h.bins[minDeltaIndex-1].count + h.bins[minDeltaIndex].count - mergedbin := bin{ - value: (h.bins[minDeltaIndex-1].value* - h.bins[minDeltaIndex-1].count + - h.bins[minDeltaIndex].value* - h.bins[minDeltaIndex].count) / - totalCount, // weighted average - count: totalCount, // summed heights - } - head := append(make([]bin, 0), h.bins[0:minDeltaIndex-1]...) - tail := append([]bin{mergedbin}, h.bins[minDeltaIndex+1:]...) 
- h.bins = append(head, tail...) - } -} - -// String returns a string reprentation of the histogram, -// which is useful for printing to a terminal. -func (h *WeightedHistogram) String() (str string) { - str += fmt.Sprintln("Total:", h.total) - - for i := range h.bins { - var bar string - for j := 0; j < int(float64(h.bins[i].count)/float64(h.total)*200); j++ { - bar += "." - } - str += fmt.Sprintln(h.bins[i].value, "\t", bar) - } - - return -} diff --git a/vendor/github.com/go-kit/kit/LICENSE b/vendor/github.com/go-kit/kit/LICENSE deleted file mode 100644 index 9d83342acdc..00000000000 --- a/vendor/github.com/go-kit/kit/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Peter Bourgon - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/go-kit/kit/metrics/README.md b/vendor/github.com/go-kit/kit/metrics/README.md deleted file mode 100644 index 5aa791a750d..00000000000 --- a/vendor/github.com/go-kit/kit/metrics/README.md +++ /dev/null @@ -1,98 +0,0 @@ -# package metrics - -`package metrics` provides a set of uniform interfaces for service instrumentation. -It has - [counters](http://prometheus.io/docs/concepts/metric_types/#counter), - [gauges](http://prometheus.io/docs/concepts/metric_types/#gauge), and - [histograms](http://prometheus.io/docs/concepts/metric_types/#histogram), -and provides adapters to popular metrics packages, like - [expvar](https://golang.org/pkg/expvar), - [StatsD](https://github.com/etsy/statsd), and - [Prometheus](https://prometheus.io). - -## Rationale - -Code instrumentation is absolutely essential to achieve - [observability](https://speakerdeck.com/mattheath/observability-in-micro-service-architectures) - into a distributed system. -Metrics and instrumentation tools have coalesced around a few well-defined idioms. -`package metrics` provides a common, minimal interface those idioms for service authors. - -## Usage - -A simple counter, exported via expvar. - -```go -import ( - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/expvar" -) - -func main() { - var myCount metrics.Counter - myCount = expvar.NewCounter("my_count") - myCount.Add(1) -} -``` - -A histogram for request duration, - exported via a Prometheus summary with dynamically-computed quantiles. 
- -```go -import ( - "time" - - stdprometheus "github.com/prometheus/client_golang/prometheus" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/prometheus" -) - -func main() { - var dur metrics.Histogram = prometheus.NewSummaryFrom(stdprometheus.SummaryOpts{ - Namespace: "myservice", - Subsystem: "api", - Name: "request_duration_seconds", - Help: "Total time spent serving requests.", - }, []string{}) - // ... -} - -func handleRequest(dur metrics.Histogram) { - defer func(begin time.Time) { dur.Observe(time.Since(begin).Seconds()) }(time.Now()) - // handle request -} -``` - -A gauge for the number of goroutines currently running, exported via StatsD. - -```go -import ( - "context" - "net" - "os" - "runtime" - "time" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/statsd" -) - -func main() { - statsd := statsd.New("foo_svc.", log.NewNopLogger()) - report := time.NewTicker(5 * time.Second) - defer report.Stop() - go statsd.SendLoop(context.Background(), report.C, "tcp", "statsd.internal:8125") - goroutines := statsd.NewGauge("goroutine_count") - go exportGoroutines(goroutines) - // ... -} - -func exportGoroutines(g metrics.Gauge) { - for range time.Tick(time.Second) { - g.Set(float64(runtime.NumGoroutine())) - } -} -``` - -For more information, see [the package documentation](https://godoc.org/github.com/go-kit/kit/metrics). diff --git a/vendor/github.com/go-kit/kit/metrics/doc.go b/vendor/github.com/go-kit/kit/metrics/doc.go deleted file mode 100644 index 25cda4f7c81..00000000000 --- a/vendor/github.com/go-kit/kit/metrics/doc.go +++ /dev/null @@ -1,97 +0,0 @@ -// Package metrics provides a framework for application instrumentation. It's -// primarily designed to help you get started with good and robust -// instrumentation, and to help you migrate from a less-capable system like -// Graphite to a more-capable system like Prometheus. If your organization has -// already standardized on an instrumentation system like Prometheus, and has no -// plans to change, it may make sense to use that system's instrumentation -// library directly. -// -// This package provides three core metric abstractions (Counter, Gauge, and -// Histogram) and implementations for almost all common instrumentation -// backends. Each metric has an observation method (Add, Set, or Observe, -// respectively) used to record values, and a With method to "scope" the -// observation by various parameters. For example, you might have a Histogram to -// record request durations, parameterized by the method that's being called. -// -// var requestDuration metrics.Histogram -// // ... -// requestDuration.With("method", "MyMethod").Observe(time.Since(begin)) -// -// This allows a single high-level metrics object (requestDuration) to work with -// many code paths somewhat dynamically. The concept of With is fully supported -// in some backends like Prometheus, and not supported in other backends like -// Graphite. So, With may be a no-op, depending on the concrete implementation -// you choose. Please check the implementation to know for sure. For -// implementations that don't provide With, it's necessary to fully parameterize -// each metric in the metric name, e.g. -// -// // Statsd -// c := statsd.NewCounter("request_duration_MyMethod_200") -// c.Add(1) -// -// // Prometheus -// c := prometheus.NewCounter(stdprometheus.CounterOpts{ -// Name: "request_duration", -// ... 
-// }, []string{"method", "status_code"}) -// c.With("method", "MyMethod", "status_code", strconv.Itoa(code)).Add(1) -// -// Usage -// -// Metrics are dependencies, and should be passed to the components that need -// them in the same way you'd construct and pass a database handle, or reference -// to another component. Metrics should *not* be created in the global scope. -// Instead, instantiate metrics in your func main, using whichever concrete -// implementation is appropriate for your organization. -// -// latency := prometheus.NewSummaryFrom(stdprometheus.SummaryOpts{ -// Namespace: "myteam", -// Subsystem: "foosvc", -// Name: "request_latency_seconds", -// Help: "Incoming request latency in seconds.", -// }, []string{"method", "status_code"}) -// -// Write your components to take the metrics they will use as parameters to -// their constructors. Use the interface types, not the concrete types. That is, -// -// // NewAPI takes metrics.Histogram, not *prometheus.Summary -// func NewAPI(s Store, logger log.Logger, latency metrics.Histogram) *API { -// // ... -// } -// -// func (a *API) ServeFoo(w http.ResponseWriter, r *http.Request) { -// begin := time.Now() -// // ... -// a.latency.Observe(time.Since(begin).Seconds()) -// } -// -// Finally, pass the metrics as dependencies when building your object graph. -// This should happen in func main, not in the global scope. -// -// api := NewAPI(store, logger, latency) -// http.ListenAndServe("/", api) -// -// Note that metrics are "write-only" interfaces. -// -// Implementation details -// -// All metrics are safe for concurrent use. Considerable design influence has -// been taken from https://github.com/codahale/metrics and -// https://prometheus.io. -// -// Each telemetry system has different semantics for label values, push vs. -// pull, support for histograms, etc. These properties influence the design of -// their respective packages. This table attempts to summarize the key points of -// distinction. -// -// SYSTEM DIM COUNTERS GAUGES HISTOGRAMS -// dogstatsd n batch, push-aggregate batch, push-aggregate native, batch, push-each -// statsd 1 batch, push-aggregate batch, push-aggregate native, batch, push-each -// graphite 1 batch, push-aggregate batch, push-aggregate synthetic, batch, push-aggregate -// expvar 1 atomic atomic synthetic, batch, in-place expose -// influx n custom custom custom -// prometheus n native native native -// pcp 1 native native native -// cloudwatch n batch push-aggregate batch push-aggregate synthetic, batch, push-aggregate -// -package metrics diff --git a/vendor/github.com/go-kit/kit/metrics/expvar/expvar.go b/vendor/github.com/go-kit/kit/metrics/expvar/expvar.go deleted file mode 100644 index ce6f3b836a0..00000000000 --- a/vendor/github.com/go-kit/kit/metrics/expvar/expvar.go +++ /dev/null @@ -1,94 +0,0 @@ -// Package expvar provides expvar backends for metrics. -// Label values are not supported. -package expvar - -import ( - "expvar" - "sync" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/generic" -) - -// Counter implements the counter metric with an expvar float. -// Label values are not supported. -type Counter struct { - f *expvar.Float -} - -// NewCounter creates an expvar Float with the given name, and returns an object -// that implements the Counter interface. -func NewCounter(name string) *Counter { - return &Counter{ - f: expvar.NewFloat(name), - } -} - -// With is a no-op. 
-func (c *Counter) With(labelValues ...string) metrics.Counter { return c } - -// Add implements Counter. -func (c *Counter) Add(delta float64) { c.f.Add(delta) } - -// Gauge implements the gauge metric with an expvar float. -// Label values are not supported. -type Gauge struct { - f *expvar.Float -} - -// NewGauge creates an expvar Float with the given name, and returns an object -// that implements the Gauge interface. -func NewGauge(name string) *Gauge { - return &Gauge{ - f: expvar.NewFloat(name), - } -} - -// With is a no-op. -func (g *Gauge) With(labelValues ...string) metrics.Gauge { return g } - -// Set implements Gauge. -func (g *Gauge) Set(value float64) { g.f.Set(value) } - -// Add implements metrics.Gauge. -func (g *Gauge) Add(delta float64) { g.f.Add(delta) } - -// Histogram implements the histogram metric with a combination of the generic -// Histogram object and several expvar Floats, one for each of the 50th, 90th, -// 95th, and 99th quantiles of observed values, with the quantile attached to -// the name as a suffix. Label values are not supported. -type Histogram struct { - mtx sync.Mutex - h *generic.Histogram - p50 *expvar.Float - p90 *expvar.Float - p95 *expvar.Float - p99 *expvar.Float -} - -// NewHistogram returns a Histogram object with the given name and number of -// buckets in the underlying histogram object. 50 is a good default number of -// buckets. -func NewHistogram(name string, buckets int) *Histogram { - return &Histogram{ - h: generic.NewHistogram(name, buckets), - p50: expvar.NewFloat(name + ".p50"), - p90: expvar.NewFloat(name + ".p90"), - p95: expvar.NewFloat(name + ".p95"), - p99: expvar.NewFloat(name + ".p99"), - } -} - -// With is a no-op. -func (h *Histogram) With(labelValues ...string) metrics.Histogram { return h } - -// Observe implements Histogram. -func (h *Histogram) Observe(value float64) { - h.mtx.Lock() - defer h.mtx.Unlock() - h.h.Observe(value) - h.p50.Set(h.h.Quantile(0.50)) - h.p90.Set(h.h.Quantile(0.90)) - h.p95.Set(h.h.Quantile(0.95)) - h.p99.Set(h.h.Quantile(0.99)) -} diff --git a/vendor/github.com/go-kit/kit/metrics/generic/generic.go b/vendor/github.com/go-kit/kit/metrics/generic/generic.go deleted file mode 100644 index 39216c04f09..00000000000 --- a/vendor/github.com/go-kit/kit/metrics/generic/generic.go +++ /dev/null @@ -1,247 +0,0 @@ -// Package generic implements generic versions of each of the metric types. They -// can be embedded by other implementations, and converted to specific formats -// as necessary. -package generic - -import ( - "fmt" - "io" - "math" - "sync" - "sync/atomic" - - "github.com/VividCortex/gohistogram" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/internal/lv" -) - -// Counter is an in-memory implementation of a Counter. -type Counter struct { - bits uint64 // bits has to be the first word in order to be 64-aligned on 32-bit - Name string - lvs lv.LabelValues -} - -// NewCounter returns a new, usable Counter. -func NewCounter(name string) *Counter { - return &Counter{ - Name: name, - } -} - -// With implements Counter. -func (c *Counter) With(labelValues ...string) metrics.Counter { - return &Counter{ - Name: c.Name, - bits: atomic.LoadUint64(&c.bits), - lvs: c.lvs.With(labelValues...), - } -} - -// Add implements Counter. 
-func (c *Counter) Add(delta float64) { - for { - var ( - old = atomic.LoadUint64(&c.bits) - newf = math.Float64frombits(old) + delta - new = math.Float64bits(newf) - ) - if atomic.CompareAndSwapUint64(&c.bits, old, new) { - break - } - } -} - -// Value returns the current value of the counter. -func (c *Counter) Value() float64 { - return math.Float64frombits(atomic.LoadUint64(&c.bits)) -} - -// ValueReset returns the current value of the counter, and resets it to zero. -// This is useful for metrics backends whose counter aggregations expect deltas, -// like Graphite. -func (c *Counter) ValueReset() float64 { - for { - var ( - old = atomic.LoadUint64(&c.bits) - newf = 0.0 - new = math.Float64bits(newf) - ) - if atomic.CompareAndSwapUint64(&c.bits, old, new) { - return math.Float64frombits(old) - } - } -} - -// LabelValues returns the set of label values attached to the counter. -func (c *Counter) LabelValues() []string { - return c.lvs -} - -// Gauge is an in-memory implementation of a Gauge. -type Gauge struct { - bits uint64 // bits has to be the first word in order to be 64-aligned on 32-bit - Name string - lvs lv.LabelValues -} - -// NewGauge returns a new, usable Gauge. -func NewGauge(name string) *Gauge { - return &Gauge{ - Name: name, - } -} - -// With implements Gauge. -func (g *Gauge) With(labelValues ...string) metrics.Gauge { - return &Gauge{ - Name: g.Name, - bits: atomic.LoadUint64(&g.bits), - lvs: g.lvs.With(labelValues...), - } -} - -// Set implements Gauge. -func (g *Gauge) Set(value float64) { - atomic.StoreUint64(&g.bits, math.Float64bits(value)) -} - -// Add implements metrics.Gauge. -func (g *Gauge) Add(delta float64) { - for { - var ( - old = atomic.LoadUint64(&g.bits) - newf = math.Float64frombits(old) + delta - new = math.Float64bits(newf) - ) - if atomic.CompareAndSwapUint64(&g.bits, old, new) { - break - } - } -} - -// Value returns the current value of the gauge. -func (g *Gauge) Value() float64 { - return math.Float64frombits(atomic.LoadUint64(&g.bits)) -} - -// LabelValues returns the set of label values attached to the gauge. -func (g *Gauge) LabelValues() []string { - return g.lvs -} - -// Histogram is an in-memory implementation of a streaming histogram, based on -// VividCortex/gohistogram. It dynamically computes quantiles, so it's not -// suitable for aggregation. -type Histogram struct { - Name string - lvs lv.LabelValues - h *safeHistogram -} - -// NewHistogram returns a numeric histogram based on VividCortex/gohistogram. A -// good default value for buckets is 50. -func NewHistogram(name string, buckets int) *Histogram { - return &Histogram{ - Name: name, - h: &safeHistogram{Histogram: gohistogram.NewHistogram(buckets)}, - } -} - -// With implements Histogram. -func (h *Histogram) With(labelValues ...string) metrics.Histogram { - return &Histogram{ - Name: h.Name, - lvs: h.lvs.With(labelValues...), - h: h.h, - } -} - -// Observe implements Histogram. -func (h *Histogram) Observe(value float64) { - h.h.Lock() - defer h.h.Unlock() - h.h.Add(value) -} - -// Quantile returns the value of the quantile q, 0.0 < q < 1.0. -func (h *Histogram) Quantile(q float64) float64 { - h.h.RLock() - defer h.h.RUnlock() - return h.h.Quantile(q) -} - -// LabelValues returns the set of label values attached to the histogram. -func (h *Histogram) LabelValues() []string { - return h.lvs -} - -// Print writes a string representation of the histogram to the passed writer. -// Useful for printing to a terminal. 
-func (h *Histogram) Print(w io.Writer) { - h.h.RLock() - defer h.h.RUnlock() - fmt.Fprint(w, h.h.String()) -} - -// safeHistogram exists as gohistogram.Histogram is not goroutine-safe. -type safeHistogram struct { - sync.RWMutex - gohistogram.Histogram -} - -// Bucket is a range in a histogram which aggregates observations. -type Bucket struct { - From, To, Count int64 -} - -// Quantile is a pair of a quantile (0..100) and its observed maximum value. -type Quantile struct { - Quantile int // 0..100 - Value int64 -} - -// SimpleHistogram is an in-memory implementation of a Histogram. It only tracks -// an approximate moving average, so is likely too naïve for many use cases. -type SimpleHistogram struct { - mtx sync.RWMutex - lvs lv.LabelValues - avg float64 - n uint64 -} - -// NewSimpleHistogram returns a SimpleHistogram, ready for observations. -func NewSimpleHistogram() *SimpleHistogram { - return &SimpleHistogram{} -} - -// With implements Histogram. -func (h *SimpleHistogram) With(labelValues ...string) metrics.Histogram { - return &SimpleHistogram{ - lvs: h.lvs.With(labelValues...), - avg: h.avg, - n: h.n, - } -} - -// Observe implements Histogram. -func (h *SimpleHistogram) Observe(value float64) { - h.mtx.Lock() - defer h.mtx.Unlock() - h.n++ - h.avg -= h.avg / float64(h.n) - h.avg += value / float64(h.n) -} - -// ApproximateMovingAverage returns the approximate moving average of observations. -func (h *SimpleHistogram) ApproximateMovingAverage() float64 { - h.mtx.RLock() - defer h.mtx.RUnlock() - return h.avg -} - -// LabelValues returns the set of label values attached to the histogram. -func (h *SimpleHistogram) LabelValues() []string { - return h.lvs -} diff --git a/vendor/github.com/go-kit/kit/metrics/internal/lv/labelvalues.go b/vendor/github.com/go-kit/kit/metrics/internal/lv/labelvalues.go deleted file mode 100644 index 8bb1ba09414..00000000000 --- a/vendor/github.com/go-kit/kit/metrics/internal/lv/labelvalues.go +++ /dev/null @@ -1,14 +0,0 @@ -package lv - -// LabelValues is a type alias that provides validation on its With method. -// Metrics may include it as a member to help them satisfy With semantics and -// save some code duplication. -type LabelValues []string - -// With validates the input, and returns a new aggregate labelValues. -func (lvs LabelValues) With(labelValues ...string) LabelValues { - if len(labelValues)%2 != 0 { - labelValues = append(labelValues, "unknown") - } - return append(lvs, labelValues...) -} diff --git a/vendor/github.com/go-kit/kit/metrics/internal/lv/space.go b/vendor/github.com/go-kit/kit/metrics/internal/lv/space.go deleted file mode 100644 index 371964a35ae..00000000000 --- a/vendor/github.com/go-kit/kit/metrics/internal/lv/space.go +++ /dev/null @@ -1,145 +0,0 @@ -package lv - -import "sync" - -// NewSpace returns an N-dimensional vector space. -func NewSpace() *Space { - return &Space{} -} - -// Space represents an N-dimensional vector space. Each name and unique label -// value pair establishes a new dimension and point within that dimension. Order -// matters, i.e. [a=1 b=2] identifies a different timeseries than [b=2 a=1]. -type Space struct { - mtx sync.RWMutex - nodes map[string]*node -} - -// Observe locates the time series identified by the name and label values in -// the vector space, and appends the value to the list of observations. 
-func (s *Space) Observe(name string, lvs LabelValues, value float64) { - s.nodeFor(name).observe(lvs, value) -} - -// Add locates the time series identified by the name and label values in -// the vector space, and appends the delta to the last value in the list of -// observations. -func (s *Space) Add(name string, lvs LabelValues, delta float64) { - s.nodeFor(name).add(lvs, delta) -} - -// Walk traverses the vector space and invokes fn for each non-empty time series -// which is encountered. Return false to abort the traversal. -func (s *Space) Walk(fn func(name string, lvs LabelValues, observations []float64) bool) { - s.mtx.RLock() - defer s.mtx.RUnlock() - for name, node := range s.nodes { - f := func(lvs LabelValues, observations []float64) bool { return fn(name, lvs, observations) } - if !node.walk(LabelValues{}, f) { - return - } - } -} - -// Reset empties the current space and returns a new Space with the old -// contents. Reset a Space to get an immutable copy suitable for walking. -func (s *Space) Reset() *Space { - s.mtx.Lock() - defer s.mtx.Unlock() - n := NewSpace() - n.nodes, s.nodes = s.nodes, n.nodes - return n -} - -func (s *Space) nodeFor(name string) *node { - s.mtx.Lock() - defer s.mtx.Unlock() - if s.nodes == nil { - s.nodes = map[string]*node{} - } - n, ok := s.nodes[name] - if !ok { - n = &node{} - s.nodes[name] = n - } - return n -} - -// node exists at a specific point in the N-dimensional vector space of all -// possible label values. The node collects observations and has child nodes -// with greater specificity. -type node struct { - mtx sync.RWMutex - observations []float64 - children map[pair]*node -} - -type pair struct{ label, value string } - -func (n *node) observe(lvs LabelValues, value float64) { - n.mtx.Lock() - defer n.mtx.Unlock() - if len(lvs) <= 0 { - n.observations = append(n.observations, value) - return - } - if len(lvs) < 2 { - panic("too few LabelValues; programmer error!") - } - head, tail := pair{lvs[0], lvs[1]}, lvs[2:] - if n.children == nil { - n.children = map[pair]*node{} - } - child, ok := n.children[head] - if !ok { - child = &node{} - n.children[head] = child - } - child.observe(tail, value) -} - -func (n *node) add(lvs LabelValues, delta float64) { - n.mtx.Lock() - defer n.mtx.Unlock() - if len(lvs) <= 0 { - var value float64 - if len(n.observations) > 0 { - value = last(n.observations) + delta - } else { - value = delta - } - n.observations = append(n.observations, value) - return - } - if len(lvs) < 2 { - panic("too few LabelValues; programmer error!") - } - head, tail := pair{lvs[0], lvs[1]}, lvs[2:] - if n.children == nil { - n.children = map[pair]*node{} - } - child, ok := n.children[head] - if !ok { - child = &node{} - n.children[head] = child - } - child.add(tail, delta) -} - -func (n *node) walk(lvs LabelValues, fn func(LabelValues, []float64) bool) bool { - n.mtx.RLock() - defer n.mtx.RUnlock() - if len(n.observations) > 0 && !fn(lvs, n.observations) { - return false - } - for p, child := range n.children { - if !child.walk(append(lvs, p.label, p.value), fn) { - return false - } - } - return true -} - -func last(a []float64) float64 { - return a[len(a)-1] -} diff --git a/vendor/github.com/go-kit/kit/metrics/metrics.go b/vendor/github.com/go-kit/kit/metrics/metrics.go deleted file mode 100644 index a7ba1b1fe3f..00000000000 --- a/vendor/github.com/go-kit/kit/metrics/metrics.go +++ /dev/null @@ -1,25 +0,0 @@ -package metrics - -// Counter describes a metric that accumulates values monotonically. 
-// An example of a counter is the number of received HTTP requests. -type Counter interface { - With(labelValues ...string) Counter - Add(delta float64) -} - -// Gauge describes a metric that takes specific values over time. -// An example of a gauge is the current depth of a job queue. -type Gauge interface { - With(labelValues ...string) Gauge - Set(value float64) - Add(delta float64) -} - -// Histogram describes a metric that takes repeated observations of the same -// kind of thing, and produces a statistical summary of those observations, -// typically expressed as quantiles or buckets. An example of a histogram is -// HTTP request latencies. -type Histogram interface { - With(labelValues ...string) Histogram - Observe(value float64) -} diff --git a/vendor/github.com/go-kit/kit/metrics/timer.go b/vendor/github.com/go-kit/kit/metrics/timer.go deleted file mode 100644 index e12d9cd5c49..00000000000 --- a/vendor/github.com/go-kit/kit/metrics/timer.go +++ /dev/null @@ -1,36 +0,0 @@ -package metrics - -import "time" - -// Timer acts as a stopwatch, sending observations to a wrapped histogram. -// It's a bit of helpful syntax sugar for h.Observe(time.Since(x)). -type Timer struct { - h Histogram - t time.Time - u time.Duration -} - -// NewTimer wraps the given histogram and records the current time. -func NewTimer(h Histogram) *Timer { - return &Timer{ - h: h, - t: time.Now(), - u: time.Second, - } -} - -// ObserveDuration captures the number of seconds since the timer was -// constructed, and forwards that observation to the histogram. -func (t *Timer) ObserveDuration() { - d := float64(time.Since(t.t).Nanoseconds()) / float64(t.u) - if d < 0 { - d = 0 - } - t.h.Observe(d) -} - -// Unit sets the unit of the float64 emitted by the timer. -// By default, the timer emits seconds. -func (t *Timer) Unit(u time.Duration) { - t.u = u -} diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index 8969526a6e5..7c7f0c69cd9 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -1,6 +1,7 @@ # A minimal logging API for Go [![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-logr/logr)](https://goreportcard.com/report/github.com/go-logr/logr) [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr) logr offers an(other) opinion on how Go programs and libraries can do logging diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go index fb2f866f4b7..30568e768dc 100644 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -236,15 +236,14 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { // implementation. It should be constructed with NewFormatter. Some of // its methods directly implement logr.LogSink. type Formatter struct { - outputFormat outputFormat - prefix string - values []any - valuesStr string - parentValuesStr string - depth int - opts *Options - group string // for slog groups - groupDepth int + outputFormat outputFormat + prefix string + values []any + valuesStr string + depth int + opts *Options + groupName string // for slog groups + groups []groupDef } // outputFormat indicates which outputFormat to use. 
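The hunks below replace the single saved-parent string and group-depth
counter with a stack of `groupDef` entries, so an empty group can be elided
when the record is finally rendered. A minimal sketch of the resulting
behavior, assuming logr v1.4+ where `logr.ToSlogHandler` is available:

```go
package main

import (
	"fmt"
	"log/slog"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func main() {
	// funcr.NewJSON emits each record as a single JSON object.
	base := funcr.NewJSON(func(obj string) { fmt.Println(obj) }, funcr.Options{})
	log := slog.New(logr.ToSlogHandler(base))

	// Each WithGroup call saves a groupDef; non-empty groups render as
	// nested JSON objects, while "auth" (which ends up with no keys of
	// its own) is elided instead of being printed as `"auth":{}`.
	log.WithGroup("req").With("method", "GET").WithGroup("auth").Info("handled")
}
```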
@@ -257,6 +256,13 @@ const ( outputJSON ) +// groupDef represents a saved group. The values may be empty, but we don't +// know if we need to render the group until the final record is rendered. +type groupDef struct { + name string + values string +} + // PseudoStruct is a list of key-value pairs that gets logged as a struct. type PseudoStruct []any @@ -264,76 +270,102 @@ type PseudoStruct []any func (f Formatter) render(builtins, args []any) string { // Empirically bytes.Buffer is faster than strings.Builder for this. buf := bytes.NewBuffer(make([]byte, 0, 1024)) + if f.outputFormat == outputJSON { - buf.WriteByte('{') // for the whole line + buf.WriteByte('{') // for the whole record } + // Render builtins vals := builtins if hook := f.opts.RenderBuiltinsHook; hook != nil { vals = hook(f.sanitize(vals)) } - f.flatten(buf, vals, false, false) // keys are ours, no need to escape + f.flatten(buf, vals, false) // keys are ours, no need to escape continuing := len(builtins) > 0 - if f.parentValuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) + // Turn the inner-most group into a string + argsStr := func() string { + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + + vals = args + if hook := f.opts.RenderArgsHook; hook != nil { + vals = hook(f.sanitize(vals)) } - buf.WriteString(f.parentValuesStr) - continuing = true - } + f.flatten(buf, vals, true) // escape user-provided keys - groupDepth := f.groupDepth - if f.group != "" { - if f.valuesStr != "" || len(args) != 0 { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys - buf.WriteByte(f.colon()) - buf.WriteByte('{') // for the group - continuing = false - } else { - // The group was empty - groupDepth-- + return buf.String() + }() + + // Render the stack of groups from the inside out. + bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr) + for i := len(f.groups) - 1; i >= 0; i-- { + grp := &f.groups[i] + if grp.values == "" && bodyStr == "" { + // no contents, so we must elide the whole group + continue } + bodyStr = f.renderGroup(grp.name, grp.values, bodyStr) } - if f.valuesStr != "" { + if bodyStr != "" { if continuing { buf.WriteByte(f.comma()) } - buf.WriteString(f.valuesStr) - continuing = true + buf.WriteString(bodyStr) } - vals = args - if hook := f.opts.RenderArgsHook; hook != nil { - vals = hook(f.sanitize(vals)) + if f.outputFormat == outputJSON { + buf.WriteByte('}') // for the whole record } - f.flatten(buf, vals, continuing, true) // escape user-provided keys - for i := 0; i < groupDepth; i++ { - buf.WriteByte('}') // for the groups + return buf.String() +} + +// renderGroup returns a string representation of the named group with rendered +// values and args. If the name is empty, this will return the values and args, +// joined. If the name is not empty, this will return a single key-value pair, +// where the value is a grouping of the values and args. If the values and +// args are both empty, this will return an empty string, even if the name was +// specified. 
+func (f Formatter) renderGroup(name string, values string, args string) string { + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + + needClosingBrace := false + if name != "" && (values != "" || args != "") { + buf.WriteString(f.quoted(name, true)) // escape user-provided keys + buf.WriteByte(f.colon()) + buf.WriteByte('{') + needClosingBrace = true } - if f.outputFormat == outputJSON { - buf.WriteByte('}') // for the whole line + continuing := false + if values != "" { + buf.WriteString(values) + continuing = true + } + + if args != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(args) + } + + if needClosingBrace { + buf.WriteByte('}') } return buf.String() } -// flatten renders a list of key-value pairs into a buffer. If continuing is -// true, it assumes that the buffer has previous values and will emit a -// separator (which depends on the output format) before the first pair it -// writes. If escapeKeys is true, the keys are assumed to have -// non-JSON-compatible characters in them and must be evaluated for escapes. +// flatten renders a list of key-value pairs into a buffer. If escapeKeys is +// true, the keys are assumed to have non-JSON-compatible characters in them +// and must be evaluated for escapes. // // This function returns a potentially modified version of kvList, which // ensures that there is a value for every key (adding a value if needed) and // that each key is a string (substituting a key if needed). -func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any { +func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any { // This logic overlaps with sanitize() but saves one type-cast per key, // which can be measurable. if len(kvList)%2 != 0 { @@ -354,7 +386,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc } v := kvList[i+1] - if i > 0 || continuing { + if i > 0 { if f.outputFormat == outputJSON { buf.WriteByte(f.comma()) } else { @@ -766,46 +798,17 @@ func (f Formatter) sanitize(kvList []any) []any { // startGroup opens a new group scope (basically a sub-struct), which locks all // the current saved values and starts them anew. This is needed to satisfy // slog. -func (f *Formatter) startGroup(group string) { +func (f *Formatter) startGroup(name string) { // Unnamed groups are just inlined. - if group == "" { + if name == "" { return } - // Any saved values can no longer be changed. - buf := bytes.NewBuffer(make([]byte, 0, 1024)) - continuing := false - - if f.parentValuesStr != "" { - buf.WriteString(f.parentValuesStr) - continuing = true - } - - if f.group != "" && f.valuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys - buf.WriteByte(f.colon()) - buf.WriteByte('{') // for the group - continuing = false - } - - if f.valuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.valuesStr) - } - - // NOTE: We don't close the scope here - that's done later, when a log line - // is actually rendered (because we have N scopes to close). - - f.parentValuesStr = buf.String() + n := len(f.groups) + f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr}) // Start collecting new values. - f.group = group - f.groupDepth++ + f.groupName = name f.valuesStr = "" f.values = nil } @@ -900,7 +903,7 @@ func (f *Formatter) AddValues(kvList []any) { // Pre-render values, so we don't have to do it on each Info/Error call. 
buf := bytes.NewBuffer(make([]byte, 0, 1024)) - f.flatten(buf, vals, false, true) // escape user-provided keys + f.flatten(buf, vals, true) // escape user-provided keys f.valuesStr = buf.String() } diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index b2a67c0a222..feb372228b4 100644 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "v2": "2.12.2" + "v2": "2.12.3" } diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index 5f0908e689f..0d019d97fd3 100644 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,12 @@ # Changelog +## [2.12.3](https://github.com/googleapis/gax-go/compare/v2.12.2...v2.12.3) (2024-03-14) + + +### Bug Fixes + +* bump protobuf dep to v1.33 ([#333](https://github.com/googleapis/gax-go/issues/333)) ([2892b22](https://github.com/googleapis/gax-go/commit/2892b22c1ae8a70dec3448d82e634643fe6c1be2)) + ## [2.12.2](https://github.com/googleapis/gax-go/compare/v2.12.1...v2.12.2) (2024-02-23) diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go index 53c04d4d450..90348f303df 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. -const Version = "2.12.2" +const Version = "2.12.3" diff --git a/vendor/github.com/gorilla/handlers/.editorconfig b/vendor/github.com/gorilla/handlers/.editorconfig new file mode 100644 index 00000000000..c6b74c3e0d0 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/.editorconfig @@ -0,0 +1,20 @@ +; https://editorconfig.org/ + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = space +indent_size = 2 + +[{Makefile,go.mod,go.sum,*.go,.gitmodules}] +indent_style = tab +indent_size = 4 + +[*.md] +indent_size = 4 +trim_trailing_whitespace = false + +eclint_indent_style = unset \ No newline at end of file diff --git a/vendor/github.com/gorilla/handlers/.gitignore b/vendor/github.com/gorilla/handlers/.gitignore new file mode 100644 index 00000000000..577a89e8138 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/.gitignore @@ -0,0 +1,2 @@ +# Output of the go test coverage tool +coverage.coverprofile diff --git a/vendor/github.com/gorilla/handlers/LICENSE b/vendor/github.com/gorilla/handlers/LICENSE index 66ea3c8ae71..bb9d80bc9b6 100644 --- a/vendor/github.com/gorilla/handlers/LICENSE +++ b/vendor/github.com/gorilla/handlers/LICENSE @@ -1,22 +1,27 @@ -Copyright (c) 2013 The Gorilla Handlers Authors. All rights reserved. +Copyright (c) 2023 The Gorilla Authors. All rights reserved. Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: +modification, are permitted provided that the following conditions are +met: - Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/handlers/Makefile b/vendor/github.com/gorilla/handlers/Makefile new file mode 100644 index 00000000000..003b784f7ed --- /dev/null +++ b/vendor/github.com/gorilla/handlers/Makefile @@ -0,0 +1,34 @@ +GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') +GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +GO_SEC=$(shell which gosec 2> /dev/null || echo '') +GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest + +GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') +GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest + +.PHONY: verify +verify: sec govulncheck lint test + +.PHONY: lint +lint: + $(if $(GO_LINT), ,go install $(GO_LINT_URI)) + @echo "##### Running golangci-lint #####" + golangci-lint run -v + +.PHONY: sec +sec: + $(if $(GO_SEC), ,go install $(GO_SEC_URI)) + @echo "##### Running gosec #####" + gosec ./... + +.PHONY: govulncheck +govulncheck: + $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) + @echo "##### Running govulncheck #####" + govulncheck ./... + +.PHONY: test +test: + @echo "##### Running tests #####" + go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... 
diff --git a/vendor/github.com/gorilla/handlers/README.md b/vendor/github.com/gorilla/handlers/README.md index 6eba66bf302..02555b2642c 100644 --- a/vendor/github.com/gorilla/handlers/README.md +++ b/vendor/github.com/gorilla/handlers/README.md @@ -1,10 +1,10 @@ -gorilla/handlers -================ +# gorilla/handlers + +![Testing](https://github.com/gorilla/handlers/actions/workflows/test.yml/badge.svg) +[![Codecov](https://codecov.io/github/gorilla/handlers/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/handlers) [![GoDoc](https://godoc.org/github.com/gorilla/handlers?status.svg)](https://godoc.org/github.com/gorilla/handlers) -[![CircleCI](https://circleci.com/gh/gorilla/handlers.svg?style=svg)](https://circleci.com/gh/gorilla/handlers) [![Sourcegraph](https://sourcegraph.com/github.com/gorilla/handlers/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/handlers?badge) - Package handlers is a collection of handlers (aka "HTTP middleware") for use with Go's `net/http` package (or any framework supporting `http.Handler`), including: diff --git a/vendor/github.com/gorilla/handlers/canonical.go b/vendor/github.com/gorilla/handlers/canonical.go index 8437fefc1ef..7121f5307be 100644 --- a/vendor/github.com/gorilla/handlers/canonical.go +++ b/vendor/github.com/gorilla/handlers/canonical.go @@ -21,12 +21,11 @@ type canonical struct { // // Example: // -// r := mux.NewRouter() -// canonical := handlers.CanonicalHost("http://www.gorillatoolkit.org", 302) -// r.HandleFunc("/route", YourHandler) -// -// log.Fatal(http.ListenAndServe(":7000", canonical(r))) +// r := mux.NewRouter() +// canonical := handlers.CanonicalHost("http://www.gorillatoolkit.org", 302) +// r.HandleFunc("/route", YourHandler) // +// log.Fatal(http.ListenAndServe(":7000", canonical(r))) func CanonicalHost(domain string, code int) func(h http.Handler) http.Handler { fn := func(h http.Handler) http.Handler { return canonical{h, domain, code} diff --git a/vendor/github.com/gorilla/handlers/compress.go b/vendor/github.com/gorilla/handlers/compress.go index 1e95f1ccbfa..d6f589503b5 100644 --- a/vendor/github.com/gorilla/handlers/compress.go +++ b/vendor/github.com/gorilla/handlers/compress.go @@ -44,13 +44,13 @@ type flusher interface { Flush() error } -func (w *compressResponseWriter) Flush() { +func (cw *compressResponseWriter) Flush() { // Flush compressed data if compressor supports it. - if f, ok := w.compressor.(flusher); ok { - f.Flush() + if f, ok := cw.compressor.(flusher); ok { + _ = f.Flush() } // Flush HTTP response. - if f, ok := w.w.(http.Flusher); ok { + if f, ok := cw.w.(http.Flusher); ok { f.Flush() } } diff --git a/vendor/github.com/gorilla/handlers/cors.go b/vendor/github.com/gorilla/handlers/cors.go index 0dcdffb3d32..8af9c096e5e 100644 --- a/vendor/github.com/gorilla/handlers/cors.go +++ b/vendor/github.com/gorilla/handlers/cors.go @@ -26,14 +26,14 @@ type cors struct { type OriginValidator func(string) bool var ( - defaultCorsOptionStatusCode = 200 - defaultCorsMethods = []string{"GET", "HEAD", "POST"} + defaultCorsOptionStatusCode = http.StatusOK + defaultCorsMethods = []string{http.MethodGet, http.MethodHead, http.MethodPost} defaultCorsHeaders = []string{"Accept", "Accept-Language", "Content-Language", "Origin"} - // (WebKit/Safari v9 sends the Origin header by default in AJAX requests) + // (WebKit/Safari v9 sends the Origin header by default in AJAX requests). 
) const ( - corsOptionMethod string = "OPTIONS" + corsOptionMethod string = http.MethodOptions corsAllowOriginHeader string = "Access-Control-Allow-Origin" corsExposeHeadersHeader string = "Access-Control-Expose-Headers" corsMaxAgeHeader string = "Access-Control-Max-Age" @@ -101,10 +101,8 @@ func (ch *cors) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !ch.isMatch(method, defaultCorsMethods) { w.Header().Set(corsAllowMethodsHeader, method) } - } else { - if len(ch.exposedHeaders) > 0 { - w.Header().Set(corsExposeHeadersHeader, strings.Join(ch.exposedHeaders, ",")) - } + } else if len(ch.exposedHeaders) > 0 { + w.Header().Set(corsExposeHeadersHeader, strings.Join(ch.exposedHeaders, ",")) } if ch.allowCredentials { @@ -141,22 +139,21 @@ func (ch *cors) ServeHTTP(w http.ResponseWriter, r *http.Request) { // CORS provides Cross-Origin Resource Sharing middleware. // Example: // -// import ( -// "net/http" -// -// "github.com/gorilla/handlers" -// "github.com/gorilla/mux" -// ) +// import ( +// "net/http" // -// func main() { -// r := mux.NewRouter() -// r.HandleFunc("/users", UserEndpoint) -// r.HandleFunc("/projects", ProjectEndpoint) +// "github.com/gorilla/handlers" +// "github.com/gorilla/mux" +// ) // -// // Apply the CORS middleware to our top-level router, with the defaults. -// http.ListenAndServe(":8000", handlers.CORS()(r)) -// } +// func main() { +// r := mux.NewRouter() +// r.HandleFunc("/users", UserEndpoint) +// r.HandleFunc("/projects", ProjectEndpoint) // +// // Apply the CORS middleware to our top-level router, with the defaults. +// http.ListenAndServe(":8000", handlers.CORS()(r)) +// } func CORS(opts ...CORSOption) func(http.Handler) http.Handler { return func(h http.Handler) http.Handler { ch := parseCORSOptions(opts...) @@ -174,7 +171,7 @@ func parseCORSOptions(opts ...CORSOption) *cors { } for _, option := range opts { - option(ch) + _ = option(ch) //TODO: @bharat-rajani, return error to caller if not nil? } return ch diff --git a/vendor/github.com/gorilla/handlers/handlers.go b/vendor/github.com/gorilla/handlers/handlers.go index 0509482ad7a..9b92fce3333 100644 --- a/vendor/github.com/gorilla/handlers/handlers.go +++ b/vendor/github.com/gorilla/handlers/handlers.go @@ -35,7 +35,7 @@ func (h MethodHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { } sort.Strings(allow) w.Header().Set("Allow", strings.Join(allow, ", ")) - if req.Method == "OPTIONS" { + if req.Method == http.MethodOptions { w.WriteHeader(http.StatusOK) } else { http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) @@ -44,7 +44,7 @@ func (h MethodHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { } // responseLogger is wrapper of http.ResponseWriter that keeps track of its HTTP -// status code and body size +// status code and body size. type responseLogger struct { w http.ResponseWriter status int @@ -97,7 +97,7 @@ func isContentType(h http.Header, contentType string) bool { // Only PUT, POST, and PATCH requests are considered. 
func ContentTypeHandler(h http.Handler, contentTypes ...string) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !(r.Method == "PUT" || r.Method == "POST" || r.Method == "PATCH") { + if !(r.Method == http.MethodPut || r.Method == http.MethodPost || r.Method == http.MethodPatch) { h.ServeHTTP(w, r) return } @@ -108,7 +108,10 @@ func ContentTypeHandler(h http.Handler, contentTypes ...string) http.Handler { return } } - http.Error(w, fmt.Sprintf("Unsupported content type %q; expected one of %q", r.Header.Get("Content-Type"), contentTypes), http.StatusUnsupportedMediaType) + http.Error(w, fmt.Sprintf("Unsupported content type %q; expected one of %q", + r.Header.Get("Content-Type"), + contentTypes), + http.StatusUnsupportedMediaType) }) } @@ -133,12 +136,12 @@ const ( // Form method takes precedence over header method. func HTTPMethodOverrideHandler(h http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method == "POST" { + if r.Method == http.MethodPost { om := r.FormValue(HTTPMethodOverrideFormKey) if om == "" { om = r.Header.Get(HTTPMethodOverrideHeader) } - if om == "PUT" || om == "PATCH" || om == "DELETE" { + if om == http.MethodPut || om == http.MethodPatch || om == http.MethodDelete { r.Method = om } } diff --git a/vendor/github.com/gorilla/handlers/logging.go b/vendor/github.com/gorilla/handlers/logging.go index 228465eba00..2badb6fbff8 100644 --- a/vendor/github.com/gorilla/handlers/logging.go +++ b/vendor/github.com/gorilla/handlers/logging.go @@ -18,7 +18,7 @@ import ( // Logging -// LogFormatterParams is the structure any formatter will be handed when time to log comes +// LogFormatterParams is the structure any formatter will be handed when time to log comes. type LogFormatterParams struct { Request *http.Request URL url.URL @@ -27,7 +27,7 @@ type LogFormatterParams struct { Size int } -// LogFormatter gives the signature of the formatter function passed to CustomLoggingHandler +// LogFormatter gives the signature of the formatter function passed to CustomLoggingHandler. type LogFormatter func(writer io.Writer, params LogFormatterParams) // loggingHandler is the http.Handler implementation for LoggingHandlerTo and its @@ -46,7 +46,10 @@ func (h loggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { h.handler.ServeHTTP(w, req) if req.MultipartForm != nil { - req.MultipartForm.RemoveAll() + err := req.MultipartForm.RemoveAll() + if err != nil { + return + } } params := LogFormatterParams{ @@ -76,7 +79,7 @@ const lowerhex = "0123456789abcdef" func appendQuoted(buf []byte, s string) []byte { var runeTmp [utf8.UTFMax]byte - for width := 0; len(s) > 0; s = s[width:] { + for width := 0; len(s) > 0; s = s[width:] { //nolint: wastedassign //TODO: why width starts from 0 and is reassigned to 1 r := rune(s[0]) width = 1 if r >= utf8.RuneSelf { @@ -191,7 +194,7 @@ func buildCommonLogLine(req *http.Request, url url.URL, ts time.Time, status int func writeLog(writer io.Writer, params LogFormatterParams) { buf := buildCommonLogLine(params.Request, params.URL, params.TimeStamp, params.StatusCode, params.Size) buf = append(buf, '\n') - writer.Write(buf) + _, _ = writer.Write(buf) } // writeCombinedLog writes a log entry for req to w in Apache Combined Log Format. @@ -204,7 +207,7 @@ func writeCombinedLog(writer io.Writer, params LogFormatterParams) { buf = append(buf, `" "`...)
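The HTTPMethodOverrideHandler hunk above only rewrites POST requests whose form key or header names PUT, PATCH, or DELETE; a quick httptest sketch of that path (the route and printed output are illustrative):

    package main

    import (
    	"fmt"
    	"net/http"
    	"net/http/httptest"

    	"github.com/gorilla/handlers"
    )

    func main() {
    	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    		fmt.Fprint(w, r.Method) // sees the overridden method
    	})
    	h := handlers.HTTPMethodOverrideHandler(inner)

    	req := httptest.NewRequest(http.MethodPost, "/resource/42", nil)
    	req.Header.Set(handlers.HTTPMethodOverrideHeader, http.MethodDelete)
    	rec := httptest.NewRecorder()
    	h.ServeHTTP(rec, req)
    	fmt.Println(rec.Body.String()) // DELETE
    }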
buf = appendQuoted(buf, params.Request.UserAgent()) buf = append(buf, '"', '\n') - writer.Write(buf) + _, _ = writer.Write(buf) } // CombinedLoggingHandler return a http.Handler that wraps h and logs requests to out in @@ -212,7 +215,7 @@ func writeCombinedLog(writer io.Writer, params LogFormatterParams) { // // See http://httpd.apache.org/docs/2.2/logs.html#combined for a description of this format. // -// LoggingHandler always sets the ident field of the log to - +// LoggingHandler always sets the ident field of the log to -. func CombinedLoggingHandler(out io.Writer, h http.Handler) http.Handler { return loggingHandler{out, h, writeCombinedLog} } @@ -226,19 +229,18 @@ func CombinedLoggingHandler(out io.Writer, h http.Handler) http.Handler { // // Example: // -// r := mux.NewRouter() -// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { -// w.Write([]byte("This is a catch-all route")) -// }) -// loggedRouter := handlers.LoggingHandler(os.Stdout, r) -// http.ListenAndServe(":1123", loggedRouter) -// +// r := mux.NewRouter() +// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { +// w.Write([]byte("This is a catch-all route")) +// }) +// loggedRouter := handlers.LoggingHandler(os.Stdout, r) +// http.ListenAndServe(":1123", loggedRouter) func LoggingHandler(out io.Writer, h http.Handler) http.Handler { return loggingHandler{out, h, writeLog} } // CustomLoggingHandler provides a way to supply a custom log formatter -// while taking advantage of the mechanisms in this package +// while taking advantage of the mechanisms in this package. func CustomLoggingHandler(out io.Writer, h http.Handler, f LogFormatter) http.Handler { return loggingHandler{out, h, f} } diff --git a/vendor/github.com/gorilla/handlers/proxy_headers.go b/vendor/github.com/gorilla/handlers/proxy_headers.go index ed939dcef5d..281d753e95a 100644 --- a/vendor/github.com/gorilla/handlers/proxy_headers.go +++ b/vendor/github.com/gorilla/handlers/proxy_headers.go @@ -18,7 +18,7 @@ var ( var ( // RFC7239 defines a new "Forwarded: " header designed to replace the // existing use of X-Forwarded-* headers. - // e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43 + // e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43. forwarded = http.CanonicalHeaderKey("Forwarded") // Allows for a sub-match of the first value after 'for=' to the next // comma, semi-colon or space. The match is case-insensitive. @@ -67,7 +67,9 @@ func ProxyHeaders(h http.Handler) http.Handler { func getIP(r *http.Request) string { var addr string - if fwd := r.Header.Get(xForwardedFor); fwd != "" { + switch { + case r.Header.Get(xForwardedFor) != "": + fwd := r.Header.Get(xForwardedFor) // Only grab the first (client) address. Note that '192.168.0.1, // 10.1.1.1' is a valid key for X-Forwarded-For where addresses after // the first may represent forwarding proxies earlier in the chain. @@ -76,17 +78,15 @@ func getIP(r *http.Request) string { s = len(fwd) } addr = fwd[:s] - } else if fwd := r.Header.Get(xRealIP); fwd != "" { - // X-Real-IP should only contain one IP address (the client making the - // request). - addr = fwd - } else if fwd := r.Header.Get(forwarded); fwd != "" { + case r.Header.Get(xRealIP) != "": + addr = r.Header.Get(xRealIP) + case r.Header.Get(forwarded) != "": // match should contain at least two elements if the protocol was // specified in the Forwarded header. The first element will always be // the 'for=' capture, which we ignore. 
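Given the LogFormatter signature and LogFormatterParams fields above, a custom formatter plugs in through CustomLoggingHandler; a minimal sketch (the formatter name and the fields it prints are assumptions):

    package main

    import (
    	"fmt"
    	"io"
    	"net/http"
    	"os"

    	"github.com/gorilla/handlers"
    )

    // compactLine is a hypothetical formatter; it is handed the
    // LogFormatterParams struct documented above after each request.
    func compactLine(w io.Writer, p handlers.LogFormatterParams) {
    	fmt.Fprintf(w, "%s %s -> %d (%d bytes)\n",
    		p.Request.Method, p.URL.Path, p.StatusCode, p.Size)
    }

    func main() {
    	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    		_, _ = w.Write([]byte("This is a catch-all route"))
    	})
    	_ = http.ListenAndServe(":1123", handlers.CustomLoggingHandler(os.Stdout, h, compactLine))
    }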
In the case of multiple IP // addresses (for=8.8.8.8, 8.8.4.4,172.16.1.20 is valid) we only // extract the first, which should be the client IP. - if match := forRegex.FindStringSubmatch(fwd); len(match) > 1 { + if match := forRegex.FindStringSubmatch(r.Header.Get(forwarded)); len(match) > 1 { // IPv6 addresses in Forwarded headers are quoted-strings. We strip // these quotes. addr = strings.Trim(match[1], `"`) diff --git a/vendor/github.com/gorilla/handlers/recovery.go b/vendor/github.com/gorilla/handlers/recovery.go index 4c4c1d9c6ce..0d4f955ecbd 100644 --- a/vendor/github.com/gorilla/handlers/recovery.go +++ b/vendor/github.com/gorilla/handlers/recovery.go @@ -36,12 +36,12 @@ func parseRecoveryOptions(h http.Handler, opts ...RecoveryOption) http.Handler { // // Example: // -// r := mux.NewRouter() -// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { -// panic("Unexpected error!") -// }) +// r := mux.NewRouter() +// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { +// panic("Unexpected error!") +// }) // -// http.ListenAndServe(":1123", handlers.RecoveryHandler()(r)) +// http.ListenAndServe(":1123", handlers.RecoveryHandler()(r)) func RecoveryHandler(opts ...RecoveryOption) func(h http.Handler) http.Handler { return func(h http.Handler) http.Handler { r := &recoveryHandler{handler: h} @@ -50,20 +50,22 @@ func RecoveryHandler(opts ...RecoveryOption) func(h http.Handler) http.Handler { } // RecoveryLogger is a functional option to override -// the default logger +// the default logger. func RecoveryLogger(logger RecoveryHandlerLogger) RecoveryOption { return func(h http.Handler) { - r := h.(*recoveryHandler) + r := h.(*recoveryHandler) //nolint:errcheck //TODO: + // @bharat-rajani should return type-assertion error but would break the API? r.logger = logger } } // PrintRecoveryStack is a functional option to enable // or disable printing stack traces on panic. -func PrintRecoveryStack(print bool) RecoveryOption { +func PrintRecoveryStack(shouldPrint bool) RecoveryOption { return func(h http.Handler) { - r := h.(*recoveryHandler) - r.printStack = print + r := h.(*recoveryHandler) //nolint:errcheck //TODO: + // @bharat-rajani should return type-assertion error but would break the API? + r.printStack = shouldPrint } } diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/common.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/common.go deleted file mode 100644 index 3e905cf1e32..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/common.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2017 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. 
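The RecoveryLogger and PrintRecoveryStack options shown above compose with RecoveryHandler; a sketch (the log prefix is a placeholder) noting that *log.Logger satisfies RecoveryHandlerLogger through its Println method:

    package main

    import (
    	"log"
    	"net/http"
    	"os"

    	"github.com/gorilla/handlers"
    )

    func main() {
    	mux := http.NewServeMux()
    	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
    		panic("Unexpected error!")
    	})

    	recovery := handlers.RecoveryHandler(
    		// *log.Logger satisfies RecoveryHandlerLogger via its Println method.
    		handlers.RecoveryLogger(log.New(os.Stderr, "panic: ", log.LstdFlags)),
    		handlers.PrintRecoveryStack(true),
    	)
    	_ = http.ListenAndServe(":1123", recovery(mux))
    }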
- -package grpc_logging - -import ( - "context" - "io" - - "github.com/golang/protobuf/proto" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// ErrorToCode function determines the error code of an error -// This makes using custom errors with grpc middleware easier -type ErrorToCode func(err error) codes.Code - -func DefaultErrorToCode(err error) codes.Code { - return status.Code(err) -} - -// Decider function defines rules for suppressing any interceptor logs -type Decider func(fullMethodName string, err error) bool - -// DefaultDeciderMethod is the default implementation of decider to see if you should log the call -// by default this if always true so all calls are logged -func DefaultDeciderMethod(fullMethodName string, err error) bool { - return true -} - -// ServerPayloadLoggingDecider is a user-provided function for deciding whether to log the server-side -// request/response payloads -type ServerPayloadLoggingDecider func(ctx context.Context, fullMethodName string, servingObject interface{}) bool - -// ClientPayloadLoggingDecider is a user-provided function for deciding whether to log the client-side -// request/response payloads -type ClientPayloadLoggingDecider func(ctx context.Context, fullMethodName string) bool - -// JsonPbMarshaller is a marshaller that serializes protobuf messages. -type JsonPbMarshaler interface { - Marshal(out io.Writer, pb proto.Message) error -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/doc.go deleted file mode 100644 index d8fcea081a6..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/doc.go +++ /dev/null @@ -1,32 +0,0 @@ -// -/* -grpc_logging is a "parent" package for gRPC logging middlewares. - -General functionality of all middleware - -The gRPC logging middleware populates request-scoped data to `grpc_ctxtags.Tags` that relate to the current gRPC call -(e.g. service and method names). - -Once the gRPC logging middleware has added the gRPC specific Tags to the ctx they will then be written with the logs -that are made using the `ctx_logrus` or `ctx_zap` loggers. - -All logging middleware will emit a final log statement. It is based on the error returned by the handler function, -the gRPC status code, an error (if any) and it will emit at a level controlled via `WithLevels`. - -This parent package - -This particular package is intended for use by other middleware, logging or otherwise. It contains interfaces that other -logging middlewares *could* share . This allows code to be shared between different implementations. - -Field names - -All field names of loggers follow the OpenTracing semantics definitions, with `grpc.` prefix if needed: -https://github.com/opentracing/specification/blob/master/semantic_conventions.md - -Implementations - -There are three implementations at the moment: logrus, zap and kit - -See relevant packages below. -*/ -package grpc_logging diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/settable/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/settable/doc.go deleted file mode 100644 index c447ec6b9e5..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/settable/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// -/* -grpc_logsettable contains a thread-safe wrapper around grpc-logging -infrastructure. 
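The Decider and ErrorToCode types deleted above were consumed by the zap options removed later in this patch (WithDecider, WithCodes); a sketch of how callers of the removed v1 package typically used them, assuming the old import paths and an illustrative health-check method name:

    package example

    import (
    	grpc_logging "github.com/grpc-ecosystem/go-grpc-middleware/logging"
    	grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap"
    	"google.golang.org/grpc/codes"
    	"google.golang.org/grpc/status"
    )

    // Suppress per-call logs for a noisy (illustrative) health-check method.
    var skipHealth grpc_logging.Decider = func(fullMethodName string, err error) bool {
    	return fullMethodName != "/grpc.health.v1.Health/Check"
    }

    var opts = []grpc_zap.Option{
    	grpc_zap.WithDecider(skipHealth),
    	// Equivalent to grpc_logging.DefaultErrorToCode.
    	grpc_zap.WithCodes(func(err error) codes.Code { return status.Code(err) }),
    }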
- -The go-grpc assumes that logger can be only configured once as the `SetLoggerV2` -method is: -```Not mutex-protected, should be called before any gRPC functions.``` - -This package allows to supply parent logger once ("before any grpc"), but -later change underlying implementation in thread-safe way when needed. - -It's in particular useful for testing, where each testcase might need its own -logger. -*/ -package grpc_logsettable diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/settable/logsettable.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/settable/logsettable.go deleted file mode 100644 index 9e403b2b2d0..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/settable/logsettable.go +++ /dev/null @@ -1,99 +0,0 @@ -package grpc_logsettable - -import ( - "io/ioutil" - "sync" - - "google.golang.org/grpc/grpclog" -) - -// SettableLoggerV2 is thread-safe. -type SettableLoggerV2 interface { - grpclog.LoggerV2 - // Sets given logger as the underlying implementation. - Set(loggerv2 grpclog.LoggerV2) - // Sets `discard` logger as the underlying implementation. - Reset() -} - -// ReplaceGrpcLoggerV2 creates and configures SettableLoggerV2 as grpc logger. -func ReplaceGrpcLoggerV2() SettableLoggerV2 { - settable := &settableLoggerV2{} - settable.Reset() - grpclog.SetLoggerV2(settable) - return settable -} - -// SettableLoggerV2 implements SettableLoggerV2 -type settableLoggerV2 struct { - log grpclog.LoggerV2 - mu sync.RWMutex -} - -func (s *settableLoggerV2) Set(log grpclog.LoggerV2) { - s.mu.Lock() - defer s.mu.Unlock() - s.log = log -} - -func (s *settableLoggerV2) Reset() { - s.Set(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard)) -} - -func (s *settableLoggerV2) get() grpclog.LoggerV2 { - s.mu.RLock() - defer s.mu.RUnlock() - return s.log -} - -func (s *settableLoggerV2) Info(args ...interface{}) { - s.get().Info(args) -} - -func (s *settableLoggerV2) Infoln(args ...interface{}) { - s.get().Infoln(args) -} - -func (s *settableLoggerV2) Infof(format string, args ...interface{}) { - s.get().Infof(format, args) -} - -func (s *settableLoggerV2) Warning(args ...interface{}) { - s.get().Warning(args) -} - -func (s *settableLoggerV2) Warningln(args ...interface{}) { - s.get().Warningln(args) -} - -func (s *settableLoggerV2) Warningf(format string, args ...interface{}) { - s.get().Warningf(format, args) -} - -func (s *settableLoggerV2) Error(args ...interface{}) { - s.get().Error(args) -} - -func (s *settableLoggerV2) Errorln(args ...interface{}) { - s.get().Errorln(args) -} - -func (s *settableLoggerV2) Errorf(format string, args ...interface{}) { - s.get().Errorf(format, args) -} - -func (s *settableLoggerV2) Fatal(args ...interface{}) { - s.get().Fatal(args) -} - -func (s *settableLoggerV2) Fatalln(args ...interface{}) { - s.get().Fatalln(args) -} - -func (s *settableLoggerV2) Fatalf(format string, args ...interface{}) { - s.get().Fatalf(format, args) -} - -func (s *settableLoggerV2) V(l int) bool { - return s.get().V(l) -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/client_interceptors.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/client_interceptors.go deleted file mode 100644 index de80c1cc02e..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/client_interceptors.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2017 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. 
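As its doc comment says, the settable wrapper deleted above existed mainly so tests could swap loggers after gRPC was initialized; a sketch of that pattern under the old import paths, paired with SetGrpcLoggerV2 from grpclogger.go further down in this patch:

    package example

    import (
    	"testing"

    	grpc_logsettable "github.com/grpc-ecosystem/go-grpc-middleware/logging/settable"
    	grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap"
    	"go.uber.org/zap/zaptest"
    )

    // Installed once, "before any grpc"; starts out discarding everything.
    var grpcLog = grpc_logsettable.ReplaceGrpcLoggerV2()

    func TestWithOwnLogger(t *testing.T) {
    	// Route gRPC-internal logs to this test case, then drop back to discard.
    	grpc_zap.SetGrpcLoggerV2(grpcLog, zaptest.NewLogger(t))
    	defer grpcLog.Reset()
    	// ... exercise gRPC code ...
    }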
- -package grpc_zap - -import ( - "context" - "path" - "time" - - "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "google.golang.org/grpc" -) - -var ( - // ClientField is used in every client-side log statement made through grpc_zap. Can be overwritten before initialization. - ClientField = zap.String("span.kind", "client") -) - -// UnaryClientInterceptor returns a new unary client interceptor that optionally logs the execution of external gRPC calls. -func UnaryClientInterceptor(logger *zap.Logger, opts ...Option) grpc.UnaryClientInterceptor { - o := evaluateClientOpt(opts) - return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - fields := newClientLoggerFields(ctx, method) - startTime := time.Now() - err := invoker(ctx, method, req, reply, cc, opts...) - newCtx := ctxzap.ToContext(ctx, logger.With(fields...)) - logFinalClientLine(newCtx, o, startTime, err, "finished client unary call") - return err - } -} - -// StreamClientInterceptor returns a new streaming client interceptor that optionally logs the execution of external gRPC calls. -func StreamClientInterceptor(logger *zap.Logger, opts ...Option) grpc.StreamClientInterceptor { - o := evaluateClientOpt(opts) - return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { - fields := newClientLoggerFields(ctx, method) - startTime := time.Now() - clientStream, err := streamer(ctx, desc, cc, method, opts...) - newCtx := ctxzap.ToContext(ctx, logger.With(fields...)) - logFinalClientLine(newCtx, o, startTime, err, "finished client streaming call") - return clientStream, err - } -} - -func logFinalClientLine(ctx context.Context, o *options, startTime time.Time, err error, msg string) { - code := o.codeFunc(err) - level := o.levelFunc(code) - duration := o.durationFunc(time.Now().Sub(startTime)) - o.messageFunc(ctx, msg, level, code, err, duration) -} - -func newClientLoggerFields(ctx context.Context, fullMethodString string) []zapcore.Field { - service := path.Dir(fullMethodString)[1:] - method := path.Base(fullMethodString) - return []zapcore.Field{ - SystemField, - ClientField, - zap.String("grpc.service", service), - zap.String("grpc.method", method), - } -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/context.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/context.go deleted file mode 100644 index 56f6408a620..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/context.go +++ /dev/null @@ -1,21 +0,0 @@ -package grpc_zap - -import ( - "context" - - "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -// AddFields adds zap fields to the logger. -// Deprecated: should use the ctxzap.AddFields instead -func AddFields(ctx context.Context, fields ...zapcore.Field) { - ctxzap.AddFields(ctx, fields...) -} - -// Extract takes the call-scoped Logger from grpc_zap middleware. 
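Dial-side wiring for the client interceptors deleted above; the target address and the plaintext credentials choice are assumptions for the sketch:

    package main

    import (
    	grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap"
    	"go.uber.org/zap"
    	"google.golang.org/grpc"
    )

    func main() {
    	logger, _ := zap.NewProduction()
    	conn, err := grpc.Dial(
    		"localhost:50051", // placeholder target
    		grpc.WithInsecure(),
    		grpc.WithUnaryInterceptor(grpc_zap.UnaryClientInterceptor(logger)),
    		grpc.WithStreamInterceptor(grpc_zap.StreamClientInterceptor(logger)),
    	)
    	if err != nil {
    		logger.Fatal("dial failed", zap.Error(err))
    	}
    	defer conn.Close()
    }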
-// Deprecated: should use the ctxzap.Extract instead -func Extract(ctx context.Context) *zap.Logger { - return ctxzap.Extract(ctx) -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap/context.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap/context.go deleted file mode 100644 index 1d8ae49a19b..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap/context.go +++ /dev/null @@ -1,88 +0,0 @@ -package ctxzap - -import ( - "context" - - "github.com/grpc-ecosystem/go-grpc-middleware/tags" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -type ctxMarker struct{} - -type ctxLogger struct { - logger *zap.Logger - fields []zapcore.Field -} - -var ( - ctxMarkerKey = &ctxMarker{} - nullLogger = zap.NewNop() -) - -// AddFields adds zap fields to the logger. -func AddFields(ctx context.Context, fields ...zapcore.Field) { - l, ok := ctx.Value(ctxMarkerKey).(*ctxLogger) - if !ok || l == nil { - return - } - l.fields = append(l.fields, fields...) -} - -// Extract takes the call-scoped Logger from grpc_zap middleware. -// -// It always returns a Logger that has all the grpc_ctxtags updated. -func Extract(ctx context.Context) *zap.Logger { - l, ok := ctx.Value(ctxMarkerKey).(*ctxLogger) - if !ok || l == nil { - return nullLogger - } - // Add grpc_ctxtags tags metadata until now. - fields := TagsToFields(ctx) - // Add zap fields added until now. - fields = append(fields, l.fields...) - return l.logger.With(fields...) -} - -// TagsToFields transforms the Tags on the supplied context into zap fields. -func TagsToFields(ctx context.Context) []zapcore.Field { - fields := []zapcore.Field{} - tags := grpc_ctxtags.Extract(ctx) - for k, v := range tags.Values() { - fields = append(fields, zap.Any(k, v)) - } - return fields -} - -// ToContext adds the zap.Logger to the context for extraction later. -// Returning the new context that has been created. -func ToContext(ctx context.Context, logger *zap.Logger) context.Context { - l := &ctxLogger{ - logger: logger, - } - return context.WithValue(ctx, ctxMarkerKey, l) -} - -// Debug is equivalent to calling Debug on the zap.Logger in the context. -// It is a no-op if the context does not contain a zap.Logger. -func Debug(ctx context.Context, msg string, fields ...zap.Field) { - Extract(ctx).WithOptions(zap.AddCallerSkip(1)).Debug(msg, fields...) -} - -// Info is equivalent to calling Info on the zap.Logger in the context. -// It is a no-op if the context does not contain a zap.Logger. -func Info(ctx context.Context, msg string, fields ...zap.Field) { - Extract(ctx).WithOptions(zap.AddCallerSkip(1)).Info(msg, fields...) -} - -// Warn is equivalent to calling Warn on the zap.Logger in the context. -// It is a no-op if the context does not contain a zap.Logger. -func Warn(ctx context.Context, msg string, fields ...zap.Field) { - Extract(ctx).WithOptions(zap.AddCallerSkip(1)).Warn(msg, fields...) -} - -// Error is equivalent to calling Error on the zap.Logger in the context. -// It is a no-op if the context does not contain a zap.Logger. -func Error(ctx context.Context, msg string, fields ...zap.Field) { - Extract(ctx).WithOptions(zap.AddCallerSkip(1)).Error(msg, fields...) 
-} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap/doc.go deleted file mode 100644 index 3591e3585ea..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap/doc.go +++ /dev/null @@ -1,14 +0,0 @@ -/* -`ctxzap` is a ctxlogger that is backed by Zap - -It accepts a user-configured `zap.Logger` that will be used for logging. The same `zap.Logger` will -be populated into the `context.Context` passed into gRPC handler code. - -You can use `ctxzap.Extract` to log into a request-scoped `zap.Logger` instance in your handler code. - -As `ctxzap.Extract` will iterate all tags on from `grpc_ctxtags` it is therefore expensive so it is advised that you -extract once at the start of the function from the context and reuse it for the remainder of the function (see examples). - -Please see examples and tests for examples of use. -*/ -package ctxzap diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/doc.go deleted file mode 100644 index ffa6b5c3194..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/doc.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -`grpc_zap` is a gRPC logging middleware backed by ZAP loggers - -It accepts a user-configured `zap.Logger` that will be used for logging completed gRPC calls. The same `zap.Logger` will -be used for logging completed gRPC calls, and be populated into the `context.Context` passed into gRPC handler code. - -On calling `StreamServerInterceptor` or `UnaryServerInterceptor` this logging middleware will add gRPC call information -to the ctx so that it will be present on subsequent use of the `ctx_zap` logger. - -If a deadline is present on the gRPC request the grpc.request.deadline tag is populated when the request begins. grpc.request.deadline -is a string representing the time (RFC3339) when the current call will expire. - -This package also implements request and response *payload* logging, both for server-side and client-side. These will be -logged as structured `jsonpb` fields for every message received/sent (both unary and streaming). For that please use -`Payload*Interceptor` functions for that. Please note that the user-provided function that determines whether to log -the full request/response payload needs to be written with care, this can significantly slow down gRPC. - -ZAP can also be made as a backend for gRPC library internals. For that use `ReplaceGrpcLoggerV2`. 
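Per the ctxzap doc above, Extract iterates all ctxtags and is therefore relatively expensive, so handlers should extract once and reuse; a sketch of request-scoped logging inside a hypothetical unary handler body:

    package example

    import (
    	"context"

    	"github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap"
    	"go.uber.org/zap"
    )

    // handlePing stands in for a handler whose ctx was populated by the
    // grpc_zap server interceptor.
    func handlePing(ctx context.Context) {
    	l := ctxzap.Extract(ctx) // extract once, reuse for the rest of the call
    	l.Info("handling ping")

    	// Fields added here ride along on the final "finished unary call" line.
    	ctxzap.AddFields(ctx, zap.Int("custom_tags.int", 1337))
    }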
- - -*Server Interceptor* -Below is a JSON formatted example of a log that would be logged by the server interceptor: - - { - "level": "info", // string zap log levels - "msg": "finished unary call", // string log message - - "grpc.code": "OK", // string grpc status code - "grpc.method": "Ping", // string method name - "grpc.service": "mwitkow.testproto.TestService", // string full name of the called service - "grpc.start_time": "2006-01-02T15:04:05Z07:00", // string RFC3339 representation of the start time - "grpc.request.deadline": "2006-01-02T15:04:05Z07:00", // string RFC3339 deadline of the current request if supplied - "grpc.request.value": "something", // string value on the request - "grpc.time_ms": 1.345, // float32 run time of the call in ms - - "peer.address": { - "IP": "127.0.0.1", // string IP address of calling party - "Port": 60216, // int port call is coming in on - "Zone": "" // string peer zone for caller - }, - "span.kind": "server", // string client | server - "system": "grpc" // string - - "custom_field": "custom_value", // string user defined field - "custom_tags.int": 1337, // int user defined tag on the ctx - "custom_tags.string": "something", // string user defined tag on the ctx - } - -*Payload Interceptor* -Below is a JSON formatted example of a log that would be logged by the payload interceptor: - - { - "level": "info", // string zap log levels - "msg": "client request payload logged as grpc.request.content", // string log message - - "grpc.request.content": { // object content of RPC request - "msg" : { // object ZAP specific inner object - "value": "something", // string defined by caller - "sleepTimeMs": 9999 // int defined by caller - } - }, - "grpc.method": "Ping", // string method being called - "grpc.service": "mwitkow.testproto.TestService", // string service being called - - "span.kind": "client", // string client | server - "system": "grpc" // string - } - -Note - due to implementation ZAP differs from Logrus in the "grpc.request.content" object by having an inner "msg" object. - - -Please see examples and tests for examples of use. -Please see settable_test.go for canonical integration through "zaptest" with golang testing infrastructure. -*/ -package grpc_zap diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/grpclogger.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/grpclogger.go deleted file mode 100644 index 4cdee602048..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/grpclogger.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2017 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -package grpc_zap - -import ( - "fmt" - - grpc_logsettable "github.com/grpc-ecosystem/go-grpc-middleware/logging/settable" - "go.uber.org/zap" - "google.golang.org/grpc/grpclog" -) - -// ReplaceGrpcLogger sets the given zap.Logger as a gRPC-level logger. -// This should be called *before* any other initialization, preferably from init() functions. -// Deprecated: use ReplaceGrpcLoggerV2. 
-func ReplaceGrpcLogger(logger *zap.Logger) { - zgl := &zapGrpcLogger{logger.With(SystemField, zap.Bool("grpc_log", true))} - grpclog.SetLogger(zgl) -} - -type zapGrpcLogger struct { - logger *zap.Logger -} - -func (l *zapGrpcLogger) Fatal(args ...interface{}) { - l.logger.Fatal(fmt.Sprint(args...)) -} - -func (l *zapGrpcLogger) Fatalf(format string, args ...interface{}) { - l.logger.Fatal(fmt.Sprintf(format, args...)) -} - -func (l *zapGrpcLogger) Fatalln(args ...interface{}) { - l.logger.Fatal(fmt.Sprint(args...)) -} - -func (l *zapGrpcLogger) Print(args ...interface{}) { - l.logger.Info(fmt.Sprint(args...)) -} - -func (l *zapGrpcLogger) Printf(format string, args ...interface{}) { - l.logger.Info(fmt.Sprintf(format, args...)) -} - -func (l *zapGrpcLogger) Println(args ...interface{}) { - l.logger.Info(fmt.Sprint(args...)) -} - -// ReplaceGrpcLoggerV2 replaces the grpclog.LoggerV2 with the provided logger. -// It should be called before any gRPC functions. Logging verbosity defaults to info level. -// To adjust gRPC logging verbosity, see ReplaceGrpcLoggerV2WithVerbosity. -func ReplaceGrpcLoggerV2(logger *zap.Logger) { - ReplaceGrpcLoggerV2WithVerbosity(logger, 0) -} - -// ReplaceGrpcLoggerV2WithVerbosity replaces the grpclog.Logger with the provided logger and verbosity. -// It should be called before any gRPC functions. -// verbosity correlates to grpclogs verbosity levels. A higher verbosity value results in less logging. -func ReplaceGrpcLoggerV2WithVerbosity(logger *zap.Logger, verbosity int) { - zgl := &zapGrpcLoggerV2{ - logger: logger.With(SystemField, zap.Bool("grpc_log", true)).WithOptions(zap.AddCallerSkip(2)), - verbosity: verbosity, - } - grpclog.SetLoggerV2(zgl) -} - -// SetGrpcLoggerV2 replaces the grpc_log.Logger with the provided logger. -// It can be used even when grpc infrastructure was initialized. -func SetGrpcLoggerV2(settable grpc_logsettable.SettableLoggerV2, logger *zap.Logger) { - SetGrpcLoggerV2WithVerbosity(settable, logger, 0) -} - -// SetGrpcLoggerV2WithVerbosity replaces the grpc_.LoggerV2 with the provided logger and verbosity. -// It can be used even when grpc infrastructure was initialized. 
-func SetGrpcLoggerV2WithVerbosity(settable grpc_logsettable.SettableLoggerV2, logger *zap.Logger, verbosity int) { - zgl := &zapGrpcLoggerV2{ - logger: logger.With(SystemField, zap.Bool("grpc_log", true)), - verbosity: verbosity, - } - settable.Set(zgl) -} - -type zapGrpcLoggerV2 struct { - logger *zap.Logger - verbosity int -} - -func (l *zapGrpcLoggerV2) Info(args ...interface{}) { - l.logger.Info(fmt.Sprint(args...)) -} - -func (l *zapGrpcLoggerV2) Infoln(args ...interface{}) { - l.logger.Info(fmt.Sprint(args...)) -} - -func (l *zapGrpcLoggerV2) Infof(format string, args ...interface{}) { - l.logger.Info(fmt.Sprintf(format, args...)) -} - -func (l *zapGrpcLoggerV2) Warning(args ...interface{}) { - l.logger.Warn(fmt.Sprint(args...)) -} - -func (l *zapGrpcLoggerV2) Warningln(args ...interface{}) { - l.logger.Warn(fmt.Sprint(args...)) -} - -func (l *zapGrpcLoggerV2) Warningf(format string, args ...interface{}) { - l.logger.Warn(fmt.Sprintf(format, args...)) -} - -func (l *zapGrpcLoggerV2) Error(args ...interface{}) { - l.logger.Error(fmt.Sprint(args...)) -} - -func (l *zapGrpcLoggerV2) Errorln(args ...interface{}) { - l.logger.Error(fmt.Sprint(args...)) -} - -func (l *zapGrpcLoggerV2) Errorf(format string, args ...interface{}) { - l.logger.Error(fmt.Sprintf(format, args...)) -} - -func (l *zapGrpcLoggerV2) Fatal(args ...interface{}) { - l.logger.Fatal(fmt.Sprint(args...)) -} - -func (l *zapGrpcLoggerV2) Fatalln(args ...interface{}) { - l.logger.Fatal(fmt.Sprint(args...)) -} - -func (l *zapGrpcLoggerV2) Fatalf(format string, args ...interface{}) { - l.logger.Fatal(fmt.Sprintf(format, args...)) -} - -func (l *zapGrpcLoggerV2) V(level int) bool { - // Check whether the verbosity of the current log ('level') is within the specified threshold ('l.verbosity'). - // As in https://github.com/grpc/grpc-go/blob/41e044e1c82fcf6a5801d6cbd7ecf952505eecb1/grpclog/loggerv2.go#L199-L201. 
- return level <= l.verbosity -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/options.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/options.go deleted file mode 100644 index b59db9ad69e..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/options.go +++ /dev/null @@ -1,217 +0,0 @@ -package grpc_zap - -import ( - "context" - "time" - - grpc_logging "github.com/grpc-ecosystem/go-grpc-middleware/logging" - "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "google.golang.org/grpc/codes" -) - -var ( - defaultOptions = &options{ - levelFunc: DefaultCodeToLevel, - shouldLog: grpc_logging.DefaultDeciderMethod, - codeFunc: grpc_logging.DefaultErrorToCode, - durationFunc: DefaultDurationToField, - messageFunc: DefaultMessageProducer, - timestampFormat: time.RFC3339, - } -) - -type options struct { - levelFunc CodeToLevel - shouldLog grpc_logging.Decider - codeFunc grpc_logging.ErrorToCode - durationFunc DurationToField - messageFunc MessageProducer - timestampFormat string -} - -func evaluateServerOpt(opts []Option) *options { - optCopy := &options{} - *optCopy = *defaultOptions - optCopy.levelFunc = DefaultCodeToLevel - for _, o := range opts { - o(optCopy) - } - return optCopy -} - -func evaluateClientOpt(opts []Option) *options { - optCopy := &options{} - *optCopy = *defaultOptions - optCopy.levelFunc = DefaultClientCodeToLevel - for _, o := range opts { - o(optCopy) - } - return optCopy -} - -type Option func(*options) - -// CodeToLevel function defines the mapping between gRPC return codes and interceptor log level. -type CodeToLevel func(code codes.Code) zapcore.Level - -// DurationToField function defines how to produce duration fields for logging -type DurationToField func(duration time.Duration) zapcore.Field - -// WithDecider customizes the function for deciding if the gRPC interceptor logs should log. -func WithDecider(f grpc_logging.Decider) Option { - return func(o *options) { - o.shouldLog = f - } -} - -// WithLevels customizes the function for mapping gRPC return codes and interceptor log level statements. -func WithLevels(f CodeToLevel) Option { - return func(o *options) { - o.levelFunc = f - } -} - -// WithCodes customizes the function for mapping errors to error codes. -func WithCodes(f grpc_logging.ErrorToCode) Option { - return func(o *options) { - o.codeFunc = f - } -} - -// WithDurationField customizes the function for mapping request durations to Zap fields. -func WithDurationField(f DurationToField) Option { - return func(o *options) { - o.durationFunc = f - } -} - -// WithMessageProducer customizes the function for message formation. -func WithMessageProducer(f MessageProducer) Option { - return func(o *options) { - o.messageFunc = f - } -} - -// WithTimestampFormat customizes the timestamps emitted in the log fields. -func WithTimestampFormat(format string) Option { - return func(o *options) { - o.timestampFormat = format - } -} - -// DefaultCodeToLevel is the default implementation of gRPC return codes and interceptor log level for server side. 
-func DefaultCodeToLevel(code codes.Code) zapcore.Level { - switch code { - case codes.OK: - return zap.InfoLevel - case codes.Canceled: - return zap.InfoLevel - case codes.Unknown: - return zap.ErrorLevel - case codes.InvalidArgument: - return zap.InfoLevel - case codes.DeadlineExceeded: - return zap.WarnLevel - case codes.NotFound: - return zap.InfoLevel - case codes.AlreadyExists: - return zap.InfoLevel - case codes.PermissionDenied: - return zap.WarnLevel - case codes.Unauthenticated: - return zap.InfoLevel // unauthenticated requests can happen - case codes.ResourceExhausted: - return zap.WarnLevel - case codes.FailedPrecondition: - return zap.WarnLevel - case codes.Aborted: - return zap.WarnLevel - case codes.OutOfRange: - return zap.WarnLevel - case codes.Unimplemented: - return zap.ErrorLevel - case codes.Internal: - return zap.ErrorLevel - case codes.Unavailable: - return zap.WarnLevel - case codes.DataLoss: - return zap.ErrorLevel - default: - return zap.ErrorLevel - } -} - -// DefaultClientCodeToLevel is the default implementation of gRPC return codes to log levels for client side. -func DefaultClientCodeToLevel(code codes.Code) zapcore.Level { - switch code { - case codes.OK: - return zap.DebugLevel - case codes.Canceled: - return zap.DebugLevel - case codes.Unknown: - return zap.InfoLevel - case codes.InvalidArgument: - return zap.DebugLevel - case codes.DeadlineExceeded: - return zap.InfoLevel - case codes.NotFound: - return zap.DebugLevel - case codes.AlreadyExists: - return zap.DebugLevel - case codes.PermissionDenied: - return zap.InfoLevel - case codes.Unauthenticated: - return zap.InfoLevel // unauthenticated requests can happen - case codes.ResourceExhausted: - return zap.DebugLevel - case codes.FailedPrecondition: - return zap.DebugLevel - case codes.Aborted: - return zap.DebugLevel - case codes.OutOfRange: - return zap.DebugLevel - case codes.Unimplemented: - return zap.WarnLevel - case codes.Internal: - return zap.WarnLevel - case codes.Unavailable: - return zap.WarnLevel - case codes.DataLoss: - return zap.WarnLevel - default: - return zap.InfoLevel - } -} - -// DefaultDurationToField is the default implementation of converting request duration to a Zap field. -var DefaultDurationToField = DurationToTimeMillisField - -// DurationToTimeMillisField converts the duration to milliseconds and uses the key `grpc.time_ms`. -func DurationToTimeMillisField(duration time.Duration) zapcore.Field { - return zap.Float32("grpc.time_ms", durationToMilliseconds(duration)) -} - -// DurationToDurationField uses a Duration field to log the request duration -// and leaves it up to Zap's encoder settings to determine how that is output. -func DurationToDurationField(duration time.Duration) zapcore.Field { - return zap.Duration("grpc.duration", duration) -} - -func durationToMilliseconds(duration time.Duration) float32 { - return float32(duration.Nanoseconds()/1000) / 1000 -} - -// MessageProducer produces a user defined log message -type MessageProducer func(ctx context.Context, msg string, level zapcore.Level, code codes.Code, err error, duration zapcore.Field) - -// DefaultMessageProducer writes the default message -func DefaultMessageProducer(ctx context.Context, msg string, level zapcore.Level, code codes.Code, err error, duration zapcore.Field) { - // re-extract logger from newCtx, as it may have extra fields that changed in the holder. 
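The DefaultCodeToLevel mapping above could be overridden per deployment with WithLevels; a sketch that demotes NotFound (assumed here to be routine traffic) and delegates every other code to the default:

    package example

    import (
    	grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap"
    	"go.uber.org/zap"
    	"go.uber.org/zap/zapcore"
    	"google.golang.org/grpc"
    	"google.golang.org/grpc/codes"
    )

    func newServer(logger *zap.Logger) *grpc.Server {
    	quietNotFound := grpc_zap.WithLevels(func(code codes.Code) zapcore.Level {
    		if code == codes.NotFound {
    			return zap.DebugLevel // lookups that miss are expected here (assumption)
    		}
    		return grpc_zap.DefaultCodeToLevel(code)
    	})
    	return grpc.NewServer(
    		grpc.UnaryInterceptor(grpc_zap.UnaryServerInterceptor(logger, quietNotFound)),
    	)
    }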
- ctxzap.Extract(ctx).Check(level, msg).Write( - zap.Error(err), - zap.String("grpc.code", code.String()), - duration, - ) -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/payload_interceptors.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/payload_interceptors.go deleted file mode 100644 index 329019b1722..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/payload_interceptors.go +++ /dev/null @@ -1,150 +0,0 @@ -package grpc_zap - -import ( - "bytes" - "context" - "fmt" - - "github.com/golang/protobuf/jsonpb" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/go-grpc-middleware/logging" - "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "google.golang.org/grpc" -) - -var ( - // JsonPbMarshaller is the marshaller used for serializing protobuf messages. - // If needed, this variable can be reassigned with a different marshaller with the same Marshal() signature. - JsonPbMarshaller grpc_logging.JsonPbMarshaler = &jsonpb.Marshaler{} -) - -// PayloadUnaryServerInterceptor returns a new unary server interceptors that logs the payloads of requests. -// -// This *only* works when placed *after* the `grpc_zap.UnaryServerInterceptor`. However, the logging can be done to a -// separate instance of the logger. -func PayloadUnaryServerInterceptor(logger *zap.Logger, decider grpc_logging.ServerPayloadLoggingDecider) grpc.UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - if !decider(ctx, info.FullMethod, info.Server) { - return handler(ctx, req) - } - // Use the provided zap.Logger for logging but use the fields from context. - logEntry := logger.With(append(serverCallFields(info.FullMethod), ctxzap.TagsToFields(ctx)...)...) - logProtoMessageAsJson(logEntry, req, "grpc.request.content", "server request payload logged as grpc.request.content field") - resp, err := handler(ctx, req) - if err == nil { - logProtoMessageAsJson(logEntry, resp, "grpc.response.content", "server response payload logged as grpc.response.content field") - } - return resp, err - } -} - -// PayloadStreamServerInterceptor returns a new server server interceptors that logs the payloads of requests. -// -// This *only* works when placed *after* the `grpc_zap.StreamServerInterceptor`. However, the logging can be done to a -// separate instance of the logger. -func PayloadStreamServerInterceptor(logger *zap.Logger, decider grpc_logging.ServerPayloadLoggingDecider) grpc.StreamServerInterceptor { - return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - if !decider(stream.Context(), info.FullMethod, srv) { - return handler(srv, stream) - } - logEntry := logger.With(append(serverCallFields(info.FullMethod), ctxzap.TagsToFields(stream.Context())...)...) - newStream := &loggingServerStream{ServerStream: stream, logger: logEntry} - return handler(srv, newStream) - } -} - -// PayloadUnaryClientInterceptor returns a new unary client interceptor that logs the payloads of requests and responses. 
-func PayloadUnaryClientInterceptor(logger *zap.Logger, decider grpc_logging.ClientPayloadLoggingDecider) grpc.UnaryClientInterceptor { - return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - if !decider(ctx, method) { - return invoker(ctx, method, req, reply, cc, opts...) - } - logEntry := logger.With(newClientLoggerFields(ctx, method)...) - logProtoMessageAsJson(logEntry, req, "grpc.request.content", "client request payload logged as grpc.request.content") - err := invoker(ctx, method, req, reply, cc, opts...) - if err == nil { - logProtoMessageAsJson(logEntry, reply, "grpc.response.content", "client response payload logged as grpc.response.content") - } - return err - } -} - -// PayloadStreamClientInterceptor returns a new streaming client interceptor that logs the payloads of requests and responses. -func PayloadStreamClientInterceptor(logger *zap.Logger, decider grpc_logging.ClientPayloadLoggingDecider) grpc.StreamClientInterceptor { - return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { - if !decider(ctx, method) { - return streamer(ctx, desc, cc, method, opts...) - } - logEntry := logger.With(newClientLoggerFields(ctx, method)...) - clientStream, err := streamer(ctx, desc, cc, method, opts...) - newStream := &loggingClientStream{ClientStream: clientStream, logger: logEntry} - return newStream, err - } -} - -type loggingClientStream struct { - grpc.ClientStream - logger *zap.Logger -} - -func (l *loggingClientStream) SendMsg(m interface{}) error { - err := l.ClientStream.SendMsg(m) - if err == nil { - logProtoMessageAsJson(l.logger, m, "grpc.request.content", "server request payload logged as grpc.request.content field") - } - return err -} - -func (l *loggingClientStream) RecvMsg(m interface{}) error { - err := l.ClientStream.RecvMsg(m) - if err == nil { - logProtoMessageAsJson(l.logger, m, "grpc.response.content", "server response payload logged as grpc.response.content field") - } - return err -} - -type loggingServerStream struct { - grpc.ServerStream - logger *zap.Logger -} - -func (l *loggingServerStream) SendMsg(m interface{}) error { - err := l.ServerStream.SendMsg(m) - if err == nil { - logProtoMessageAsJson(l.logger, m, "grpc.response.content", "server response payload logged as grpc.response.content field") - } - return err -} - -func (l *loggingServerStream) RecvMsg(m interface{}) error { - err := l.ServerStream.RecvMsg(m) - if err == nil { - logProtoMessageAsJson(l.logger, m, "grpc.request.content", "server request payload logged as grpc.request.content field") - } - return err -} - -func logProtoMessageAsJson(logger *zap.Logger, pbMsg interface{}, key string, msg string) { - if p, ok := pbMsg.(proto.Message); ok { - logger.Check(zapcore.InfoLevel, msg).Write(zap.Object(key, &jsonpbObjectMarshaler{pb: p})) - } -} - -type jsonpbObjectMarshaler struct { - pb proto.Message -} - -func (j *jsonpbObjectMarshaler) MarshalLogObject(e zapcore.ObjectEncoder) error { - // ZAP jsonEncoder deals with AddReflect by using json.MarshalObject. The same thing applies for consoleEncoder. 
- return e.AddReflected("msg", j) -} - -func (j *jsonpbObjectMarshaler) MarshalJSON() ([]byte, error) { - b := &bytes.Buffer{} - if err := JsonPbMarshaller.Marshal(b, j.pb); err != nil { - return nil, fmt.Errorf("jsonpb serializer failed: %v", err) - } - return b.Bytes(), nil -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/server_interceptors.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/server_interceptors.go deleted file mode 100644 index 1db035897eb..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/server_interceptors.go +++ /dev/null @@ -1,85 +0,0 @@ -package grpc_zap - -import ( - "context" - "path" - "time" - - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" - "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "google.golang.org/grpc" -) - -var ( - // SystemField is used in every log statement made through grpc_zap. Can be overwritten before any initialization code. - SystemField = zap.String("system", "grpc") - - // ServerField is used in every server-side log statement made through grpc_zap.Can be overwritten before initialization. - ServerField = zap.String("span.kind", "server") -) - -// UnaryServerInterceptor returns a new unary server interceptors that adds zap.Logger to the context. -func UnaryServerInterceptor(logger *zap.Logger, opts ...Option) grpc.UnaryServerInterceptor { - o := evaluateServerOpt(opts) - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - startTime := time.Now() - - newCtx := newLoggerForCall(ctx, logger, info.FullMethod, startTime, o.timestampFormat) - - resp, err := handler(newCtx, req) - if !o.shouldLog(info.FullMethod, err) { - return resp, err - } - code := o.codeFunc(err) - level := o.levelFunc(code) - duration := o.durationFunc(time.Since(startTime)) - - o.messageFunc(newCtx, "finished unary call with code "+code.String(), level, code, err, duration) - return resp, err - } -} - -// StreamServerInterceptor returns a new streaming server interceptor that adds zap.Logger to the context. 
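As the comments above stress, the payload interceptors only log correctly when placed after the base grpc_zap interceptor; a chaining sketch using the v1 grpc_middleware helper (the log-everything decider is an assumption, real deployments would be more selective):

    package example

    import (
    	"context"

    	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
    	grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap"
    	"go.uber.org/zap"
    	"google.golang.org/grpc"
    )

    func newServer(logger *zap.Logger) *grpc.Server {
    	logAll := func(ctx context.Context, fullMethodName string, servingObject interface{}) bool {
    		return true // log every payload; tighten this in real deployments
    	}
    	return grpc.NewServer(grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
    		grpc_zap.UnaryServerInterceptor(logger), // must come first
    		grpc_zap.PayloadUnaryServerInterceptor(logger, logAll),
    	)))
    }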
-func StreamServerInterceptor(logger *zap.Logger, opts ...Option) grpc.StreamServerInterceptor { - o := evaluateServerOpt(opts) - return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - startTime := time.Now() - newCtx := newLoggerForCall(stream.Context(), logger, info.FullMethod, startTime, o.timestampFormat) - wrapped := grpc_middleware.WrapServerStream(stream) - wrapped.WrappedContext = newCtx - - err := handler(srv, wrapped) - if !o.shouldLog(info.FullMethod, err) { - return err - } - code := o.codeFunc(err) - level := o.levelFunc(code) - duration := o.durationFunc(time.Since(startTime)) - - o.messageFunc(newCtx, "finished streaming call with code "+code.String(), level, code, err, duration) - return err - } -} - -func serverCallFields(fullMethodString string) []zapcore.Field { - service := path.Dir(fullMethodString)[1:] - method := path.Base(fullMethodString) - return []zapcore.Field{ - SystemField, - ServerField, - zap.String("grpc.service", service), - zap.String("grpc.method", method), - } -} - -func newLoggerForCall(ctx context.Context, logger *zap.Logger, fullMethodString string, start time.Time, timestampFormat string) context.Context { - var f []zapcore.Field - f = append(f, zap.String("grpc.start_time", start.Format(timestampFormat))) - if d, ok := ctx.Deadline(); ok { - f = append(f, zap.String("grpc.request.deadline", d.Format(timestampFormat))) - } - callLog := logger.With(append(f, serverCallFields(fullMethodString)...)...) - return ctxzap.ToContext(ctx, callLog) -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go deleted file mode 100644 index ad35f09a87f..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -package grpc_retry - -import ( - "time" - - "github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils" -) - -// BackoffLinear is very simple: it waits for a fixed period of time between calls. -func BackoffLinear(waitBetween time.Duration) BackoffFunc { - return func(attempt uint) time.Duration { - return waitBetween - } -} - -// BackoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment). -// -// For example waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms. -func BackoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) BackoffFunc { - return func(attempt uint) time.Duration { - return backoffutils.JitterUp(waitBetween, jitterFraction) - } -} - -// BackoffExponential produces increasing intervals for each attempt. -// -// The scalar is multiplied times 2 raised to the current attempt. So the first -// retry with a scalar of 100ms is 100ms, while the 5th attempt would be 1.6s. -func BackoffExponential(scalar time.Duration) BackoffFunc { - return func(attempt uint) time.Duration { - return scalar * time.Duration(backoffutils.ExponentBase2(attempt)) - } -} - -// BackoffExponentialWithJitter creates an exponential backoff like -// BackoffExponential does, but adds jitter. 
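Since ExponentBase2(a) computes 2^(a-1), the BackoffExponential helper deleted here waits scalar * 2^(attempt-1); a sketch printing the schedule for a 100ms scalar, which matches the "5th attempt would be 1.6s" note above:

    package main

    import (
    	"fmt"
    	"time"

    	grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
    )

    func main() {
    	b := grpc_retry.BackoffExponential(100 * time.Millisecond)
    	// attempt 1 -> 100ms, 2 -> 200ms, 3 -> 400ms, 4 -> 800ms, 5 -> 1.6s
    	for attempt := uint(1); attempt <= 5; attempt++ {
    		fmt.Printf("attempt %d: wait %v\n", attempt, b(attempt))
    	}
    }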
-func BackoffExponentialWithJitter(scalar time.Duration, jitterFraction float64) BackoffFunc { - return func(attempt uint) time.Duration { - return backoffutils.JitterUp(scalar*time.Duration(backoffutils.ExponentBase2(attempt)), jitterFraction) - } -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/context.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/context.go deleted file mode 100644 index 0da1658bbe8..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/context.go +++ /dev/null @@ -1,78 +0,0 @@ -package grpc_ctxtags - -import ( - "context" -) - -type ctxMarker struct{} - -var ( - // ctxMarkerKey is the Context value marker used by *all* logging middleware. - // The logging middleware object must interf - ctxMarkerKey = &ctxMarker{} - // NoopTags is a trivial, minimum overhead implementation of Tags for which all operations are no-ops. - NoopTags = &noopTags{} -) - -// Tags is the interface used for storing request tags between Context calls. -// The default implementation is *not* thread safe, and should be handled only in the context of the request. -type Tags interface { - // Set sets the given key in the metadata tags. - Set(key string, value interface{}) Tags - // Has checks if the given key exists. - Has(key string) bool - // Values returns a map of key to values. - // Do not modify the underlying map, please use Set instead. - Values() map[string]interface{} -} - -type mapTags struct { - values map[string]interface{} -} - -func (t *mapTags) Set(key string, value interface{}) Tags { - t.values[key] = value - return t -} - -func (t *mapTags) Has(key string) bool { - _, ok := t.values[key] - return ok -} - -func (t *mapTags) Values() map[string]interface{} { - return t.values -} - -type noopTags struct{} - -func (t *noopTags) Set(key string, value interface{}) Tags { - return t -} - -func (t *noopTags) Has(key string) bool { - return false -} - -func (t *noopTags) Values() map[string]interface{} { - return nil -} - -// Extracts returns a pre-existing Tags object in the Context. -// If the context wasn't set in a tag interceptor, a no-op Tag storage is returned that will *not* be propagated in context. -func Extract(ctx context.Context) Tags { - t, ok := ctx.Value(ctxMarkerKey).(Tags) - if !ok { - return NoopTags - } - - return t -} - -func SetInContext(ctx context.Context, tags Tags) context.Context { - return context.WithValue(ctx, ctxMarkerKey, tags) -} - -func NewTags() Tags { - return &mapTags{values: make(map[string]interface{})} -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/doc.go deleted file mode 100644 index 960638d0fa3..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -`grpc_ctxtags` adds a Tag object to the context that can be used by other middleware to add context about a request. - -Request Context Tags - -Tags describe information about the request, and can be set and used by other middleware, or handlers. Tags are used -for logging and tracing of requests. Tags are populated both upwards, *and* downwards in the interceptor-handler stack. - -You can automatically extract tags (in `grpc.request.`) from request payloads. - -For unary and server-streaming methods, pass in the `WithFieldExtractor` option. 
For client-streams and bidirectional-streams, you can -use `WithFieldExtractorForInitialReq` which will extract the tags from the first message passed from client to server. -Note the tags will not be modified for subsequent requests, so this option only makes sense when the initial message -establishes the meta-data for the stream. - -If a user doesn't use the interceptors that initialize the `Tags` object, all operations following from an `Extract(ctx)` -will be no-ops. This is to ensure that code doesn't panic if the interceptors weren't used. - -Tags fields are typed, and shallow and should follow the OpenTracing semantics convention: -https://github.com/opentracing/specification/blob/master/semantic_conventions.md -*/ -package grpc_ctxtags diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/fieldextractor.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/fieldextractor.go deleted file mode 100644 index a4073ab49b2..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/fieldextractor.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2017 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -package grpc_ctxtags - -import ( - "reflect" -) - -// RequestFieldExtractorFunc is a user-provided function that extracts field information from a gRPC request. -// It is called from tags middleware on arrival of unary request or a server-stream request. -// Keys and values will be added to the context tags of the request. If there are no fields, you should return a nil. -type RequestFieldExtractorFunc func(fullMethod string, req interface{}) map[string]interface{} - -type requestFieldsExtractor interface { - // ExtractRequestFields is a method declared on a Protobuf message that extracts fields from the interface. - // The values from the extracted fields should be set in the appendToMap, in order to avoid allocations. - ExtractRequestFields(appendToMap map[string]interface{}) -} - -// CodeGenRequestFieldExtractor is a function that relies on code-generated functions that export log fields from requests. -// These are usually coming from a protoc-plugin that generates additional information based on custom field options. -func CodeGenRequestFieldExtractor(fullMethod string, req interface{}) map[string]interface{} { - if ext, ok := req.(requestFieldsExtractor); ok { - retMap := make(map[string]interface{}) - ext.ExtractRequestFields(retMap) - if len(retMap) == 0 { - return nil - } - return retMap - } - return nil -} - -// TagBasedRequestFieldExtractor is a function that relies on Go struct tags to export log fields from requests. -// These are usually coming from a protoc-plugin, such as Gogo protobuf. -// -// message Metadata { -// repeated string tags = 1 [ (gogoproto.moretags) = "log_field:\"meta_tags\"" ]; -// } -// -// The tagName is configurable using the tagName variable. Here it would be "log_field". -func TagBasedRequestFieldExtractor(tagName string) RequestFieldExtractorFunc { - return func(fullMethod string, req interface{}) map[string]interface{} { - retMap := make(map[string]interface{}) - reflectMessageTags(req, retMap, tagName) - if len(retMap) == 0 { - return nil - } - return retMap - } -} - -func reflectMessageTags(msg interface{}, existingMap map[string]interface{}, tagName string) { - v := reflect.ValueOf(msg) - // Only deal with pointers to structs. - if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { - return - } - // Deref the pointer get to the struct. 
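TagBasedRequestFieldExtractor above walks exported struct fields by reflection, keying on the struct tag name; a sketch of a gogoproto-style message and the matching interceptor option from options.go below (the Metadata type is illustrative, not from this patch):

    package example

    import (
    	grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags"
    	"google.golang.org/grpc"
    )

    // Metadata mimics a generated message carrying a log_field tag,
    // e.g. (gogoproto.moretags) = "log_field:\"meta_tags\"".
    type Metadata struct {
    	Tags []string `log_field:"meta_tags"`
    }

    func newServer() *grpc.Server {
    	return grpc.NewServer(grpc.UnaryInterceptor(
    		grpc_ctxtags.UnaryServerInterceptor(
    			// Every request field tagged log_field lands in grpc.request.<name>.
    			grpc_ctxtags.WithFieldExtractor(grpc_ctxtags.TagBasedRequestFieldExtractor("log_field")),
    		),
    	))
    }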
- v = v.Elem() - t := v.Type() - for i := 0; i < v.NumField(); i++ { - field := v.Field(i) - kind := field.Kind() - // Only recurse down direct pointers, which should only be to nested structs. - if (kind == reflect.Ptr || kind == reflect.Interface) && field.CanInterface() { - reflectMessageTags(field.Interface(), existingMap, tagName) - } - // In case of arrays/slices (repeated fields) go down to the concrete type. - if kind == reflect.Array || kind == reflect.Slice { - if field.Len() == 0 { - continue - } - kind = field.Index(0).Kind() - } - // Only be interested in - if (kind >= reflect.Bool && kind <= reflect.Float64) || kind == reflect.String { - if tag := t.Field(i).Tag.Get(tagName); tag != "" { - existingMap[tag] = field.Interface() - } - } - } - return -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/interceptors.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/interceptors.go deleted file mode 100644 index a7ced60f5b9..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/interceptors.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2017 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -package grpc_ctxtags - -import ( - "context" - - "google.golang.org/grpc" - "google.golang.org/grpc/peer" - - "github.com/grpc-ecosystem/go-grpc-middleware" -) - -// UnaryServerInterceptor returns a new unary server interceptors that sets the values for request tags. -func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { - o := evaluateOptions(opts) - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - newCtx := newTagsForCtx(ctx) - if o.requestFieldsFunc != nil { - setRequestFieldTags(newCtx, o.requestFieldsFunc, info.FullMethod, req) - } - return handler(newCtx, req) - } -} - -// StreamServerInterceptor returns a new streaming server interceptor that sets the values for request tags. -func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { - o := evaluateOptions(opts) - return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - newCtx := newTagsForCtx(stream.Context()) - if o.requestFieldsFunc == nil { - // Short-circuit, don't do the expensive bit of allocating a wrappedStream. - wrappedStream := grpc_middleware.WrapServerStream(stream) - wrappedStream.WrappedContext = newCtx - return handler(srv, wrappedStream) - } - wrapped := &wrappedStream{stream, info, o, newCtx, true} - err := handler(srv, wrapped) - return err - } -} - -// wrappedStream is a thin wrapper around grpc.ServerStream that allows modifying context and extracts log fields from the initial message. -type wrappedStream struct { - grpc.ServerStream - info *grpc.StreamServerInfo - opts *options - // WrappedContext is the wrapper's own Context. You can assign it. - WrappedContext context.Context - initial bool -} - -// Context returns the wrapper's WrappedContext, overwriting the nested grpc.ServerStream.Context() -func (w *wrappedStream) Context() context.Context { - return w.WrappedContext -} - -func (w *wrappedStream) RecvMsg(m interface{}) error { - err := w.ServerStream.RecvMsg(m) - // We only do log fields extraction on the single-request of a server-side stream. 
- if !w.info.IsClientStream || w.opts.requestFieldsFromInitial && w.initial { - w.initial = false - - setRequestFieldTags(w.Context(), w.opts.requestFieldsFunc, w.info.FullMethod, m) - } - return err -} - -func newTagsForCtx(ctx context.Context) context.Context { - t := NewTags() - if peer, ok := peer.FromContext(ctx); ok { - t.Set("peer.address", peer.Addr.String()) - } - return SetInContext(ctx, t) -} - -func setRequestFieldTags(ctx context.Context, f RequestFieldExtractorFunc, fullMethodName string, req interface{}) { - if valMap := f(fullMethodName, req); valMap != nil { - t := Extract(ctx) - for k, v := range valMap { - t.Set("grpc.request."+k, v) - } - } -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/options.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/options.go deleted file mode 100644 index 952775f88d4..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/options.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2017 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -package grpc_ctxtags - -var ( - defaultOptions = &options{ - requestFieldsFunc: nil, - } -) - -type options struct { - requestFieldsFunc RequestFieldExtractorFunc - requestFieldsFromInitial bool -} - -func evaluateOptions(opts []Option) *options { - optCopy := &options{} - *optCopy = *defaultOptions - for _, o := range opts { - o(optCopy) - } - return optCopy -} - -type Option func(*options) - -// WithFieldExtractor customizes the function for extracting log fields from protobuf messages, for -// unary and server-streamed methods only. -func WithFieldExtractor(f RequestFieldExtractorFunc) Option { - return func(o *options) { - o.requestFieldsFunc = f - } -} - -// WithFieldExtractorForInitialReq customizes the function for extracting log fields from protobuf messages, -// for all unary and streaming methods. For client-streams and bidirectional-streams, the tags will be -// extracted from the first message from the client. -func WithFieldExtractorForInitialReq(f RequestFieldExtractorFunc) Option { - return func(o *options) { - o.requestFieldsFunc = f - o.requestFieldsFromInitial = true - } -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go deleted file mode 100644 index 4e69a6305aa..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -/* -Backoff Helper Utilities - -Implements common backoff features. -*/ -package backoffutils - -import ( - "math/rand" - "time" -) - -// JitterUp adds random jitter to the duration. -// -// This adds or subtracts time from the duration within a given jitter fraction. -// For example for 10s and jitter 0.1, it will return a time within [9s, 11s]) -func JitterUp(duration time.Duration, jitter float64) time.Duration { - multiplier := jitter * (rand.Float64()*2 - 1) - return time.Duration(float64(duration) * (1 + multiplier)) -} - -// ExponentBase2 computes 2^(a-1) where a >= 1. If a is 0, the result is 0. 
-func ExponentBase2(a uint) uint { - return (1 << a) >> 1 -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go deleted file mode 100644 index 1ed9bb499b3..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -/* -Package `metautils` provides convenience functions for dealing with gRPC metadata.MD objects inside -Context handlers. - -While the upstream grpc-go package contains decent functionality (see https://github.com/grpc/grpc-go/blob/master/Documentation/grpc-metadata.md) -they are hard to use. - -The majority of functions center around the NiceMD, which is a convenience wrapper around metadata.MD. For example -the following code allows you to easily extract incoming metadata (server handler) and put it into a new client context -metadata. - - nmd := metautils.ExtractIncoming(serverCtx).Clone(":authorization", ":custom") - clientCtx := nmd.Set("x-client-header", "2").Set("x-another", "3").ToOutgoing(ctx) -*/ - -package metautils diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go deleted file mode 100644 index 15225d710ac..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -package metautils - -import ( - "context" - "strings" - - "google.golang.org/grpc/metadata" -) - -// NiceMD is a convenience wrapper defining extra functions on the metadata. -type NiceMD metadata.MD - -// ExtractIncoming extracts an inbound metadata from the server-side context. -// -// This function always returns a NiceMD wrapper of the metadata.MD, in case the context doesn't have metadata it returns -// a new empty NiceMD. -func ExtractIncoming(ctx context.Context) NiceMD { - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return NiceMD(metadata.Pairs()) - } - return NiceMD(md) -} - -// ExtractOutgoing extracts an outbound metadata from the client-side context. -// -// This function always returns a NiceMD wrapper of the metadata.MD, in case the context doesn't have metadata it returns -// a new empty NiceMD. -func ExtractOutgoing(ctx context.Context) NiceMD { - md, ok := metadata.FromOutgoingContext(ctx) - if !ok { - return NiceMD(metadata.Pairs()) - } - return NiceMD(md) -} - -// Clone performs a *deep* copy of the metadata.MD. -// -// You can specify the lower-case copiedKeys to only copy certain allow-listed keys. If no keys are explicitly allow-listed -// all keys get copied. -func (m NiceMD) Clone(copiedKeys ...string) NiceMD { - newMd := NiceMD(metadata.Pairs()) - for k, vv := range m { - found := false - if len(copiedKeys) == 0 { - found = true - } else { - for _, allowedKey := range copiedKeys { - if strings.EqualFold(allowedKey, k) { - found = true - break - } - } - } - if !found { - continue - } - newMd[k] = make([]string, len(vv)) - copy(newMd[k], vv) - } - return newMd -} - -// ToOutgoing sets the given NiceMD as a client-side context for dispatching. 
-func (m NiceMD) ToOutgoing(ctx context.Context) context.Context { - return metadata.NewOutgoingContext(ctx, metadata.MD(m)) -} - -// ToIncoming sets the given NiceMD as a server-side context for dispatching. -// -// This is mostly useful in ServerInterceptors.. -func (m NiceMD) ToIncoming(ctx context.Context) context.Context { - return metadata.NewIncomingContext(ctx, metadata.MD(m)) -} - -// Get retrieves a single value from the metadata. -// -// It works analogously to http.Header.Get, returning the first value if there are many set. If the value is not set, -// an empty string is returned. -// -// The function is binary-key safe. -func (m NiceMD) Get(key string) string { - k := strings.ToLower(key) - vv, ok := m[k] - if !ok { - return "" - } - return vv[0] -} - -// Del retrieves a single value from the metadata. -// -// It works analogously to http.Header.Del, deleting all values if they exist. -// -// The function is binary-key safe. - -func (m NiceMD) Del(key string) NiceMD { - k := strings.ToLower(key) - delete(m, k) - return m -} - -// Set sets the given value in a metadata. -// -// It works analogously to http.Header.Set, overwriting all previous metadata values. -// -// The function is binary-key safe. -func (m NiceMD) Set(key string, value string) NiceMD { - k := strings.ToLower(key) - m[k] = []string{value} - return m -} - -// Add retrieves a single value from the metadata. -// -// It works analogously to http.Header.Add, as it appends to any existing values associated with key. -// -// The function is binary-key safe. -func (m NiceMD) Add(key string, value string) NiceMD { - k := strings.ToLower(key) - m[k] = append(m[k], value) - return m -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/COPYRIGHT b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/COPYRIGHT new file mode 100644 index 00000000000..3b13627cdbb --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/COPYRIGHT @@ -0,0 +1,2 @@ +Copyright (c) The go-grpc-middleware Authors. +Licensed under the Apache License 2.0. diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/envprovider/LICENSE b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/LICENSE similarity index 99% rename from vendor/go.opentelemetry.io/collector/confmap/provider/envprovider/LICENSE rename to vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/LICENSE index d6456956733..b2b065037fc 100644 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/envprovider/LICENSE +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/LICENSE @@ -1,5 +1,4 @@ - - Apache License + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -199,4 +198,4 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and - limitations under the License. + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/backoff.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/backoff.go new file mode 100644 index 00000000000..5b8aaa683f1 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/backoff.go @@ -0,0 +1,55 @@ +// Copyright (c) The go-grpc-middleware Authors. +// Licensed under the Apache License 2.0. 
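For context on the new file that follows: in v2 every BackoffFunc receives the call context in addition to the attempt number, and the v1 helpers deleted above (backoffutils.JitterUp, backoffutils.ExponentBase2) are folded in as unexported functions. A minimal wiring sketch, with an assumed target and illustrative parameters that are not part of the vendored file:

    import (
        "time"

        retry "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry"
        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
    )

    func dialWithRetry(target string) (*grpc.ClientConn, error) {
        // Retry up to 4 times; waits run 100ms, 200ms, 400ms with +/-10% jitter,
        // since the backoff is scalar * 2^(attempt-1).
        return grpc.Dial(target,
            grpc.WithTransportCredentials(insecure.NewCredentials()),
            grpc.WithUnaryInterceptor(retry.UnaryClientInterceptor(
                retry.WithMax(4),
                retry.WithBackoff(retry.BackoffExponentialWithJitter(100*time.Millisecond, 0.10)),
            )),
        )
    }
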
+ +package retry + +import ( + "context" + "math/rand" + "time" +) + +// BackoffLinear is very simple: it waits for a fixed period of time between calls. +func BackoffLinear(waitBetween time.Duration) BackoffFunc { + return func(ctx context.Context, attempt uint) time.Duration { + return waitBetween + } +} + +// jitterUp adds random jitter to the duration. +// This adds or subtracts time from the duration within a given jitter fraction. +// For example for 10s and jitter 0.1, it will return a time within [9s, 11s]) +func jitterUp(duration time.Duration, jitter float64) time.Duration { + multiplier := jitter * (rand.Float64()*2 - 1) + return time.Duration(float64(duration) * (1 + multiplier)) +} + +// exponentBase2 computes 2^(a-1) where a >= 1. If a is 0, the result is 0. +func exponentBase2(a uint) uint { + return (1 << a) >> 1 +} + +// BackoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment). +// For example waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms. +func BackoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) BackoffFunc { + return func(ctx context.Context, attempt uint) time.Duration { + return jitterUp(waitBetween, jitterFraction) + } +} + +// BackoffExponential produces increasing intervals for each attempt. +// The scalar is multiplied times 2 raised to the current attempt. So the first +// retry with a scalar of 100ms is 100ms, while the 5th attempt would be 1.6s. +func BackoffExponential(scalar time.Duration) BackoffFunc { + return func(ctx context.Context, attempt uint) time.Duration { + return scalar * time.Duration(exponentBase2(attempt)) + } +} + +// BackoffExponentialWithJitter creates an exponential backoff like +// BackoffExponential does, but adds jitter. +func BackoffExponentialWithJitter(scalar time.Duration, jitterFraction float64) BackoffFunc { + return func(ctx context.Context, attempt uint) time.Duration { + return jitterUp(scalar*time.Duration(exponentBase2(attempt)), jitterFraction) + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/doc.go similarity index 68% rename from vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go rename to vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/doc.go index f8ba7198a5a..cad659022ee 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/doc.go @@ -1,10 +1,10 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. +// Copyright (c) The go-grpc-middleware Authors. +// Licensed under the Apache License 2.0. /* -`grpc_retry` provides client-side request retry logic for gRPC. +Package retry provides client-side request retry logic for gRPC. -Client-Side Request Retry Interceptor +# Client-Side Request Retry Interceptor It allows for automatic retry, inside the generated gRPC code of requests based on the gRPC status of the reply. It supports unary (1:1), and server stream (1:n) requests. @@ -12,14 +12,14 @@ of the reply. It supports unary (1:1), and server stream (1:n) requests. By default the interceptors *are disabled*, preventing accidental use of retries. 
You can easily override the number of retries (setting them to more than 0) with a `grpc.ClientOption`, e.g.: - myclient.Ping(ctx, goodPing, grpc_retry.WithMax(5)) + myclient.Ping(ctx, goodPing, grpc_retry.WithMax(5)) Other default options are: retry on `ResourceExhausted` and `Unavailable` gRPC codes, use a 50ms linear backoff with 10% jitter. For chained interceptors, the retry interceptor will call every interceptor that follows it -whenever a retry happens. +whenever when a retry happens. Please see examples for more advanced use. */ -package grpc_retry +package retry diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/options.go similarity index 66% rename from vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go rename to vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/options.go index 7a633e29347..e1466598679 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/options.go @@ -1,7 +1,7 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. +// Copyright (c) The go-grpc-middleware Authors. +// Licensed under the Apache License 2.0. -package grpc_retry +package retry import ( "context" @@ -9,6 +9,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) var ( @@ -22,10 +23,11 @@ var ( max: 0, // disabled perCallTimeout: 0, // disabled includeHeader: true, - codes: DefaultRetriableCodes, - backoffFunc: BackoffFuncContext(func(ctx context.Context, attempt uint) time.Duration { - return BackoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10)(attempt) + backoffFunc: BackoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10), + onRetryCallback: OnRetryCallback(func(ctx context.Context, attempt uint, err error) { + logTrace(ctx, "grpc_retry attempt: %d, backoff for %v", attempt, err) }), + retriableFunc: newRetriableFuncForCodes(DefaultRetriableCodes), } ) @@ -34,16 +36,14 @@ var ( // They are called with an identifier of the attempt, and should return a time the system client should // hold off for. If the time returned is longer than the `context.Context.Deadline` of the request // the deadline of the request takes precedence and the wait will be interrupted before proceeding -// with the next iteration. -type BackoffFunc func(attempt uint) time.Duration - -// BackoffFuncContext denotes a family of functions that control the backoff duration between call retries. -// -// They are called with an identifier of the attempt, and should return a time the system client should -// hold off for. If the time returned is longer than the `context.Context.Deadline` of the request -// the deadline of the request takes precedence and the wait will be interrupted before proceeding // with the next iteration. The context can be used to extract request scoped metadata and context values. -type BackoffFuncContext func(ctx context.Context, attempt uint) time.Duration +type BackoffFunc func(ctx context.Context, attempt uint) time.Duration + +// OnRetryCallback is the type of function called when a retry occurs. +type OnRetryCallback func(ctx context.Context, attempt uint, err error) + +// RetriableFunc denotes a family of functions that control which error should be retried. 
+type RetriableFunc func(err error) bool // Disable disables the retry behaviour on this call, or this interceptor. // @@ -62,16 +62,16 @@ func WithMax(maxRetries uint) CallOption { // WithBackoff sets the `BackoffFunc` used to control time between retries. func WithBackoff(bf BackoffFunc) CallOption { return CallOption{applyFunc: func(o *options) { - o.backoffFunc = BackoffFuncContext(func(ctx context.Context, attempt uint) time.Duration { - return bf(attempt) - }) + o.backoffFunc = bf }} } -// WithBackoffContext sets the `BackoffFuncContext` used to control time between retries. -func WithBackoffContext(bf BackoffFuncContext) CallOption { +// WithOnRetryCallback sets the callback to use when a retry occurs. +// +// By default, when no callback function provided, we will just print a log to trace +func WithOnRetryCallback(fn OnRetryCallback) CallOption { return CallOption{applyFunc: func(o *options) { - o.backoffFunc = bf + o.onRetryCallback = fn }} } @@ -82,7 +82,7 @@ func WithBackoffContext(bf BackoffFuncContext) CallOption { // You cannot automatically retry on Cancelled and Deadline, please use `WithPerRetryTimeout` for these. func WithCodes(retryCodes ...codes.Code) CallOption { return CallOption{applyFunc: func(o *options) { - o.codes = retryCodes + o.retriableFunc = newRetriableFuncForCodes(retryCodes) }} } @@ -104,12 +104,20 @@ func WithPerRetryTimeout(timeout time.Duration) CallOption { }} } +// WithRetriable sets which error should be retried. +func WithRetriable(retriableFunc RetriableFunc) CallOption { + return CallOption{applyFunc: func(o *options) { + o.retriableFunc = retriableFunc + }} +} + type options struct { - max uint - perCallTimeout time.Duration - includeHeader bool - codes []codes.Code - backoffFunc BackoffFuncContext + max uint + perCallTimeout time.Duration + includeHeader bool + backoffFunc BackoffFunc + onRetryCallback OnRetryCallback + retriableFunc RetriableFunc } // CallOption is a grpc.CallOption that is local to grpc_retry. @@ -140,3 +148,20 @@ func filterCallOptions(callOptions []grpc.CallOption) (grpcOptions []grpc.CallOp } return grpcOptions, retryOptions } + +// newRetriableFuncForCodes returns retriable function for specific Codes. +func newRetriableFuncForCodes(codes []codes.Code) func(err error) bool { + return func(err error) bool { + errCode := status.Code(err) + if isContextError(err) { + // context errors are not retriable based on user settings. + return false + } + for _, code := range codes { + if code == errCode { + return true + } + } + return false + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/retry.go similarity index 80% rename from vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go rename to vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/retry.go index 003bbd9066e..2b77084155f 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/retry.go @@ -1,25 +1,25 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. +// Copyright (c) The go-grpc-middleware Authors. +// Licensed under the Apache License 2.0. 
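The options hunk above replaces the fixed slice of retriable codes with a pluggable RetriableFunc and adds an on-retry callback. A short sketch combining both new options; the predicate and the plain log statement are illustrative stand-ins, not part of this patch:

    import (
        "context"
        "log"

        retry "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry"
        "google.golang.org/grpc"
        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    func retryInterceptor() grpc.UnaryClientInterceptor {
        return retry.UnaryClientInterceptor(
            // Retry only on Unavailable; retry.WithCodes(...) builds an
            // equivalent predicate via newRetriableFuncForCodes.
            retry.WithRetriable(func(err error) bool {
                return status.Code(err) == codes.Unavailable
            }),
            // Invoked after each failed attempt, before the next backoff wait.
            retry.WithOnRetryCallback(func(ctx context.Context, attempt uint, err error) {
                log.Printf("grpc retry attempt %d: %v", attempt, err)
            }),
            retry.WithMax(3),
        )
    }
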
-package grpc_retry +package retry import ( "context" + "fmt" "io" - "strconv" "sync" "time" - "github.com/grpc-ecosystem/go-grpc-middleware/util/metautils" + "github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata" "golang.org/x/net/trace" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" + grpcMetadata "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) const ( - AttemptMetadataKey = "x-retry-attempty" + AttemptMetadataKey = "x-retry-attempt" ) // UnaryClientInterceptor returns a new retrying unary client interceptor. @@ -28,7 +28,7 @@ const ( // changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions). func UnaryClientInterceptor(optFuncs ...CallOption) grpc.UnaryClientInterceptor { intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) - return func(parentCtx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + return func(parentCtx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { grpcOpts, retryOpts := filterCallOptions(opts) callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) // short circuit for simplicity, and avoiding allocations. @@ -40,13 +40,14 @@ func UnaryClientInterceptor(optFuncs ...CallOption) grpc.UnaryClientInterceptor if err := waitRetryBackoff(attempt, parentCtx, callOpts); err != nil { return err } - callCtx := perCallContext(parentCtx, callOpts, attempt) + callCtx, cancel := perCallContext(parentCtx, callOpts, attempt) + defer cancel() // Clean up potential resources. lastErr = invoker(callCtx, method, req, reply, cc, grpcOpts...) // TODO(mwitkow): Maybe dial and transport errors should be retriable? if lastErr == nil { return nil } - logTrace(parentCtx, "grpc_retry attempt: %d, got err: %v", attempt, lastErr) + callOpts.onRetryCallback(parentCtx, attempt, lastErr) if isContextError(lastErr) { if parentCtx.Err() != nil { logTrace(parentCtx, "grpc_retry attempt: %d, parent context error: %v", attempt, parentCtx.Err()) @@ -85,7 +86,7 @@ func StreamClientInterceptor(optFuncs ...CallOption) grpc.StreamClientIntercepto return streamer(parentCtx, desc, cc, method, grpcOpts...) } if desc.ClientStreams { - return nil, status.Errorf(codes.Unimplemented, "grpc_retry: cannot retry on ClientStreams, set grpc_retry.Disable()") + return nil, status.Error(codes.Unimplemented, "grpc_retry: cannot retry on ClientStreams, set grpc_retry.Disable()") } var lastErr error @@ -93,10 +94,8 @@ func StreamClientInterceptor(optFuncs ...CallOption) grpc.StreamClientIntercepto if err := waitRetryBackoff(attempt, parentCtx, callOpts); err != nil { return nil, err } - callCtx := perCallContext(parentCtx, callOpts, 0) - var newStreamer grpc.ClientStream - newStreamer, lastErr = streamer(callCtx, desc, cc, method, grpcOpts...) + newStreamer, lastErr = streamer(parentCtx, desc, cc, method, grpcOpts...) 
if lastErr == nil { retryingStreamer := &serverStreamingRetryingStream{ ClientStream: newStreamer, @@ -108,8 +107,7 @@ func StreamClientInterceptor(optFuncs ...CallOption) grpc.StreamClientIntercepto } return retryingStreamer, nil } - - logTrace(parentCtx, "grpc_retry attempt: %d, got err: %v", attempt, lastErr) + callOpts.onRetryCallback(parentCtx, attempt, lastErr) if isContextError(lastErr) { if parentCtx.Err() != nil { logTrace(parentCtx, "grpc_retry attempt: %d, parent context error: %v", attempt, parentCtx.Err()) @@ -135,8 +133,8 @@ func StreamClientInterceptor(optFuncs ...CallOption) grpc.StreamClientIntercepto // a new ClientStream according to the retry policy. type serverStreamingRetryingStream struct { grpc.ClientStream - bufferedSends []interface{} // single message that the client can sen - wasClosedSend bool // indicates that CloseSend was closed + bufferedSends []any // single message that the client can sen + wasClosedSend bool // indicates that CloseSend was closed parentCtx context.Context callOpts *options streamerCall func(ctx context.Context) (grpc.ClientStream, error) @@ -155,7 +153,7 @@ func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream { return s.ClientStream } -func (s *serverStreamingRetryingStream) SendMsg(m interface{}) error { +func (s *serverStreamingRetryingStream) SendMsg(m any) error { s.mu.Lock() s.bufferedSends = append(s.bufferedSends, m) s.mu.Unlock() @@ -169,15 +167,15 @@ func (s *serverStreamingRetryingStream) CloseSend() error { return s.getStream().CloseSend() } -func (s *serverStreamingRetryingStream) Header() (metadata.MD, error) { +func (s *serverStreamingRetryingStream) Header() (grpcMetadata.MD, error) { return s.getStream().Header() } -func (s *serverStreamingRetryingStream) Trailer() metadata.MD { +func (s *serverStreamingRetryingStream) Trailer() grpcMetadata.MD { return s.getStream().Trailer() } -func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error { +func (s *serverStreamingRetryingStream) RecvMsg(m any) error { attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m) if !attemptRetry { return lastErr // success or hard failure @@ -187,8 +185,8 @@ func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error { if err := waitRetryBackoff(attempt, s.parentCtx, s.callOpts); err != nil { return err } - callCtx := perCallContext(s.parentCtx, s.callOpts, attempt) - newStream, err := s.reestablishStreamAndResendBuffer(callCtx) + s.callOpts.onRetryCallback(s.parentCtx, attempt, lastErr) + newStream, err := s.reestablishStreamAndResendBuffer(s.parentCtx) if err != nil { // Retry dial and transport errors of establishing stream as grpc doesn't retry. 
if isRetriable(err, s.callOpts) { @@ -199,7 +197,7 @@ func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error { s.setStream(newStream) attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m) - //fmt.Printf("Received message and indicate: %v %v\n", attemptRetry, lastErr) + if !attemptRetry { return lastErr } @@ -207,7 +205,7 @@ func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error { return lastErr } -func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) { +func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m any) (bool, error) { err := s.getStream().RecvMsg(m) if err == nil || err == io.EOF { return false, err @@ -226,9 +224,7 @@ func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{} return isRetriable(err, s.callOpts), err } -func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer( - callCtx context.Context, -) (grpc.ClientStream, error) { +func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) { s.mu.RLock() bufferedSends := s.bufferedSends s.mu.RUnlock() @@ -260,7 +256,9 @@ func waitRetryBackoff(attempt uint, parentCtx context.Context, callOpts *options timer := time.NewTimer(waitTime) select { case <-parentCtx.Done(): - timer.Stop() + if !timer.Stop() { + <-timer.C + } return contextErrToGrpcErr(parentCtx.Err()) case <-timer.C: } @@ -269,15 +267,8 @@ func waitRetryBackoff(attempt uint, parentCtx context.Context, callOpts *options } func isRetriable(err error, callOpts *options) bool { - errCode := status.Code(err) - if isContextError(err) { - // context errors are not retriable based on user settings. - return false - } - for _, code := range callOpts.codes { - if code == errCode { - return true - } + if callOpts.retriableFunc != nil { + return callOpts.retriableFunc(err) } return false } @@ -287,16 +278,18 @@ func isContextError(err error) bool { return code == codes.DeadlineExceeded || code == codes.Canceled } -func perCallContext(parentCtx context.Context, callOpts *options, attempt uint) context.Context { +func perCallContext(parentCtx context.Context, callOpts *options, attempt uint) (context.Context, context.CancelFunc) { + cancel := context.CancelFunc(func() {}) + ctx := parentCtx if callOpts.perCallTimeout != 0 { - ctx, _ = context.WithTimeout(ctx, callOpts.perCallTimeout) + ctx, cancel = context.WithTimeout(ctx, callOpts.perCallTimeout) } if attempt > 0 && callOpts.includeHeader { - mdClone := metautils.ExtractOutgoing(ctx).Clone().Set(AttemptMetadataKey, strconv.FormatUint(uint64(attempt), 10)) + mdClone := metadata.ExtractOutgoing(ctx).Clone().Set(AttemptMetadataKey, fmt.Sprintf("%d", attempt)) ctx = mdClone.ToOutgoing(ctx) } - return ctx + return ctx, cancel } func contextErrToGrpcErr(err error) error { @@ -310,7 +303,7 @@ func contextErrToGrpcErr(err error) error { } } -func logTrace(ctx context.Context, format string, a ...interface{}) { +func logTrace(ctx context.Context, format string, a ...any) { tr, ok := trace.FromContext(ctx) if !ok { return diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/doc.go new file mode 100644 index 00000000000..3aac1b3648f --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/doc.go @@ -0,0 +1,19 @@ +// Copyright (c) The go-grpc-middleware Authors. +// Licensed under the Apache License 2.0. 
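The v2 metadata package vendored below succeeds the metautils.NiceMD wrapper removed earlier in this patch: the extract/clone/set surface is the same, but keys and values now pass through encodeKeyValue (single_key.go, later in this patch), which base64-encodes values for "-bin" keys. A usage sketch under assumed names (serverCtx and raw are placeholders):

    import (
        "context"

        "github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata"
    )

    func forward(serverCtx context.Context, raw []byte) context.Context {
        // Copy only the allow-listed key, then add outbound headers.
        md := metadata.ExtractIncoming(serverCtx).Clone("authorization")
        md = md.Set("x-source", "gateway")
        md = md.Set("checksum-bin", string(raw)) // "-bin" value is stored base64-encoded
        return md.ToOutgoing(serverCtx)
    }
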
+ +/* +Package `metadata` provides convenience functions for dealing with gRPC metadata.MD objects inside +Context handlers. + +While the upstream grpc-go package contains decent functionality (see https://github.com/grpc/grpc-go/blob/master/Documentation/grpc-metadata.md) +they are hard to use. + +The majority of functions center around the MD, which is a convenience wrapper around metadata.MD. For example +the following code allows you to easily extract incoming metadata (server handler) and put it into a new client context +metadata. + + md := metadata.ExtractIncoming(serverCtx).Clone(":authorization", ":custom") + clientCtx := md.Set("x-client-header", "2").Set("x-another", "3").ToOutgoing(ctx) +*/ + +package metadata diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/metadata.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/metadata.go new file mode 100644 index 00000000000..2eeda06857c --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/metadata.go @@ -0,0 +1,126 @@ +// Copyright (c) The go-grpc-middleware Authors. +// Licensed under the Apache License 2.0. + +package metadata + +import ( + "context" + "strings" + + grpcMetadata "google.golang.org/grpc/metadata" +) + +// MD is a convenience wrapper defining extra functions on the metadata. +type MD grpcMetadata.MD + +// ExtractIncoming extracts an inbound metadata from the server-side context. +// +// This function always returns a MD wrapper of the grpcMetadata.MD, in case the context doesn't have metadata it returns +// a new empty MD. +func ExtractIncoming(ctx context.Context) MD { + md, ok := grpcMetadata.FromIncomingContext(ctx) + if !ok { + return MD(grpcMetadata.Pairs()) + } + return MD(md) +} + +// ExtractOutgoing extracts an outbound metadata from the client-side context. +// +// This function always returns a MD wrapper of the grpcMetadata.MD, in case the context doesn't have metadata it returns +// a new empty MD. +func ExtractOutgoing(ctx context.Context) MD { + md, ok := grpcMetadata.FromOutgoingContext(ctx) + if !ok { + return MD(grpcMetadata.Pairs()) + } + return MD(md) +} + +// Clone performs a *deep* copy of the grpcMetadata.MD. +// +// You can specify the lower-case copiedKeys to only copy certain whitelisted keys. If no keys are explicitly whitelisted +// all keys get copied. +func (m MD) Clone(copiedKeys ...string) MD { + newMd := MD(grpcMetadata.Pairs()) + for k, vv := range m { + found := false + if len(copiedKeys) == 0 { + found = true + } else { + for _, allowedKey := range copiedKeys { + if strings.EqualFold(allowedKey, k) { + found = true + break + } + } + } + if !found { + continue + } + newMd[k] = make([]string, len(vv)) + copy(newMd[k], vv) + } + return newMd +} + +// ToOutgoing sets the given MD as a client-side context for dispatching. +func (m MD) ToOutgoing(ctx context.Context) context.Context { + return grpcMetadata.NewOutgoingContext(ctx, grpcMetadata.MD(m)) +} + +// ToIncoming sets the given MD as a server-side context for dispatching. +// +// This is mostly useful in ServerInterceptors. +func (m MD) ToIncoming(ctx context.Context) context.Context { + return grpcMetadata.NewIncomingContext(ctx, grpcMetadata.MD(m)) +} + +// Get retrieves a single value from the metadata. +// +// It works analogously to http.Header.Get, returning the first value if there are many set. If the value is not set, +// an empty string is returned. +// +// The function is binary-key safe. 
+func (m MD) Get(key string) string { + k, _ := encodeKeyValue(key, "") + vv, ok := m[k] + if !ok { + return "" + } + return vv[0] +} + +// Del retrieves a single value from the metadata. +// +// It works analogously to http.Header.Del, deleting all values if they exist. +// +// The function is binary-key safe. + +func (m MD) Del(key string) MD { + k, _ := encodeKeyValue(key, "") + delete(m, k) + return m +} + +// Set sets the given value in a metadata. +// +// It works analogously to http.Header.Set, overwriting all previous metadata values. +// +// The function is binary-key safe. +func (m MD) Set(key string, value string) MD { + k, v := encodeKeyValue(key, value) + m[k] = []string{v} + return m +} + +// Add retrieves a single value from the metadata. +// +// It works analogously to http.Header.Add, as it appends to any existing values associated with key. +// +// The function is binary-key safe. +func (m MD) Add(key string, value string) MD { + k, v := encodeKeyValue(key, value) + m[k] = append(m[k], v) + return m +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/single_key.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/single_key.go new file mode 100644 index 00000000000..e758c092f93 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/single_key.go @@ -0,0 +1,21 @@ +// Copyright (c) The go-grpc-middleware Authors. +// Licensed under the Apache License 2.0. + +package metadata + +import ( + "encoding/base64" + "strings" +) + +const ( + binHdrSuffix = "-bin" +) + +func encodeKeyValue(k, v string) (string, string) { + k = strings.ToLower(k) + if strings.HasSuffix(k, binHdrSuffix) { + return k, base64.StdEncoding.EncodeToString([]byte(v)) + } + return k, v +} diff --git a/vendor/github.com/hashicorp/consul/api/.copywrite.hcl b/vendor/github.com/hashicorp/consul/api/.copywrite.hcl new file mode 100644 index 00000000000..7e4c0b58a8a --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/.copywrite.hcl @@ -0,0 +1,8 @@ +schema_version = 1 + +project { + license = "MPL-2.0" + copyright_year = 2024 + + header_ignore = [] +} diff --git a/vendor/github.com/hashicorp/consul/api/LICENSE b/vendor/github.com/hashicorp/consul/api/LICENSE index c72625e4cc8..7c5baa45e1c 100644 --- a/vendor/github.com/hashicorp/consul/api/LICENSE +++ b/vendor/github.com/hashicorp/consul/api/LICENSE @@ -1,92 +1,92 @@ -Copyright (c) 2013 HashiCorp, Inc. +Copyright (c) 2020 HashiCorp, Inc. Mozilla Public License, version 2.0 1. Definitions -1.1. “Contributor” +1.1. "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. -1.2. “Contributor Version” +1.2. "Contributor Version" means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. + Contributor and that particular Contributor's Contribution. -1.3. “Contribution” +1.3. "Contribution" means Covered Software of a particular Contributor. -1.4. “Covered Software” +1.4. "Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. -1.5. “Incompatible With Secondary Licenses” +1.5. "Incompatible With Secondary Licenses" means a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or - b. 
that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. -1.6. “Executable Form” +1.6. "Executable Form" means any form of the work other than Source Code Form. -1.7. “Larger Work” +1.7. "Larger Work" - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. -1.8. “License” +1.8. "License" means this document. -1.9. “Licensable” +1.9. "Licensable" - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. -1.10. “Modifications” +1.10. "Modifications" means any of the following: - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or b. any new file in Source Code Form that contains any Covered Software. -1.11. “Patent Claims” of a Contributor +1.11. "Patent Claims" of a Contributor - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. -1.12. “Secondary License” +1.12. "Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. -1.13. “Source Code Form” +1.13. "Source Code Form" means the form of the work preferred for making modifications. -1.14. “You” (or “Your”) +1.14. "You" (or "Your") means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is + License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause + definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. @@ -102,57 +102,59 @@ Mozilla Public License, version 2.0 a. 
under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. 2.2. Effective Date - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. 2.3. Limitations on Grant Scope - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: a. for any code that a Contributor has removed from Covered Software; or - b. for infringements caused by: (i) Your and any other third party’s + b. for infringements caused by: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). 2.4. Subsequent Licenses No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). 2.5. Representation - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. 2.6. 
Fair Use - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. 2.7. Conditions @@ -165,11 +167,12 @@ Mozilla Public License, version 2.0 3.1. Distribution of Source Form All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. 3.2. Distribution of Executable Form @@ -181,39 +184,40 @@ Mozilla Public License, version 2.0 reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. 3.3. Distribution of a Larger Work You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). 3.4. 
Notices - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. 3.5. Application of Additional Terms You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any @@ -222,14 +226,14 @@ Mozilla Public License, version 2.0 4. Inability to Comply Due to Statute or Regulation If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. 5. Termination @@ -237,21 +241,22 @@ Mozilla Public License, version 2.0 fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. 
- Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. 5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been @@ -260,16 +265,16 @@ Mozilla Public License, version 2.0 6. Disclaimer of Warranty - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. 7. 
Limitation of Liability @@ -281,27 +286,29 @@ Mozilla Public License, version 2.0 goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. 8. Litigation - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. 9. Miscellaneous - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. 10. Versions of the License @@ -315,23 +322,24 @@ Mozilla Public License, version 2.0 10.2. Effect of New Versions - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license steward. 10.3. Modified Versions If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). 
+ create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. Exhibit A - Source Code Form License Notice @@ -342,15 +350,16 @@ Exhibit A - Source Code Form License Notice obtain one at http://mozilla.org/MPL/2.0/. -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. You may add additional accurate notices of copyright ownership. -Exhibit B - “Incompatible With Secondary Licenses” Notice +Exhibit B - "Incompatible With Secondary Licenses" Notice - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0. 
diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go index 48d2e66ee97..fcea0fdde08 100644 --- a/vendor/github.com/hashicorp/consul/api/acl.go +++ b/vendor/github.com/hashicorp/consul/api/acl.go @@ -19,6 +19,15 @@ const ( // ACLManagementType is the management type token ACLManagementType = "management" + + // ACLTemplatedPolicy names + ACLTemplatedPolicyServiceName = "builtin/service" + ACLTemplatedPolicyNodeName = "builtin/node" + ACLTemplatedPolicyDNSName = "builtin/dns" + ACLTemplatedPolicyNomadServerName = "builtin/nomad-server" + ACLTemplatedPolicyWorkloadIdentityName = "builtin/workload-identity" + ACLTemplatedPolicyAPIGatewayName = "builtin/api-gateway" + ACLTemplatedPolicyNomadClientName = "builtin/nomad-client" ) type ACLLink struct { @@ -40,6 +49,7 @@ type ACLToken struct { Roles []*ACLTokenRoleLink `json:",omitempty"` ServiceIdentities []*ACLServiceIdentity `json:",omitempty"` NodeIdentities []*ACLNodeIdentity `json:",omitempty"` + TemplatedPolicies []*ACLTemplatedPolicy `json:",omitempty"` Local bool AuthMethod string `json:",omitempty"` ExpirationTTL time.Duration `json:",omitempty"` @@ -88,6 +98,7 @@ type ACLTokenListEntry struct { Roles []*ACLTokenRoleLink `json:",omitempty"` ServiceIdentities []*ACLServiceIdentity `json:",omitempty"` NodeIdentities []*ACLNodeIdentity `json:",omitempty"` + TemplatedPolicies []*ACLTemplatedPolicy `json:",omitempty"` Local bool AuthMethod string `json:",omitempty"` ExpirationTime *time.Time `json:",omitempty"` @@ -148,6 +159,28 @@ type ACLNodeIdentity struct { Datacenter string } +// ACLTemplatedPolicy represents a template used to generate a `synthetic` policy +// given some input variables. +type ACLTemplatedPolicy struct { + TemplateName string + TemplateVariables *ACLTemplatedPolicyVariables `json:",omitempty"` + + // Datacenters are an artifact of NodeIdentity & ServiceIdentity. + // It is used to facilitate the future migration away from both. + Datacenters []string `json:",omitempty"` +} + +type ACLTemplatedPolicyResponse struct { + TemplateName string + Schema string + Template string + Description string +} + +type ACLTemplatedPolicyVariables struct { + Name string +} + // ACLPolicy represents an ACL Policy. type ACLPolicy struct { ID string @@ -196,6 +229,7 @@ type ACLRole struct { Policies []*ACLRolePolicyLink `json:",omitempty"` ServiceIdentities []*ACLServiceIdentity `json:",omitempty"` NodeIdentities []*ACLNodeIdentity `json:",omitempty"` + TemplatedPolicies []*ACLTemplatedPolicy `json:",omitempty"` Hash []byte CreateIndex uint64 ModifyIndex uint64 @@ -218,6 +252,15 @@ const ( // BindingRuleBindTypeRole binds to pre-existing roles with the given name. BindingRuleBindTypeRole BindingRuleBindType = "role" + + // BindingRuleBindTypeNode binds to a node identity with given name. + BindingRuleBindTypeNode BindingRuleBindType = "node" + + // BindingRuleBindTypePolicy binds to a specific policy with given name. + BindingRuleBindTypePolicy BindingRuleBindType = "policy" + + // BindingRuleBindTypeTemplatedPolicy binds to a templated policy with given template name and variables.
+ BindingRuleBindTypeTemplatedPolicy BindingRuleBindType = "templated-policy" ) type ACLBindingRule struct { @@ -227,6 +270,7 @@ type ACLBindingRule struct { Selector string BindType BindingRuleBindType BindName string + BindVars *ACLTemplatedPolicyVariables `json:",omitempty"` CreateIndex uint64 ModifyIndex uint64 @@ -1623,3 +1667,78 @@ func (a *ACL) OIDCCallback(auth *ACLOIDCCallbackParams, q *WriteOptions) (*ACLTo } return &out, wm, nil } + +// TemplatedPolicyReadByName retrieves the templated policy details (by name). Returns nil if not found. +func (a *ACL) TemplatedPolicyReadByName(templateName string, q *QueryOptions) (*ACLTemplatedPolicyResponse, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/templated-policy/name/"+templateName) + r.setQueryOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + found, resp, err := requireNotFoundOrOK(resp) + if err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if !found { + return nil, qm, nil + } + + var out ACLTemplatedPolicyResponse + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +// TemplatedPolicyList retrieves a listing of all templated policies. +func (a *ACL) TemplatedPolicyList(q *QueryOptions) (map[string]ACLTemplatedPolicyResponse, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/templated-policies") + r.setQueryOptions(q) + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries map[string]ACLTemplatedPolicyResponse + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// TemplatedPolicyPreview is used to preview the policy rendered by the templated policy. +func (a *ACL) TemplatedPolicyPreview(tp *ACLTemplatedPolicy, q *WriteOptions) (*ACLPolicy, *WriteMeta, error) { + r := a.c.newRequest("POST", "/v1/acl/templated-policy/preview/"+tp.TemplateName) + r.setWriteOptions(q) + r.obj = tp.TemplateVariables + + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + wm := &WriteMeta{RequestTime: rtt} + var out ACLPolicy + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, wm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go index 6775edf4257..24e2c50d649 100644 --- a/vendor/github.com/hashicorp/consul/api/agent.go +++ b/vendor/github.com/hashicorp/consul/api/agent.go @@ -307,6 +307,10 @@ type ServiceRegisterOpts struct { // having to manually deregister checks. ReplaceExistingChecks bool + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string + // ctx is an optional context pass through to the underlying HTTP // request layer. Use WithContext() to set the context. 
ctx context.Context @@ -835,6 +839,9 @@ func (a *Agent) serviceRegister(service *AgentServiceRegistration, opts ServiceR if opts.ReplaceExistingChecks { r.params.Set("replace-existing-checks", "true") } + if opts.Token != "" { + r.header.Set("X-Consul-Token", opts.Token) + } _, resp, err := a.c.doRequest(r) if err != nil { return err @@ -991,7 +998,14 @@ func (a *Agent) UpdateTTLOpts(checkID, output, status string, q *QueryOptions) e // CheckRegister is used to register a new check with // the local agent func (a *Agent) CheckRegister(check *AgentCheckRegistration) error { + return a.CheckRegisterOpts(check, nil) +} + +// CheckRegisterOpts is used to register a new check with +// the local agent using query options +func (a *Agent) CheckRegisterOpts(check *AgentCheckRegistration, q *QueryOptions) error { r := a.c.newRequest("PUT", "/v1/agent/check/register") + r.setQueryOptions(q) r.obj = check _, resp, err := a.c.doRequest(r) if err != nil { @@ -1379,6 +1393,10 @@ func (a *Agent) UpdateConfigFileRegistrationToken(token string, q *WriteOptions) return a.updateToken("config_file_service_registration", token, q) } +func (a *Agent) UpdateDNSToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateToken("dns", token, q) +} + // updateToken can be used to update one of an agent's ACL tokens after the agent has // started. The tokens may not be persisted, so will need to be updated again if // the agent is restarted unless the agent is configured to persist them. diff --git a/vendor/github.com/hashicorp/consul/api/config_entry.go b/vendor/github.com/hashicorp/consul/api/config_entry.go index 405e92ef274..ffc18a85ed5 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry.go @@ -39,11 +39,12 @@ const ( ) const ( - BuiltinAWSLambdaExtension string = "builtin/aws/lambda" - BuiltinExtAuthzExtension string = "builtin/ext-authz" - BuiltinLuaExtension string = "builtin/lua" - BuiltinPropertyOverrideExtension string = "builtin/property-override" - BuiltinWasmExtension string = "builtin/wasm" + BuiltinAWSLambdaExtension string = "builtin/aws/lambda" + BuiltinExtAuthzExtension string = "builtin/ext-authz" + BuiltinLuaExtension string = "builtin/lua" + BuiltinOTELAccessLoggingExtension string = "builtin/otel-access-logging" + BuiltinPropertyOverrideExtension string = "builtin/property-override" + BuiltinWasmExtension string = "builtin/wasm" // BuiltinValidateExtension should not be exposed directly or accepted as a valid configured // extension type, as it is only used indirectly via troubleshooting tools. It is included here // for common reference alongside other builtin extensions. @@ -314,6 +315,48 @@ type UpstreamLimits struct { MaxConcurrentRequests *int `alias:"max_concurrent_requests"` } +// RateLimits is rate limiting configuration that is applied to +// inbound traffic for a service. +// Rate limiting is a Consul enterprise feature. +type RateLimits struct { + InstanceLevel InstanceLevelRateLimits `alias:"instance_level"` +} + +// InstanceLevelRateLimits represents rate limit configuration +// that is applied per service instance. +type InstanceLevelRateLimits struct { + // RequestsPerSecond is the average number of requests per second that can be + // made without being throttled. This field is required if RequestsMaxBurst + // is set. The allowed number of requests may exceed RequestsPerSecond up to + // the value specified in RequestsMaxBurst.
+ // + // Internally, this is the refill rate of the token bucket used for rate limiting. + RequestsPerSecond int `alias:"requests_per_second"` + + // RequestsMaxBurst is the maximum number of requests that can be sent + // in a burst. Should be equal to or greater than RequestsPerSecond. + // If unset, defaults to RequestsPerSecond. + // + // Internally, this is the maximum size of the token bucket used for rate limiting. + RequestsMaxBurst int `alias:"requests_max_burst"` + + // Routes is a list of rate limits applied to specific routes. + // For a given request, the first matching route will be applied, if any + // Overrides any top-level configuration. + Routes []InstanceLevelRouteRateLimits +} + +// InstanceLevelRouteRateLimits represents rate limit configuration +// applied to a route matching one of PathExact/PathPrefix/PathRegex. +type InstanceLevelRouteRateLimits struct { + PathExact string `alias:"path_exact"` + PathPrefix string `alias:"path_prefix"` + PathRegex string `alias:"path_regex"` + + RequestsPerSecond int `alias:"requests_per_second"` + RequestsMaxBurst int `alias:"requests_max_burst"` +} + type ServiceConfigEntry struct { Kind string Name string @@ -332,6 +375,7 @@ type ServiceConfigEntry struct { LocalConnectTimeoutMs int `json:",omitempty" alias:"local_connect_timeout_ms"` LocalRequestTimeoutMs int `json:",omitempty" alias:"local_request_timeout_ms"` BalanceInboundConnections string `json:",omitempty" alias:"balance_inbound_connections"` + RateLimits *RateLimits `json:",omitempty" alias:"rate_limits"` EnvoyExtensions []EnvoyExtension `json:",omitempty" alias:"envoy_extensions"` Meta map[string]string `json:",omitempty"` CreateIndex uint64 diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go b/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go index 3696f7be554..572d9d2d5e4 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go @@ -6,6 +6,8 @@ package api import ( "encoding/json" "time" + + "github.com/hashicorp/go-multierror" ) type ServiceRouterConfigEntry struct { @@ -39,9 +41,10 @@ type ServiceRouteMatch struct { } type ServiceRouteHTTPMatch struct { - PathExact string `json:",omitempty" alias:"path_exact"` - PathPrefix string `json:",omitempty" alias:"path_prefix"` - PathRegex string `json:",omitempty" alias:"path_regex"` + PathExact string `json:",omitempty" alias:"path_exact"` + PathPrefix string `json:",omitempty" alias:"path_prefix"` + PathRegex string `json:",omitempty" alias:"path_regex"` + CaseInsensitive bool `json:",omitempty" alias:"case_insensitive"` Header []ServiceRouteHTTPMatchHeader `json:",omitempty"` QueryParam []ServiceRouteHTTPMatchQueryParam `json:",omitempty" alias:"query_param"` @@ -189,14 +192,19 @@ func (e *ServiceResolverConfigEntry) MarshalJSON() ([]byte, error) { type Alias ServiceResolverConfigEntry exported := &struct { ConnectTimeout string `json:",omitempty"` + RequestTimeout string `json:",omitempty"` *Alias }{ ConnectTimeout: e.ConnectTimeout.String(), + RequestTimeout: e.RequestTimeout.String(), Alias: (*Alias)(e), } if e.ConnectTimeout == 0 { exported.ConnectTimeout = "" } + if e.RequestTimeout == 0 { + exported.RequestTimeout = "" + } return json.Marshal(exported) } @@ -205,20 +213,27 @@ func (e *ServiceResolverConfigEntry) UnmarshalJSON(data []byte) error { type Alias ServiceResolverConfigEntry aux := &struct { ConnectTimeout string + RequestTimeout string *Alias }{ Alias: 
(*Alias)(e), } - if err := json.Unmarshal(data, &aux); err != nil { + var err error + if err = json.Unmarshal(data, &aux); err != nil { return err } - var err error + var merr *multierror.Error if aux.ConnectTimeout != "" { if e.ConnectTimeout, err = time.ParseDuration(aux.ConnectTimeout); err != nil { - return err + merr = multierror.Append(merr, err) } } - return nil + if aux.RequestTimeout != "" { + if e.RequestTimeout, err = time.ParseDuration(aux.RequestTimeout); err != nil { + merr = multierror.Append(merr, err) + } + } + return merr.ErrorOrNil() } func (e *ServiceResolverConfigEntry) GetKind() string { return e.Kind } diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_exports.go b/vendor/github.com/hashicorp/consul/api/config_entry_exports.go index 97920e40ddc..b8e9830e652 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_exports.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_exports.go @@ -3,7 +3,9 @@ package api -import "encoding/json" +import ( + "encoding/json" +) // ExportedServicesConfigEntry manages the exported services for a single admin partition. // Admin Partitions are a Consul Enterprise feature. diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go b/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go index b59f1c0621f..baf274e2da0 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go @@ -284,6 +284,10 @@ type APIGatewayListener struct { Protocol string // TLS is the TLS settings for the listener. TLS APIGatewayTLSConfiguration + // Override is the policy that overrides all other policy and route specific configuration + Override *APIGatewayPolicy `json:",omitempty"` + // Default is the policy that is the default for the listener and route, routes can override this behavior + Default *APIGatewayPolicy `json:",omitempty"` } // APIGatewayTLSConfiguration specifies the configuration of a listener’s @@ -302,3 +306,39 @@ type APIGatewayTLSConfiguration struct { // Only applicable to connections negotiated via TLS 1.2 or earlier CipherSuites []string `json:",omitempty" alias:"cipher_suites"` } + +// APIGatewayPolicy holds the policy that configures the gateway listener, this is used in the `Override` and `Default` fields of a listener +type APIGatewayPolicy struct { + // JWT holds the JWT configuration for the Listener + JWT *APIGatewayJWTRequirement `json:",omitempty"` +} + +// APIGatewayJWTRequirement holds the list of JWT providers to be verified against +type APIGatewayJWTRequirement struct { + // Providers is a list of providers to consider when verifying a JWT. + Providers []*APIGatewayJWTProvider `json:",omitempty"` +} + +// APIGatewayJWTProvider holds the provider and claim verification information +type APIGatewayJWTProvider struct { + // Name is the name of the JWT provider. There MUST be a corresponding + // "jwt-provider" config entry with this name. + Name string `json:",omitempty"` + + // VerifyClaims is a list of additional claims to verify in a JWT's payload. + VerifyClaims []*APIGatewayJWTClaimVerification `json:",omitempty" alias:"verify_claims"` +} + +// APIGatewayJWTClaimVerification holds the actual claim information to be verified +type APIGatewayJWTClaimVerification struct { + // Path is the path to the claim in the token JSON. 
+ Path []string `json:",omitempty"` + + // Value is the expected value at the given path: + // - If the type at the path is a list then we verify + // that this value is contained in the list. + // + // - If the type at the path is a string then we verify + // that this value matches. + Value string `json:",omitempty"` +} diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_rate_limit_ip.go b/vendor/github.com/hashicorp/consul/api/config_entry_rate_limit_ip.go index 8df7d4c98e7..7af2a2658f2 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_rate_limit_ip.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_rate_limit_ip.go @@ -4,8 +4,8 @@ package api type ReadWriteRatesConfig struct { - ReadRate float64 - WriteRate float64 + ReadRate float64 `alias:"read_rate"` + WriteRate float64 `alias:"write_rate"` } type RateLimitIPConfigEntry struct { @@ -16,8 +16,8 @@ type RateLimitIPConfigEntry struct { Meta map[string]string `json:",omitempty"` // overall limits - ReadRate float64 - WriteRate float64 + ReadRate float64 `alias:"read_rate"` + WriteRate float64 `alias:"write_rate"` //limits specific to a type of call ACL *ReadWriteRatesConfig `json:",omitempty"` // OperationCategoryACL OperationCategory = "ACL" diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_routes.go b/vendor/github.com/hashicorp/consul/api/config_entry_routes.go index cfea394535d..bbaa032d50f 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_routes.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_routes.go @@ -3,6 +3,8 @@ package api +import "time" + // TCPRouteConfigEntry -- TODO stub type TCPRouteConfigEntry struct { // Kind of the config entry. This should be set to api.TCPRoute. @@ -195,8 +197,17 @@ type HTTPQueryMatch struct { // HTTPFilters specifies a list of filters used to modify a request // before it is routed to an upstream. type HTTPFilters struct { - Headers []HTTPHeaderFilter - URLRewrite *URLRewrite + Headers []HTTPHeaderFilter + URLRewrite *URLRewrite + RetryFilter *RetryFilter + TimeoutFilter *TimeoutFilter + JWT *JWTFilter +} + +// HTTPResponseFilters specifies a list of filters used to modify a +// response returned by an upstream +type HTTPResponseFilters struct { + Headers []HTTPHeaderFilter } // HTTPHeaderFilter specifies how HTTP headers should be modified. @@ -210,12 +221,32 @@ type URLRewrite struct { Path string } +type RetryFilter struct { + NumRetries uint32 + RetryOn []string + RetryOnStatusCodes []uint32 + RetryOnConnectFailure bool +} + +type TimeoutFilter struct { + RequestTimeout time.Duration + IdleTimeout time.Duration +} + +// JWTFilter specifies the JWT configuration for a route +type JWTFilter struct { + Providers []*APIGatewayJWTProvider `json:",omitempty"` +} + // HTTPRouteRule specifies the routing rules used to determine what upstream // service an HTTP request is routed to. type HTTPRouteRule struct { // Filters is a list of HTTP-based filters used to modify a request prior // to routing it to the upstream service Filters HTTPFilters + // ResponseFilters is a list of HTTP-based filters used to modify a response + // returned by the upstream service + ResponseFilters HTTPResponseFilters // Matches specified the matching criteria used in the routing table. If a // request matches the given HTTPMatch configuration, then traffic is routed // to services specified in the Services field. 
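(Aside: to make the new route filters concrete, a minimal sketch of writing an HTTP route config entry that attaches the RetryFilter and TimeoutFilter added above. It assumes the HTTPRouteConfigEntry, HTTPRouteRule, HTTPService, and ResourceReference types defined elsewhere in this package rather than in this hunk, plus a locally running agent; the gateway, route, and service names are hypothetical.)

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Connect to a local agent with default client settings.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}

	route := &api.HTTPRouteConfigEntry{
		Kind: api.HTTPRoute,
		Name: "example-route", // hypothetical route name
		Parents: []api.ResourceReference{{
			Kind: api.APIGateway,
			Name: "example-gateway", // hypothetical gateway this route binds to
		}},
		Rules: []api.HTTPRouteRule{{
			Filters: api.HTTPFilters{
				// Retry connect failures and 5xx-class conditions up to 3 times.
				RetryFilter: &api.RetryFilter{
					NumRetries:            3,
					RetryOn:               []string{"5xx"},
					RetryOnConnectFailure: true,
				},
				// Bound each request to 10s and idle connections to 30s.
				TimeoutFilter: &api.TimeoutFilter{
					RequestTimeout: 10 * time.Second,
					IdleTimeout:    30 * time.Second,
				},
			},
			Services: []api.HTTPService{{
				Name:   "example-service", // hypothetical upstream
				Weight: 1,
			}},
		}},
	}

	// Set reports whether the server accepted the entry.
	ok, _, err := client.ConfigEntries().Set(route, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("route accepted:", ok)
}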
@@ -231,10 +262,15 @@ type HTTPService struct { // Weight is an arbitrary integer used in calculating how much // traffic should be sent to the given service. Weight int + // Filters is a list of HTTP-based filters used to modify a request prior // to routing it to the upstream service Filters HTTPFilters + // ResponseFilters is a list of HTTP-based filters used to modify the + // response returned from the upstream service + ResponseFilters HTTPResponseFilters + // Partition is the partition the config entry is associated with. // Partitioning is a Consul Enterprise feature. Partition string `json:",omitempty"` diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_status.go b/vendor/github.com/hashicorp/consul/api/config_entry_status.go index 2d16ea0fc4e..997066f24fe 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_status.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_status.go @@ -106,6 +106,10 @@ const ( // certificates and cannot bind to any routes GatewayReasonInvalidCertificates GatewayConditionReason = "InvalidCertificates" + // This reason is used with the "Accepted" condition when the gateway has multiple invalid + // JWT providers and cannot bind to any routes + GatewayReasonInvalidJWTProviders GatewayConditionReason = "InvalidJWTProviders" + // This condition indicates that the gateway was unable to resolve // conflicting specification requirements for this Listener. If a // Listener is conflicted, its network port should not be configured @@ -163,6 +167,14 @@ const ( // If the reference is not allowed, the reason RefNotPermitted must be used // instead. GatewayListenerReasonInvalidCertificateRef GatewayConditionReason = "InvalidCertificateRef" + + // This reason is used with the "ResolvedRefs" condition when a + // Listener has a JWT configuration with at least one JWTProvider + // that is invalid or does not exist. + // A JWTProvider is considered invalid when it refers to a nonexistent + // or unsupported resource or kind, or when the data within that resource + // is malformed. 
+ GatewayListenerReasonInvalidJWTProviderRef GatewayConditionReason = "InvalidJWTProviderRef" ) var validGatewayConditionReasonsMapping = map[GatewayConditionType]map[ConditionStatus][]GatewayConditionReason{ @@ -172,6 +184,7 @@ var validGatewayConditionReasonsMapping = map[GatewayConditionType]map[Condition }, ConditionStatusFalse: { GatewayReasonInvalidCertificates, + GatewayReasonInvalidJWTProviders, }, ConditionStatusUnknown: {}, }, @@ -190,6 +203,7 @@ var validGatewayConditionReasonsMapping = map[GatewayConditionType]map[Condition }, ConditionStatusFalse: { GatewayListenerReasonInvalidCertificateRef, + GatewayListenerReasonInvalidJWTProviderRef, }, ConditionStatusUnknown: {}, }, @@ -282,6 +296,10 @@ const ( // This reason is used with the "Bound" condition when the route fails // to find the gateway RouteReasonGatewayNotFound RouteConditionReason = "GatewayNotFound" + + // This reason is used with the "Accepted" condition when the route references non-existent + // JWTProviders + RouteReasonJWTProvidersNotFound RouteConditionReason = "JWTProvidersNotFound" ) var validRouteConditionReasonsMapping = map[RouteConditionType]map[ConditionStatus][]RouteConditionReason{ @@ -302,6 +320,7 @@ var validRouteConditionReasonsMapping = map[RouteConditionType]map[ConditionStat ConditionStatusFalse: { RouteReasonGatewayNotFound, RouteReasonFailedToBind, + RouteReasonJWTProvidersNotFound, }, ConditionStatusUnknown: {}, }, diff --git a/vendor/github.com/hashicorp/consul/api/exported_services.go b/vendor/github.com/hashicorp/consul/api/exported_services.go new file mode 100644 index 00000000000..50483e9c8da --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/exported_services.go @@ -0,0 +1,49 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +type ResolvedExportedService struct { + // Service is the name of the service which is exported. + Service string + + // Partition of the service + Partition string `json:",omitempty"` + + // Namespace of the service + Namespace string `json:",omitempty"` + + // Consumers is a list of downstream consumers of the service. + Consumers ResolvedConsumers +} + +type ResolvedConsumers struct { + Peers []string `json:",omitempty"` + Partitions []string `json:",omitempty"` +} + +func (c *Client) ExportedServices(q *QueryOptions) ([]ResolvedExportedService, *QueryMeta, error) { + + r := c.newRequest("GET", "/v1/exported-services") + r.setQueryOptions(q) + rtt, resp, err := c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var expSvcs []ResolvedExportedService + + if err := decodeBody(resp, &expSvcs); err != nil { + return nil, nil, err + } + + return expSvcs, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/internal.go b/vendor/github.com/hashicorp/consul/api/internal.go index dee161a65eb..b5f400f4b19 100644 --- a/vendor/github.com/hashicorp/consul/api/internal.go +++ b/vendor/github.com/hashicorp/consul/api/internal.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package api import "context" diff --git a/vendor/github.com/hashicorp/consul/api/operator_raft.go b/vendor/github.com/hashicorp/consul/api/operator_raft.go index d72c00c97b9..f0f5794aa5a 100644 --- a/vendor/github.com/hashicorp/consul/api/operator_raft.go +++ b/vendor/github.com/hashicorp/consul/api/operator_raft.go @@ -68,9 +68,14 @@ func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, e } // RaftLeaderTransfer is used to transfer the current raft leader to another node -func (op *Operator) RaftLeaderTransfer(q *QueryOptions) (*TransferLeaderResponse, error) { +// Optionally accepts a non-empty id of another node to transfer leadership to. +func (op *Operator) RaftLeaderTransfer(id string, q *QueryOptions) (*TransferLeaderResponse, error) { r := op.c.newRequest("POST", "/v1/operator/raft/transfer-leader") r.setQueryOptions(q) + + if id != "" { + r.params.Set("id", id) + } _, resp, err := op.c.doRequest(r) if err != nil { return nil, err diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/grpc/manager.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/grpc/manager.go index f941a178e6c..dd49ccbcff8 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/grpc/manager.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/grpc/manager.go @@ -47,6 +47,6 @@ func (s *ConfigManagerProxy) GetSamplingStrategy(ctx context.Context, serviceNam } // GetBaggageRestrictions returns baggage restrictions from collector. -func (s *ConfigManagerProxy) GetBaggageRestrictions(_ context.Context, _ string) ([]*baggage.BaggageRestriction, error) { +func (*ConfigManagerProxy) GetBaggageRestrictions(_ context.Context, _ string) ([]*baggage.BaggageRestriction, error) { return nil, errors.New("baggage not implemented") } diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/customtransport/buffered_read_transport.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/customtransport/buffered_read_transport.go index 929457939ad..2d5b8573a9b 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/customtransport/buffered_read_transport.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/customtransport/buffered_read_transport.go @@ -36,19 +36,19 @@ func NewTBufferedReadTransport(readBuf *bytes.Buffer) (*TBufferedReadTransport, // IsOpen does nothing as transport is not maintaining the connection // Required to maintain thrift.TTransport interface -func (p *TBufferedReadTransport) IsOpen() bool { +func (*TBufferedReadTransport) IsOpen() bool { return true } // Open does nothing as transport is not maintaining the connection // Required to maintain thrift.TTransport interface -func (p *TBufferedReadTransport) Open() error { +func (*TBufferedReadTransport) Open() error { return nil } // Close does nothing as transport is not maintaining the connection // Required to maintain thrift.TTransport interface -func (p *TBufferedReadTransport) Close() error { +func (*TBufferedReadTransport) Close() error { return nil } @@ -72,6 +72,6 @@ func (p *TBufferedReadTransport) Write(buf []byte) (int, error) { // Flush does nothing as udp server does not write responses back // Required to maintain thrift.TTransport interface -func (p *TBufferedReadTransport) Flush(_ context.Context) error { +func (*TBufferedReadTransport) Flush(_ context.Context) error { return nil } diff --git 
a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/processors/thrift_processor.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/processors/thrift_processor.go index f16aea5cd86..0045388994f 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/processors/thrift_processor.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/processors/thrift_processor.go @@ -67,7 +67,7 @@ func NewThriftProcessor( "number of processors must be greater than 0, called with %d", numProcessors) } protocolPool := &sync.Pool{ - New: func() interface{} { + New: func() any { trans := &customtransport.TBufferedReadTransport{} return factory.GetProtocol(trans) }, diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/client_metrics.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/client_metrics.go index c93b60f0b38..4213c1929ef 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/client_metrics.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/client_metrics.go @@ -143,7 +143,7 @@ func (r *ClientMetricsReporter) expireClientMetricsLoop() { func (r *ClientMetricsReporter) expireClientMetrics(t time.Time) { var size int64 - r.lastReceivedClientStats.Range(func(k, v interface{}) bool { + r.lastReceivedClientStats.Range(func(k, v any) bool { stats := v.(*lastReceivedClientStats) stats.lock.Lock() defer stats.lock.Unlock() diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/flags.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/flags.go index c3ec9616789..ea3deeeda47 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/flags.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/flags.go @@ -52,7 +52,7 @@ func AddFlags(flags *flag.FlagSet) { } // InitFromViper initializes Options with properties retrieved from Viper. -func (b *Options) InitFromViper(v *viper.Viper, logger *zap.Logger) *Options { +func (b *Options) InitFromViper(v *viper.Viper, _ *zap.Logger) *Options { b.ReporterType = Type(v.GetString(reporterType)) if !setupcontext.IsAllInOne() { if len(v.GetString(agentTags)) > 0 { diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/builder.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/builder.go index 9290107dd3a..1a4912f553f 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/builder.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/builder.go @@ -20,7 +20,7 @@ import ( "fmt" "strings" - grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry" + "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" @@ -99,7 +99,7 @@ func (b *ConnBuilder) CreateConnection(ctx context.Context, logger *zap.Logger, } } dialOptions = append(dialOptions, grpc.WithDefaultServiceConfig(grpcresolver.GRPCServiceConfig)) - dialOptions = append(dialOptions, grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(grpc_retry.WithMax(b.MaxRetry)))) + dialOptions = append(dialOptions, grpc.WithUnaryInterceptor(retry.UnaryClientInterceptor(retry.WithMax(b.MaxRetry)))) dialOptions = append(dialOptions, b.AdditionalDialOptions...) conn, err := grpc.NewClient(dialTarget, dialOptions...) 
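(Aside: the hunk above swaps the v1 grpc_retry interceptor for the v2 interceptors/retry package. A self-contained sketch of the same wiring outside the builder follows; the collector address is hypothetical, and plaintext credentials are used purely to keep the example minimal.)

package main

import (
	"github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Mirror ConnBuilder.CreateConnection: retry each unary RPC up to 3 times.
	conn, err := grpc.NewClient(
		"localhost:14250", // hypothetical collector endpoint
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithUnaryInterceptor(retry.UnaryClientInterceptor(retry.WithMax(3))),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	// conn can now back gRPC stubs as usual; retries apply transparently.
}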
diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/flags.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/flags.go index 72289e4b2fd..a860887fdb1 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/flags.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/flags.go @@ -27,7 +27,7 @@ import ( const ( gRPCPrefix = "reporter.grpc" collectorHostPort = gRPCPrefix + ".host-port" - retry = gRPCPrefix + ".retry.max" + retryFlag = gRPCPrefix + ".retry.max" defaultMaxRetry = 3 discoveryMinPeers = gRPCPrefix + ".discovery.min-peers" ) @@ -38,7 +38,7 @@ var tlsFlagsConfig = tlscfg.ClientFlagsConfig{ // AddFlags adds flags for Options. func AddFlags(flags *flag.FlagSet) { - flags.Uint(retry, defaultMaxRetry, "Sets the maximum number of retries for a call") + flags.Uint(retryFlag, defaultMaxRetry, "Sets the maximum number of retries for a call") flags.Int(discoveryMinPeers, 3, "Max number of collectors to which the agent will try to connect at any given time") flags.String(collectorHostPort, "", "Comma-separated string representing host:port of a static list of collectors to connect to directly") tlsFlagsConfig.AddFlags(flags) @@ -50,12 +50,12 @@ func (b *ConnBuilder) InitFromViper(v *viper.Viper) (*ConnBuilder, error) { if hostPorts != "" { b.CollectorHostPorts = strings.Split(hostPorts, ",") } - b.MaxRetry = uint(v.GetInt(retry)) - if tls, err := tlsFlagsConfig.InitFromViper(v); err == nil { - b.TLS = tls - } else { + b.MaxRetry = uint(v.GetInt(retryFlag)) + tls, err := tlsFlagsConfig.InitFromViper(v) + if err != nil { return b, fmt.Errorf("failed to process TLS options: %w", err) } + b.TLS = tls b.DiscoveryMinPeers = v.GetInt(discoveryMinPeers) return b, nil } diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/tbuffered_server.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/tbuffered_server.go index 6a9d49b45ad..f52a0fa6fea 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/tbuffered_server.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/tbuffered_server.go @@ -79,7 +79,7 @@ func NewTBufferedServer( dataChan := make(chan *ReadBuf, maxQueueSize) readBufPool := &sync.Pool{ - New: func() interface{} { + New: func() any { return &ReadBuf{bytes: make([]byte, maxPacketSize)} }, } diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/transport.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/transport.go index 4579502061f..e124b16a102 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/transport.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/transport.go @@ -93,7 +93,7 @@ func NewTUDPServerTransport(hostPort string) (*TUDPTransport, error) { // Open does nothing as connection is opened on creation // Required to maintain thrift.TTransport interface -func (p *TUDPTransport) Open() error { +func (*TUDPTransport) Open() error { return nil } @@ -131,7 +131,7 @@ func (p *TUDPTransport) Read(buf []byte) (int, error) { // RemainingBytes returns the max number of bytes (same as Thrift's StreamTransport) as we // do not know how many bytes we have left. 
-func (p *TUDPTransport) RemainingBytes() uint64 { +func (*TUDPTransport) RemainingBytes() uint64 { const maxSize = ^uint64(0) return maxSize } diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore/factory.go b/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/samplingstrategy/factory.go similarity index 80% rename from vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore/factory.go rename to vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/samplingstrategy/factory.go index 5fab77b62bd..353512b0f72 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore/factory.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/samplingstrategy/factory.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package strategystore +package samplingstrategy import ( "go.uber.org/zap" @@ -21,7 +21,7 @@ import ( "github.com/jaegertracing/jaeger/storage" ) -// Factory defines an interface for a factory that can create implementations of different strategy storage components. +// Factory defines an interface for a factory that can create implementations of different sampling strategy components. // Implementations are also encouraged to implement plugin.Configurable interface. // // # See also @@ -31,6 +31,9 @@ type Factory interface { // Initialize performs internal initialization of the factory. Initialize(metricsFactory metrics.Factory, ssFactory storage.SamplingStoreFactory, logger *zap.Logger) error - // CreateStrategyStore initializes the StrategyStore and returns it. - CreateStrategyStore() (StrategyStore, Aggregator, error) + // CreateStrategyProvider initializes and returns Provider and optionally Aggregator. + CreateStrategyProvider() (Provider, Aggregator, error) + + // Close closes the factory + Close() error } diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore/interface.go b/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/samplingstrategy/interface.go similarity index 80% rename from vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore/interface.go rename to vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/samplingstrategy/interface.go index 90d9464918d..4b599dd3060 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore/interface.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/samplingstrategy/interface.go @@ -12,18 +12,20 @@ // See the License for the specific language governing permissions and // limitations under the License. -package strategystore +package samplingstrategy import ( "context" "io" + "go.uber.org/zap" + "github.com/jaegertracing/jaeger/model" "github.com/jaegertracing/jaeger/proto-gen/api_v2" ) -// StrategyStore keeps track of service specific sampling strategies. -type StrategyStore interface { +// Provider keeps track of service specific sampling strategies. +type Provider interface { // Close() from io.Closer stops the processor from calculating probabilities. io.Closer @@ -36,6 +38,10 @@ type Aggregator interface { // Close() from io.Closer stops the aggregator from aggregating throughput. io.Closer + // The HandleRootSpan function processes a span, checking if it's a root span. + // If it is, it extracts sampler parameters, then calls RecordThroughput.
+ HandleRootSpan(span *model.Span, logger *zap.Logger) + // RecordThroughput records throughput for an operation for aggregation. RecordThroughput(service, operation string, samplerType model.SamplerType, probability float64) diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sanitizer/zipkin/span_sanitizer.go b/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sanitizer/zipkin/span_sanitizer.go index 8ba8a71f665..82fad02c7d6 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sanitizer/zipkin/span_sanitizer.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sanitizer/zipkin/span_sanitizer.go @@ -69,7 +69,7 @@ func NewSpanDurationSanitizer() Sanitizer { type spanDurationSanitizer struct{} -func (s *spanDurationSanitizer) Sanitize(span *zc.Span) *zc.Span { +func (*spanDurationSanitizer) Sanitize(span *zc.Span) *zc.Span { if span.Duration == nil { duration := defaultDuration if len(span.Annotations) >= 2 { @@ -116,7 +116,7 @@ func NewSpanStartTimeSanitizer() Sanitizer { type spanStartTimeSanitizer struct{} -func (s *spanStartTimeSanitizer) Sanitize(span *zc.Span) *zc.Span { +func (*spanStartTimeSanitizer) Sanitize(span *zc.Span) *zc.Span { if span.Timestamp != nil || len(span.Annotations) == 0 { return span } @@ -143,7 +143,7 @@ func NewParentIDSanitizer() Sanitizer { type parentIDSanitizer struct{} -func (s *parentIDSanitizer) Sanitize(span *zc.Span) *zc.Span { +func (*parentIDSanitizer) Sanitize(span *zc.Span) *zc.Span { if span.ParentID == nil || *span.ParentID != 0 { return span } @@ -166,7 +166,7 @@ func NewErrorTagSanitizer() Sanitizer { type errorTagSanitizer struct{} -func (s *errorTagSanitizer) Sanitize(span *zc.Span) *zc.Span { +func (*errorTagSanitizer) Sanitize(span *zc.Span) *zc.Span { for _, binAnno := range span.BinaryAnnotations { if binAnno.AnnotationType != zc.AnnotationType_BOOL && strings.EqualFold("error", binAnno.Key) { binAnno.AnnotationType = zc.AnnotationType_BOOL diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/.nocover b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/.nocover deleted file mode 100644 index 46911e9377b..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/.nocover +++ /dev/null @@ -1,2 +0,0 @@ -FIXME - diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/admin.go b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/admin.go index ea4df675a04..974aafdaacb 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/admin.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/admin.go @@ -170,6 +170,8 @@ func (s *AdminServer) registerPprofHandlers() { // Close stops the HTTP server func (s *AdminServer) Close() error { - _ = s.tlsCertWatcherCloser.Close() - return s.server.Shutdown(context.Background()) + return errors.Join( + s.tlsCertWatcherCloser.Close(), + s.server.Shutdown(context.Background()), + ) } diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/flags.go b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/flags.go index 272ad8dd4fe..91060af5d6f 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/flags.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/flags.go @@ -29,6 +29,7 @@ import ( const ( spanStorageType = "span-storage.type" // deprecated logLevel = "log-level" + logEncoding = "log-encoding" // json or console configFile = "config-file" ) @@ -98,23 +99,26 @@ type SharedFlags struct { } type logging struct { - 
Level string + Level string + Encoding string } // AddFlags adds flags for SharedFlags func AddFlags(flagSet *flag.FlagSet) { flagSet.String(spanStorageType, "", "(deprecated) please use SPAN_STORAGE_TYPE environment variable. Run this binary with the 'env' command for help.") - AddLoggingFlag(flagSet) + AddLoggingFlags(flagSet) } // AddLoggingFlags adds logging flags for SharedFlags -func AddLoggingFlag(flagSet *flag.FlagSet) { +func AddLoggingFlags(flagSet *flag.FlagSet) { flagSet.String(logLevel, "info", "Minimal allowed log Level. For more levels see https://github.com/uber-go/zap") + flagSet.String(logEncoding, "json", "Log encoding. Supported values are 'json' and 'console'.") } // InitFromViper initializes SharedFlags with properties from viper func (flags *SharedFlags) InitFromViper(v *viper.Viper) *SharedFlags { flags.Logging.Level = v.GetString(logLevel) + flags.Logging.Encoding = v.GetString(logEncoding) return flags } @@ -126,5 +130,9 @@ func (flags *SharedFlags) NewLogger(conf zap.Config, options ...zap.Option) (*za return nil, err } conf.Level = zap.NewAtomicLevelAt(level) + conf.Encoding = flags.Logging.Encoding + if flags.Logging.Encoding == "console" { + conf.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder + } return conf.Build(options...) } diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/service.go b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/service.go index 622a960a4e2..12293a7b3ef 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/service.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/service.go @@ -22,9 +22,10 @@ import ( "os/signal" "syscall" - grpcZap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap" "github.com/spf13/viper" "go.uber.org/zap" + "go.uber.org/zap/zapgrpc" + "google.golang.org/grpc/grpclog" "github.com/jaegertracing/jaeger/internal/metrics/metricsbuilder" "github.com/jaegertracing/jaeger/pkg/healthcheck" @@ -67,7 +68,7 @@ func NewService(adminPort int) *Service { func (s *Service) AddFlags(flagSet *flag.FlagSet) { AddConfigFileFlag(flagSet) if s.NoStorage { - AddLoggingFlag(flagSet) + AddLoggingFlags(flagSet) } else { AddFlags(flagSet) } @@ -84,16 +85,15 @@ func (s *Service) Start(v *viper.Viper) error { sFlags := new(SharedFlags).InitFromViper(v) newProdConfig := zap.NewProductionConfig() newProdConfig.Sampling = nil - if logger, err := sFlags.NewLogger(newProdConfig); err == nil { - s.Logger = logger - grpcZap.ReplaceGrpcLoggerV2(logger.WithOptions( - // grpclog is not consistent with the depth of call tree before it's dispatched to zap, - // but Skip(2) still shows grpclog as caller, while Skip(3) shows actual grpc packages. - zap.AddCallerSkip(3), - )) - } else { + logger, err := sFlags.NewLogger(newProdConfig) + if err != nil { return fmt.Errorf("cannot create logger: %w", err) } + s.Logger = logger + grpclog.SetLoggerV2(zapgrpc.NewLogger( + logger.WithOptions( + zap.AddCallerSkip(5), // ensure the actual caller:lineNo is shown + ))) metricsBuilder := new(metricsbuilder.Builder).InitFromViper(v) metricsFactory, err := metricsBuilder.CreateMetricsFactory("") diff --git a/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/cache.go b/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/cache.go deleted file mode 100644 index f25ddba563b..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/cache.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) 2022 The Jaeger Authors.
-// Copyright (c) 2018 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expvar - -import ( - "sync" - - "github.com/jaegertracing/jaeger/pkg/metrics" -) - -type cache struct { - lock sync.Mutex - counters map[string]metrics.Counter - gauges map[string]metrics.Gauge - timers map[string]metrics.Timer - histograms map[string]metrics.Histogram -} - -func newCache() *cache { - return &cache{ - counters: make(map[string]metrics.Counter), - gauges: make(map[string]metrics.Gauge), - timers: make(map[string]metrics.Timer), - histograms: make(map[string]metrics.Histogram), - } -} - -func (r *cache) getOrSetCounter(name string, create func() metrics.Counter) metrics.Counter { - r.lock.Lock() - defer r.lock.Unlock() - c, ok := r.counters[name] - if !ok { - c = create() - r.counters[name] = c - } - return c -} - -func (r *cache) getOrSetGauge(name string, create func() metrics.Gauge) metrics.Gauge { - r.lock.Lock() - defer r.lock.Unlock() - g, ok := r.gauges[name] - if !ok { - g = create() - r.gauges[name] = g - } - return g -} - -func (r *cache) getOrSetTimer(name string, create func() metrics.Timer) metrics.Timer { - r.lock.Lock() - defer r.lock.Unlock() - t, ok := r.timers[name] - if !ok { - t = create() - r.timers[name] = t - } - return t -} - -func (r *cache) getOrSetHistogram(name string, create func() metrics.Histogram) metrics.Histogram { - r.lock.Lock() - defer r.lock.Unlock() - t, ok := r.histograms[name] - if !ok { - t = create() - r.histograms[name] = t - } - return t -} diff --git a/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/factory.go b/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/factory.go deleted file mode 100644 index 6ce562e5ef1..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/factory.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright (c) 2022 The Jaeger Authors. -// Copyright (c) 2018 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expvar - -import ( - "sort" - - kexpvar "github.com/go-kit/kit/metrics/expvar" - - "github.com/jaegertracing/jaeger/pkg/metrics" -) - -// NewFactory creates a new metrics factory using go-kit expvar package. -// buckets is the number of buckets to be used in histograms. -// Custom buckets passed via options are not supported. 
-func NewFactory(buckets int) metrics.Factory { - return &factory{ - buckets: buckets, - scope: "", - scopeSep: ".", - tagsSep: ".", - tagKVSep: "_", - cache: newCache(), - } -} - -type factory struct { - buckets int - - scope string - tags map[string]string - scopeSep string - tagsSep string - tagKVSep string - cache *cache -} - -var _ metrics.Factory = (*factory)(nil) - -func (f *factory) subScope(name string) string { - if f.scope == "" { - return name - } - if name == "" { - return f.scope - } - return f.scope + f.scopeSep + name -} - -func (f *factory) mergeTags(tags map[string]string) map[string]string { - ret := make(map[string]string, len(f.tags)+len(tags)) - for k, v := range f.tags { - ret[k] = v - } - for k, v := range tags { - ret[k] = v - } - return ret -} - -func (f *factory) getKey(name string, tags map[string]string) string { - fullName := f.subScope(name) - fullTags := f.mergeTags(tags) - return makeKey(fullName, fullTags, f.tagsSep, f.tagKVSep) -} - -// getKey converts name+tags into a single string of the form -// "name|tag1=value1|...|tagN=valueN", where tag names are -// sorted alphabetically. -func makeKey(name string, tags map[string]string, tagsSep string, tagKVSep string) string { - keys := make([]string, 0, len(tags)) - for k := range tags { - keys = append(keys, k) - } - sort.Strings(keys) - key := name - for _, k := range keys { - key = key + tagsSep + k + tagKVSep + tags[k] - } - return key -} - -func (f *factory) Counter(options metrics.Options) metrics.Counter { - key := f.getKey(options.Name, options.Tags) - return f.cache.getOrSetCounter(key, func() metrics.Counter { - return NewCounter(kexpvar.NewCounter(key)) - }) -} - -func (f *factory) Gauge(options metrics.Options) metrics.Gauge { - key := f.getKey(options.Name, options.Tags) - return f.cache.getOrSetGauge(key, func() metrics.Gauge { - return NewGauge(kexpvar.NewGauge(key)) - }) -} - -func (f *factory) Timer(options metrics.TimerOptions) metrics.Timer { - key := f.getKey(options.Name, options.Tags) - return f.cache.getOrSetTimer(key, func() metrics.Timer { - return NewTimer(kexpvar.NewHistogram(key, f.buckets)) - }) -} - -func (f *factory) Histogram(options metrics.HistogramOptions) metrics.Histogram { - key := f.getKey(options.Name, options.Tags) - return f.cache.getOrSetHistogram(key, func() metrics.Histogram { - return NewHistogram(kexpvar.NewHistogram(key, f.buckets)) - }) -} - -func (f *factory) Namespace(options metrics.NSOptions) metrics.Factory { - return &factory{ - buckets: f.buckets, - scope: f.subScope(options.Name), - tags: f.mergeTags(options.Tags), - scopeSep: f.scopeSep, - tagsSep: f.tagsSep, - tagKVSep: f.tagKVSep, - cache: f.cache, - } -} diff --git a/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/metrics.go b/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/metrics.go deleted file mode 100644 index f3cbf740b1a..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/metrics.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (c) 2022 The Jaeger Authors. -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expvar - -import ( - "time" - - kit "github.com/go-kit/kit/metrics" -) - -// Counter is an adapter from go-kit Counter to jaeger-lib Counter -type Counter struct { - counter kit.Counter -} - -// NewCounter creates a new Counter -func NewCounter(counter kit.Counter) *Counter { - return &Counter{counter: counter} -} - -// Inc adds the given value to the counter. -func (c *Counter) Inc(delta int64) { - c.counter.Add(float64(delta)) -} - -// Gauge is an adapter from go-kit Gauge to jaeger-lib Gauge -type Gauge struct { - gauge kit.Gauge -} - -// NewGauge creates a new Gauge -func NewGauge(gauge kit.Gauge) *Gauge { - return &Gauge{gauge: gauge} -} - -// Update the gauge to the value passed in. -func (g *Gauge) Update(value int64) { - g.gauge.Set(float64(value)) -} - -// Timer is an adapter from go-kit Histogram to jaeger-lib Timer -type Timer struct { - hist kit.Histogram -} - -// NewTimer creates a new Timer -func NewTimer(hist kit.Histogram) *Timer { - return &Timer{hist: hist} -} - -// Record saves the time passed in. -func (t *Timer) Record(delta time.Duration) { - t.hist.Observe(delta.Seconds()) -} - -// Histogram is an adapter from go-kit Histogram to jaeger-lib Histogram -type Histogram struct { - hist kit.Histogram -} - -// NewHistogram creates a new Histogram -func NewHistogram(hist kit.Histogram) *Histogram { - return &Histogram{hist: hist} -} - -// Record saves the value passed in. -func (t *Histogram) Record(value float64) { - t.hist.Observe(value) -} diff --git a/vendor/github.com/jaegertracing/jaeger/internal/metrics/metricsbuilder/builder.go b/vendor/github.com/jaegertracing/jaeger/internal/metrics/metricsbuilder/builder.go index 502b7a4aa3d..4bd71304f08 100644 --- a/vendor/github.com/jaegertracing/jaeger/internal/metrics/metricsbuilder/builder.go +++ b/vendor/github.com/jaegertracing/jaeger/internal/metrics/metricsbuilder/builder.go @@ -17,16 +17,13 @@ package metricsbuilder import ( "errors" - "expvar" "flag" - "log" "net/http" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/spf13/viper" - jexpvar "github.com/jaegertracing/jaeger/internal/metrics/expvar" jprom "github.com/jaegertracing/jaeger/internal/metrics/prometheus" "github.com/jaegertracing/jaeger/pkg/metrics" ) @@ -47,14 +44,12 @@ type Builder struct { handler http.Handler } -const expvarDepr = "(deprecated, will be removed after 2024-01-01 or in release v1.53.0, whichever is later) " - // AddFlags adds flags for Builder. 
func AddFlags(flags *flag.FlagSet) { flags.String( metricsBackend, defaultMetricsBackend, - "Defines which metrics backend to use for metrics reporting: prometheus, none, or expvar "+expvarDepr) + "Defines which metrics backend to use for metrics reporting: prometheus or none") flags.String( metricsHTTPRoute, defaultMetricsRoute, @@ -77,12 +72,6 @@ func (b *Builder) CreateMetricsFactory(namespace string) (metrics.Factory, error b.handler = promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{DisableCompression: true}) return metricsFactory, nil } - if b.Backend == "expvar" { - metricsFactory := jexpvar.NewFactory(10).Namespace(metrics.NSOptions{Name: namespace, Tags: nil}) - b.handler = expvar.Handler() - log.Printf("using expvar as metrics backend " + expvarDepr) - return metricsFactory, nil - } if b.Backend == "none" || b.Backend == "" { return metrics.NullFactory, nil } diff --git a/vendor/github.com/jaegertracing/jaeger/internal/metrics/prometheus/cache.go b/vendor/github.com/jaegertracing/jaeger/internal/metrics/prometheus/cache.go index 40791ebb708..d5bf7ec6789 100644 --- a/vendor/github.com/jaegertracing/jaeger/internal/metrics/prometheus/cache.go +++ b/vendor/github.com/jaegertracing/jaeger/internal/metrics/prometheus/cache.go @@ -81,6 +81,6 @@ func (c *vectorCache) getOrMakeHistogramVec(opts prometheus.HistogramOpts, label return hv } -func (c *vectorCache) getCacheKey(name string, labels []string) string { +func (*vectorCache) getCacheKey(name string, labels []string) string { return strings.Join(append([]string{name}, labels...), "||") } diff --git a/vendor/github.com/jaegertracing/jaeger/internal/metrics/prometheus/factory.go b/vendor/github.com/jaegertracing/jaeger/internal/metrics/prometheus/factory.go index 0f8b3a959d7..bb4a296ce95 100644 --- a/vendor/github.com/jaegertracing/jaeger/internal/metrics/prometheus/factory.go +++ b/vendor/github.com/jaegertracing/jaeger/internal/metrics/prometheus/factory.go @@ -280,7 +280,7 @@ func (f *Factory) mergeTags(tags map[string]string) map[string]string { return ret } -func (f *Factory) tagNames(tags map[string]string) []string { +func (*Factory) tagNames(tags map[string]string) []string { ret := make([]string, 0, len(tags)) for k := range tags { ret = append(ret, k) @@ -289,7 +289,7 @@ func (f *Factory) tagNames(tags map[string]string) []string { return ret } -func (f *Factory) tagsAsLabelValues(labels []string, tags map[string]string) []string { +func (*Factory) tagsAsLabelValues(labels []string, tags map[string]string) []string { ret := make([]string, 0, len(tags)) for _, l := range labels { ret = append(ret, tags[l]) diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/json/from_domain.go b/vendor/github.com/jaegertracing/jaeger/model/converter/json/from_domain.go index 26c2e47d6f0..677f96bb386 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/converter/json/from_domain.go +++ b/vendor/github.com/jaegertracing/jaeger/model/converter/json/from_domain.go @@ -110,17 +110,17 @@ func (fd fromDomain) convertReferences(span *model.Span) []json.Reference { return out } -func (fd fromDomain) convertRefType(refType model.SpanRefType) json.ReferenceType { +func (fromDomain) convertRefType(refType model.SpanRefType) json.ReferenceType { if refType == model.FollowsFrom { return json.FollowsFrom } return json.ChildOf } -func (fd fromDomain) convertKeyValues(keyValues model.KeyValues) []json.KeyValue { +func (fromDomain) convertKeyValues(keyValues model.KeyValues) []json.KeyValue { out := 
make([]json.KeyValue, len(keyValues)) for i, kv := range keyValues { - var value interface{} + var value any switch kv.VType { case model.StringType: value = kv.VStr @@ -146,7 +146,7 @@ func (fd fromDomain) convertKeyValues(keyValues model.KeyValues) []json.KeyValue return out } -func (fd fromDomain) convertKeyValuesString(keyValues model.KeyValues) []json.KeyValue { +func (fromDomain) convertKeyValuesString(keyValues model.KeyValues) []json.KeyValue { out := make([]json.KeyValue, len(keyValues)) for i, kv := range keyValues { out[i] = json.KeyValue{ diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/from_domain.go b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/from_domain.go index 8ae83e02c57..6f8b85863d9 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/from_domain.go +++ b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/from_domain.go @@ -44,7 +44,7 @@ func FromDomainSpan(span *model.Span) *jaeger.Span { type domainToJaegerTransformer struct{} -func (d domainToJaegerTransformer) keyValueToTag(kv *model.KeyValue) *jaeger.Tag { +func (domainToJaegerTransformer) keyValueToTag(kv *model.KeyValue) *jaeger.Tag { if kv.VType == model.StringType { stringValue := kv.VStr return &jaeger.Tag{ @@ -119,7 +119,7 @@ func (d domainToJaegerTransformer) convertLogs(logs []model.Log) []*jaeger.Log { return jaegerLogs } -func (d domainToJaegerTransformer) convertSpanRefs(refs []model.SpanRef) []*jaeger.SpanRef { +func (domainToJaegerTransformer) convertSpanRefs(refs []model.SpanRef) []*jaeger.SpanRef { jaegerSpanRefs := make([]*jaeger.SpanRef, len(refs)) for idx, ref := range refs { jaegerSpanRefs[idx] = &jaeger.SpanRef{ diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/to_domain.go b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/to_domain.go index 2738776123f..5f3838bc397 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/to_domain.go +++ b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/to_domain.go @@ -83,7 +83,7 @@ func (td toDomain) transformSpan(jSpan *jaeger.Span, mProcess *model.Process) *m } } -func (td toDomain) getReferences(jRefs []*jaeger.SpanRef) []model.SpanRef { +func (toDomain) getReferences(jRefs []*jaeger.SpanRef) []model.SpanRef { if len(jRefs) == 0 { return nil } @@ -127,7 +127,7 @@ func (td toDomain) getTags(tags []*jaeger.Tag, extraSpace int) model.KeyValues { return retMe } -func (td toDomain) getTag(tag *jaeger.Tag) model.KeyValue { +func (toDomain) getTag(tag *jaeger.Tag) model.KeyValue { switch tag.VType { case jaeger.TagType_BOOL: return model.Bool(tag.Key, tag.GetVBool()) diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/to_domain.go b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/to_domain.go index f4dd3655f74..a75255e9eb6 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/to_domain.go +++ b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/to_domain.go @@ -116,7 +116,7 @@ func (td toDomain) ToDomainSpans(zSpan *zipkincore.Span) ([]*model.Span, error) return jSpans, err } -func (td toDomain) findAnnotation(zSpan *zipkincore.Span, value string) *zipkincore.Annotation { +func (toDomain) findAnnotation(zSpan *zipkincore.Span, value string) *zipkincore.Annotation { for _, ann := range zSpan.Annotations { if ann.Value == value { return ann @@ -189,7 +189,7 @@ func (td 
toDomain) transformSpan(zSpan *zipkincore.Span) []*model.Span { } // getFlags takes a Zipkin Span and deduces the proper flags settings -func (td toDomain) getFlags(zSpan *zipkincore.Span) model.Flags { +func (toDomain) getFlags(zSpan *zipkincore.Span) model.Flags { f := model.Flags(0) if zSpan.Debug { f.SetDebug() @@ -198,9 +198,9 @@ func (td toDomain) getFlags(zSpan *zipkincore.Span) model.Flags { } // Get a correct start time to use for the span if it's not set directly -func (td toDomain) getStartTimeAndDuration(zSpan *zipkincore.Span) (int64, int64) { - timestamp := zSpan.GetTimestamp() - duration := zSpan.GetDuration() +func (td toDomain) getStartTimeAndDuration(zSpan *zipkincore.Span) (timestamp, duration int64) { + timestamp = zSpan.GetTimestamp() + duration = zSpan.GetDuration() if timestamp == 0 { cs := td.findAnnotation(zSpan, zipkincore.CLIENT_SEND) sr := td.findAnnotation(zSpan, zipkincore.SERVER_RECV) @@ -265,12 +265,12 @@ func (td toDomain) findServiceNameAndIP(zSpan *zipkincore.Span) (string, int32, return UnknownServiceName, 0, err } -func (td toDomain) isCoreAnnotation(annotation *zipkincore.Annotation) bool { +func (toDomain) isCoreAnnotation(annotation *zipkincore.Annotation) bool { _, ok := coreAnnotations[annotation.Value] return ok } -func (td toDomain) isProcessTag(binaryAnnotation *zipkincore.BinaryAnnotation) bool { +func (toDomain) isProcessTag(binaryAnnotation *zipkincore.BinaryAnnotation) bool { _, ok := processTagAnnotations[binaryAnnotation.Key] return ok } @@ -309,7 +309,7 @@ func (td toDomain) getTags(binAnnotations []*zipkincore.BinaryAnnotation, tagInc return retMe } -func (td toDomain) transformBinaryAnnotation(binaryAnnotation *zipkincore.BinaryAnnotation) (model.KeyValue, error) { +func (toDomain) transformBinaryAnnotation(binaryAnnotation *zipkincore.BinaryAnnotation) (model.KeyValue, error) { switch binaryAnnotation.AnnotationType { case zipkincore.AnnotationType_BOOL: vBool := bytes.Equal(binaryAnnotation.Value, trueByteSlice) @@ -346,7 +346,7 @@ func (td toDomain) transformBinaryAnnotation(binaryAnnotation *zipkincore.Binary return model.KeyValue{}, fmt.Errorf("unknown zipkin annotation type: %d", binaryAnnotation.AnnotationType) } -func bytesToNumber(b []byte, number interface{}) error { +func bytesToNumber(b []byte, number any) error { buf := bytes.NewReader(b) return binary.Read(buf, binary.BigEndian, number) } @@ -372,7 +372,7 @@ func (td toDomain) getLogs(annotations []*zipkincore.Annotation) []model.Log { return retMe } -func (td toDomain) getLogFields(annotation *zipkincore.Annotation) []model.KeyValue { +func (toDomain) getLogFields(annotation *zipkincore.Annotation) []model.KeyValue { var logFields map[string]string // Since Zipkin format does not support kv-logging, some clients encode those Logs // as annotations with JSON value. Therefore, we try JSON decoding first. 
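The JSON-first fallback described in the comment above can be illustrated in isolation. The following is a self-contained sketch of the idea, not Jaeger's actual code (Jaeger keeps the raw annotation value under `DefaultLogFieldKey` when JSON decoding fails; the names below are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// decodeAnnotation tries to interpret a Zipkin annotation value as a
// JSON-encoded string map (how some clients smuggle kv-logs through the
// Zipkin format); if that fails, the whole value is kept as one field.
func decodeAnnotation(value string) map[string]string {
	var fields map[string]string
	if err := json.Unmarshal([]byte(value), &fields); err == nil {
		return fields
	}
	return map[string]string{"event": value} // fallback: single opaque field
}

func main() {
	fmt.Println(decodeAnnotation(`{"event":"error","message":"timeout"}`)) // kv-log annotation
	fmt.Println(decodeAnnotation("cache miss"))                            // plain annotation
}
```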
@@ -388,7 +388,7 @@ func (td toDomain) getLogFields(annotation *zipkincore.Annotation) []model.KeyVa return []model.KeyValue{model.String(DefaultLogFieldKey, annotation.Value)} } -func (td toDomain) getSpanKindTag(annotations []*zipkincore.Annotation) (model.KeyValue, bool) { +func (toDomain) getSpanKindTag(annotations []*zipkincore.Annotation) (model.KeyValue, bool) { for _, a := range annotations { if spanKind, ok := coreAnnotations[a.Value]; ok { return model.String(keySpanKind, spanKind), true @@ -397,7 +397,7 @@ func (td toDomain) getSpanKindTag(annotations []*zipkincore.Annotation) (model.K return model.KeyValue{}, false } -func (td toDomain) getPeerTags(endpoint *zipkincore.Endpoint, tags []model.KeyValue) []model.KeyValue { +func (toDomain) getPeerTags(endpoint *zipkincore.Endpoint, tags []model.KeyValue) []model.KeyValue { if endpoint == nil { return tags } diff --git a/vendor/github.com/jaegertracing/jaeger/model/dependencies.go b/vendor/github.com/jaegertracing/jaeger/model/dependencies.go index d1a86ba0d7c..d60bbb18f41 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/dependencies.go +++ b/vendor/github.com/jaegertracing/jaeger/model/dependencies.go @@ -22,8 +22,9 @@ const ( // ApplyDefaults applies defaults to the DependencyLink. func (d DependencyLink) ApplyDefaults() DependencyLink { - if d.Source == "" { - d.Source = JaegerDependencyLinkSource + dd := d + if dd.Source == "" { + dd.Source = JaegerDependencyLinkSource } - return d + return dd } diff --git a/vendor/github.com/jaegertracing/jaeger/model/ids.go b/vendor/github.com/jaegertracing/jaeger/model/ids.go index dfdc9070574..525bbf874eb 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/ids.go +++ b/vendor/github.com/jaegertracing/jaeger/model/ids.go @@ -93,17 +93,17 @@ func TraceIDFromBytes(data []byte) (TraceID, error) { } // MarshalText is called by encoding/json, which we do not want people to use. -func (t TraceID) MarshalText() ([]byte, error) { +func (TraceID) MarshalText() ([]byte, error) { return nil, fmt.Errorf("unsupported method TraceID.MarshalText; please use github.com/gogo/protobuf/jsonpb for marshalling") } // UnmarshalText is called by encoding/json, which we do not want people to use. -func (t *TraceID) UnmarshalText(text []byte) error { +func (*TraceID) UnmarshalText([]byte /* text */) error { return fmt.Errorf("unsupported method TraceID.UnmarshalText; please use github.com/gogo/protobuf/jsonpb for marshalling") } // Size returns the size of this datum in protobuf. It is always 16 bytes. -func (t *TraceID) Size() int { +func (*TraceID) Size() int { return 16 } @@ -187,17 +187,17 @@ func SpanIDFromBytes(data []byte) (SpanID, error) { } // MarshalText is called by encoding/json, which we do not want people to use. -func (s SpanID) MarshalText() ([]byte, error) { +func (SpanID) MarshalText() ([]byte, error) { return nil, fmt.Errorf("unsupported method SpanID.MarshalText; please use github.com/gogo/protobuf/jsonpb for marshalling") } // UnmarshalText is called by encoding/json, which we do not want people to use. -func (s *SpanID) UnmarshalText(text []byte) error { +func (*SpanID) UnmarshalText([]byte /* text */) error { return fmt.Errorf("unsupported method SpanID.UnmarshalText; please use github.com/gogo/protobuf/jsonpb for marshalling") } // Size returns the size of this datum in protobuf. It is always 8 bytes. 
-func (s *SpanID) Size() int { +func (*SpanID) Size() int { return 8 } diff --git a/vendor/github.com/jaegertracing/jaeger/model/json/model.go b/vendor/github.com/jaegertracing/jaeger/model/json/model.go index 8d72dc498e5..ba1b5a44aba 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/json/model.go +++ b/vendor/github.com/jaegertracing/jaeger/model/json/model.go @@ -98,9 +98,9 @@ type Log struct { // KeyValue is a key-value pair with typed value. type KeyValue struct { - Key string `json:"key"` - Type ValueType `json:"type,omitempty"` - Value interface{} `json:"value"` + Key string `json:"key"` + Type ValueType `json:"type,omitempty"` + Value any `json:"value"` } // DependencyLink shows dependencies between services diff --git a/vendor/github.com/jaegertracing/jaeger/model/keyvalue.go b/vendor/github.com/jaegertracing/jaeger/model/keyvalue.go index 44f3e1bdb03..96c15dfaa81 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/keyvalue.go +++ b/vendor/github.com/jaegertracing/jaeger/model/keyvalue.go @@ -102,8 +102,8 @@ func (kv *KeyValue) Binary() []byte { return nil } -// Value returns typed values stored in KeyValue as interface{}. -func (kv *KeyValue) Value() interface{} { +// Value returns typed values stored in KeyValue as any. +func (kv *KeyValue) Value() any { switch kv.VType { case StringType: return kv.VStr diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp/cfgmgr.go b/vendor/github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp/cfgmgr.go index 621162a36fe..a34d19a7ba6 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp/cfgmgr.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp/cfgmgr.go @@ -18,20 +18,20 @@ import ( "context" "errors" - "github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore" + "github.com/jaegertracing/jaeger/cmd/collector/app/sampling/samplingstrategy" "github.com/jaegertracing/jaeger/proto-gen/api_v2" "github.com/jaegertracing/jaeger/thrift-gen/baggage" ) // ConfigManager implements ClientConfigManager. type ConfigManager struct { - SamplingStrategyStore strategystore.StrategyStore - BaggageManager baggage.BaggageRestrictionManager + SamplingProvider samplingstrategy.Provider + BaggageManager baggage.BaggageRestrictionManager } // GetSamplingStrategy implements ClientConfigManager.GetSamplingStrategy. func (c *ConfigManager) GetSamplingStrategy(ctx context.Context, serviceName string) (*api_v2.SamplingStrategyResponse, error) { - return c.SamplingStrategyStore.GetSamplingStrategy(ctx, serviceName) + return c.SamplingProvider.GetSamplingStrategy(ctx, serviceName) } // GetBaggageRestrictions implements ClientConfigManager.GetBaggageRestrictions. 
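To illustrate the rename above, here is a minimal sketch of wiring a sampling provider into `ConfigManager`. It assumes `samplingstrategy.Provider` consists of `GetSamplingStrategy` plus a `Close` method (as the renamed field suggests); check `cmd/collector/app/sampling/samplingstrategy` for the authoritative interface:

```go
package main

import (
	"context"
	"fmt"

	"github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp"
	"github.com/jaegertracing/jaeger/proto-gen/api_v2"
)

// fixedProvider is a stand-in provider that always returns the same
// probabilistic strategy; real providers are adaptive or file-based.
type fixedProvider struct{ rate float64 }

func (p *fixedProvider) GetSamplingStrategy(context.Context, string) (*api_v2.SamplingStrategyResponse, error) {
	return &api_v2.SamplingStrategyResponse{
		StrategyType:          api_v2.SamplingStrategyType_PROBABILISTIC,
		ProbabilisticSampling: &api_v2.ProbabilisticSamplingStrategy{SamplingRate: p.rate},
	}, nil
}

func (*fixedProvider) Close() error { return nil }

func main() {
	// ConfigManager simply delegates GetSamplingStrategy to the provider.
	mgr := &clientcfghttp.ConfigManager{SamplingProvider: &fixedProvider{rate: 0.5}}
	resp, err := mgr.GetSamplingStrategy(context.Background(), "my-service")
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.GetProbabilisticSampling().GetSamplingRate()) // 0.5
}
```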
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp/handler.go b/vendor/github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp/handler.go index 2c7bbc12f09..ab2cef08c5d 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp/handler.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp/handler.go @@ -213,7 +213,7 @@ var samplingStrategyTypes = []api_v2.SamplingStrategyType{ // // Thrift 0.9.3 classes generate this JSON: // {"strategyType":"PROBABILISTIC","probabilisticSampling":{"samplingRate":0.5}} -func (h *HTTPHandler) encodeThriftEnums092(json []byte) []byte { +func (*HTTPHandler) encodeThriftEnums092(json []byte) []byte { str := string(json) for _, strategyType := range samplingStrategyTypes { str = strings.Replace( diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/options.go b/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/options.go index 2707887831d..cae2fa6376f 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/options.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/options.go @@ -23,6 +23,7 @@ import ( "path/filepath" "time" + "go.opentelemetry.io/collector/config/configtls" "go.uber.org/zap" ) @@ -39,40 +40,40 @@ type Options struct { MaxVersion string `mapstructure:"max_version"` SkipHostVerify bool `mapstructure:"skip_host_verify"` ReloadInterval time.Duration `mapstructure:"reload_interval"` - certWatcher *certWatcher `mapstructure:"-"` + certWatcher *certWatcher } var systemCertPool = x509.SystemCertPool // to allow overriding in unit test // Config loads TLS certificates and returns a TLS Config. -func (p *Options) Config(logger *zap.Logger) (*tls.Config, error) { +func (o *Options) Config(logger *zap.Logger) (*tls.Config, error) { var minVersionId, maxVersionId uint16 - certPool, err := p.loadCertPool() + certPool, err := o.loadCertPool() if err != nil { return nil, fmt.Errorf("failed to load CA CertPool: %w", err) } - cipherSuiteIds, err := CipherSuiteNamesToIDs(p.CipherSuites) + cipherSuiteIds, err := CipherSuiteNamesToIDs(o.CipherSuites) if err != nil { return nil, fmt.Errorf("failed to get cipher suite ids from cipher suite names: %w", err) } - if p.MinVersion != "" { - minVersionId, err = VersionNameToID(p.MinVersion) + if o.MinVersion != "" { + minVersionId, err = VersionNameToID(o.MinVersion) if err != nil { return nil, fmt.Errorf("failed to get minimum tls version: %w", err) } } - if p.MaxVersion != "" { - maxVersionId, err = VersionNameToID(p.MaxVersion) + if o.MaxVersion != "" { + maxVersionId, err = VersionNameToID(o.MaxVersion) if err != nil { return nil, fmt.Errorf("failed to get maximum tls version: %w", err) } } - if p.MinVersion != "" && p.MaxVersion != "" { + if o.MinVersion != "" && o.MaxVersion != "" { if minVersionId > maxVersionId { return nil, fmt.Errorf("minimum tls version can't be greater than maximum tls version") } @@ -80,47 +81,47 @@ func (p *Options) Config(logger *zap.Logger) (*tls.Config, error) { tlsCfg := &tls.Config{ RootCAs: certPool, - ServerName: p.ServerName, - InsecureSkipVerify: p.SkipHostVerify, /* #nosec G402*/ + ServerName: o.ServerName, + InsecureSkipVerify: o.SkipHostVerify, /* #nosec G402*/ CipherSuites: cipherSuiteIds, MinVersion: minVersionId, MaxVersion: maxVersionId, } - if p.ClientCAPath != "" { + if o.ClientCAPath != "" { // TODO this should be moved to certWatcher, since it already loads key pair certPool := x509.NewCertPool() - if err := addCertToPool(p.ClientCAPath, 
certPool); err != nil { + if err := addCertToPool(o.ClientCAPath, certPool); err != nil { return nil, err } tlsCfg.ClientCAs = certPool tlsCfg.ClientAuth = tls.RequireAndVerifyClientCert } - certWatcher, err := newCertWatcher(*p, logger, tlsCfg.RootCAs, tlsCfg.ClientCAs) + certWatcher, err := newCertWatcher(*o, logger, tlsCfg.RootCAs, tlsCfg.ClientCAs) if err != nil { return nil, err } - p.certWatcher = certWatcher + o.certWatcher = certWatcher - if (p.CertPath == "" && p.KeyPath != "") || (p.CertPath != "" && p.KeyPath == "") { + if (o.CertPath == "" && o.KeyPath != "") || (o.CertPath != "" && o.KeyPath == "") { return nil, fmt.Errorf("for client auth via TLS, either both client certificate and key must be supplied, or neither") } - if p.CertPath != "" && p.KeyPath != "" { + if o.CertPath != "" && o.KeyPath != "" { tlsCfg.GetCertificate = func(*tls.ClientHelloInfo) (*tls.Certificate, error) { - return p.certWatcher.certificate(), nil + return o.certWatcher.certificate(), nil } // GetClientCertificate is used on the client side when server is configured with tls.RequireAndVerifyClientCert e.g. mTLS tlsCfg.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { - return p.certWatcher.certificate(), nil + return o.certWatcher.certificate(), nil } } return tlsCfg, nil } -func (p Options) loadCertPool() (*x509.CertPool, error) { - if len(p.CAPath) == 0 { // no truststore given, use SystemCertPool +func (o Options) loadCertPool() (*x509.CertPool, error) { + if len(o.CAPath) == 0 { // no truststore given, use SystemCertPool certPool, err := loadSystemCertPool() if err != nil { return nil, fmt.Errorf("failed to load SystemCertPool: %w", err) @@ -129,12 +130,29 @@ func (p Options) loadCertPool() (*x509.CertPool, error) { } certPool := x509.NewCertPool() // setup user specified truststore - if err := addCertToPool(p.CAPath, certPool); err != nil { + if err := addCertToPool(o.CAPath, certPool); err != nil { return nil, err } return certPool, nil } +func (o *Options) ToOtelClientConfig() configtls.ClientConfig { + return configtls.ClientConfig{ + Insecure: !o.Enabled, + InsecureSkipVerify: o.SkipHostVerify, + ServerName: o.ServerName, + Config: configtls.Config{ + CAFile: o.CAPath, + CertFile: o.CertPath, + KeyFile: o.KeyPath, + CipherSuites: o.CipherSuites, + MinVersion: o.MinVersion, + MaxVersion: o.MaxVersion, + ReloadInterval: o.ReloadInterval, + }, + } +} + func addCertToPool(caPath string, certPool *x509.CertPool) error { caPEM, err := os.ReadFile(filepath.Clean(caPath)) if err != nil { @@ -150,9 +168,9 @@ func addCertToPool(caPath string, certPool *x509.CertPool) error { var _ io.Closer = (*Options)(nil) // Close shuts down the embedded certificate watcher. -func (p *Options) Close() error { - if p.certWatcher != nil { - return p.certWatcher.Close() +func (o *Options) Close() error { + if o.certWatcher != nil { + return o.certWatcher.Close() } return nil } diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/discovery/grpcresolver/grpc_resolver.go b/vendor/github.com/jaegertracing/jaeger/pkg/discovery/grpcresolver/grpc_resolver.go index 2329870f7ca..8783b19f843 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/discovery/grpcresolver/grpc_resolver.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/discovery/grpcresolver/grpc_resolver.go @@ -85,7 +85,7 @@ func New( } // Build returns itself for Resolver, because it's both a builder and a resolver. 
-func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { +func (r *Resolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { r.cc = cc - // Update conn state if proactive updates already work @@ -106,7 +106,7 @@ func (r *Resolver) Scheme() string { // ResolveNow is a noop for Resolver since resolver is already firing r.cc.UpdateState every time // it receives updates of new instances from discoCh -func (r *Resolver) ResolveNow(o resolver.ResolveNowOptions) {} +func (*Resolver) ResolveNow(resolver.ResolveNowOptions) {} func (r *Resolver) watcher() { defer r.closing.Done() diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/fswatcher/fswatcher.go b/vendor/github.com/jaegertracing/jaeger/pkg/fswatcher/fswatcher.go index d3c929d1d9b..fa7ef7f8ca5 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/fswatcher/fswatcher.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/fswatcher/fswatcher.go @@ -86,11 +86,11 @@ func (w *FSWatcher) setupWatchedPaths(filepaths []string) error { if p == "" { continue } - if h, err := hashFile(p); err == nil { - w.fileHashContentMap[p] = h - } else { + h, err := hashFile(p) + if err != nil { return err } + w.fileHashContentMap[p] = h dir := path.Dir(p) if _, ok := uniqueDirs[dir]; !ok { if err := w.watcher.Add(dir); err != nil { diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/gogocodec/codec.go b/vendor/github.com/jaegertracing/jaeger/pkg/gogocodec/codec.go index f7df1c2d2ec..f72de1d9f27 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/gogocodec/codec.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/gogocodec/codec.go @@ -61,12 +61,12 @@ func newCodec() *gogoCodec { } // Name implements encoding.Codec -func (c *gogoCodec) Name() string { +func (*gogoCodec) Name() string { return proto.Name } // Marshal implements encoding.Codec -func (c *gogoCodec) Marshal(v interface{}) ([]byte, error) { +func (*gogoCodec) Marshal(v any) ([]byte, error) { t := reflect.TypeOf(v) elem := t.Elem() // use gogo proto only for Jaeger types @@ -77,7 +77,7 @@ func (c *gogoCodec) Marshal(v interface{}) ([]byte, error) { } // Unmarshal implements encoding.Codec -func (c *gogoCodec) Unmarshal(data []byte, v interface{}) error { +func (*gogoCodec) Unmarshal(data []byte, v any) error { t := reflect.TypeOf(v) elem := t.Elem() // only for collections // use gogo proto only for Jaeger types diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/healthcheck/handler.go b/vendor/github.com/jaegertracing/jaeger/pkg/healthcheck/handler.go index 1a714ea70c6..b1860fa2349 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/healthcheck/handler.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/healthcheck/handler.go @@ -106,7 +106,7 @@ func (hc *HealthCheck) Handler() http.Handler { }) } -func (hc *HealthCheck) createRespBody(state state, template healthCheckResponse) []byte { +func (*HealthCheck) createRespBody(state state, template healthCheckResponse) []byte { resp := template // clone if state.status == Ready { resp.UpSince = state.upSince diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/factory.go b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/factory.go index e77257127d9..b1b9000a285 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/factory.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/factory.go @@ -64,19 +64,19 @@ var NullFactory Factory = nullFactory{} type nullFactory struct{} -func
(nullFactory) Counter(options Options) Counter { +func (nullFactory) Counter(Options) Counter { return NullCounter } -func (nullFactory) Timer(options TimerOptions) Timer { +func (nullFactory) Timer(TimerOptions) Timer { return NullTimer } -func (nullFactory) Gauge(options Options) Gauge { +func (nullFactory) Gauge(Options) Gauge { return NullGauge } -func (nullFactory) Histogram(options HistogramOptions) Histogram { +func (nullFactory) Histogram(HistogramOptions) Histogram { return NullHistogram } -func (nullFactory) Namespace(scope NSOptions) Factory { return NullFactory } +func (nullFactory) Namespace(NSOptions /* scope */) Factory { return NullFactory } diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/metrics.go b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/metrics.go index 86cf913bc94..a7954ded67d 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/metrics.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/metrics.go @@ -32,7 +32,7 @@ import ( // of type Counter or Gauge or Timer. // // Errors during Init lead to a panic. -func MustInit(metrics interface{}, factory Factory, globalTags map[string]string) { +func MustInit(metrics any, factory Factory, globalTags map[string]string) { if err := Init(metrics, factory, globalTags); err != nil { panic(err.Error()) } @@ -40,7 +40,7 @@ func MustInit(metrics interface{}, factory Factory, globalTags map[string]string // Init does the same as MustInit, but returns an error instead of // panicking. -func Init(m interface{}, factory Factory, globalTags map[string]string) error { +func Init(m any, factory Factory, globalTags map[string]string) error { // Allow user to opt out of reporting metrics by passing in nil. if factory == nil { factory = NullFactory @@ -101,7 +101,7 @@ func Init(m interface{}, factory Factory, globalTags map[string]string) error { } } help := field.Tag.Get("help") - var obj interface{} + var obj any switch { case field.Type.AssignableTo(counterPtrType): obj = factory.Counter(Options{ diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/recoveryhandler/zap.go b/vendor/github.com/jaegertracing/jaeger/pkg/recoveryhandler/zap.go index 678c179dade..4303248dae1 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/recoveryhandler/zap.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/recoveryhandler/zap.go @@ -29,7 +29,7 @@ type zapRecoveryWrapper struct { } // Println logs an error message with the given fields -func (z zapRecoveryWrapper) Println(args ...interface{}) { +func (z zapRecoveryWrapper) Println(args ...any) { z.logger.Error(fmt.Sprint(args...)) } diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/tenancy/context.go b/vendor/github.com/jaegertracing/jaeger/pkg/tenancy/context.go deleted file mode 100644 index 9e0021e3659..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/pkg/tenancy/context.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2022 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package tenancy - -import "context" - -// tenantKeyType is a custom type for the key "tenant", following context.Context convention -type tenantKeyType string - -const ( - // tenantKey holds tenancy for spans - tenantKey = tenantKeyType("tenant") -) - -// WithTenant creates a Context with a tenant association -func WithTenant(ctx context.Context, tenant string) context.Context { - return context.WithValue(ctx, tenantKey, tenant) -} - -// GetTenant retrieves a tenant associated with a Context -func GetTenant(ctx context.Context) string { - tenant := ctx.Value(tenantKey) - if tenant == nil { - return "" - } - - if s, ok := tenant.(string); ok { - return s - } - return "" -} diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/tenancy/flags.go b/vendor/github.com/jaegertracing/jaeger/pkg/tenancy/flags.go deleted file mode 100644 index 0a050c4a3ef..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/pkg/tenancy/flags.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) 2022 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tenancy - -import ( - "flag" - "fmt" - "strings" - - "github.com/spf13/viper" -) - -const ( - flagPrefix = "multi-tenancy" - flagTenancyEnabled = flagPrefix + ".enabled" - flagTenancyHeader = flagPrefix + ".header" - flagValidTenants = flagPrefix + ".tenants" -) - -// AddFlags adds flags for tenancy to the FlagSet. -func AddFlags(flags *flag.FlagSet) { - flags.Bool(flagTenancyEnabled, false, "Enable tenancy header when receiving or querying") - flags.String(flagTenancyHeader, "x-tenant", "HTTP header carrying tenant") - flags.String(flagValidTenants, "", - fmt.Sprintf("comma-separated list of allowed values for --%s header. (If not supplied, tenants are not restricted)", - flagTenancyHeader)) -} - -// InitFromViper creates tenancy.Options populated with values retrieved from Viper. -func InitFromViper(v *viper.Viper) Options { - var p Options - p.Enabled = v.GetBool(flagTenancyEnabled) - p.Header = v.GetString(flagTenancyHeader) - tenants := v.GetString(flagValidTenants) - if len(tenants) != 0 { - p.Tenants = strings.Split(tenants, ",") - } else { - p.Tenants = []string{} - } - - return p -} diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/tenancy/grpc.go b/vendor/github.com/jaegertracing/jaeger/pkg/tenancy/grpc.go deleted file mode 100644 index 80d069d3825..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/pkg/tenancy/grpc.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright (c) 2022 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package tenancy - -import ( - "context" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// tenantedServerStream is a wrapper for ServerStream providing settable context -type tenantedServerStream struct { - grpc.ServerStream - context context.Context -} - -func (tss *tenantedServerStream) Context() context.Context { - return tss.context -} - -func getValidTenant(ctx context.Context, tc *Manager) (string, error) { - // Handle case where tenant is already directly in the context - tenant := GetTenant(ctx) - if tenant != "" { - if !tc.Valid(tenant) { - return tenant, status.Errorf(codes.PermissionDenied, "unknown tenant") - } - return tenant, nil - } - - // Handle case where tenant is in the context metadata - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return "", status.Errorf(codes.PermissionDenied, "missing tenant header") - } - - var err error - tenant, err = tenantFromMetadata(md, tc.Header) - if err != nil { - return "", err - } - if !tc.Valid(tenant) { - return tenant, status.Errorf(codes.PermissionDenied, "unknown tenant") - } - - return tenant, nil -} - -func directlyAttachedTenant(ctx context.Context) bool { - return GetTenant(ctx) != "" -} - -// NewGuardingStreamInterceptor blocks handling of streams whose tenancy header doesn't meet tenancy requirements. -// It also ensures the tenant is directly in the context, rather than context metadata. -func NewGuardingStreamInterceptor(tc *Manager) grpc.StreamServerInterceptor { - return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - tenant, err := getValidTenant(ss.Context(), tc) - if err != nil { - return err - } - - if directlyAttachedTenant(ss.Context()) { - return handler(srv, ss) - } - - // "upgrade" the tenant to be part of the context, rather than just incoming metadata - return handler(srv, &tenantedServerStream{ - ServerStream: ss, - context: WithTenant(ss.Context(), tenant), - }) - } -} - -func tenantFromMetadata(md metadata.MD, tenancyHeader string) (string, error) { - tenants := md.Get(tenancyHeader) - if len(tenants) < 1 { - return "", status.Errorf(codes.Unauthenticated, "missing tenant header") - } else if len(tenants) > 1 { - return "", status.Errorf(codes.PermissionDenied, "extra tenant header") - } - - return tenants[0], nil -} - -// NewGuardingUnaryInterceptor blocks handling of RPCs whose tenancy header doesn't meet tenancy requirements. -// It also ensures the tenant is directly in the context, rather than context metadata. -func NewGuardingUnaryInterceptor(tc *Manager) grpc.UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - tenant, err := getValidTenant(ctx, tc) - if err != nil { - return nil, err - } - - if directlyAttachedTenant(ctx) { - return handler(ctx, req) - } - - return handler(WithTenant(ctx, tenant), req) - } -} - -// NewClientUnaryInterceptor injects tenant header into gRPC request metadata. 
-func NewClientUnaryInterceptor(tc *Manager) grpc.UnaryClientInterceptor { - return grpc.UnaryClientInterceptor(func( - ctx context.Context, - method string, - req, reply interface{}, - cc *grpc.ClientConn, - invoker grpc.UnaryInvoker, - opts ...grpc.CallOption, - ) error { - if tenant := GetTenant(ctx); tenant != "" { - ctx = metadata.AppendToOutgoingContext(ctx, tc.Header, tenant) - } - return invoker(ctx, method, req, reply, cc, opts...) - }) -} - -// NewClientStreamInterceptor injects tenant header into gRPC request metadata. -func NewClientStreamInterceptor(tc *Manager) grpc.StreamClientInterceptor { - return grpc.StreamClientInterceptor(func( - ctx context.Context, - desc *grpc.StreamDesc, - cc *grpc.ClientConn, - method string, - streamer grpc.Streamer, - opts ...grpc.CallOption, - ) (grpc.ClientStream, error) { - if tenant := GetTenant(ctx); tenant != "" { - ctx = metadata.AppendToOutgoingContext(ctx, tc.Header, tenant) - } - return streamer(ctx, desc, cc, method, opts...) - }) -} diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/tenancy/http.go b/vendor/github.com/jaegertracing/jaeger/pkg/tenancy/http.go deleted file mode 100644 index 0884a7d41ed..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/pkg/tenancy/http.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (c) 2022 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tenancy - -import ( - "context" - "net/http" - - "google.golang.org/grpc/metadata" -) - -// ExtractTenantHTTPHandler returns an http.Handler containing the logic to extract -// the tenancy header of the http.Request and insert the tenant into request.Context -// for propagation. The token can be accessed via tenancy.GetTenant(). -func ExtractTenantHTTPHandler(tc *Manager, h http.Handler) http.Handler { - if !tc.Enabled { - return h - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - tenant := r.Header.Get(tc.Header) - if tenant == "" { - w.WriteHeader(http.StatusUnauthorized) - w.Write([]byte("missing tenant header")) - return - } - - if !tc.Valid(tenant) { - w.WriteHeader(http.StatusUnauthorized) - w.Write([]byte("unknown tenant")) - return - } - - ctx := WithTenant(r.Context(), tenant) - h.ServeHTTP(w, r.WithContext(ctx)) - }) -} - -// MetadataAnnotator returns a function suitable for propagating tenancy -// via github.com/grpc-ecosystem/grpc-gateway/runtime.NewServeMux -func (tc *Manager) MetadataAnnotator() func(context.Context, *http.Request) metadata.MD { - return func(ctx context.Context, req *http.Request) metadata.MD { - tenant := req.Header.Get(tc.Header) - if tenant == "" { - // The HTTP request lacked the tenancy header. Pass along -// empty metadata -- the gRPC query service will reject later.
- return metadata.Pairs() - } - return metadata.New(map[string]string{ - tc.Header: tenant, - }) - } -} diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/tenancy/manager.go b/vendor/github.com/jaegertracing/jaeger/pkg/tenancy/manager.go deleted file mode 100644 index 9f6addf1471..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/pkg/tenancy/manager.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright (c) 2022 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tenancy - -// Options describes the configuration properties for multitenancy -type Options struct { - Enabled bool - Header string - Tenants []string -} - -// Manager can check tenant usage for multi-tenant Jaeger configurations -type Manager struct { - Enabled bool - Header string - guard guard -} - -// Guard verifies a valid tenant when tenancy is enabled -type guard interface { - Valid(candidate string) bool -} - -// NewManager creates a tenancy.Manager for given tenancy.Options. -func NewManager(options *Options) *Manager { - // Default header value (although set by CLI flags, this helps tests and API users) - header := options.Header - if header == "" && options.Enabled { - header = "x-tenant" - } - return &Manager{ - Enabled: options.Enabled, - Header: header, - guard: tenancyGuardFactory(options), - } -} - -func (tc *Manager) Valid(tenant string) bool { - return tc.guard.Valid(tenant) -} - -type tenantDontCare bool - -func (tenantDontCare) Valid(candidate string) bool { - return true -} - -type tenantList struct { - tenants map[string]bool -} - -func (tl *tenantList) Valid(candidate string) bool { - _, ok := tl.tenants[candidate] - return ok -} - -func newTenantList(tenants []string) *tenantList { - tenantMap := make(map[string]bool) - for _, tenant := range tenants { - tenantMap[tenant] = true - } - - return &tenantList{ - tenants: tenantMap, - } -} - -func tenancyGuardFactory(options *Options) guard { - // Three cases - // - no tenancy - // - tenancy, but no guarding by tenant - // - tenancy, with guarding by a list - - if !options.Enabled || len(options.Tenants) == 0 { - return tenantDontCare(true) - } - - return newTenantList(options.Tenants) -} diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/version/command.go b/vendor/github.com/jaegertracing/jaeger/pkg/version/command.go index 343c1a477ac..546170b8707 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/version/command.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/version/command.go @@ -30,7 +30,7 @@ func Command() *cobra.Command { Use: "version", Short: "Print the version.", Long: `Print the version and build information.`, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, _ /* args */ []string) error { json, err := json.Marshal(info) if err != nil { return err diff --git a/vendor/github.com/jaegertracing/jaeger/plugin/README.md b/vendor/github.com/jaegertracing/jaeger/plugin/README.md deleted file mode 100644 index 4490c35fb9a..00000000000 --- 
a/vendor/github.com/jaegertracing/jaeger/plugin/README.md +++ /dev/null @@ -1,3 +0,0 @@ -The collection of implementations of different interfaces defined across Jaeger - -For example, implementations of the storage interface can be found in the plugin package \ No newline at end of file diff --git a/vendor/github.com/jaegertracing/jaeger/plugin/configurable.go b/vendor/github.com/jaegertracing/jaeger/plugin/configurable.go deleted file mode 100644 index 20872798630..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/plugin/configurable.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) 2019 The Jaeger Authors. -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package plugin - -import ( - "flag" - - "github.com/spf13/viper" - "go.uber.org/zap" -) - -// Configurable interface can be implemented by plugins that require external configuration, -// such as CLI flags, config files, or environment variables. -type Configurable interface { - // AddFlags adds CLI flags for configuring this component. - AddFlags(flagSet *flag.FlagSet) - - // InitFromViper initializes this component with properties from spf13/viper. - InitFromViper(v *viper.Viper, logger *zap.Logger) -} diff --git a/vendor/github.com/jaegertracing/jaeger/plugin/doc.go b/vendor/github.com/jaegertracing/jaeger/plugin/doc.go deleted file mode 100644 index dd04bfff02b..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/plugin/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (c) 2019 The Jaeger Authors. -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package plugin is the collection of implementations of different interfaces defined across Jaeger -// -// For example, implementations of the storage interface can be found in the plugin package -package plugin diff --git a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/README.md b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/README.md deleted file mode 100644 index 1fee18db23a..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/README.md +++ /dev/null @@ -1,243 +0,0 @@ -gRPC Storage Plugins -==================== - -Update (Jan 2022): as of Jaeger v1.30, the gRPC storage extension can be implemented as a remote gRPC server, in addition to the gRPC plugin architecture described below. The remote server needs to implement the same `storage_v1` gRPC interfaces defined in `plugin/storage/grpc/proto/`. 
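For the remote-server mode mentioned in the update note above, Jaeger acts as an ordinary gRPC client; the deleted `config/config.go` near the end of this patch (`buildRemote`) shows the real dialing logic. A stripped-down, self-contained sketch of that connection setup, with an illustrative address, looks like this:

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Bounded, blocking dial, mirroring RemoteConnectTimeout in buildRemote.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	conn, err := grpc.DialContext(ctx, "localhost:17271", // address is illustrative
		grpc.WithTransportCredentials(insecure.NewCredentials()), // plaintext; see RemoteTLS for the TLS path
		grpc.WithBlock(),
	)
	if err != nil {
		log.Fatalf("connecting to remote storage: %v", err)
	}
	defer conn.Close()
	// conn would now back the generated storage_v1 client stubs.
}
```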
- -gRPC Storage Plugins currently use the [Hashicorp go-plugin](https://github.com/hashicorp/go-plugin). This requires the -implementer of a plugin to develop the "server" side of the go-plugin system. At a high level this looks like: - -``` -+----------------------------------+                  +-----------------------------+ -|                                  |                  |                             | -|                  +-------------+ |   unix-socket    | +-------------+             | -|                  |             | |                  | |             |             | -| jaeger-component | grpc-client +----------------------> grpc-server | plugin-impl | -|                  |             | |                  | |             |             | -|                  +-------------+ |                  | +-------------+             | -|                                  |                  |                             | -+----------------------------------+                  +-----------------------------+ - -       parent process                                      child sub-process -``` - -Implementing a plugin ---------------------- - -Although the instructions below are limited to Go, plugins can be implemented in any language. Languages other than -Go would implement a gRPC server using the `storage_v1` proto interfaces. The `proto` file can be found in `plugin/storage/grpc/proto/`. -To generate the bindings for your language you would use `protoc` with the appropriate `xx_out=` flag. This is detailed -in the [protobuf documentation](https://developers.google.com/protocol-buffers/docs/tutorials) and you can see an example of -how it is done for Go in the top level Jaeger `Makefile`. - -The easiest way to generate the gRPC storage plugin bindings is to use [Docker Protobuf](https://github.com/jaegertracing/docker-protobuf/) which is a lightweight `protoc` Docker image containing the dependencies needed to generate code for multiple languages. For example, one can generate bindings for C# on Windows with Docker for Windows using the following steps: -1. First clone the Jaeger GitHub repo to a folder (e.g. `c:\source\repos\jaeger`): -``` -$ mkdir c:\source\repos\jaeger -$ cd c:\source\repos\jaeger -$ git clone https://github.com/jaegertracing/jaeger.git c:\source\repos\jaeger -``` -2. Initialize the Jaeger repo submodules (this pulls in code from other Jaeger repositories that are needed): -``` -$ git submodule update --init --recursive -``` -3. Then execute the following Docker command which mounts the local directory `c:\source\repos\jaeger` to the directory `/jaeger` in the Docker container and then executes the `jaegertracing/protobuf:0.2.0` command. This will create a file called `Storage.cs` in your local Windows folder `c:\source\repos\jaeger\code` containing the gRPC Storage Plugin bindings. -``` -$ docker run --rm -u 1000 -v/c/source/repos/jaeger:/jaeger -w/jaeger \ - jaegertracing/protobuf:0.2.0 "-I/jaeger -Iidl/proto/api_v2 -I/usr/include/github.com/gogo/protobuf -Iplugin/storage/grpc/proto --csharp_out=/jaeger/code plugin/storage/grpc/proto/storage.proto" -``` - -There are instructions on implementing a `go-plugin` server for non-Go languages in the -[go-plugin non-go guide](https://github.com/hashicorp/go-plugin/blob/master/docs/guide-plugin-write-non-go.md). -Take note of the required [health check service](https://github.com/hashicorp/go-plugin/blob/master/docs/guide-plugin-write-non-go.md#3-add-the-grpc-health-checking-service). - -A Go plugin is a standalone application which calls `grpc.Serve(&pluginServices)` in its `main` function, where the `grpc` package -is `github.com/jaegertracing/jaeger/plugin/storage/grpc`.
- -```go - package main - - import ( - "flag" - "github.com/jaegertracing/jaeger/plugin/storage/grpc" - "github.com/jaegertracing/jaeger/plugin/storage/grpc/shared" - ) - - func main() { - var configPath string - flag.StringVar(&configPath, "config", "", "A path to the plugin's configuration file") - flag.Parse() - - plugin := myStoragePlugin{} - - grpc.Serve(&shared.PluginServices{ - Store: plugin, - ArchiveStore: plugin, - }) - } -``` - -Note that `grpc.Serve` is called as the final part of `main`. This should be called after you have carried out any necessary -setup for your plugin, because once the plugin is running Jaeger may start calling it to read/write spans straight away. You could defer -setup until the first read/write, but that could make the first operation slow and also lead to racing behaviours. - -A plugin must implement the following StoragePlugin interface: - -```go -type StoragePlugin interface { - SpanReader() spanstore.Reader - SpanWriter() spanstore.Writer - DependencyReader() dependencystore.Reader -} -``` - -As your plugin will be dependent on the protobuf implementation within Jaeger, you will likely need to `vendor` your -dependencies; you can also use `go.mod` to achieve the same goal of pinning your plugin to a Jaeger point in time. - -A simple plugin which uses the memstore storage implementation can be found in the `examples` directory of the top level -of the Jaeger project. - -To support archive storage, a plugin must implement the following ArchiveStoragePlugin interface: - -```go -type ArchiveStoragePlugin interface { - ArchiveSpanReader() spanstore.Reader - ArchiveSpanWriter() spanstore.Writer -} -``` - -If you don't plan to implement archive storage, simply do not fill the `ArchiveStore` property of `shared.PluginServices`: - -```go -grpc.Serve(&shared.PluginServices{ - Store: plugin, -}) -``` - -The plugin framework supports writing spans via gRPC stream, instead of unary messages. Streaming writes can improve throughput and decrease CPU load (see benchmarks in Issue #3636). The plugin needs to implement the `StreamingSpanWriter` interface and indicate support via the `streamingSpanWriter` flag in the `Capabilities` response. - -Note that using the streaming spanWriter may make the collector's `save_by_svr` metric inaccurate, in which case users will need to pay attention to the metrics provided by the plugin. - -Certifying compliance ---------------- -A plugin implementation shall verify its correctness with the Jaeger storage protocol by running the storage integration tests from [integration package](https://github.com/jaegertracing/jaeger/blob/main/plugin/storage/integration/integration.go#L397). - -```golang -import ( - jaeger_integration_tests "github.com/jaegertracing/jaeger/plugin/storage/integration" -) - -func TestJaegerStorageIntegration(t *testing.T) { - ... - si := jaeger_integration_tests.StorageIntegration{ - SpanReader: createSpanReader(), - SpanWriter: createSpanWriter(), - CleanUp: func() error { ... }, - Refresh: func() error { ... }, - SkipList: []string { // Skip any unsupported tests - }, - } - // Runs all storage integration tests. - si.IntegrationTestAll(t) -} -``` -For more details, refer to one of the following implementations. - -1. [grpc-plugin](https://github.com/jaegertracing/jaeger/blob/cbceceb1e0cc308cdf0226b1fa19b9c531a3a2d3/plugin/storage/integration/grpc_test.go#L189-L203) -2. [jaeger-clickhouse](https://github.com/jaegertracing/jaeger-clickhouse/blob/798c568c1e1a345536f35692fca71196a796811e/integration/grpc_test.go#L88-L107) -3.
[Timescale DB via Promscale](https://github.com/timescale/promscale/blob/ccde8accf5205450891e805e23566d9a11dbf8d3/pkg/tests/end_to_end_tests/jaeger_store_integration_test.go#L79-L97) - -Running with a plugin --------------------- -A plugin can be run using the `all-in-one` application within the top level `cmd` package of the Jaeger project. To do this, -an environment variable must be set to tell the `all-in-one` application to use the gRPC plugin storage: -`export SPAN_STORAGE_TYPE="grpc-plugin"` - -Once this has been set, there are two command line flags that can be used to configure the plugin. The first is -`--grpc-storage-plugin.binary` which is required and is the path to the plugin **binary**. The second is -`--grpc-storage-plugin.configuration-file` which is optional and is the path to the configuration file which will be -provided to your plugin as a command line flag. This command line flag is `config`, as can be seen in the code sample -above. An example invocation would be: - -``` -./all-in-one --grpc-storage-plugin.binary=/path/to/my/plugin --grpc-storage-plugin.configuration-file=/path/to/my/config -``` - -As well as passing configuration values through the configuration file given on the command line, it is also possible to use -environment variables. When you invoke `all-in-one`, any environment variables that have been set will also be accessible -from within your plugin; this is useful when using Docker. - -Logging ------- -In order for Jaeger to include the log output from your plugin, you need to use `hclog` (`"github.com/hashicorp/go-hclog"`). -The plugin framework will only include log output created at the `WARN` level or above. If you log output in this -way before calling `grpc.Serve`, it will still be included in the Jaeger output. - -An example logger instantiation could look like: - -``` -logger := hclog.New(&hclog.LoggerOptions{ - Level: hclog.Warn, - Name: "my-jaeger-plugin", - JSONFormat: true, -}) -``` - -There are more logger options that can be used with `hclog` listed on [godoc](https://godoc.org/github.com/hashicorp/go-hclog#LoggerOptions). - -Note: Setting the `Output` option to `os.Stdout` can confuse the `go-plugin` framework and lead it to consider the plugin -errored. - -Tracing ------- - -When `grpc-plugin` is used, it will be running as a separate process; thus, context propagation is necessary for inter-process scenarios. - -In order to get complete traces containing both `jaeger-component`(s) and the `grpc-plugin`, developers should enable tracing on the server side. -Thus, we can leverage gRPC interceptors: - -```golang -grpc.ServeWithGRPCServer(&shared.PluginServices{ - Store: memStorePlugin, - ArchiveStore: memStorePlugin, -}, func(options []googleGRPC.ServerOption) *googleGRPC.Server { - return plugin.DefaultGRPCServer([]googleGRPC.ServerOption{ - grpc.UnaryInterceptor(otelgrpc.UnaryServerInterceptor(otelgrpc.WithTracerProvider(tracerProvider))), - grpc.StreamInterceptor(otelgrpc.StreamServerInterceptor(otelgrpc.WithTracerProvider(tracerProvider))), - }) -}) -``` - -Refer to `example/memstore-plugin` for more details. - -Bearer token propagation from the UI ------------------------------------- -When using `--query.bearer-token-propagation=true`, the bearer token will be properly passed on to the gRPC plugin server. To get access to the bearer token in your plugin, use a method similar to: - -```golang -import ( - // ...
-    "fmt"
-    "google.golang.org/grpc/metadata"
-
-    "github.com/jaegertracing/jaeger/plugin/storage/grpc"
-)
-
-// ... spanReader type declared here
-
-func (r *spanReader) extractBearerToken(ctx context.Context) (string, bool) {
-    if md, ok := metadata.FromIncomingContext(ctx); ok {
-        values := md.Get(grpc.BearerTokenKey)
-        if len(values) > 0 {
-            return values[0], true
-        }
-    }
-    return "", false
-}
-
-// ... spanReader interface implementation
-
-func (r *spanReader) GetServices(ctx context.Context) ([]string, error) {
-    str, ok := r.extractBearerToken(ctx)
-    fmt.Println(fmt.Sprintf("spanReader.GetServices: bearer-token: '%s', wasGiven: '%t'", str, ok))
-    // ...
-}
-```
diff --git a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/config/config.go b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/config/config.go
deleted file mode 100644
index c22e9f9d6f1..00000000000
--- a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/config/config.go
+++ /dev/null
@@ -1,230 +0,0 @@
-// Copyright (c) 2019 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package config
-
-import (
-    "context"
-    "fmt"
-    "os/exec"
-    "time"
-
-    "github.com/hashicorp/go-hclog"
-    "github.com/hashicorp/go-plugin"
-    "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
-    "go.opentelemetry.io/otel/trace"
-    "go.uber.org/zap"
-    "google.golang.org/grpc"
-    "google.golang.org/grpc/credentials"
-    "google.golang.org/grpc/credentials/insecure"
-
-    "github.com/jaegertracing/jaeger/pkg/config/tlscfg"
-    "github.com/jaegertracing/jaeger/pkg/tenancy"
-    "github.com/jaegertracing/jaeger/plugin/storage/grpc/shared"
-)
-
-var pluginHealthCheckInterval = time.Second * 60
-
-// Configuration describes the options to customize the storage behavior.
-type Configuration struct {
-    PluginBinary            string        `yaml:"binary" mapstructure:"binary"`
-    PluginConfigurationFile string        `yaml:"configuration-file" mapstructure:"configuration_file"`
-    PluginLogLevel          string        `yaml:"log-level" mapstructure:"log_level"`
-    RemoteServerAddr        string        `yaml:"server" mapstructure:"server"`
-    RemoteTLS               tlscfg.Options
-    RemoteConnectTimeout    time.Duration `yaml:"connection-timeout" mapstructure:"connection-timeout"`
-    TenancyOpts             tenancy.Options
-
-    pluginHealthCheck     *time.Ticker
-    pluginHealthCheckDone chan bool
-    pluginRPCClient       plugin.ClientProtocol
-    remoteConn            *grpc.ClientConn
-}
-
-// ClientPluginServices defines services plugin can expose and its capabilities
-type ClientPluginServices struct {
-    shared.PluginServices
-    Capabilities     shared.PluginCapabilities
-    killPluginClient func()
-}
-
-func (c *ClientPluginServices) Close() error {
-    if c.killPluginClient != nil {
-        c.killPluginClient()
-    }
-    return nil
-}
-
-// PluginBuilder is used to create storage plugins. Implemented by Configuration.
-type PluginBuilder interface { - Build(logger *zap.Logger, tracerProvider trace.TracerProvider) (*ClientPluginServices, error) - Close() error -} - -// Build instantiates a PluginServices -func (c *Configuration) Build(logger *zap.Logger, tracerProvider trace.TracerProvider) (*ClientPluginServices, error) { - if c.PluginBinary != "" { - return c.buildPlugin(logger, tracerProvider) - } else { - return c.buildRemote(logger, tracerProvider) - } -} - -func (c *Configuration) Close() error { - if c.pluginHealthCheck != nil { - c.pluginHealthCheck.Stop() - c.pluginHealthCheckDone <- true - } - if c.remoteConn != nil { - c.remoteConn.Close() - } - - return c.RemoteTLS.Close() -} - -func (c *Configuration) buildRemote(logger *zap.Logger, tracerProvider trace.TracerProvider) (*ClientPluginServices, error) { - opts := []grpc.DialOption{ - grpc.WithStatsHandler(otelgrpc.NewClientHandler(otelgrpc.WithTracerProvider(tracerProvider))), - grpc.WithBlock(), - } - if c.RemoteTLS.Enabled { - tlsCfg, err := c.RemoteTLS.Config(logger) - if err != nil { - return nil, err - } - creds := credentials.NewTLS(tlsCfg) - opts = append(opts, grpc.WithTransportCredentials(creds)) - } else { - opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) - } - - ctx, cancel := context.WithTimeout(context.Background(), c.RemoteConnectTimeout) - defer cancel() - - tenancyMgr := tenancy.NewManager(&c.TenancyOpts) - if tenancyMgr.Enabled { - opts = append(opts, grpc.WithUnaryInterceptor(tenancy.NewClientUnaryInterceptor(tenancyMgr))) - opts = append(opts, grpc.WithStreamInterceptor(tenancy.NewClientStreamInterceptor(tenancyMgr))) - } - var err error - // TODO: Need to replace grpc.DialContext with grpc.NewClient and pass test - c.remoteConn, err = grpc.DialContext(ctx, c.RemoteServerAddr, opts...) 
- if err != nil { - return nil, fmt.Errorf("error connecting to remote storage: %w", err) - } - - grpcClient := shared.NewGRPCClient(c.remoteConn) - return &ClientPluginServices{ - PluginServices: shared.PluginServices{ - Store: grpcClient, - ArchiveStore: grpcClient, - StreamingSpanWriter: grpcClient, - }, - Capabilities: grpcClient, - }, nil -} - -func (c *Configuration) buildPlugin(logger *zap.Logger, tracerProvider trace.TracerProvider) (*ClientPluginServices, error) { - opts := []grpc.DialOption{ - grpc.WithStatsHandler(otelgrpc.NewClientHandler(otelgrpc.WithTracerProvider(tracerProvider))), - } - - tenancyMgr := tenancy.NewManager(&c.TenancyOpts) - if tenancyMgr.Enabled { - opts = append(opts, grpc.WithUnaryInterceptor(tenancy.NewClientUnaryInterceptor(tenancyMgr))) - opts = append(opts, grpc.WithStreamInterceptor(tenancy.NewClientStreamInterceptor(tenancyMgr))) - } - - // #nosec G204 - cmd := exec.Command(c.PluginBinary, "--config", c.PluginConfigurationFile) - client := plugin.NewClient(&plugin.ClientConfig{ - HandshakeConfig: shared.Handshake, - VersionedPlugins: map[int]plugin.PluginSet{ - 1: shared.PluginMap, - }, - Cmd: cmd, - AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC}, - Logger: hclog.New(&hclog.LoggerOptions{ - Level: hclog.LevelFromString(c.PluginLogLevel), - }), - GRPCDialOptions: opts, - }) - - rpcClient, err := client.Client() - if err != nil { - return nil, fmt.Errorf("error attempting to connect to plugin rpc client: %w", err) - } - - raw, err := rpcClient.Dispense(shared.StoragePluginIdentifier) - if err != nil { - return nil, fmt.Errorf("unable to retrieve storage plugin instance: %w", err) - } - - // in practice, the type of `raw` is *shared.grpcClient, and type casts below cannot fail - storagePlugin, ok := raw.(shared.StoragePlugin) - if !ok { - return nil, fmt.Errorf("unable to cast %T to shared.StoragePlugin for plugin \"%s\"", - raw, shared.StoragePluginIdentifier) - } - archiveStoragePlugin, ok := raw.(shared.ArchiveStoragePlugin) - if !ok { - return nil, fmt.Errorf("unable to cast %T to shared.ArchiveStoragePlugin for plugin \"%s\"", - raw, shared.StoragePluginIdentifier) - } - streamingSpanWriterPlugin, ok := raw.(shared.StreamingSpanWriterPlugin) - if !ok { - return nil, fmt.Errorf("unable to cast %T to shared.StreamingSpanWriterPlugin for plugin \"%s\"", - raw, shared.StoragePluginIdentifier) - } - capabilities, ok := raw.(shared.PluginCapabilities) - if !ok { - return nil, fmt.Errorf("unable to cast %T to shared.PluginCapabilities for plugin \"%s\"", - raw, shared.StoragePluginIdentifier) - } - - if err := c.startPluginHealthCheck(rpcClient, logger); err != nil { - return nil, fmt.Errorf("initial plugin health check failed: %w", err) - } - - return &ClientPluginServices{ - PluginServices: shared.PluginServices{ - Store: storagePlugin, - ArchiveStore: archiveStoragePlugin, - StreamingSpanWriter: streamingSpanWriterPlugin, - }, - Capabilities: capabilities, - killPluginClient: client.Kill, - }, nil -} - -func (c *Configuration) startPluginHealthCheck(rpcClient plugin.ClientProtocol, logger *zap.Logger) error { - c.pluginRPCClient = rpcClient - c.pluginHealthCheckDone = make(chan bool) - c.pluginHealthCheck = time.NewTicker(pluginHealthCheckInterval) - - go func() { - for { - select { - case <-c.pluginHealthCheckDone: - return - case <-c.pluginHealthCheck.C: - if err := c.pluginRPCClient.Ping(); err != nil { - logger.Fatal("plugin health check failed", zap.Error(err)) - } - } - } - }() - - return c.pluginRPCClient.Ping() -} diff --git 
a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/factory.go b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/factory.go deleted file mode 100644 index 0009ebcb6c8..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/factory.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright (c) 2019 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grpc - -import ( - "errors" - "flag" - "fmt" - "io" - - "github.com/spf13/viper" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" - - "github.com/jaegertracing/jaeger/pkg/metrics" - "github.com/jaegertracing/jaeger/plugin" - "github.com/jaegertracing/jaeger/plugin/storage/grpc/config" - "github.com/jaegertracing/jaeger/plugin/storage/grpc/shared" - "github.com/jaegertracing/jaeger/storage" - "github.com/jaegertracing/jaeger/storage/dependencystore" - "github.com/jaegertracing/jaeger/storage/spanstore" -) - -var ( // interface comformance checks - _ storage.Factory = (*Factory)(nil) - _ storage.ArchiveFactory = (*Factory)(nil) - _ io.Closer = (*Factory)(nil) - _ plugin.Configurable = (*Factory)(nil) -) - -// Factory implements storage.Factory and creates storage components backed by a storage plugin. -type Factory struct { - options Options - metricsFactory metrics.Factory - logger *zap.Logger - tracerProvider trace.TracerProvider - - builder config.PluginBuilder - - store shared.StoragePlugin - archiveStore shared.ArchiveStoragePlugin - streamingSpanWriter shared.StreamingSpanWriterPlugin - capabilities shared.PluginCapabilities - - servicesCloser io.Closer -} - -// NewFactory creates a new Factory. -func NewFactory() *Factory { - return &Factory{} -} - -// NewFactoryWithConfig is used from jaeger(v2). 
-func NewFactoryWithConfig( - cfg config.Configuration, - metricsFactory metrics.Factory, - logger *zap.Logger, -) (*Factory, error) { - f := NewFactory() - f.InitFromOptions(Options{Configuration: cfg}) - err := f.Initialize(metricsFactory, logger) - if err != nil { - return nil, err - } - return f, nil -} - -// AddFlags implements plugin.Configurable -func (f *Factory) AddFlags(flagSet *flag.FlagSet) { - f.options.AddFlags(flagSet) -} - -// InitFromViper implements plugin.Configurable -func (f *Factory) InitFromViper(v *viper.Viper, logger *zap.Logger) { - if err := f.options.InitFromViper(v); err != nil { - logger.Fatal("unable to initialize gRPC storage factory", zap.Error(err)) - } - f.builder = &f.options.Configuration -} - -// InitFromOptions initializes factory from options -func (f *Factory) InitFromOptions(opts Options) { - f.options = opts - f.builder = &f.options.Configuration -} - -// Initialize implements storage.Factory -func (f *Factory) Initialize(metricsFactory metrics.Factory, logger *zap.Logger) error { - f.metricsFactory, f.logger = metricsFactory, logger - f.tracerProvider = otel.GetTracerProvider() - - services, err := f.builder.Build(logger, f.tracerProvider) - if err != nil { - return fmt.Errorf("grpc-plugin builder failed to create a store: %w", err) - } - - f.store = services.Store - f.archiveStore = services.ArchiveStore - f.capabilities = services.Capabilities - f.streamingSpanWriter = services.StreamingSpanWriter - f.servicesCloser = services - logger.Info("External plugin storage configuration", zap.Any("configuration", f.options.Configuration)) - return nil -} - -// CreateSpanReader implements storage.Factory -func (f *Factory) CreateSpanReader() (spanstore.Reader, error) { - return f.store.SpanReader(), nil -} - -// CreateSpanWriter implements storage.Factory -func (f *Factory) CreateSpanWriter() (spanstore.Writer, error) { - if f.capabilities != nil && f.streamingSpanWriter != nil { - if capabilities, err := f.capabilities.Capabilities(); err == nil && capabilities.StreamingSpanWriter { - return f.streamingSpanWriter.StreamingSpanWriter(), nil - } - } - return f.store.SpanWriter(), nil -} - -// CreateDependencyReader implements storage.Factory -func (f *Factory) CreateDependencyReader() (dependencystore.Reader, error) { - return f.store.DependencyReader(), nil -} - -// CreateArchiveSpanReader implements storage.ArchiveFactory -func (f *Factory) CreateArchiveSpanReader() (spanstore.Reader, error) { - if f.capabilities == nil { - return nil, storage.ErrArchiveStorageNotSupported - } - capabilities, err := f.capabilities.Capabilities() - if err != nil { - return nil, err - } - if capabilities == nil || !capabilities.ArchiveSpanReader { - return nil, storage.ErrArchiveStorageNotSupported - } - return f.archiveStore.ArchiveSpanReader(), nil -} - -// CreateArchiveSpanWriter implements storage.ArchiveFactory -func (f *Factory) CreateArchiveSpanWriter() (spanstore.Writer, error) { - if f.capabilities == nil { - return nil, storage.ErrArchiveStorageNotSupported - } - capabilities, err := f.capabilities.Capabilities() - if err != nil { - return nil, err - } - if capabilities == nil || !capabilities.ArchiveSpanWriter { - return nil, storage.ErrArchiveStorageNotSupported - } - return f.archiveStore.ArchiveSpanWriter(), nil -} - -// Close closes the resources held by the factory -func (f *Factory) Close() error { - errs := []error{} - if f.servicesCloser != nil { - errs = append(errs, f.servicesCloser.Close()) - } - errs = append(errs, f.builder.Close()) - return 
errors.Join(errs...) -} diff --git a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/grpc.go b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/grpc.go deleted file mode 100644 index 09ffffcd399..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/grpc.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) 2019 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grpc - -import ( - "github.com/hashicorp/go-plugin" - "google.golang.org/grpc" - - "github.com/jaegertracing/jaeger/plugin/storage/grpc/shared" -) - -// Serve creates a plugin configuration using the implementation of StoragePlugin and then serves it. -func Serve(services *shared.PluginServices) { - ServeWithGRPCServer(services, plugin.DefaultGRPCServer) -} - -// ServeWithGRPCServer creates a plugin configuration using the implementation of StoragePlugin and -// function to create grpcServer, and then serves it. -func ServeWithGRPCServer(services *shared.PluginServices, grpcServer func([]grpc.ServerOption) *grpc.Server, -) { - plugin.Serve(&plugin.ServeConfig{ - HandshakeConfig: shared.Handshake, - VersionedPlugins: map[int]plugin.PluginSet{ - 1: map[string]plugin.Plugin{ - shared.StoragePluginIdentifier: &shared.StorageGRPCPlugin{ - Impl: services.Store, - ArchiveImpl: services.ArchiveStore, - StreamImpl: services.StreamingSpanWriter, - }, - }, - }, - GRPCServer: grpcServer, - }) -} diff --git a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/options.go b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/options.go deleted file mode 100644 index b3b8d9210b2..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/options.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) 2019 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package grpc - -import ( - "flag" - "fmt" - "log" - "time" - - "github.com/spf13/viper" - - "github.com/jaegertracing/jaeger/pkg/config/tlscfg" - "github.com/jaegertracing/jaeger/pkg/tenancy" - "github.com/jaegertracing/jaeger/plugin/storage/grpc/config" -) - -const ( - pluginBinary = "grpc-storage-plugin.binary" - pluginConfigurationFile = "grpc-storage-plugin.configuration-file" - pluginLogLevel = "grpc-storage-plugin.log-level" - remotePrefix = "grpc-storage" - remoteServer = remotePrefix + ".server" - remoteConnectionTimeout = remotePrefix + ".connection-timeout" - defaultPluginLogLevel = "warn" - defaultConnectionTimeout = time.Duration(5 * time.Second) - - deprecatedSidecar = "(deprecated, will be removed after 2024-03-01) " -) - -// Options contains GRPC plugins configs and provides the ability -// to bind them to command line flags -type Options struct { - Configuration config.Configuration `mapstructure:",squash"` -} - -func tlsFlagsConfig() tlscfg.ClientFlagsConfig { - return tlscfg.ClientFlagsConfig{ - Prefix: remotePrefix, - } -} - -// AddFlags adds flags for Options -func (opt *Options) AddFlags(flagSet *flag.FlagSet) { - tlsFlagsConfig().AddFlags(flagSet) - - flagSet.String(pluginBinary, "", deprecatedSidecar+"The location of the plugin binary") - flagSet.String(pluginConfigurationFile, "", deprecatedSidecar+"A path pointing to the plugin's configuration file, made available to the plugin with the --config arg") - flagSet.String(pluginLogLevel, defaultPluginLogLevel, "Set the log level of the plugin's logger") - flagSet.String(remoteServer, "", "The remote storage gRPC server address as host:port") - flagSet.Duration(remoteConnectionTimeout, defaultConnectionTimeout, "The remote storage gRPC server connection timeout") -} - -// InitFromViper initializes Options with properties from viper -func (opt *Options) InitFromViper(v *viper.Viper) error { - opt.Configuration.PluginBinary = v.GetString(pluginBinary) - opt.Configuration.PluginConfigurationFile = v.GetString(pluginConfigurationFile) - opt.Configuration.PluginLogLevel = v.GetString(pluginLogLevel) - opt.Configuration.RemoteServerAddr = v.GetString(remoteServer) - var err error - opt.Configuration.RemoteTLS, err = tlsFlagsConfig().InitFromViper(v) - if err != nil { - return fmt.Errorf("failed to parse gRPC storage TLS options: %w", err) - } - opt.Configuration.RemoteConnectTimeout = v.GetDuration(remoteConnectionTimeout) - opt.Configuration.TenancyOpts = tenancy.InitFromViper(v) - if opt.Configuration.PluginBinary != "" { - log.Printf(deprecatedSidecar + "using sidecar model of grpc-plugin storage, please upgrade to 'remote' gRPC storage. 
https://github.com/jaegertracing/jaeger/issues/4647") - } - return nil -} diff --git a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/archive.go b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/archive.go index 41741e7b950..1dacae3fcdf 100644 --- a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/archive.go +++ b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/archive.go @@ -58,22 +58,22 @@ func (r *archiveReader) GetTrace(ctx context.Context, traceID model.TraceID) (*m } // GetServices not used in archiveReader -func (r *archiveReader) GetServices(ctx context.Context) ([]string, error) { +func (*archiveReader) GetServices(context.Context) ([]string, error) { return nil, errors.New("GetServices not implemented") } // GetOperations not used in archiveReader -func (r *archiveReader) GetOperations(ctx context.Context, query spanstore.OperationQueryParameters) ([]spanstore.Operation, error) { +func (*archiveReader) GetOperations(context.Context, spanstore.OperationQueryParameters) ([]spanstore.Operation, error) { return nil, errors.New("GetOperations not implemented") } // FindTraces not used in archiveReader -func (r *archiveReader) FindTraces(ctx context.Context, query *spanstore.TraceQueryParameters) ([]*model.Trace, error) { +func (*archiveReader) FindTraces(context.Context, *spanstore.TraceQueryParameters) ([]*model.Trace, error) { return nil, errors.New("FindTraces not implemented") } // FindTraceIDs not used in archiveReader -func (r *archiveReader) FindTraceIDs(ctx context.Context, query *spanstore.TraceQueryParameters) ([]model.TraceID, error) { +func (*archiveReader) FindTraceIDs(context.Context, *spanstore.TraceQueryParameters) ([]model.TraceID, error) { return nil, errors.New("FindTraceIDs not implemented") } diff --git a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/grpc_client.go b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/grpc_client.go index 9b24b6c18eb..55f39971b14 100644 --- a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/grpc_client.go +++ b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/grpc_client.go @@ -28,6 +28,7 @@ import ( "github.com/jaegertracing/jaeger/model" "github.com/jaegertracing/jaeger/pkg/bearertoken" + _ "github.com/jaegertracing/jaeger/pkg/gogocodec" // force gogo codec registration "github.com/jaegertracing/jaeger/proto-gen/storage_v1" "github.com/jaegertracing/jaeger/storage/dependencystore" "github.com/jaegertracing/jaeger/storage/spanstore" @@ -37,16 +38,16 @@ import ( const BearerTokenKey = "bearer.token" var ( - _ StoragePlugin = (*grpcClient)(nil) - _ ArchiveStoragePlugin = (*grpcClient)(nil) - _ PluginCapabilities = (*grpcClient)(nil) + _ StoragePlugin = (*GRPCClient)(nil) + _ ArchiveStoragePlugin = (*GRPCClient)(nil) + _ PluginCapabilities = (*GRPCClient)(nil) // upgradeContext composites several steps of upgrading context upgradeContext = composeContextUpgradeFuncs(upgradeContextWithBearerToken) ) -// grpcClient implements shared.StoragePlugin and reads/writes spans and dependencies -type grpcClient struct { +// GRPCClient implements shared.StoragePlugin and reads/writes spans and dependencies +type GRPCClient struct { readerClient storage_v1.SpanReaderPluginClient writerClient storage_v1.SpanWriterPluginClient archiveReaderClient storage_v1.ArchiveSpanReaderPluginClient @@ -56,8 +57,8 @@ type grpcClient struct { streamWriterClient storage_v1.StreamingSpanWriterPluginClient } -func 
NewGRPCClient(c *grpc.ClientConn) *grpcClient { - return &grpcClient{ +func NewGRPCClient(c *grpc.ClientConn) *GRPCClient { + return &GRPCClient{ readerClient: storage_v1.NewSpanReaderPluginClient(c), writerClient: storage_v1.NewSpanWriterPluginClient(c), archiveReaderClient: storage_v1.NewArchiveSpanReaderPluginClient(c), @@ -99,34 +100,34 @@ func upgradeContextWithBearerToken(ctx context.Context) context.Context { } // DependencyReader implements shared.StoragePlugin. -func (c *grpcClient) DependencyReader() dependencystore.Reader { +func (c *GRPCClient) DependencyReader() dependencystore.Reader { return c } // SpanReader implements shared.StoragePlugin. -func (c *grpcClient) SpanReader() spanstore.Reader { +func (c *GRPCClient) SpanReader() spanstore.Reader { return c } // SpanWriter implements shared.StoragePlugin. -func (c *grpcClient) SpanWriter() spanstore.Writer { +func (c *GRPCClient) SpanWriter() spanstore.Writer { return c } -func (c *grpcClient) StreamingSpanWriter() spanstore.Writer { +func (c *GRPCClient) StreamingSpanWriter() spanstore.Writer { return newStreamingSpanWriter(c.streamWriterClient) } -func (c *grpcClient) ArchiveSpanReader() spanstore.Reader { +func (c *GRPCClient) ArchiveSpanReader() spanstore.Reader { return &archiveReader{client: c.archiveReaderClient} } -func (c *grpcClient) ArchiveSpanWriter() spanstore.Writer { +func (c *GRPCClient) ArchiveSpanWriter() spanstore.Writer { return &archiveWriter{client: c.archiveWriterClient} } // GetTrace takes a traceID and returns a Trace associated with that traceID -func (c *grpcClient) GetTrace(ctx context.Context, traceID model.TraceID) (*model.Trace, error) { +func (c *GRPCClient) GetTrace(ctx context.Context, traceID model.TraceID) (*model.Trace, error) { stream, err := c.readerClient.GetTrace(upgradeContext(ctx), &storage_v1.GetTraceRequest{ TraceID: traceID, }) @@ -141,7 +142,7 @@ func (c *grpcClient) GetTrace(ctx context.Context, traceID model.TraceID) (*mode } // GetServices returns a list of all known services -func (c *grpcClient) GetServices(ctx context.Context) ([]string, error) { +func (c *GRPCClient) GetServices(ctx context.Context) ([]string, error) { resp, err := c.readerClient.GetServices(upgradeContext(ctx), &storage_v1.GetServicesRequest{}) if err != nil { return nil, fmt.Errorf("plugin error: %w", err) @@ -151,7 +152,7 @@ func (c *grpcClient) GetServices(ctx context.Context) ([]string, error) { } // GetOperations returns the operations of a given service -func (c *grpcClient) GetOperations( +func (c *GRPCClient) GetOperations( ctx context.Context, query spanstore.OperationQueryParameters, ) ([]spanstore.Operation, error) { @@ -182,7 +183,7 @@ func (c *grpcClient) GetOperations( } // FindTraces retrieves traces that match the traceQuery -func (c *grpcClient) FindTraces(ctx context.Context, query *spanstore.TraceQueryParameters) ([]*model.Trace, error) { +func (c *GRPCClient) FindTraces(ctx context.Context, query *spanstore.TraceQueryParameters) ([]*model.Trace, error) { stream, err := c.readerClient.FindTraces(upgradeContext(ctx), &storage_v1.FindTracesRequest{ Query: &storage_v1.TraceQueryParameters{ ServiceName: query.ServiceName, @@ -220,7 +221,7 @@ func (c *grpcClient) FindTraces(ctx context.Context, query *spanstore.TraceQuery } // FindTraceIDs retrieves traceIDs that match the traceQuery -func (c *grpcClient) FindTraceIDs(ctx context.Context, query *spanstore.TraceQueryParameters) ([]model.TraceID, error) { +func (c *GRPCClient) FindTraceIDs(ctx context.Context, query 
*spanstore.TraceQueryParameters) ([]model.TraceID, error) { resp, err := c.readerClient.FindTraceIDs(upgradeContext(ctx), &storage_v1.FindTraceIDsRequest{ Query: &storage_v1.TraceQueryParameters{ ServiceName: query.ServiceName, @@ -241,7 +242,7 @@ func (c *grpcClient) FindTraceIDs(ctx context.Context, query *spanstore.TraceQue } // WriteSpan saves the span -func (c *grpcClient) WriteSpan(ctx context.Context, span *model.Span) error { +func (c *GRPCClient) WriteSpan(ctx context.Context, span *model.Span) error { _, err := c.writerClient.WriteSpan(ctx, &storage_v1.WriteSpanRequest{ Span: span, }) @@ -252,7 +253,7 @@ func (c *grpcClient) WriteSpan(ctx context.Context, span *model.Span) error { return nil } -func (c *grpcClient) Close() error { +func (c *GRPCClient) Close() error { _, err := c.writerClient.Close(context.Background(), &storage_v1.CloseWriterRequest{}) if err != nil && status.Code(err) != codes.Unimplemented { return fmt.Errorf("plugin error: %w", err) @@ -262,7 +263,7 @@ func (c *grpcClient) Close() error { } // GetDependencies returns all interservice dependencies -func (c *grpcClient) GetDependencies(ctx context.Context, endTs time.Time, lookback time.Duration) ([]model.DependencyLink, error) { +func (c *GRPCClient) GetDependencies(ctx context.Context, endTs time.Time, lookback time.Duration) ([]model.DependencyLink, error) { resp, err := c.depsReaderClient.GetDependencies(ctx, &storage_v1.GetDependenciesRequest{ EndTime: endTs, StartTime: endTs.Add(-lookback), @@ -274,7 +275,7 @@ func (c *grpcClient) GetDependencies(ctx context.Context, endTs time.Time, lookb return resp.Dependencies, nil } -func (c *grpcClient) Capabilities() (*Capabilities, error) { +func (c *GRPCClient) Capabilities() (*Capabilities, error) { capabilities, err := c.capabilitiesClient.Capabilities(context.Background(), &storage_v1.CapabilitiesRequest{}) if status.Code(err) == codes.Unimplemented { return &Capabilities{}, nil diff --git a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/grpc_handler.go b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/grpc_handler.go index 98f27a53540..d8843b1189f 100644 --- a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/grpc_handler.go +++ b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/grpc_handler.go @@ -22,9 +22,12 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/status" "github.com/jaegertracing/jaeger/model" + _ "github.com/jaegertracing/jaeger/pkg/gogocodec" // force gogo codec registration "github.com/jaegertracing/jaeger/proto-gen/storage_v1" "github.com/jaegertracing/jaeger/storage/dependencystore" "github.com/jaegertracing/jaeger/storage/spanstore" @@ -83,7 +86,7 @@ func NewGRPCHandlerWithPlugins( } // Register registers the server as gRPC methods handler. 
-func (s *GRPCHandler) Register(ss *grpc.Server) error { +func (s *GRPCHandler) Register(ss *grpc.Server, hs *health.Server) error { storage_v1.RegisterSpanReaderPluginServer(ss, s) storage_v1.RegisterSpanWriterPluginServer(ss, s) storage_v1.RegisterArchiveSpanReaderPluginServer(ss, s) @@ -91,6 +94,16 @@ func (s *GRPCHandler) Register(ss *grpc.Server) error { storage_v1.RegisterPluginCapabilitiesServer(ss, s) storage_v1.RegisterDependenciesReaderPluginServer(ss, s) storage_v1.RegisterStreamingSpanWriterPluginServer(ss, s) + + hs.SetServingStatus("jaeger.storage.v1.SpanReaderPlugin", grpc_health_v1.HealthCheckResponse_SERVING) + hs.SetServingStatus("jaeger.storage.v1.SpanWriterPlugin", grpc_health_v1.HealthCheckResponse_SERVING) + hs.SetServingStatus("jaeger.storage.v1.ArchiveSpanReaderPlugin", grpc_health_v1.HealthCheckResponse_SERVING) + hs.SetServingStatus("jaeger.storage.v1.ArchiveSpanWriterPlugin", grpc_health_v1.HealthCheckResponse_SERVING) + hs.SetServingStatus("jaeger.storage.v1.PluginCapabilities", grpc_health_v1.HealthCheckResponse_SERVING) + hs.SetServingStatus("jaeger.storage.v1.DependenciesReaderPlugin", grpc_health_v1.HealthCheckResponse_SERVING) + hs.SetServingStatus("jaeger.storage.v1.StreamingSpanWriterPlugin", grpc_health_v1.HealthCheckResponse_SERVING) + grpc_health_v1.RegisterHealthServer(ss, hs) + return nil } @@ -136,16 +149,15 @@ func (s *GRPCHandler) WriteSpan(ctx context.Context, r *storage_v1.WriteSpanRequ return &storage_v1.WriteSpanResponse{}, nil } -func (s *GRPCHandler) Close(ctx context.Context, r *storage_v1.CloseWriterRequest) (*storage_v1.CloseWriterResponse, error) { +func (s *GRPCHandler) Close(context.Context, *storage_v1.CloseWriterRequest) (*storage_v1.CloseWriterResponse, error) { if closer, ok := s.impl.SpanWriter().(io.Closer); ok { if err := closer.Close(); err != nil { return nil, err } return &storage_v1.CloseWriterResponse{}, nil - } else { - return nil, status.Error(codes.Unimplemented, "span writer does not support graceful shutdown") } + return nil, status.Error(codes.Unimplemented, "span writer does not support graceful shutdown") } // GetTrace takes a traceID and streams a Trace associated with that traceID @@ -167,7 +179,7 @@ func (s *GRPCHandler) GetTrace(r *storage_v1.GetTraceRequest, stream storage_v1. 
} // GetServices returns a list of all known services -func (s *GRPCHandler) GetServices(ctx context.Context, r *storage_v1.GetServicesRequest) (*storage_v1.GetServicesResponse, error) { +func (s *GRPCHandler) GetServices(ctx context.Context, _ *storage_v1.GetServicesRequest) (*storage_v1.GetServicesResponse, error) { services, err := s.impl.SpanReader().GetServices(ctx) if err != nil { return nil, err @@ -247,7 +259,7 @@ func (s *GRPCHandler) FindTraceIDs(ctx context.Context, r *storage_v1.FindTraceI }, nil } -func (s *GRPCHandler) sendSpans(spans []*model.Span, sendFn func(*storage_v1.SpansResponseChunk) error) error { +func (*GRPCHandler) sendSpans(spans []*model.Span, sendFn func(*storage_v1.SpansResponseChunk) error) error { chunk := make([]model.Span, 0, len(spans)) for i := 0; i < len(spans); i += spanBatchSize { chunk = chunk[:0] @@ -262,7 +274,7 @@ func (s *GRPCHandler) sendSpans(spans []*model.Span, sendFn func(*storage_v1.Spa return nil } -func (s *GRPCHandler) Capabilities(ctx context.Context, request *storage_v1.CapabilitiesRequest) (*storage_v1.CapabilitiesResponse, error) { +func (s *GRPCHandler) Capabilities(context.Context, *storage_v1.CapabilitiesRequest) (*storage_v1.CapabilitiesResponse, error) { return &storage_v1.CapabilitiesResponse{ ArchiveSpanReader: s.impl.ArchiveSpanReader() != nil, ArchiveSpanWriter: s.impl.ArchiveSpanWriter() != nil, diff --git a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/interface.go b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/interface.go index c6345a29b82..ac3e5275d3e 100644 --- a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/interface.go +++ b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/interface.go @@ -15,26 +15,10 @@ package shared import ( - "github.com/hashicorp/go-plugin" - "github.com/jaegertracing/jaeger/storage/dependencystore" "github.com/jaegertracing/jaeger/storage/spanstore" ) -// StoragePluginIdentifier is the identifier that is shared by plugin and host. -const StoragePluginIdentifier = "storage_plugin" - -// Handshake is a common handshake that is shared by plugin and host. -var Handshake = plugin.HandshakeConfig{ - MagicCookieKey: "STORAGE_PLUGIN", - MagicCookieValue: "jaeger", -} - -// PluginMap is the map of plugins we can dispense. -var PluginMap = map[string]plugin.Plugin{ - StoragePluginIdentifier: &StorageGRPCPlugin{}, -} - // StoragePlugin is the interface we're exposing as a plugin. type StoragePlugin interface { SpanReader() spanstore.Reader diff --git a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/plugin.go b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/plugin.go deleted file mode 100644 index b4a3fabf93e..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/plugin.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (c) 2020 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package shared - -import ( - "context" - - "github.com/hashicorp/go-plugin" - "google.golang.org/grpc" - - _ "github.com/jaegertracing/jaeger/pkg/gogocodec" // force gogo codec registration -) - -// Ensure plugin.GRPCPlugin API match. -var _ plugin.GRPCPlugin = (*StorageGRPCPlugin)(nil) - -// StorageGRPCPlugin is the implementation of plugin.GRPCPlugin. -type StorageGRPCPlugin struct { - plugin.Plugin - - // Concrete implementation, This is only used for plugins that are written in Go. - Impl StoragePlugin - ArchiveImpl ArchiveStoragePlugin - StreamImpl StreamingSpanWriterPlugin -} - -// RegisterHandlers registers the plugin with the server -func (p *StorageGRPCPlugin) RegisterHandlers(s *grpc.Server) error { - handler := NewGRPCHandlerWithPlugins(p.Impl, p.ArchiveImpl, p.StreamImpl) - return handler.Register(s) -} - -// GRPCServer implements plugin.GRPCPlugin. It is used by go-plugin to create a grpc plugin server. -func (p *StorageGRPCPlugin) GRPCServer(_ *plugin.GRPCBroker, s *grpc.Server) error { - return p.RegisterHandlers(s) -} - -// GRPCClient implements plugin.GRPCPlugin. It is used by go-plugin to create a grpc plugin client. -func (*StorageGRPCPlugin) GRPCClient(_ context.Context, _ *plugin.GRPCBroker, conn *grpc.ClientConn) (interface{}, error) { - return NewGRPCClient(conn), nil -} diff --git a/vendor/github.com/jaegertracing/jaeger/storage/factory.go b/vendor/github.com/jaegertracing/jaeger/storage/factory.go index b56e6fdc07f..462a626b64f 100644 --- a/vendor/github.com/jaegertracing/jaeger/storage/factory.go +++ b/vendor/github.com/jaegertracing/jaeger/storage/factory.go @@ -16,6 +16,7 @@ package storage import ( + "context" "errors" "go.uber.org/zap" @@ -53,7 +54,7 @@ type Factory interface { // Only meant to be used from integration tests. type Purger interface { // Purge removes all data from the storage. 
- Purge() error + Purge(context.Context) error } // SamplingStoreFactory defines an interface that is capable of returning the necessary backends for diff --git a/vendor/github.com/jaegertracing/jaeger/storage/spanstore/downsampling_writer.go b/vendor/github.com/jaegertracing/jaeger/storage/spanstore/downsampling_writer.go index dd354e4d080..b6463edac81 100644 --- a/vendor/github.com/jaegertracing/jaeger/storage/spanstore/downsampling_writer.go +++ b/vendor/github.com/jaegertracing/jaeger/storage/spanstore/downsampling_writer.go @@ -100,7 +100,7 @@ func NewSampler(ratio float64, hashSalt string) *Sampler { } hashSaltBytes := []byte(hashSalt) pool := &sync.Pool{ - New: func() interface{} { + New: func() any { buffer := make([]byte, len(hashSaltBytes)+traceIDByteSize) copy(buffer, hashSaltBytes) return &hasher{ diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s index 9a7655c0f76..0782b86e3d1 100644 --- a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s +++ b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s @@ -5,7 +5,6 @@ #include "textflag.h" // func matchLen(a []byte, b []byte) int -// Requires: BMI TEXT ·matchLen(SB), NOSPLIT, $0-56 MOVQ a_base+0(FP), AX MOVQ b_base+24(FP), CX @@ -17,17 +16,16 @@ TEXT ·matchLen(SB), NOSPLIT, $0-56 JB matchlen_match4_standalone matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - TESTQ BX, BX - JZ matchlen_loop_standalone + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + JZ matchlen_loop_standalone #ifdef GOAMD64_v3 TZCNTQ BX, BX #else BSFQ BX, BX #endif - SARQ $0x03, BX + SHRL $0x03, BX LEAL (SI)(BX*1), SI JMP gen_match_len_end diff --git a/vendor/github.com/klauspost/compress/s2/decode_arm64.s b/vendor/github.com/klauspost/compress/s2/decode_arm64.s index 4b63d5086a9..78e463f342b 100644 --- a/vendor/github.com/klauspost/compress/s2/decode_arm64.s +++ b/vendor/github.com/klauspost/compress/s2/decode_arm64.s @@ -60,7 +60,7 @@ // // The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST. // The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC. -TEXT ·s2Decode(SB), NOSPLIT, $56-64 +TEXT ·s2Decode(SB), NOSPLIT, $56-56 // Initialize R_SRC, R_DST and R_DBASE-R_SEND. MOVD dst_base+0(FP), R_DBASE MOVD dst_len+8(FP), R_DLEN diff --git a/vendor/github.com/klauspost/compress/s2/index.go b/vendor/github.com/klauspost/compress/s2/index.go index 18a4f7acd6b..4229957b96e 100644 --- a/vendor/github.com/klauspost/compress/s2/index.go +++ b/vendor/github.com/klauspost/compress/s2/index.go @@ -17,6 +17,8 @@ const ( S2IndexHeader = "s2idx\x00" S2IndexTrailer = "\x00xdi2s" maxIndexEntries = 1 << 16 + // If distance is less than this, we do not add the entry. + minIndexDist = 1 << 20 ) // Index represents an S2/Snappy index. @@ -72,6 +74,10 @@ func (i *Index) add(compressedOffset, uncompressedOffset int64) error { if latest.compressedOffset > compressedOffset { return fmt.Errorf("internal error: Earlier compressed received (%d > %d)", latest.uncompressedOffset, uncompressedOffset) } + if latest.uncompressedOffset+minIndexDist > uncompressedOffset { + // Only add entry if distance is large enough. 
+ return nil + } } i.info = append(i.info, struct { compressedOffset int64 @@ -122,7 +128,7 @@ func (i *Index) Find(offset int64) (compressedOff, uncompressedOff int64, err er // reduce to stay below maxIndexEntries func (i *Index) reduce() { - if len(i.info) < maxIndexEntries && i.estBlockUncomp >= 1<<20 { + if len(i.info) < maxIndexEntries && i.estBlockUncomp >= minIndexDist { return } @@ -132,7 +138,7 @@ func (i *Index) reduce() { j := 0 // Each block should be at least 1MB, but don't reduce below 1000 entries. - for i.estBlockUncomp*(int64(removeN)+1) < 1<<20 && len(i.info)/(removeN+1) > 1000 { + for i.estBlockUncomp*(int64(removeN)+1) < minIndexDist && len(i.info)/(removeN+1) > 1000 { removeN++ } for idx := 0; idx < len(src); idx++ { diff --git a/vendor/github.com/klauspost/compress/s2/s2.go b/vendor/github.com/klauspost/compress/s2/s2.go index 72bcb494531..cbd1ed64d69 100644 --- a/vendor/github.com/klauspost/compress/s2/s2.go +++ b/vendor/github.com/klauspost/compress/s2/s2.go @@ -109,7 +109,11 @@ const ( chunkTypeStreamIdentifier = 0xff ) -var crcTable = crc32.MakeTable(crc32.Castagnoli) +var ( + crcTable = crc32.MakeTable(crc32.Castagnoli) + magicChunkSnappyBytes = []byte(magicChunkSnappy) // Can be passed to functions where it escapes. + magicChunkBytes = []byte(magicChunk) // Can be passed to functions where it escapes. +) // crc implements the checksum specified in section 3 of // https://github.com/google/snappy/blob/master/framing_format.txt diff --git a/vendor/github.com/klauspost/compress/s2/writer.go b/vendor/github.com/klauspost/compress/s2/writer.go index 637c931474f..0a46f2b984f 100644 --- a/vendor/github.com/klauspost/compress/s2/writer.go +++ b/vendor/github.com/klauspost/compress/s2/writer.go @@ -239,6 +239,9 @@ func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) { } } if n2 == 0 { + if cap(inbuf) >= w.obufLen { + w.buffers.Put(inbuf) + } break } n += int64(n2) @@ -314,9 +317,9 @@ func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) { hWriter := make(chan result) w.output <- hWriter if w.snappy { - hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)} + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes} } else { - hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)} + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes} } } @@ -370,9 +373,9 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) { hWriter := make(chan result) w.output <- hWriter if w.snappy { - hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)} + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes} } else { - hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)} + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes} } } @@ -478,9 +481,9 @@ func (w *Writer) write(p []byte) (nRet int, errRet error) { hWriter := make(chan result) w.output <- hWriter if w.snappy { - hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)} + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes} } else { - hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)} + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes} } } @@ -560,6 +563,9 @@ func (w *Writer) writeFull(inbuf []byte) (errRet error) { if w.concurrency == 1 { _, err := w.writeSync(inbuf[obufHeaderLen:]) + if cap(inbuf) >= w.obufLen { + w.buffers.Put(inbuf) + } return err } @@ -569,9 +575,9 @@ func (w *Writer) 
writeFull(inbuf []byte) (errRet error) { hWriter := make(chan result) w.output <- hWriter if w.snappy { - hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)} + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes} } else { - hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)} + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes} } } @@ -637,9 +643,9 @@ func (w *Writer) writeSync(p []byte) (nRet int, errRet error) { var n int var err error if w.snappy { - n, err = w.writer.Write([]byte(magicChunkSnappy)) + n, err = w.writer.Write(magicChunkSnappyBytes) } else { - n, err = w.writer.Write([]byte(magicChunk)) + n, err = w.writer.Write(magicChunkBytes) } if err != nil { return 0, w.err(err) diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index 8d5567fe64c..b7b83164bc7 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -273,6 +273,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { enc.Encode(&block, b) addValues(&remain, block.literals) litTotal += len(block.literals) + if len(block.sequences) == 0 { + continue + } seqs += len(block.sequences) block.genCodes() addHist(&ll, block.coders.llEnc.Histogram()) @@ -286,6 +289,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { if offset == 0 { continue } + if int(offset) >= len(o.History) { + continue + } if offset > 3 { newOffsets[offset-3]++ } else { @@ -336,6 +342,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { if seqs/nUsed < 512 { // Use 512 as minimum. nUsed = seqs / 512 + if nUsed == 0 { + nUsed = 1 + } } copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) { hist := dst.Histogram() @@ -358,6 +367,28 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { fakeLength += v hist[i] = uint32(v) } + + // Ensure we aren't trying to represent RLE. + if maxCount == fakeLength { + for i := range hist { + if uint8(i) == maxSym { + fakeLength++ + maxSym++ + hist[i+1] = 1 + if maxSym > 1 { + break + } + } + if hist[0] == 0 { + fakeLength++ + hist[i] = 1 + if maxSym > 1 { + break + } + } + } + } + dst.HistogramFinished(maxSym, maxCount) dst.reUsed = false dst.useRLE = false diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s index 17901e08040..ae7d4d3295a 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -162,12 +162,12 @@ finalize: MOVD h, ret+24(FP) RET -// func writeBlocks(d *Digest, b []byte) int +// func writeBlocks(s *Digest, b []byte) int TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 LDP ·primes+0(SB), (prime1, prime2) // Load state. Assume v[1-4] are stored contiguously. 
- MOVD d+0(FP), digest + MOVD s+0(FP), digest LDP 0(digest), (v1, v2) LDP 16(digest), (v3, v4) diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s index 9a7655c0f76..0782b86e3d1 100644 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s @@ -5,7 +5,6 @@ #include "textflag.h" // func matchLen(a []byte, b []byte) int -// Requires: BMI TEXT ·matchLen(SB), NOSPLIT, $0-56 MOVQ a_base+0(FP), AX MOVQ b_base+24(FP), CX @@ -17,17 +16,16 @@ TEXT ·matchLen(SB), NOSPLIT, $0-56 JB matchlen_match4_standalone matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - TESTQ BX, BX - JZ matchlen_loop_standalone + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + JZ matchlen_loop_standalone #ifdef GOAMD64_v3 TZCNTQ BX, BX #else BSFQ BX, BX #endif - SARQ $0x03, BX + SHRL $0x03, BX LEAL (SI)(BX*1), SI JMP gen_match_len_end diff --git a/vendor/github.com/mostynb/go-grpc-compression/internal/zstd/zstd.go b/vendor/github.com/mostynb/go-grpc-compression/internal/zstd/zstd.go new file mode 100644 index 00000000000..66e5091136a --- /dev/null +++ b/vendor/github.com/mostynb/go-grpc-compression/internal/zstd/zstd.go @@ -0,0 +1,146 @@ +// Copyright 2020 Mostyn Bramley-Moore. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package zstd is a wrapper for using github.com/klauspost/compress/zstd +// with gRPC. +package zstd + +import ( + "bytes" + "errors" + "io" + "runtime" + "sync" + + "github.com/klauspost/compress/zstd" + "google.golang.org/grpc/encoding" +) + +const Name = "zstd" + +var encoderOptions = []zstd.EOption{ + // The default zstd window size is 8MB, which is much larger than the + // typical RPC message and wastes a bunch of memory. + zstd.WithWindowSize(512 * 1024), +} + +var decoderOptions = []zstd.DOption{ + // If the decoder concurrency level is not 1, we would need to call + // Close() to avoid leaking resources when the object is released + // from compressor.decoderPool. + zstd.WithDecoderConcurrency(1), +} + +// We will set a finalizer on these objects, so when the go-grpc code is +// finished with them, they will be added back to compressor.decoderPool. +type decoderWrapper struct { + *zstd.Decoder +} + +type compressor struct { + encoder *zstd.Encoder + decoderPool sync.Pool // To hold *zstd.Decoder's. +} + +func PretendInit(clobbering bool) { + if !clobbering && encoding.GetCompressor(Name) != nil { + return + } + + enc, _ := zstd.NewWriter(nil, encoderOptions...) + c := &compressor{ + encoder: enc, + } + encoding.RegisterCompressor(c) +} + +var ErrNotInUse = errors.New("SetLevel ineffective because another zstd compressor has been registered") + +// SetLevel updates the registered compressor to use a particular compression +// level. NOTE: this function must only be called from an init function, and +// is not threadsafe. 
+func SetLevel(level zstd.EncoderLevel) error { + c, ok := encoding.GetCompressor(Name).(*compressor) + if !ok { + return ErrNotInUse + } + + enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(level)) + if err != nil { + return err + } + + c.encoder = enc + return nil +} + +func (c *compressor) Compress(w io.Writer) (io.WriteCloser, error) { + return &zstdWriteCloser{ + enc: c.encoder, + writer: w, + }, nil +} + +type zstdWriteCloser struct { + enc *zstd.Encoder + writer io.Writer // Compressed data will be written here. + buf bytes.Buffer // Buffer uncompressed data here, compress on Close. +} + +func (z *zstdWriteCloser) Write(p []byte) (int, error) { + return z.buf.Write(p) +} + +func (z *zstdWriteCloser) Close() error { + compressed := z.enc.EncodeAll(z.buf.Bytes(), nil) + _, err := io.Copy(z.writer, bytes.NewReader(compressed)) + return err +} + +func (c *compressor) Decompress(r io.Reader) (io.Reader, error) { + var err error + var found bool + var decoder *zstd.Decoder + + // Note: avoid the use of zstd.Decoder.DecodeAll here, since + // malicious payloads could DoS us with a decompression bomb. + + decoder, found = c.decoderPool.Get().(*zstd.Decoder) + if !found { + decoder, err = zstd.NewReader(r, decoderOptions...) + if err != nil { + return nil, err + } + } else { + err = decoder.Reset(r) + if err != nil { + c.decoderPool.Put(decoder) + return nil, err + } + } + + wrapper := &decoderWrapper{Decoder: decoder} + runtime.SetFinalizer(wrapper, func(dw *decoderWrapper) { + err := dw.Reset(nil) + if err == nil { + c.decoderPool.Put(dw.Decoder) + } + }) + + return wrapper, nil +} + +func (c *compressor) Name() string { + return Name +} diff --git a/vendor/github.com/mostynb/go-grpc-compression/nonclobbering/zstd/zstd.go b/vendor/github.com/mostynb/go-grpc-compression/nonclobbering/zstd/zstd.go new file mode 100644 index 00000000000..18b94d93fb0 --- /dev/null +++ b/vendor/github.com/mostynb/go-grpc-compression/nonclobbering/zstd/zstd.go @@ -0,0 +1,52 @@ +// Copyright 2023 Mostyn Bramley-Moore. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package github.com/mostynb/go-grpc-compression/nonclobbering/zstd is a +// wrapper for using github.com/klauspost/compress/zstd with gRPC. +// +// If you import this package, it will only register itself as the encoder +// for the "zstd" compressor if no other compressors have already been +// registered with that name. +// +// If you do want to override previously registered "zstd" compressors, +// then you should instead import +// github.com/mostynb/go-grpc-compression/zstd +package zstd + +import ( + internalzstd "github.com/mostynb/go-grpc-compression/internal/zstd" + + "github.com/klauspost/compress/zstd" +) + +const Name = internalzstd.Name + +func init() { + clobbering := false + internalzstd.PretendInit(clobbering) +} + +var ErrNotInUse = internalzstd.ErrNotInUse + +// SetLevel updates the registered compressor to use a particular compression +// level. 
Returns ErrNotInUse if this module isn't registered (because it has +// been overridden by another encoder with the same name), or any error +// returned by zstd.NewWriter(nil, zstd.WithEncoderLevel(level). +// +// NOTE: this function is not threadsafe and must only be called from an init +// function or from the main goroutine before any other goroutines have been +// created. +func SetLevel(level zstd.EncoderLevel) error { + return internalzstd.SetLevel(level) +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/README.md index d5ac9644199..02137a86209 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/README.md @@ -68,6 +68,7 @@ The following settings can be optionally configured: - `password`: The Kerberos password used for authenticate with KDC - `config_file`: Path to Kerberos configuration. i.e /etc/krb5.conf - `keytab_file`: Path to keytab file. i.e /etc/security/kafka.keytab + - `disable_fast_negotiation`: Disable PA-FX-FAST negotiation (Pre-Authentication Framework - Fast). Some common Kerberos implementations do not support PA-FX-FAST negotiation. This is set to `false` by default. - `metadata` - `full` (default = true): Whether to maintain a full set of metadata. When disabled, the client does not make the initial request to broker at the diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/factory.go index d990a17dab8..cc4fb3d5502 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/factory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/factory.go @@ -126,7 +126,7 @@ type kafkaExporterFactory struct { func (f *kafkaExporterFactory) createTracesExporter( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, ) (exporter.Traces, error) { oCfg := *(cfg.(*Config)) // Clone the config @@ -157,7 +157,7 @@ func (f *kafkaExporterFactory) createTracesExporter( func (f *kafkaExporterFactory) createMetricsExporter( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, ) (exporter.Metrics, error) { oCfg := *(cfg.(*Config)) // Clone the config @@ -188,7 +188,7 @@ func (f *kafkaExporterFactory) createMetricsExporter( func (f *kafkaExporterFactory) createLogsExporter( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, ) (exporter.Logs, error) { oCfg := *(cfg.(*Config)) // Clone the config diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/kafka_exporter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/kafka_exporter.go index 59fc26e647b..f15b8b046eb 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/kafka_exporter.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/kafka_exporter.go @@ -204,7 +204,7 @@ func newSaramaProducer(config Config) (sarama.SyncProducer, error) { return producer, nil } -func newMetricsExporter(config 
Config, set exporter.CreateSettings, marshalers map[string]MetricsMarshaler) (*kafkaMetricsProducer, error) { +func newMetricsExporter(config Config, set exporter.Settings, marshalers map[string]MetricsMarshaler) (*kafkaMetricsProducer, error) { marshaler := marshalers[config.Encoding] if marshaler == nil { return nil, errUnrecognizedEncoding @@ -224,7 +224,7 @@ func newMetricsExporter(config Config, set exporter.CreateSettings, marshalers m } // newTracesExporter creates Kafka exporter. -func newTracesExporter(config Config, set exporter.CreateSettings, marshalers map[string]TracesMarshaler) (*kafkaTracesProducer, error) { +func newTracesExporter(config Config, set exporter.Settings, marshalers map[string]TracesMarshaler) (*kafkaTracesProducer, error) { marshaler := marshalers[config.Encoding] if marshaler == nil { return nil, errUnrecognizedEncoding @@ -242,7 +242,7 @@ func newTracesExporter(config Config, set exporter.CreateSettings, marshalers ma }, nil } -func newLogsExporter(config Config, set exporter.CreateSettings, marshalers map[string]LogsMarshaler) (*kafkaLogsProducer, error) { +func newLogsExporter(config Config, set exporter.Settings, marshalers map[string]LogsMarshaler) (*kafkaLogsProducer, error) { marshaler := marshalers[config.Encoding] if marshaler == nil { return nil, errUnrecognizedEncoding diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/localhostgate/featuregate.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/localhostgate/featuregate.go index 238d909f0b6..9d74c396e85 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/localhostgate/featuregate.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/localhostgate/featuregate.go @@ -23,7 +23,7 @@ const UseLocalHostAsDefaultHostID = "component.UseLocalHostAsDefaultHost" var UseLocalHostAsDefaultHostfeatureGate = mustRegisterOrLoad( featuregate.GlobalRegistry(), UseLocalHostAsDefaultHostID, - featuregate.StageAlpha, + featuregate.StageBeta, featuregate.WithRegisterDescription("controls whether server-like receivers and extensions such as the OTLP receiver use localhost as the default host for their endpoints"), ) @@ -60,8 +60,8 @@ func EndpointForPort(port int) string { // LogAboutUseLocalHostAsDefault logs about the upcoming change from 0.0.0.0 to localhost on server-like components. func LogAboutUseLocalHostAsDefault(logger *zap.Logger) { if !UseLocalHostAsDefaultHostfeatureGate.IsEnabled() { - logger.Warn( - "The default endpoints for all servers in components will change to use localhost instead of 0.0.0.0 in a future version. Use the feature gate to preview the new default.", + logger.Info( + "The default endpoints for all servers in components have changed to use localhost instead of 0.0.0.0. 
Use the feature gate to temporarily revert to the previous default.",
 			zap.String("feature gate ID", UseLocalHostAsDefaultHostID),
 		)
 	}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils/uri.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils/uri.go
new file mode 100644
index 00000000000..048e90e3838
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils/uri.go
@@ -0,0 +1,164 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package parseutils // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils"
+
+import (
+	"net/url"
+	"strconv"
+	"strings"
+
+	semconv "go.opentelemetry.io/collector/semconv/v1.25.0"
+)
+
+const (
+	// replace once the semantic conventions include these
+	AttributeURLUserInfo = "url.user_info"
+	AttributeURLUsername = "url.username"
+	AttributeURLPassword = "url.password"
+)
+
+// ParseURI takes an absolute or relative URI and returns the parsed values.
+func ParseURI(value string, semconvCompliant bool) (map[string]any, error) {
+	m := make(map[string]any)
+
+	if strings.HasPrefix(value, "?") {
+		// remove the query string '?' prefix before parsing
+		v, err := url.ParseQuery(value[1:])
+		if err != nil {
+			return nil, err
+		}
+		return queryToMap(v, m), nil
+	}
+
+	var x *url.URL
+	var err error
+	var mappingFn func(*url.URL, map[string]any) (map[string]any, error)
+
+	if semconvCompliant {
+		mappingFn = urlToSemconvMap
+		x, err = url.Parse(value)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		x, err = url.ParseRequestURI(value)
+		if err != nil {
+			return nil, err
+		}
+
+		mappingFn = urlToMap
+	}
+	return mappingFn(x, m)
+}
+
+// urlToSemconvMap converts a url.URL to a map keyed by semantic convention
+// attributes, excluding any values that are not set.
+func urlToSemconvMap(parsedURI *url.URL, m map[string]any) (map[string]any, error) {
+	m[semconv.AttributeURLOriginal] = parsedURI.String()
+	m[semconv.AttributeURLDomain] = parsedURI.Hostname()
+	m[semconv.AttributeURLScheme] = parsedURI.Scheme
+	m[semconv.AttributeURLPath] = parsedURI.Path
+
+	if portString := parsedURI.Port(); len(portString) > 0 {
+		port, err := strconv.Atoi(portString)
+		if err != nil {
+			return nil, err
+		}
+		m[semconv.AttributeURLPort] = port
+	}
+
+	if fragment := parsedURI.Fragment; len(fragment) > 0 {
+		m[semconv.AttributeURLFragment] = fragment
+	}
+
+	if parsedURI.User != nil {
+		m[AttributeURLUserInfo] = parsedURI.User.String()
+
+		if username := parsedURI.User.Username(); len(username) > 0 {
+			m[AttributeURLUsername] = username
+		}
+
+		if pwd, isSet := parsedURI.User.Password(); isSet {
+			m[AttributeURLPassword] = pwd
+		}
+	}
+
+	if query := parsedURI.RawQuery; len(query) > 0 {
+		m[semconv.AttributeURLQuery] = query
+	}
+
+	if periodIdx := strings.LastIndex(parsedURI.Path, "."); periodIdx != -1 {
+		if periodIdx < len(parsedURI.Path)-1 {
+			m[semconv.AttributeURLExtension] = parsedURI.Path[periodIdx+1:]
+		}
+	}
+
+	return m, nil
+}
+
+// urlToMap converts a url.URL to a map, excluding any values that are not set.
+func urlToMap(p *url.URL, m map[string]any) (map[string]any, error) {
+	scheme := p.Scheme
+	if scheme != "" {
+		m["scheme"] = scheme
+	}
+
+	user := p.User.Username()
+	if user != "" {
+		m["user"] = user
+	}
+
+	host := p.Hostname()
+	if host != "" {
+		m["host"] = host
+	}
+
+	port := p.Port()
+	if port != "" {
+		m["port"] = port
+	}
+
+	path := p.EscapedPath()
+	if path != "" {
+		m["path"] = path
+	}
+
+	return queryToMap(p.Query(), m), nil
+}
+
+// queryToMap converts a query string url.Values to a map.
+func queryToMap(query url.Values, m map[string]any) map[string]any {
+	// no-op if query is empty, do not create the key m["query"]
+	if len(query) == 0 {
+		return m
+	}
+
+	/* 'parameters' will represent url.Values
+	map[string]any{
+		"parameter-a": []any{
+			"a",
+			"b",
+		},
+		"parameter-b": []any{
+			"x",
+			"y",
+		},
+	}
+	*/
+	parameters := map[string]any{}
+	for param, values := range query {
+		parameters[param] = queryParamValuesToMap(values)
+	}
+	m["query"] = parameters
+	return m
+}
+
+// queryParamValuesToMap takes query string parameter values and
+// returns a []any populated with the values
+func queryParamValuesToMap(values []string) []any {
+	v := make([]any, len(values))
+	for i, value := range values {
+		v[i] = value
+	}
+	return v
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/authentication.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/authentication.go
index 104d6152a1b..d6e48b4bc95 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/authentication.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/authentication.go
@@ -52,15 +52,16 @@ type AWSMSKConfig struct {
 	BrokerAddr string `mapstructure:"broker_addr"`
 }
 
-// KerberosConfig defines kereros configuration.
+// KerberosConfig defines kerberos configuration.
 type KerberosConfig struct {
-	ServiceName string `mapstructure:"service_name"`
-	Realm       string `mapstructure:"realm"`
-	UseKeyTab   bool   `mapstructure:"use_keytab"`
-	Username    string `mapstructure:"username"`
-	Password    string `mapstructure:"password" json:"-"`
-	ConfigPath  string `mapstructure:"config_file"`
-	KeyTabPath  string `mapstructure:"keytab_file"`
+	ServiceName     string `mapstructure:"service_name"`
+	Realm           string `mapstructure:"realm"`
+	UseKeyTab       bool   `mapstructure:"use_keytab"`
+	Username        string `mapstructure:"username"`
+	Password        string `mapstructure:"password" json:"-"`
+	ConfigPath      string `mapstructure:"config_file"`
+	KeyTabPath      string `mapstructure:"keytab_file"`
+	DisablePAFXFAST bool   `mapstructure:"disable_fast_negotiation"`
 }
 
 // ConfigureAuthentication configures authentication in sarama.Config.
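Two of the additions above are easiest to follow with a concrete call. First, a minimal sketch of the new `parseutils.ParseURI` helper in semconv mode; the input URL is hypothetical, and the resulting keys follow directly from `urlToSemconvMap` above. Note that `parseutils` is an internal package of the contrib module, so this is illustrative rather than importable from outside that repository:

```go
package main

import (
	"fmt"

	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils"
)

func main() {
	// Hypothetical input URL.
	m, err := parseutils.ParseURI("https://user:pw@example.com:8443/report.pdf?k=v", true)
	if err != nil {
		panic(err)
	}
	fmt.Println(m)
	// Per urlToSemconvMap above, m contains:
	//   m["url.scheme"]    == "https"
	//   m["url.domain"]    == "example.com"
	//   m["url.port"]      == 8443 (int)
	//   m["url.path"]      == "/report.pdf"
	//   m["url.extension"] == "pdf"
	//   m["url.query"]     == "k=v"
	//   m["url.user_info"] == "user:pw" (plus url.username and url.password)
	// as well as m["url.original"], the full input string.
}
```

Second, the hunk below wires the new `DisablePAFXFAST` field into sarama's GSSAPI settings. A sketch of the resulting configuration, assuming this package's `Authentication` wrapper struct (not shown in this diff) and hypothetical Kerberos values:

```go
package main

import (
	"github.com/IBM/sarama"

	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka"
)

func main() {
	// Hypothetical Kerberos values; Authentication wraps KerberosConfig in
	// this package but does not appear in the hunks above.
	auth := kafka.Authentication{
		Kerberos: &kafka.KerberosConfig{
			ServiceName: "kafka",
			Realm:       "EXAMPLE.COM",
			UseKeyTab:   true,
			Username:    "otel",
			KeyTabPath:  "/etc/security/kafka.keytab",
			ConfigPath:  "/etc/krb5.conf",
			// Equivalent of `disable_fast_negotiation: true` in a collector
			// config: skip PA-FX-FAST for KDCs that do not support it.
			DisablePAFXFAST: true,
		},
	}
	saramaCfg := sarama.NewConfig()
	if err := kafka.ConfigureAuthentication(auth, saramaCfg); err != nil {
		panic(err)
	}
	// saramaCfg.Net.SASL.GSSAPI.DisablePAFXFAST is now true, as wired in the
	// hunk below.
}
```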
@@ -159,4 +160,5 @@ func configureKerberos(config KerberosConfig, saramaConfig *sarama.Config) {
 	saramaConfig.Net.SASL.GSSAPI.Username = config.Username
 	saramaConfig.Net.SASL.GSSAPI.Realm = config.Realm
 	saramaConfig.Net.SASL.GSSAPI.ServiceName = config.ServiceName
+	saramaConfig.Net.SASL.GSSAPI.DisablePAFXFAST = config.DisablePAFXFAST
 }
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/README.md
index f94995852db..7625b699098 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/README.md
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/README.md
@@ -22,13 +22,12 @@ named under the `protocols` object for the jaeger receiver to start.
 The below protocols are supported, each supports an optional `endpoint`
 object configuration parameter.
 
-- `grpc` (default `endpoint` = 0.0.0.0:14250)
-- `thrift_binary` (default `endpoint` = 0.0.0.0:6832)
-- `thrift_compact` (default `endpoint` = 0.0.0.0:6831)
-- `thrift_http` (default `endpoint` = 0.0.0.0:14268)
+- `grpc` (default `endpoint` = localhost:14250)
+- `thrift_binary` (default `endpoint` = localhost:6832)
+- `thrift_compact` (default `endpoint` = localhost:6831)
+- `thrift_http` (default `endpoint` = localhost:14268)
 
-The `component.UseLocalHostAsDefaultHost` feature gate changes these endpoints to localhost:14250, localhost:6832,
-localhost:6831 and localhost:14268. This will become the default in a future release.
+You can temporarily disable the `component.UseLocalHostAsDefaultHost` feature gate to change these endpoints to 0.0.0.0:14250, 0.0.0.0:6832, 0.0.0.0:6831 and 0.0.0.0:14268. This feature gate will be removed in a future release.
 
 Examples:
 
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/factory.go
index 8009ed1b2d9..28b1769dfd7 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/factory.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/factory.go
@@ -87,7 +87,7 @@ func createDefaultConfig() component.Config {
 
 // createTracesReceiver creates a trace receiver based on provided config.
func createTracesReceiver( _ context.Context, - set receiver.CreateSettings, + set receiver.Settings, cfg component.Config, nextConsumer consumer.Traces, ) (receiver.Traces, error) { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/trace_receiver.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/trace_receiver.go index 764b55312dd..daffa793efe 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/trace_receiver.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/trace_receiver.go @@ -66,7 +66,7 @@ type jReceiver struct { goroutines sync.WaitGroup - settings receiver.CreateSettings + settings receiver.Settings grpcObsrecv *receiverhelper.ObsReport httpObsrecv *receiverhelper.ObsReport @@ -95,7 +95,7 @@ func newJaegerReceiver( id component.ID, config *configuration, nextConsumer consumer.Traces, - set receiver.CreateSettings, + set receiver.Settings, ) (*jReceiver, error) { grpcObsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ ReceiverID: id, diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/README.md index 6432ee25658..85affe6540b 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/README.md @@ -27,7 +27,8 @@ The following settings can be optionally configured: - `brokers` (default = localhost:9092): The list of kafka brokers - `resolve_canonical_bootstrap_servers_only` (default = false): Whether to resolve then reverse-lookup broker IPs during startup -- `topic` (default = otlp_spans for traces, otlp_metrics for metrics, otlp_logs for logs): The name of the kafka topic to read from +- `topic` (default = otlp_spans for traces, otlp_metrics for metrics, otlp_logs for logs): The name of the kafka topic to read from. + Only one telemetry type may be used for a given topic. - `encoding` (default = otlp_proto): The encoding of the payload received from kafka. Available encodings: - `otlp_proto`: the payload is deserialized to `ExportTraceServiceRequest`, `ExportLogsServiceRequest` or `ExportMetricsServiceRequest` respectively. - `jaeger_proto`: the payload is deserialized to a single Jaeger proto `Span`. @@ -71,6 +72,7 @@ The following settings can be optionally configured: - `password`: The Kerberos password used for authenticate with KDC - `config_file`: Path to Kerberos configuration. i.e /etc/krb5.conf - `keytab_file`: Path to keytab file. i.e /etc/security/kafka.keytab + - `disable_fast_negotiation`: Disable PA-FX-FAST negotiation (Pre-Authentication Framework - Fast). Some common Kerberos implementations do not support PA-FX-FAST negotiation. This is set to `false` by default. - `metadata` - `full` (default = true): Whether to maintain a full set of metadata. 
When disabled, the client does not make the initial request to broker at the diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/documentation.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/documentation.md new file mode 100644 index 00000000000..6fc7ddff888 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/documentation.md @@ -0,0 +1,71 @@ +[comment]: <> (Code generated by mdatagen. DO NOT EDIT.) + +# kafka + +## Internal Telemetry + +The following telemetry is emitted by this component. + +### kafka_receiver_current_offset + +Current message offset + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### kafka_receiver_messages + +Number of received messages + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | + +### kafka_receiver_offset_lag + +Current offset lag + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### kafka_receiver_partition_close + +Number of finished partitions + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | + +### kafka_receiver_partition_start + +Number of started partitions + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | + +### kafka_receiver_unmarshal_failed_log_records + +Number of log records failed to be unmarshaled + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | + +### kafka_receiver_unmarshal_failed_metric_points + +Number of metric points failed to be unmarshaled + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | + +### kafka_receiver_unmarshal_failed_spans + +Number of spans failed to be unmarshaled + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/factory.go index 7552f0c4653..777ba42b673 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/factory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/factory.go @@ -9,7 +9,6 @@ import ( "strings" "time" - "go.opencensus.io/stats/view" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/receiver" @@ -75,8 +74,6 @@ func withLogsUnmarshalers(logsUnmarshalers ...LogsUnmarshaler) FactoryOption { // NewFactory creates Kafka receiver factory. func NewFactory(options ...FactoryOption) receiver.Factory { - _ = view.Register(metricViews()...) 
- f := &kafkaReceiverFactory{ tracesUnmarshalers: map[string]TracesUnmarshaler{}, metricsUnmarshalers: map[string]MetricsUnmarshaler{}, @@ -130,7 +127,7 @@ type kafkaReceiverFactory struct { func (f *kafkaReceiverFactory) createTracesReceiver( _ context.Context, - set receiver.CreateSettings, + set receiver.Settings, cfg component.Config, nextConsumer consumer.Traces, ) (receiver.Traces, error) { @@ -156,7 +153,7 @@ func (f *kafkaReceiverFactory) createTracesReceiver( func (f *kafkaReceiverFactory) createMetricsReceiver( _ context.Context, - set receiver.CreateSettings, + set receiver.Settings, cfg component.Config, nextConsumer consumer.Metrics, ) (receiver.Metrics, error) { @@ -182,7 +179,7 @@ func (f *kafkaReceiverFactory) createMetricsReceiver( func (f *kafkaReceiverFactory) createLogsReceiver( _ context.Context, - set receiver.CreateSettings, + set receiver.Settings, cfg component.Config, nextConsumer consumer.Logs, ) (receiver.Logs, error) { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata/generated_telemetry.go index d60cbac9b5b..81cc959523d 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata/generated_telemetry.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata/generated_telemetry.go @@ -3,9 +3,14 @@ package metadata import ( - "go.opentelemetry.io/collector/component" + "errors" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configtelemetry" ) func Meter(settings component.TelemetrySettings) metric.Meter { @@ -15,3 +20,92 @@ func Meter(settings component.TelemetrySettings) metric.Meter { func Tracer(settings component.TelemetrySettings) trace.Tracer { return settings.TracerProvider.Tracer("otelcol/kafkareceiver") } + +// TelemetryBuilder provides an interface for components to report telemetry +// as defined in metadata and user config. +type TelemetryBuilder struct { + meter metric.Meter + KafkaReceiverCurrentOffset metric.Int64Gauge + KafkaReceiverMessages metric.Int64Counter + KafkaReceiverOffsetLag metric.Int64Gauge + KafkaReceiverPartitionClose metric.Int64Counter + KafkaReceiverPartitionStart metric.Int64Counter + KafkaReceiverUnmarshalFailedLogRecords metric.Int64Counter + KafkaReceiverUnmarshalFailedMetricPoints metric.Int64Counter + KafkaReceiverUnmarshalFailedSpans metric.Int64Counter + level configtelemetry.Level +} + +// telemetryBuilderOption applies changes to default builder. +type telemetryBuilderOption func(*TelemetryBuilder) + +// WithLevel sets the current telemetry level for the component. 
+func WithLevel(lvl configtelemetry.Level) telemetryBuilderOption { + return func(builder *TelemetryBuilder) { + builder.level = lvl + } +} + +// NewTelemetryBuilder provides a struct with methods to update all internal telemetry +// for a component +func NewTelemetryBuilder(settings component.TelemetrySettings, options ...telemetryBuilderOption) (*TelemetryBuilder, error) { + builder := TelemetryBuilder{level: configtelemetry.LevelBasic} + for _, op := range options { + op(&builder) + } + var err, errs error + if builder.level >= configtelemetry.LevelBasic { + builder.meter = Meter(settings) + } else { + builder.meter = noop.Meter{} + } + builder.KafkaReceiverCurrentOffset, err = builder.meter.Int64Gauge( + "kafka_receiver_current_offset", + metric.WithDescription("Current message offset"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.KafkaReceiverMessages, err = builder.meter.Int64Counter( + "kafka_receiver_messages", + metric.WithDescription("Number of received messages"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.KafkaReceiverOffsetLag, err = builder.meter.Int64Gauge( + "kafka_receiver_offset_lag", + metric.WithDescription("Current offset lag"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.KafkaReceiverPartitionClose, err = builder.meter.Int64Counter( + "kafka_receiver_partition_close", + metric.WithDescription("Number of finished partitions"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.KafkaReceiverPartitionStart, err = builder.meter.Int64Counter( + "kafka_receiver_partition_start", + metric.WithDescription("Number of started partitions"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.KafkaReceiverUnmarshalFailedLogRecords, err = builder.meter.Int64Counter( + "kafka_receiver_unmarshal_failed_log_records", + metric.WithDescription("Number of log records failed to be unmarshaled"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.KafkaReceiverUnmarshalFailedMetricPoints, err = builder.meter.Int64Counter( + "kafka_receiver_unmarshal_failed_metric_points", + metric.WithDescription("Number of metric points failed to be unmarshaled"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.KafkaReceiverUnmarshalFailedSpans, err = builder.meter.Int64Counter( + "kafka_receiver_unmarshal_failed_spans", + metric.WithDescription("Number of spans failed to be unmarshaled"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + return &builder, errs +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/kafka_receiver.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/kafka_receiver.go index 2b5b2954918..bb6da26c79c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/kafka_receiver.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/kafka_receiver.go @@ -11,19 +11,23 @@ import ( "sync" "github.com/IBM/sarama" - "go.opencensus.io/stats" - "go.opencensus.io/tag" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/receiverhelper" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka" + 
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata" ) const ( transport = "kafka" + // TODO: update the following attributes to reflect semconv + attrInstanceName = "name" + attrPartition = "partition" ) var errInvalidInitialOffset = fmt.Errorf("invalid initial offset") @@ -37,7 +41,8 @@ type kafkaTracesConsumer struct { cancelConsumeLoop context.CancelFunc unmarshaler TracesUnmarshaler - settings receiver.CreateSettings + settings receiver.Settings + telemetryBuilder *metadata.TelemetryBuilder autocommitEnabled bool messageMarking MessageMarking @@ -54,7 +59,8 @@ type kafkaMetricsConsumer struct { cancelConsumeLoop context.CancelFunc unmarshaler MetricsUnmarshaler - settings receiver.CreateSettings + settings receiver.Settings + telemetryBuilder *metadata.TelemetryBuilder autocommitEnabled bool messageMarking MessageMarking @@ -71,7 +77,8 @@ type kafkaLogsConsumer struct { cancelConsumeLoop context.CancelFunc unmarshaler LogsUnmarshaler - settings receiver.CreateSettings + settings receiver.Settings + telemetryBuilder *metadata.TelemetryBuilder autocommitEnabled bool messageMarking MessageMarking @@ -83,11 +90,16 @@ var _ receiver.Traces = (*kafkaTracesConsumer)(nil) var _ receiver.Metrics = (*kafkaMetricsConsumer)(nil) var _ receiver.Logs = (*kafkaLogsConsumer)(nil) -func newTracesReceiver(config Config, set receiver.CreateSettings, unmarshaler TracesUnmarshaler, nextConsumer consumer.Traces) (*kafkaTracesConsumer, error) { +func newTracesReceiver(config Config, set receiver.Settings, unmarshaler TracesUnmarshaler, nextConsumer consumer.Traces) (*kafkaTracesConsumer, error) { if unmarshaler == nil { return nil, errUnrecognizedEncoding } + telemetryBuilder, err := metadata.NewTelemetryBuilder(set.TelemetrySettings) + if err != nil { + return nil, err + } + return &kafkaTracesConsumer{ config: config, topics: []string{config.Topic}, @@ -98,6 +110,7 @@ func newTracesReceiver(config Config, set receiver.CreateSettings, unmarshaler T messageMarking: config.MessageMarking, headerExtraction: config.HeaderExtraction.ExtractHeaders, headers: config.HeaderExtraction.Headers, + telemetryBuilder: telemetryBuilder, }, nil } @@ -153,6 +166,7 @@ func (c *kafkaTracesConsumer) Start(_ context.Context, _ component.Host) error { autocommitEnabled: c.autocommitEnabled, messageMarking: c.messageMarking, headerExtractor: &nopHeaderExtractor{}, + telemetryBuilder: c.telemetryBuilder, } if c.headerExtraction { consumerGroup.headerExtractor = &headerExtractor{ @@ -196,11 +210,16 @@ func (c *kafkaTracesConsumer) Shutdown(context.Context) error { return c.consumerGroup.Close() } -func newMetricsReceiver(config Config, set receiver.CreateSettings, unmarshaler MetricsUnmarshaler, nextConsumer consumer.Metrics) (*kafkaMetricsConsumer, error) { +func newMetricsReceiver(config Config, set receiver.Settings, unmarshaler MetricsUnmarshaler, nextConsumer consumer.Metrics) (*kafkaMetricsConsumer, error) { if unmarshaler == nil { return nil, errUnrecognizedEncoding } + telemetryBuilder, err := metadata.NewTelemetryBuilder(set.TelemetrySettings) + if err != nil { + return nil, err + } + return &kafkaMetricsConsumer{ config: config, topics: []string{config.Topic}, @@ -211,6 +230,7 @@ func newMetricsReceiver(config Config, set receiver.CreateSettings, unmarshaler messageMarking: config.MessageMarking, headerExtraction: config.HeaderExtraction.ExtractHeaders, headers: config.HeaderExtraction.Headers, + telemetryBuilder: telemetryBuilder, }, nil } @@ -240,6 +260,7 @@ func (c 
*kafkaMetricsConsumer) Start(_ context.Context, _ component.Host) error autocommitEnabled: c.autocommitEnabled, messageMarking: c.messageMarking, headerExtractor: &nopHeaderExtractor{}, + telemetryBuilder: c.telemetryBuilder, } if c.headerExtraction { metricsConsumerGroup.headerExtractor = &headerExtractor{ @@ -283,11 +304,16 @@ func (c *kafkaMetricsConsumer) Shutdown(context.Context) error { return c.consumerGroup.Close() } -func newLogsReceiver(config Config, set receiver.CreateSettings, unmarshaler LogsUnmarshaler, nextConsumer consumer.Logs) (*kafkaLogsConsumer, error) { +func newLogsReceiver(config Config, set receiver.Settings, unmarshaler LogsUnmarshaler, nextConsumer consumer.Logs) (*kafkaLogsConsumer, error) { if unmarshaler == nil { return nil, errUnrecognizedEncoding } + telemetryBuilder, err := metadata.NewTelemetryBuilder(set.TelemetrySettings) + if err != nil { + return nil, err + } + return &kafkaLogsConsumer{ config: config, topics: []string{config.Topic}, @@ -298,6 +324,7 @@ func newLogsReceiver(config Config, set receiver.CreateSettings, unmarshaler Log messageMarking: config.MessageMarking, headerExtraction: config.HeaderExtraction.ExtractHeaders, headers: config.HeaderExtraction.Headers, + telemetryBuilder: telemetryBuilder, }, nil } @@ -327,6 +354,7 @@ func (c *kafkaLogsConsumer) Start(_ context.Context, _ component.Host) error { autocommitEnabled: c.autocommitEnabled, messageMarking: c.messageMarking, headerExtractor: &nopHeaderExtractor{}, + telemetryBuilder: c.telemetryBuilder, } if c.headerExtraction { logsConsumerGroup.headerExtractor = &headerExtractor{ @@ -379,7 +407,8 @@ type tracesConsumerGroupHandler struct { logger *zap.Logger - obsrecv *receiverhelper.ObsReport + obsrecv *receiverhelper.ObsReport + telemetryBuilder *metadata.TelemetryBuilder autocommitEnabled bool messageMarking MessageMarking @@ -395,7 +424,8 @@ type metricsConsumerGroupHandler struct { logger *zap.Logger - obsrecv *receiverhelper.ObsReport + obsrecv *receiverhelper.ObsReport + telemetryBuilder *metadata.TelemetryBuilder autocommitEnabled bool messageMarking MessageMarking @@ -411,7 +441,8 @@ type logsConsumerGroupHandler struct { logger *zap.Logger - obsrecv *receiverhelper.ObsReport + obsrecv *receiverhelper.ObsReport + telemetryBuilder *metadata.TelemetryBuilder autocommitEnabled bool messageMarking MessageMarking @@ -426,14 +457,12 @@ func (c *tracesConsumerGroupHandler) Setup(session sarama.ConsumerGroupSession) c.readyCloser.Do(func() { close(c.ready) }) - statsTags := []tag.Mutator{tag.Upsert(tagInstanceName, c.id.Name())} - _ = stats.RecordWithTags(session.Context(), statsTags, statPartitionStart.M(1)) + c.telemetryBuilder.KafkaReceiverPartitionStart.Add(session.Context(), 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.Name()))) return nil } func (c *tracesConsumerGroupHandler) Cleanup(session sarama.ConsumerGroupSession) error { - statsTags := []tag.Mutator{tag.Upsert(tagInstanceName, c.id.Name())} - _ = stats.RecordWithTags(session.Context(), statsTags, statPartitionClose.M(1)) + c.telemetryBuilder.KafkaReceiverPartitionClose.Add(session.Context(), 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.Name()))) return nil } @@ -457,22 +486,18 @@ func (c *tracesConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSe } ctx := c.obsrecv.StartTracesOp(session.Context()) - statsTags := []tag.Mutator{ - tag.Upsert(tagInstanceName, c.id.String()), - tag.Upsert(tagPartition, strconv.Itoa(int(claim.Partition()))), - } - _ = stats.RecordWithTags(ctx, 
statsTags, - statMessageCount.M(1), - statMessageOffset.M(message.Offset), - statMessageOffsetLag.M(claim.HighWaterMarkOffset()-message.Offset-1)) + attrs := attribute.NewSet( + attribute.String(attrInstanceName, c.id.String()), + attribute.String(attrPartition, strconv.Itoa(int(claim.Partition()))), + ) + c.telemetryBuilder.KafkaReceiverMessages.Add(ctx, 1, metric.WithAttributeSet(attrs)) + c.telemetryBuilder.KafkaReceiverCurrentOffset.Record(ctx, message.Offset, metric.WithAttributeSet(attrs)) + c.telemetryBuilder.KafkaReceiverOffsetLag.Record(ctx, claim.HighWaterMarkOffset()-message.Offset-1, metric.WithAttributeSet(attrs)) traces, err := c.unmarshaler.Unmarshal(message.Value) if err != nil { c.logger.Error("failed to unmarshal message", zap.Error(err)) - _ = stats.RecordWithTags( - ctx, - []tag.Mutator{tag.Upsert(tagInstanceName, c.id.String())}, - statUnmarshalFailedSpans.M(1)) + c.telemetryBuilder.KafkaReceiverUnmarshalFailedSpans.Add(session.Context(), 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.String()))) if c.messageMarking.After && c.messageMarking.OnError { session.MarkMessage(message, "") } @@ -509,14 +534,12 @@ func (c *metricsConsumerGroupHandler) Setup(session sarama.ConsumerGroupSession) c.readyCloser.Do(func() { close(c.ready) }) - statsTags := []tag.Mutator{tag.Upsert(tagInstanceName, c.id.Name())} - _ = stats.RecordWithTags(session.Context(), statsTags, statPartitionStart.M(1)) + c.telemetryBuilder.KafkaReceiverPartitionStart.Add(session.Context(), 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.Name()))) return nil } func (c *metricsConsumerGroupHandler) Cleanup(session sarama.ConsumerGroupSession) error { - statsTags := []tag.Mutator{tag.Upsert(tagInstanceName, c.id.Name())} - _ = stats.RecordWithTags(session.Context(), statsTags, statPartitionClose.M(1)) + c.telemetryBuilder.KafkaReceiverPartitionClose.Add(session.Context(), 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.Name()))) return nil } @@ -540,22 +563,18 @@ func (c *metricsConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupS } ctx := c.obsrecv.StartMetricsOp(session.Context()) - statsTags := []tag.Mutator{ - tag.Upsert(tagInstanceName, c.id.String()), - tag.Upsert(tagPartition, strconv.Itoa(int(claim.Partition()))), - } - _ = stats.RecordWithTags(ctx, statsTags, - statMessageCount.M(1), - statMessageOffset.M(message.Offset), - statMessageOffsetLag.M(claim.HighWaterMarkOffset()-message.Offset-1)) + attrs := attribute.NewSet( + attribute.String(attrInstanceName, c.id.String()), + attribute.String(attrPartition, strconv.Itoa(int(claim.Partition()))), + ) + c.telemetryBuilder.KafkaReceiverMessages.Add(ctx, 1, metric.WithAttributeSet(attrs)) + c.telemetryBuilder.KafkaReceiverCurrentOffset.Record(ctx, message.Offset, metric.WithAttributeSet(attrs)) + c.telemetryBuilder.KafkaReceiverOffsetLag.Record(ctx, claim.HighWaterMarkOffset()-message.Offset-1, metric.WithAttributeSet(attrs)) metrics, err := c.unmarshaler.Unmarshal(message.Value) if err != nil { c.logger.Error("failed to unmarshal message", zap.Error(err)) - _ = stats.RecordWithTags( - ctx, - []tag.Mutator{tag.Upsert(tagInstanceName, c.id.String())}, - statUnmarshalFailedMetricPoints.M(1)) + c.telemetryBuilder.KafkaReceiverUnmarshalFailedMetricPoints.Add(session.Context(), 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.String()))) if c.messageMarking.After && c.messageMarking.OnError { session.MarkMessage(message, "") } @@ -592,18 +611,12 @@ func (c *logsConsumerGroupHandler) 
Setup(session sarama.ConsumerGroupSession) er c.readyCloser.Do(func() { close(c.ready) }) - _ = stats.RecordWithTags( - session.Context(), - []tag.Mutator{tag.Upsert(tagInstanceName, c.id.String())}, - statPartitionStart.M(1)) + c.telemetryBuilder.KafkaReceiverPartitionStart.Add(session.Context(), 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.String()))) return nil } func (c *logsConsumerGroupHandler) Cleanup(session sarama.ConsumerGroupSession) error { - _ = stats.RecordWithTags( - session.Context(), - []tag.Mutator{tag.Upsert(tagInstanceName, c.id.String())}, - statPartitionClose.M(1)) + c.telemetryBuilder.KafkaReceiverPartitionClose.Add(session.Context(), 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.String()))) return nil } @@ -627,24 +640,18 @@ func (c *logsConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSess } ctx := c.obsrecv.StartLogsOp(session.Context()) - statsTags := []tag.Mutator{ - tag.Upsert(tagInstanceName, c.id.String()), - tag.Upsert(tagPartition, strconv.Itoa(int(claim.Partition()))), - } - _ = stats.RecordWithTags( - ctx, - statsTags, - statMessageCount.M(1), - statMessageOffset.M(message.Offset), - statMessageOffsetLag.M(claim.HighWaterMarkOffset()-message.Offset-1)) + attrs := attribute.NewSet( + attribute.String(attrInstanceName, c.id.String()), + attribute.String(attrPartition, strconv.Itoa(int(claim.Partition()))), + ) + c.telemetryBuilder.KafkaReceiverMessages.Add(ctx, 1, metric.WithAttributeSet(attrs)) + c.telemetryBuilder.KafkaReceiverCurrentOffset.Record(ctx, message.Offset, metric.WithAttributeSet(attrs)) + c.telemetryBuilder.KafkaReceiverOffsetLag.Record(ctx, claim.HighWaterMarkOffset()-message.Offset-1, metric.WithAttributeSet(attrs)) logs, err := c.unmarshaler.Unmarshal(message.Value) if err != nil { c.logger.Error("failed to unmarshal message", zap.Error(err)) - _ = stats.RecordWithTags( - ctx, - []tag.Mutator{tag.Upsert(tagInstanceName, c.id.String())}, - statUnmarshalFailedLogRecords.M(1)) + c.telemetryBuilder.KafkaReceiverUnmarshalFailedLogRecords.Add(ctx, 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.String()))) if c.messageMarking.After && c.messageMarking.OnError { session.MarkMessage(message, "") } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/metadata.yaml index 8ccc15c2814..cc9e7dc3b64 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/metadata.yaml @@ -14,8 +14,60 @@ status: # TODO: Update the receiver to pass the tests tests: skip_lifecycle: true - goleak: - ignore: - top: - # See https://github.com/census-instrumentation/opencensus-go/issues/1191 for more information. 
- - "go.opencensus.io/stats/view.(*worker).start" + +telemetry: + metrics: + kafka_receiver_messages: + enabled: true + description: Number of received messages + unit: "1" + sum: + value_type: int + monotonic: true + kafka_receiver_current_offset: + enabled: true + description: Current message offset + unit: "1" + gauge: + value_type: int + kafka_receiver_offset_lag: + enabled: true + description: Current offset lag + unit: "1" + gauge: + value_type: int + kafka_receiver_partition_start: + enabled: true + description: Number of started partitions + unit: "1" + sum: + value_type: int + monotonic: true + kafka_receiver_partition_close: + enabled: true + description: Number of finished partitions + unit: "1" + sum: + value_type: int + monotonic: true + kafka_receiver_unmarshal_failed_metric_points: + enabled: true + description: Number of metric points failed to be unmarshaled + unit: "1" + sum: + value_type: int + monotonic: true + kafka_receiver_unmarshal_failed_log_records: + enabled: true + description: Number of log records failed to be unmarshaled + unit: "1" + sum: + value_type: int + monotonic: true + kafka_receiver_unmarshal_failed_spans: + enabled: true + description: Number of spans failed to be unmarshaled + unit: "1" + sum: + value_type: int + monotonic: true diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/metrics.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/metrics.go deleted file mode 100644 index c1dc6d3e7c3..00000000000 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/metrics.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package kafkareceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver" - -import ( - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" -) - -var ( - tagInstanceName, _ = tag.NewKey("name") - tagPartition, _ = tag.NewKey("partition") - - statMessageCount = stats.Int64("kafka_receiver_messages", "Number of received messages", stats.UnitDimensionless) - statMessageOffset = stats.Int64("kafka_receiver_current_offset", "Current message offset", stats.UnitDimensionless) - statMessageOffsetLag = stats.Int64("kafka_receiver_offset_lag", "Current offset lag", stats.UnitDimensionless) - - statPartitionStart = stats.Int64("kafka_receiver_partition_start", "Number of started partitions", stats.UnitDimensionless) - statPartitionClose = stats.Int64("kafka_receiver_partition_close", "Number of finished partitions", stats.UnitDimensionless) - - statUnmarshalFailedMetricPoints = stats.Int64("kafka_receiver_unmarshal_failed_metric_points", "Number of metric points failed to be unmarshaled", stats.UnitDimensionless) - statUnmarshalFailedLogRecords = stats.Int64("kafka_receiver_unmarshal_failed_log_records", "Number of log records failed to be unmarshaled", stats.UnitDimensionless) - statUnmarshalFailedSpans = stats.Int64("kafka_receiver_unmarshal_failed_spans", "Number of spans failed to be unmarshaled", stats.UnitDimensionless) -) - -// metricViews return metric views for Kafka receiver. 
-func metricViews() []*view.View {
-	partitionAgnosticTagKeys := []tag.Key{tagInstanceName}
-	partitionSpecificTagKeys := []tag.Key{tagInstanceName, tagPartition}
-
-	countMessages := &view.View{
-		Name:        statMessageCount.Name(),
-		Measure:     statMessageCount,
-		Description: statMessageCount.Description(),
-		TagKeys:     partitionSpecificTagKeys,
-		Aggregation: view.Sum(),
-	}
-
-	lastValueOffset := &view.View{
-		Name:        statMessageOffset.Name(),
-		Measure:     statMessageOffset,
-		Description: statMessageOffset.Description(),
-		TagKeys:     partitionSpecificTagKeys,
-		Aggregation: view.LastValue(),
-	}
-
-	lastValueOffsetLag := &view.View{
-		Name:        statMessageOffsetLag.Name(),
-		Measure:     statMessageOffsetLag,
-		Description: statMessageOffsetLag.Description(),
-		TagKeys:     partitionSpecificTagKeys,
-		Aggregation: view.LastValue(),
-	}
-
-	countPartitionStart := &view.View{
-		Name:        statPartitionStart.Name(),
-		Measure:     statPartitionStart,
-		Description: statPartitionStart.Description(),
-		TagKeys:     partitionAgnosticTagKeys,
-		Aggregation: view.Sum(),
-	}
-
-	countPartitionClose := &view.View{
-		Name:        statPartitionClose.Name(),
-		Measure:     statPartitionClose,
-		Description: statPartitionClose.Description(),
-		TagKeys:     partitionAgnosticTagKeys,
-		Aggregation: view.Sum(),
-	}
-
-	countUnmarshalFailedMetricPoints := &view.View{
-		Name:        statUnmarshalFailedMetricPoints.Name(),
-		Measure:     statUnmarshalFailedMetricPoints,
-		Description: statUnmarshalFailedMetricPoints.Description(),
-		TagKeys:     partitionAgnosticTagKeys,
-		Aggregation: view.Sum(),
-	}
-
-	countUnmarshalFailedLogRecords := &view.View{
-		Name:        statUnmarshalFailedLogRecords.Name(),
-		Measure:     statUnmarshalFailedLogRecords,
-		Description: statUnmarshalFailedLogRecords.Description(),
-		TagKeys:     partitionAgnosticTagKeys,
-		Aggregation: view.Sum(),
-	}
-
-	countUnmarshalFailedSpans := &view.View{
-		Name:        statUnmarshalFailedSpans.Name(),
-		Measure:     statUnmarshalFailedSpans,
-		Description: statUnmarshalFailedSpans.Description(),
-		TagKeys:     partitionAgnosticTagKeys,
-		Aggregation: view.Sum(),
-	}
-
-	return []*view.View{
-		countMessages,
-		lastValueOffset,
-		lastValueOffsetLag,
-		countPartitionStart,
-		countPartitionClose,
-		countUnmarshalFailedMetricPoints,
-		countUnmarshalFailedLogRecords,
-		countUnmarshalFailedSpans,
-	}
-}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/README.md
index 7cbd83afb62..a97fd50b7ad 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/README.md
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/README.md
@@ -27,7 +27,7 @@ receivers:
 
 The following settings are configurable:
 
-- `endpoint` (default = 0.0.0.0:9411): host:port on which the receiver is going to receive data. The `component.UseLocalHostAsDefaultHost` feature gate changes this to localhost:9411. This will become the default in a future release. For full list of `ServerConfig` refer [here](https://github.com/open-telemetry/opentelemetry-collector/tree/main/config/confighttp).
+- `endpoint` (default = localhost:9411): host:port on which the receiver is going to receive data. You can temporarily disable the `component.UseLocalHostAsDefaultHost` feature gate to change this to `0.0.0.0:9411`. This feature gate will be removed in a future release.
For full list of `ServerConfig` refer [here](https://github.com/open-telemetry/opentelemetry-collector/tree/main/config/confighttp). - `parse_string_tags` (default = false): if enabled, the receiver will attempt to parse string tags/binary annotations into int/bool/float. ## Advanced Configuration diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/factory.go index 0675a1829e7..6a86daa6f0d 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/factory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/factory.go @@ -19,7 +19,7 @@ import ( const ( defaultHTTPPort = 9411 - defaultBindEndpoint = "0.0.0.0:9411" + defaultBindEndpoint = "localhost:9411" ) // NewFactory creates a new Zipkin receiver factory @@ -44,7 +44,7 @@ func createDefaultConfig() component.Config { // createTracesReceiver creates a trace receiver based on provided config. func createTracesReceiver( _ context.Context, - set receiver.CreateSettings, + set receiver.Settings, cfg component.Config, nextConsumer consumer.Traces, ) (receiver.Traces, error) { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/trace_receiver.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/trace_receiver.go index 9a5a897a70f..a8c0ec11e3b 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/trace_receiver.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/trace_receiver.go @@ -49,14 +49,14 @@ type zipkinReceiver struct { protobufUnmarshaler ptrace.Unmarshaler protobufDebugUnmarshaler ptrace.Unmarshaler - settings receiver.CreateSettings + settings receiver.Settings obsrecvrs map[string]*receiverhelper.ObsReport } var _ http.Handler = (*zipkinReceiver)(nil) // newReceiver creates a new zipkinReceiver reference. -func newReceiver(config *Config, nextConsumer consumer.Traces, settings receiver.CreateSettings) (*zipkinReceiver, error) { +func newReceiver(config *Config, nextConsumer consumer.Traces, settings receiver.Settings) (*zipkinReceiver, error) { transports := []string{receiverTransportV1Thrift, receiverTransportV1JSON, receiverTransportV2JSON, receiverTransportV2PROTO} obsrecvrs := make(map[string]*receiverhelper.ObsReport) for _, transport := range transports { diff --git a/vendor/github.com/pelletier/go-toml/v2/.gitignore b/vendor/github.com/pelletier/go-toml/v2/.gitignore index a69e2b0ebd7..4b7c4eda3a9 100644 --- a/vendor/github.com/pelletier/go-toml/v2/.gitignore +++ b/vendor/github.com/pelletier/go-toml/v2/.gitignore @@ -3,4 +3,5 @@ fuzz/ cmd/tomll/tomll cmd/tomljson/tomljson cmd/tomltestgen/tomltestgen -dist \ No newline at end of file +dist +tests/ diff --git a/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md b/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md index 04dd12bcbca..96ecf9e2b3a 100644 --- a/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md +++ b/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md @@ -165,25 +165,22 @@ Checklist: ### New release -1. Decide on the next version number. Use semver. -2. Generate release notes using [`gh`][gh]. Example: +1. Decide on the next version number. Use semver. Review commits since last + version to assess. +2. Tag release. 
For example:
 
 ```
-$ gh api -X POST \
-  -F tag_name='v2.0.0-beta.5' \
-  -F target_commitish='v2' \
-  -F previous_tag_name='v2.0.0-beta.4' \
-  --jq '.body' \
-  repos/pelletier/go-toml/releases/generate-notes
+git checkout v2
+git pull
+git tag v2.2.0
+git push --tags
 ```
 
-3. Look for "Other changes". That would indicate a pull request not labeled
-   properly. Tweak labels and pull request titles until changelog looks good for
-   users.
-4. [Draft new release][new-release].
-5. Fill tag and target with the same value used to generate the changelog.
-6. Set title to the new tag value.
-7. Paste the generated changelog.
-8. Check "create discussion", in the "Releases" category.
-9. Check pre-release if new version is an alpha or beta.
+3. CI automatically builds a draft GitHub release. Review it and edit as
+   necessary. Look for "Other changes". That would indicate a pull request not
+   labeled properly. Tweak labels and pull request titles until changelog looks
+   good for users.
+4. Check "create discussion" box, in the "Releases" category.
+5. If new version is an alpha or beta only, check pre-release box.
+
 
 [issues-tracker]: https://github.com/pelletier/go-toml/issues
 [bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md
diff --git a/vendor/github.com/pelletier/go-toml/v2/README.md b/vendor/github.com/pelletier/go-toml/v2/README.md
index 63b92f3b0b2..d964b25fe19 100644
--- a/vendor/github.com/pelletier/go-toml/v2/README.md
+++ b/vendor/github.com/pelletier/go-toml/v2/README.md
@@ -98,9 +98,9 @@ Given the following struct, let's see how to read it and write it as TOML:
 
 ```go
 type MyConfig struct {
-  Version int
-  Name    string
-  Tags    []string
+	Version int
+	Name    string
+	Tags    []string
 }
 ```
 
@@ -119,7 +119,7 @@ tags = ["go", "toml"]
 var cfg MyConfig
 err := toml.Unmarshal([]byte(doc), &cfg)
 if err != nil {
-  panic(err)
+	panic(err)
 }
 fmt.Println("version:", cfg.Version)
 fmt.Println("name:", cfg.Name)
@@ -140,14 +140,14 @@ as a TOML document:
 
 ```go
 cfg := MyConfig{
-  Version: 2,
-  Name:    "go-toml",
-  Tags:    []string{"go", "toml"},
+	Version: 2,
+	Name:    "go-toml",
+	Tags:    []string{"go", "toml"},
 }
 
 b, err := toml.Marshal(cfg)
 if err != nil {
-  panic(err)
+	panic(err)
 }
 
 fmt.Println(string(b))
@@ -175,17 +175,17 @@ the AST level. See https://pkg.go.dev/github.com/pelletier/go-toml/v2/unstable.
 
 Execution time speedup compared to other Go TOML libraries:
 
 <table>
-    <thead>
-        <tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
-    </thead>
-    <tbody>
-        <tr><td>Marshal/HugoFrontMatter-2</td><td>1.9x</td><td>1.9x</td></tr>
-        <tr><td>Marshal/ReferenceFile/map-2</td><td>1.7x</td><td>1.8x</td></tr>
-        <tr><td>Marshal/ReferenceFile/struct-2</td><td>2.2x</td><td>2.5x</td></tr>
-        <tr><td>Unmarshal/HugoFrontMatter-2</td><td>2.9x</td><td>2.9x</td></tr>
-        <tr><td>Unmarshal/ReferenceFile/map-2</td><td>2.6x</td><td>2.9x</td></tr>
-        <tr><td>Unmarshal/ReferenceFile/struct-2</td><td>4.4x</td><td>5.3x</td></tr>
-    </tbody>
+    <thead>
+        <tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
+    </thead>
+    <tbody>
+        <tr><td>Marshal/HugoFrontMatter-2</td><td>1.9x</td><td>2.2x</td></tr>
+        <tr><td>Marshal/ReferenceFile/map-2</td><td>1.7x</td><td>2.1x</td></tr>
+        <tr><td>Marshal/ReferenceFile/struct-2</td><td>2.2x</td><td>3.0x</td></tr>
+        <tr><td>Unmarshal/HugoFrontMatter-2</td><td>2.9x</td><td>2.7x</td></tr>
+        <tr><td>Unmarshal/ReferenceFile/map-2</td><td>2.6x</td><td>2.7x</td></tr>
+        <tr><td>Unmarshal/ReferenceFile/struct-2</td><td>4.6x</td><td>5.1x</td></tr>
+    </tbody>
 </table>
 <details><summary>See more</summary>
 
The table above has the results of the most common use-cases. The table below @@ -193,22 +193,22 @@ contains the results of all benchmarks, including unrealistic ones. It is provided for completeness.

 <table>
-    <thead>
-        <tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
-    </thead>
-    <tbody>
-        <tr><td>Marshal/SimpleDocument/map-2</td><td>1.8x</td><td>2.9x</td></tr>
-        <tr><td>Marshal/SimpleDocument/struct-2</td><td>2.7x</td><td>4.2x</td></tr>
-        <tr><td>Unmarshal/SimpleDocument/map-2</td><td>4.5x</td><td>3.1x</td></tr>
-        <tr><td>Unmarshal/SimpleDocument/struct-2</td><td>6.2x</td><td>3.9x</td></tr>
-        <tr><td>UnmarshalDataset/example-2</td><td>3.1x</td><td>3.5x</td></tr>
-        <tr><td>UnmarshalDataset/code-2</td><td>2.3x</td><td>3.1x</td></tr>
-        <tr><td>UnmarshalDataset/twitter-2</td><td>2.5x</td><td>2.6x</td></tr>
-        <tr><td>UnmarshalDataset/citm_catalog-2</td><td>2.1x</td><td>2.2x</td></tr>
-        <tr><td>UnmarshalDataset/canada-2</td><td>1.6x</td><td>1.3x</td></tr>
-        <tr><td>UnmarshalDataset/config-2</td><td>4.3x</td><td>3.2x</td></tr>
-        <tr><td>[Geo mean]</td><td>2.7x</td><td>2.8x</td></tr>
-    </tbody>
+    <thead>
+        <tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
+    </thead>
+    <tbody>
+        <tr><td>Marshal/SimpleDocument/map-2</td><td>1.8x</td><td>2.7x</td></tr>
+        <tr><td>Marshal/SimpleDocument/struct-2</td><td>2.7x</td><td>3.8x</td></tr>
+        <tr><td>Unmarshal/SimpleDocument/map-2</td><td>3.8x</td><td>3.0x</td></tr>
+        <tr><td>Unmarshal/SimpleDocument/struct-2</td><td>5.6x</td><td>4.1x</td></tr>
+        <tr><td>UnmarshalDataset/example-2</td><td>3.0x</td><td>3.2x</td></tr>
+        <tr><td>UnmarshalDataset/code-2</td><td>2.3x</td><td>2.9x</td></tr>
+        <tr><td>UnmarshalDataset/twitter-2</td><td>2.6x</td><td>2.7x</td></tr>
+        <tr><td>UnmarshalDataset/citm_catalog-2</td><td>2.2x</td><td>2.3x</td></tr>
+        <tr><td>UnmarshalDataset/canada-2</td><td>1.8x</td><td>1.5x</td></tr>
+        <tr><td>UnmarshalDataset/config-2</td><td>4.1x</td><td>2.9x</td></tr>
+        <tr><td>geomean</td><td>2.7x</td><td>2.8x</td></tr>
+    </tbody>
 </table>
 
 This table can be generated with <code>./ci.sh benchmark -a -html</code>.
 </details>
 
@@ -233,24 +233,24 @@ Go-toml provides three handy command line tools:
 
  * `tomljson`: Reads a TOML file and outputs its JSON representation.
 
-  ```
-  $ go install github.com/pelletier/go-toml/v2/cmd/tomljson@latest
-  $ tomljson --help
-  ```
+    ```
+    $ go install github.com/pelletier/go-toml/v2/cmd/tomljson@latest
+    $ tomljson --help
+    ```
 
  * `jsontoml`: Reads a JSON file and outputs a TOML representation.
 
-  ```
-  $ go install github.com/pelletier/go-toml/v2/cmd/jsontoml@latest
-  $ jsontoml --help
-  ```
+    ```
+    $ go install github.com/pelletier/go-toml/v2/cmd/jsontoml@latest
+    $ jsontoml --help
+    ```
 
  * `tomll`: Lints and reformats a TOML file.
 
-  ```
-  $ go install github.com/pelletier/go-toml/v2/cmd/tomll@latest
-  $ tomll --help
-  ```
+    ```
+    $ go install github.com/pelletier/go-toml/v2/cmd/tomll@latest
+    $ tomll --help
+    ```
 
 ### Docker image
 
@@ -261,7 +261,7 @@ Those tools are also available as a [Docker image][docker]. For example, to use
 docker run -i ghcr.io/pelletier/go-toml:v2 tomljson < example.toml
 ```
 
-Multiple versions are availble on [ghcr.io][docker].
+Multiple versions are available on [ghcr.io][docker].
 
 [docker]: https://github.com/pelletier/go-toml/pkgs/container/go-toml
 
@@ -293,16 +293,16 @@ element in the interface to decode the object. For example:
 
 ```go
 type inner struct {
-  B interface{}
+	B interface{}
 }
 type doc struct {
-  A interface{}
+	A interface{}
 }
 
 d := doc{
-  A: inner{
-    B: "Before",
-  },
+	A: inner{
+		B: "Before",
+	},
 }
 
 data := `
@@ -341,7 +341,7 @@ contained in the doc is superior to the capacity of the array. For example:
 
 ```go
 type doc struct {
-  A [2]string
+	A [2]string
 }
 d := doc{}
 err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d)
@@ -565,10 +565,11 @@ complete solutions exist out there.
 
 ## Versioning
 
-Go-toml follows [Semantic Versioning](https://semver.org). The supported version
-of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of
-this document. The last two major versions of Go are supported
-(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)).
+Except for parts explicitly marked otherwise, go-toml follows [Semantic
+Versioning](https://semver.org). The supported version of
+[TOML](https://github.com/toml-lang/toml) is indicated at the beginning of this
+document. The last two major versions of Go are supported (see [Go Release
+Policy](https://golang.org/doc/devel/release.html#policy)).
 
 ## License
 
diff --git a/vendor/github.com/pelletier/go-toml/v2/SECURITY.md b/vendor/github.com/pelletier/go-toml/v2/SECURITY.md
index b2f21cfc92c..d4d554fda9d 100644
--- a/vendor/github.com/pelletier/go-toml/v2/SECURITY.md
+++ b/vendor/github.com/pelletier/go-toml/v2/SECURITY.md
@@ -2,9 +2,6 @@
 
 ## Supported Versions
 
-Use this section to tell people about which versions of your project are
-currently being supported with security updates.
-
 | Version    | Supported          |
 | ---------- | ------------------ |
 | Latest 2.x | :white_check_mark: |
diff --git a/vendor/github.com/pelletier/go-toml/v2/ci.sh b/vendor/github.com/pelletier/go-toml/v2/ci.sh
index 9ae8b753759..86217a9b097 100644
--- a/vendor/github.com/pelletier/go-toml/v2/ci.sh
+++ b/vendor/github.com/pelletier/go-toml/v2/ci.sh
@@ -77,7 +77,7 @@ cover() {
     pushd "$dir"
     go test -covermode=atomic -coverpkg=./... -coverprofile=coverage.out.tmp ./...
- cat coverage.out.tmp | grep -v fuzz | grep -v testsuite | grep -v tomltestgen | grep -v gotoml-test-decoder > coverage.out + grep -Ev '(fuzz|testsuite|tomltestgen|gotoml-test-decoder|gotoml-test-encoder)' coverage.out.tmp > coverage.out go tool cover -func=coverage.out echo "Coverage profile for ${branch}: ${dir}/coverage.out" >&2 popd @@ -152,7 +152,7 @@ bench() { fi export GOMAXPROCS=2 - nice -n -19 taskset --cpu-list 0,1 go test '-bench=^Benchmark(Un)?[mM]arshal' -count=5 -run=Nothing ./... | tee "${out}" + go test '-bench=^Benchmark(Un)?[mM]arshal' -count=10 -run=Nothing ./... | tee "${out}" popd if [ "${branch}" != "HEAD" ]; then @@ -161,10 +161,12 @@ bench() { } fmktemp() { - if mktemp --version|grep GNU >/dev/null; then - mktemp --suffix=-$1; + if mktemp --version &> /dev/null; then + # GNU + mktemp --suffix=-$1 else - mktemp -t $1; + # BSD + mktemp -t $1 fi } @@ -184,12 +186,14 @@ with open(sys.argv[1]) as f: lines.append(line.split(',')) results = [] -for line in reversed(lines[1:]): +for line in reversed(lines[2:]): + if len(line) < 8 or line[0] == "": + continue v2 = float(line[1]) results.append([ line[0].replace("-32", ""), "%.1fx" % (float(line[3])/v2), # v1 - "%.1fx" % (float(line[5])/v2), # bs + "%.1fx" % (float(line[7])/v2), # bs ]) # move geomean to the end results.append(results[0]) @@ -260,10 +264,10 @@ benchmark() { if [ "$1" = "-html" ]; then tmpcsv=`fmktemp csv` - benchstat -csv -geomean go-toml-v2.txt go-toml-v1.txt bs-toml.txt > $tmpcsv + benchstat -format csv go-toml-v2.txt go-toml-v1.txt bs-toml.txt > $tmpcsv benchstathtml $tmpcsv else - benchstat -geomean go-toml-v2.txt go-toml-v1.txt bs-toml.txt + benchstat go-toml-v2.txt go-toml-v1.txt bs-toml.txt fi rm -f go-toml-v2.txt go-toml-v1.txt bs-toml.txt diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go index 40e23f8304a..76df2d5b6a9 100644 --- a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go +++ b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go @@ -57,7 +57,11 @@ type SeenTracker struct { currentIdx int } -var pool sync.Pool +var pool = sync.Pool{ + New: func() interface{} { + return &SeenTracker{} + }, +} func (s *SeenTracker) reset() { // Always contains a root element at index 0. @@ -149,8 +153,9 @@ func (s *SeenTracker) setExplicitFlag(parentIdx int) { // CheckExpression takes a top-level node and checks that it does not contain // keys that have been seen in previous calls, and validates that types are -// consistent. -func (s *SeenTracker) CheckExpression(node *unstable.Node) error { +// consistent. It returns true if it is the first time this node's key is seen. +// Useful to clear array tables on first use. 
+func (s *SeenTracker) CheckExpression(node *unstable.Node) (bool, error) { if s.entries == nil { s.reset() } @@ -166,7 +171,7 @@ func (s *SeenTracker) CheckExpression(node *unstable.Node) error { } } -func (s *SeenTracker) checkTable(node *unstable.Node) error { +func (s *SeenTracker) checkTable(node *unstable.Node) (bool, error) { if s.currentIdx >= 0 { s.setExplicitFlag(s.currentIdx) } @@ -192,7 +197,7 @@ func (s *SeenTracker) checkTable(node *unstable.Node) error { } else { entry := s.entries[idx] if entry.kind == valueKind { - return fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) + return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) } } parentIdx = idx @@ -201,25 +206,27 @@ func (s *SeenTracker) checkTable(node *unstable.Node) error { k := it.Node().Data idx := s.find(parentIdx, k) + first := false if idx >= 0 { kind := s.entries[idx].kind if kind != tableKind { - return fmt.Errorf("toml: key %s should be a table, not a %s", string(k), kind) + return false, fmt.Errorf("toml: key %s should be a table, not a %s", string(k), kind) } if s.entries[idx].explicit { - return fmt.Errorf("toml: table %s already exists", string(k)) + return false, fmt.Errorf("toml: table %s already exists", string(k)) } s.entries[idx].explicit = true } else { idx = s.create(parentIdx, k, tableKind, true, false) + first = true } s.currentIdx = idx - return nil + return first, nil } -func (s *SeenTracker) checkArrayTable(node *unstable.Node) error { +func (s *SeenTracker) checkArrayTable(node *unstable.Node) (bool, error) { if s.currentIdx >= 0 { s.setExplicitFlag(s.currentIdx) } @@ -242,7 +249,7 @@ func (s *SeenTracker) checkArrayTable(node *unstable.Node) error { } else { entry := s.entries[idx] if entry.kind == valueKind { - return fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) + return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) } } @@ -252,22 +259,23 @@ func (s *SeenTracker) checkArrayTable(node *unstable.Node) error { k := it.Node().Data idx := s.find(parentIdx, k) - if idx >= 0 { + firstTime := idx < 0 + if firstTime { + idx = s.create(parentIdx, k, arrayTableKind, true, false) + } else { kind := s.entries[idx].kind if kind != arrayTableKind { - return fmt.Errorf("toml: key %s already exists as a %s, but should be an array table", kind, string(k)) + return false, fmt.Errorf("toml: key %s already exists as a %s, but should be an array table", kind, string(k)) } s.clear(idx) - } else { - idx = s.create(parentIdx, k, arrayTableKind, true, false) } s.currentIdx = idx - return nil + return firstTime, nil } -func (s *SeenTracker) checkKeyValue(node *unstable.Node) error { +func (s *SeenTracker) checkKeyValue(node *unstable.Node) (bool, error) { parentIdx := s.currentIdx it := node.Key() @@ -281,11 +289,11 @@ func (s *SeenTracker) checkKeyValue(node *unstable.Node) error { } else { entry := s.entries[idx] if it.IsLast() { - return fmt.Errorf("toml: key %s is already defined", string(k)) + return false, fmt.Errorf("toml: key %s is already defined", string(k)) } else if entry.kind != tableKind { - return fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) + return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) } else if entry.explicit { - return fmt.Errorf("toml: cannot redefine table %s that has already been explicitly defined", string(k)) + return false, fmt.Errorf("toml: cannot redefine table %s that has 
already been explicitly defined", string(k))
 }
@@ -303,45 +311,39 @@ func (s *SeenTracker) checkKeyValue(node *unstable.Node) error {
 return s.checkArray(value)
 }
- return nil
+ return false, nil
}

-func (s *SeenTracker) checkArray(node *unstable.Node) error {
+func (s *SeenTracker) checkArray(node *unstable.Node) (first bool, err error) {
 it := node.Children()
 for it.Next() {
 n := it.Node()
 switch n.Kind {
 case unstable.InlineTable:
- err := s.checkInlineTable(n)
+ first, err = s.checkInlineTable(n)
 if err != nil {
- return err
+ return false, err
 }
 case unstable.Array:
- err := s.checkArray(n)
+ first, err = s.checkArray(n)
 if err != nil {
- return err
+ return false, err
 }
 }
 }
- return nil
+ return first, nil
}

-func (s *SeenTracker) checkInlineTable(node *unstable.Node) error {
- if pool.New == nil {
- pool.New = func() interface{} {
- return &SeenTracker{}
- }
- }
-
+func (s *SeenTracker) checkInlineTable(node *unstable.Node) (first bool, err error) {
 s = pool.Get().(*SeenTracker)
 s.reset()
 it := node.Children()
 for it.Next() {
 n := it.Node()
- err := s.checkKeyValue(n)
+ first, err = s.checkKeyValue(n)
 if err != nil {
- return err
+ return false, err
 }
 }
@@ -352,5 +354,5 @@ func (s *SeenTracker) checkInlineTable(node *unstable.Node) error {
 // redefinition of its keys: check* functions cannot walk into
 // a value.
 pool.Put(s)
- return nil
+ return first, nil
}
diff --git a/vendor/github.com/pelletier/go-toml/v2/marshaler.go b/vendor/github.com/pelletier/go-toml/v2/marshaler.go
index 6fe78533c1c..7f4e20c1285 100644
--- a/vendor/github.com/pelletier/go-toml/v2/marshaler.go
+++ b/vendor/github.com/pelletier/go-toml/v2/marshaler.go
@@ -3,6 +3,7 @@ package toml
 import (
 "bytes"
 "encoding"
+ "encoding/json"
 "fmt"
 "io"
 "math"
@@ -37,10 +38,11 @@ type Encoder struct {
 w io.Writer

 // global settings
- tablesInline bool
- arraysMultiline bool
- indentSymbol string
- indentTables bool
+ tablesInline bool
+ arraysMultiline bool
+ indentSymbol string
+ indentTables bool
+ marshalJsonNumbers bool
}

// NewEncoder returns a new Encoder that writes to w.
@@ -87,6 +89,17 @@ func (enc *Encoder) SetIndentTables(indent bool) *Encoder {
 return enc
}

+// SetMarshalJsonNumbers forces the encoder to serialize `json.Number` as a
+// float or integer instead of relying on TextMarshaler to emit a string.
+//
+// *Unstable:* This method does not follow the compatibility guarantees of
+// semver. It can be changed or removed without a new major version being
+// issued.
+func (enc *Encoder) SetMarshalJsonNumbers(enabled bool) *Encoder {
+ enc.marshalJsonNumbers = enabled
+ return enc
+}
+
// Encode writes a TOML representation of v to the stream.
//
// If v cannot be represented to TOML it returns an error.
@@ -252,6 +265,18 @@ func (enc *Encoder) encode(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, e
 return append(b, x.String()...), nil
 case LocalDateTime:
 return append(b, x.String()...), nil
+ case json.Number:
+ if enc.marshalJsonNumbers {
+ if x == "" { // Useful zero value.
+ return append(b, "0"...), nil
+ } else if v, err := x.Int64(); err == nil {
+ return enc.encode(b, ctx, reflect.ValueOf(v))
+ } else if f, err := x.Float64(); err == nil {
+ return enc.encode(b, ctx, reflect.ValueOf(f))
+ } else {
+ return nil, fmt.Errorf("toml: unable to convert %q to int64 or float64", x)
+ }
+ }
 }

 hasTextMarshaler := v.Type().Implements(textMarshalerType)
@@ -707,6 +732,8 @@ func walkStruct(ctx encoderCtx, t *table, v reflect.Value) {
 if fieldType.Anonymous {
 if fieldType.Type.Kind() == reflect.Struct {
 walkStruct(ctx, t, f)
+ } else if fieldType.Type.Kind() == reflect.Pointer && !f.IsNil() && f.Elem().Kind() == reflect.Struct {
+ walkStruct(ctx, t, f.Elem())
 }
 continue
 } else {
@@ -998,6 +1025,10 @@ func (enc *Encoder) encodeSliceAsArrayTable(b []byte, ctx encoderCtx, v reflect.
 scratch = enc.commented(ctx.commented, scratch)

+ if enc.indentTables {
+ scratch = enc.indent(ctx.indent, scratch)
+ }
+
 scratch = append(scratch, "[["...)

 for i, k := range ctx.parentKey {
diff --git a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go
index 868c74c1577..98231bae65b 100644
--- a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go
+++ b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go
@@ -35,6 +35,9 @@ type Decoder struct {

 // global settings
 strict bool
+
+ // toggles unmarshaler interface
+ unmarshalerInterface bool
}

// NewDecoder creates a new Decoder that will read from r.
@@ -54,6 +57,24 @@ func (d *Decoder) DisallowUnknownFields() *Decoder {
 return d
}

+// EnableUnmarshalerInterface enables support for the unstable Unmarshaler interface.
+//
+// With this feature enabled, types implementing the unstable/Unmarshaler
+// interface can be decoded from any structure of the document. It allows types
+// that don't have a straightforward TOML representation to provide their own
+// decoding logic.
+//
+// Currently, types can only decode from a single value. Tables and array tables
+// are not supported.
+//
+// *Unstable:* This method does not follow the compatibility guarantees of
+// semver. It can be changed or removed without a new major version being
+// issued.
+func (d *Decoder) EnableUnmarshalerInterface() *Decoder {
+ d.unmarshalerInterface = true
+ return d
+}
+
// Decode the whole content of r into v.
//
// By default, values in the document that don't exist in the target Go value
@@ -108,6 +129,7 @@ func (d *Decoder) Decode(v interface{}) error {
 strict: strict{
 Enabled: d.strict,
 },
+ unmarshalerInterface: d.unmarshalerInterface,
 }

 return dec.FromParser(v)
@@ -127,6 +149,10 @@ type decoder struct {
 // need to be skipped.
 skipUntilTable bool

+ // Flag indicating that the current array/slice table should be cleared because
+ // it is the first encounter of an array table.
+ clearArrayTable bool
+
 // Tracks position in Go arrays.
 // This is used when decoding [[array tables]] into Go arrays. Given array
 // tables are separate TOML expression, we need to keep track of where we
@@ -139,6 +165,9 @@ type decoder struct {
 // Strict mode
 strict strict

+ // Flag that enables/disables unmarshaler interface.
+ unmarshalerInterface bool
+
 // Current context for the error.
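A note for reviewers on the encoder option above: the following is a minimal, hedged sketch of how SetMarshalJsonNumbers is meant to be used. It assumes only the go-toml v2 API visible in this patch; the map contents are invented.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

func main() {
	doc := map[string]json.Number{
		"port":  json.Number("8080"),
		"ratio": json.Number("0.5"),
	}

	var buf bytes.Buffer
	enc := toml.NewEncoder(&buf)
	// Without this option, a json.Number would be emitted as a TOML string;
	// with it, integers and floats are emitted as TOML numbers.
	enc.SetMarshalJsonNumbers(true)
	if err := enc.Encode(doc); err != nil {
		panic(err)
	}
	fmt.Print(buf.String()) // port = 8080 and ratio = 0.5 (key order may vary)
}
```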
errorContext *errorContext } @@ -246,9 +275,10 @@ Rules for the unmarshal code: func (d *decoder) handleRootExpression(expr *unstable.Node, v reflect.Value) error { var x reflect.Value var err error + var first bool // used for to clear array tables on first use if !(d.skipUntilTable && expr.Kind == unstable.KeyValue) { - err = d.seen.CheckExpression(expr) + first, err = d.seen.CheckExpression(expr) if err != nil { return err } @@ -267,6 +297,7 @@ func (d *decoder) handleRootExpression(expr *unstable.Node, v reflect.Value) err case unstable.ArrayTable: d.skipUntilTable = false d.strict.EnterArrayTable(expr) + d.clearArrayTable = first x, err = d.handleArrayTable(expr.Key(), v) default: panic(fmt.Errorf("parser should not permit expression of kind %s at document root", expr.Kind)) @@ -307,6 +338,10 @@ func (d *decoder) handleArrayTableCollectionLast(key unstable.Iterator, v reflec reflect.Copy(nelem, elem) elem = nelem } + if d.clearArrayTable && elem.Len() > 0 { + elem.SetLen(0) + d.clearArrayTable = false + } } return d.handleArrayTableCollectionLast(key, elem) case reflect.Ptr: @@ -325,6 +360,10 @@ func (d *decoder) handleArrayTableCollectionLast(key unstable.Iterator, v reflec return v, nil case reflect.Slice: + if d.clearArrayTable && v.Len() > 0 { + v.SetLen(0) + d.clearArrayTable = false + } elemType := v.Type().Elem() var elem reflect.Value if elemType.Kind() == reflect.Interface { @@ -576,7 +615,7 @@ func (d *decoder) handleKeyValues(v reflect.Value) (reflect.Value, error) { break } - err := d.seen.CheckExpression(expr) + _, err := d.seen.CheckExpression(expr) if err != nil { return reflect.Value{}, err } @@ -634,6 +673,14 @@ func (d *decoder) handleValue(value *unstable.Node, v reflect.Value) error { v = initAndDereferencePointer(v) } + if d.unmarshalerInterface { + if v.CanAddr() && v.Addr().CanInterface() { + if outi, ok := v.Addr().Interface().(unstable.Unmarshaler); ok { + return outi.UnmarshalTOML(value) + } + } + } + ok, err := d.tryTextUnmarshaler(value, v) if ok || err != nil { return err @@ -1097,9 +1144,9 @@ func (d *decoder) handleKeyValuePart(key unstable.Iterator, value *unstable.Node f := fieldByIndex(v, path) - if !f.CanSet() { - // If the field is not settable, need to take a slower path and make a copy of - // the struct itself to a new location. + if !f.CanAddr() { + // If the field is not addressable, need to take a slower path and + // make a copy of the struct itself to a new location. nvp := reflect.New(v.Type()) nvp.Elem().Set(v) v = nvp.Elem() diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go new file mode 100644 index 00000000000..00cfd6de458 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go @@ -0,0 +1,7 @@ +package unstable + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a TOML document. 
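Combined with Decoder.EnableUnmarshalerInterface from the previous hunk, the interface declared just below can be exercised as in this hedged sketch. CommaList is an invented example type; only the unstable go-toml v2 API vendored in this patch is assumed.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/pelletier/go-toml/v2"
	"github.com/pelletier/go-toml/v2/unstable"
)

// CommaList decodes a TOML string like "a,b,c" into a []string.
type CommaList []string

// UnmarshalTOML implements the unstable Unmarshaler interface. Only single
// values are supported, so reject anything that is not a string node.
func (l *CommaList) UnmarshalTOML(node *unstable.Node) error {
	if node.Kind != unstable.String {
		return fmt.Errorf("expected a string, got %s", node.Kind)
	}
	*l = strings.Split(string(node.Data), ",")
	return nil
}

func main() {
	var doc struct{ Tags CommaList }
	d := toml.NewDecoder(strings.NewReader(`Tags = "a,b,c"`))
	d.EnableUnmarshalerInterface() // opt-in; unstable API per the doc comment
	if err := d.Decode(&doc); err != nil {
		panic(err)
	}
	fmt.Println(doc.Tags) // [a b c]
}
```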
+type Unmarshaler interface {
+ UnmarshalTOML(value *Node) error
+}
diff --git a/vendor/github.com/prometheus/common/config/config.go b/vendor/github.com/prometheus/common/config/config.go
index 3da4854d048..7588da55561 100644
--- a/vendor/github.com/prometheus/common/config/config.go
+++ b/vendor/github.com/prometheus/common/config/config.go
@@ -60,9 +60,9 @@ func (s Secret) MarshalJSON() ([]byte, error) {
 return json.Marshal(secretToken)
}

-type Header map[string][]Secret
+type ProxyHeader map[string][]Secret

-func (h *Header) HTTPHeader() http.Header {
+func (h *ProxyHeader) HTTPHeader() http.Header {
 if h == nil || *h == nil {
 return nil
 }
diff --git a/vendor/github.com/prometheus/common/config/headers.go b/vendor/github.com/prometheus/common/config/headers.go
new file mode 100644
index 00000000000..4a0be4a10e9
--- /dev/null
+++ b/vendor/github.com/prometheus/common/config/headers.go
@@ -0,0 +1,137 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "os"
+ "strings"
+)
+
+// reservedHeaders are headers that change the connection, are set by
+// Prometheus itself, or can already be changed through other options.
+var reservedHeaders = map[string]struct{}{
+ "Authorization": {},
+ "Host": {},
+ "Content-Encoding": {},
+ "Content-Length": {},
+ "Content-Type": {},
+ "User-Agent": {},
+ "Connection": {},
+ "Keep-Alive": {},
+ "Proxy-Authenticate": {},
+ "Proxy-Authorization": {},
+ "Www-Authenticate": {},
+ "Accept-Encoding": {},
+ "X-Prometheus-Remote-Write-Version": {},
+ "X-Prometheus-Remote-Read-Version": {},
+ "X-Prometheus-Scrape-Timeout-Seconds": {},
+
+ // Added by SigV4.
+ "X-Amz-Date": {},
+ "X-Amz-Security-Token": {},
+ "X-Amz-Content-Sha256": {},
+}
+
+// Headers represents the configuration for HTTP headers.
+type Headers struct {
+ Headers map[string]Header `yaml:",inline"`
+ dir string
+}
+
+// Header represents the configuration for a single HTTP header.
+type Header struct {
+ Values []string `yaml:"values,omitempty" json:"values,omitempty"`
+ Secrets []Secret `yaml:"secrets,omitempty" json:"secrets,omitempty"`
+ Files []string `yaml:"files,omitempty" json:"files,omitempty"`
+}
+
+func (h Headers) MarshalJSON() ([]byte, error) {
+ // Inline the Headers map when serializing JSON because json encoder doesn't support "inline" directive.
+ return json.Marshal(h.Headers)
+}
+
+// SetDirectory records the directory to make headers file relative to the
+// configuration file.
+func (h *Headers) SetDirectory(dir string) {
+ if h == nil {
+ return
+ }
+ h.dir = dir
+}
+
+// Validate validates the Headers config.
+func (h *Headers) Validate() error { + for n, header := range h.Headers { + if _, ok := reservedHeaders[http.CanonicalHeaderKey(n)]; ok { + return fmt.Errorf("setting header %q is not allowed", http.CanonicalHeaderKey(n)) + } + for _, v := range header.Files { + f := JoinDir(h.dir, v) + _, err := os.ReadFile(f) + if err != nil { + return fmt.Errorf("unable to read header %q from file %s: %w", http.CanonicalHeaderKey(n), f, err) + } + } + } + return nil +} + +// NewHeadersRoundTripper returns a RoundTripper that sets HTTP headers on +// requests as configured. +func NewHeadersRoundTripper(config *Headers, next http.RoundTripper) http.RoundTripper { + if len(config.Headers) == 0 { + return next + } + return &headersRoundTripper{ + config: config, + next: next, + } +} + +type headersRoundTripper struct { + next http.RoundTripper + config *Headers +} + +// RoundTrip implements http.RoundTripper. +func (rt *headersRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + req = cloneRequest(req) + for n, h := range rt.config.Headers { + for _, v := range h.Values { + req.Header.Add(n, v) + } + for _, v := range h.Secrets { + req.Header.Add(n, string(v)) + } + for _, v := range h.Files { + f := JoinDir(rt.config.dir, v) + b, err := os.ReadFile(f) + if err != nil { + return nil, fmt.Errorf("unable to read headers file %s: %w", f, err) + } + req.Header.Add(n, strings.TrimSpace(string(b))) + } + } + return rt.next.RoundTrip(req) +} + +// CloseIdleConnections implements closeIdler. +func (rt *headersRoundTripper) CloseIdleConnections() { + if ci, ok := rt.next.(closeIdler); ok { + ci.CloseIdleConnections() + } +} diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go index 20441818405..3e320134776 100644 --- a/vendor/github.com/prometheus/common/config/http_config.go +++ b/vendor/github.com/prometheus/common/config/http_config.go @@ -20,6 +20,7 @@ import ( "crypto/tls" "crypto/x509" "encoding/json" + "errors" "fmt" "net" "net/http" @@ -130,8 +131,12 @@ func (tv *TLSVersion) String() string { type BasicAuth struct { Username string `yaml:"username" json:"username"` UsernameFile string `yaml:"username_file,omitempty" json:"username_file,omitempty"` + // UsernameRef is the name of the secret within the secret manager to use as the username. + UsernameRef string `yaml:"username_ref,omitempty" json:"username_ref,omitempty"` Password Secret `yaml:"password,omitempty" json:"password,omitempty"` PasswordFile string `yaml:"password_file,omitempty" json:"password_file,omitempty"` + // PasswordRef is the name of the secret within the secret manager to use as the password. + PasswordRef string `yaml:"password_ref,omitempty" json:"password_ref,omitempty"` } // SetDirectory joins any relative file paths with dir. @@ -148,6 +153,8 @@ type Authorization struct { Type string `yaml:"type,omitempty" json:"type,omitempty"` Credentials Secret `yaml:"credentials,omitempty" json:"credentials,omitempty"` CredentialsFile string `yaml:"credentials_file,omitempty" json:"credentials_file,omitempty"` + // CredentialsRef is the name of the secret within the secret manager to use as credentials. + CredentialsRef string `yaml:"credentials_ref,omitempty" json:"credentials_ref,omitempty"` } // SetDirectory joins any relative file paths with dir. @@ -224,14 +231,17 @@ func (u URL) MarshalJSON() ([]byte, error) { // OAuth2 is the oauth2 client configuration. 
type OAuth2 struct {
- ClientID string `yaml:"client_id" json:"client_id"`
- ClientSecret Secret `yaml:"client_secret" json:"client_secret"`
- ClientSecretFile string `yaml:"client_secret_file" json:"client_secret_file"`
- Scopes []string `yaml:"scopes,omitempty" json:"scopes,omitempty"`
- TokenURL string `yaml:"token_url" json:"token_url"`
- EndpointParams map[string]string `yaml:"endpoint_params,omitempty" json:"endpoint_params,omitempty"`
- TLSConfig TLSConfig `yaml:"tls_config,omitempty"`
- ProxyConfig `yaml:",inline"`
+ ClientID string `yaml:"client_id" json:"client_id"`
+ ClientSecret Secret `yaml:"client_secret" json:"client_secret"`
+ ClientSecretFile string `yaml:"client_secret_file" json:"client_secret_file"`
+ // ClientSecretRef is the name of the secret within the secret manager to use as the client
+ // secret.
+ ClientSecretRef string `yaml:"client_secret_ref" json:"client_secret_ref"`
+ Scopes []string `yaml:"scopes,omitempty" json:"scopes,omitempty"`
+ TokenURL string `yaml:"token_url" json:"token_url"`
+ EndpointParams map[string]string `yaml:"endpoint_params,omitempty" json:"endpoint_params,omitempty"`
+ TLSConfig TLSConfig `yaml:"tls_config,omitempty"`
+ ProxyConfig `yaml:",inline"`
}

// UnmarshalYAML implements the yaml.Unmarshaler interface
@@ -253,12 +263,12 @@ func (o *OAuth2) UnmarshalJSON(data []byte) error {
}

// SetDirectory joins any relative file paths with dir.
-func (a *OAuth2) SetDirectory(dir string) {
- if a == nil {
+func (o *OAuth2) SetDirectory(dir string) {
+ if o == nil {
 return
 }
- a.ClientSecretFile = JoinDir(dir, a.ClientSecretFile)
- a.TLSConfig.SetDirectory(dir)
+ o.ClientSecretFile = JoinDir(dir, o.ClientSecretFile)
+ o.TLSConfig.SetDirectory(dir)
}

// LoadHTTPConfig parses the YAML input s into a HTTPClientConfig.
@@ -311,6 +321,9 @@ type HTTPClientConfig struct {
 EnableHTTP2 bool `yaml:"enable_http2" json:"enable_http2"`
 // Proxy configuration.
 ProxyConfig `yaml:",inline"`
+ // HTTPHeaders specifies headers to inject into requests. Note that these
+ // headers may be marshalled back out to users.
+ HTTPHeaders *Headers `yaml:"http_headers,omitempty" json:"http_headers,omitempty"`
}

// SetDirectory joins any relative file paths with dir.
@@ -322,9 +335,22 @@ func (c *HTTPClientConfig) SetDirectory(dir string) {
 c.BasicAuth.SetDirectory(dir)
 c.Authorization.SetDirectory(dir)
 c.OAuth2.SetDirectory(dir)
+ c.HTTPHeaders.SetDirectory(dir)
 c.BearerTokenFile = JoinDir(dir, c.BearerTokenFile)
}

+// nonZeroCount returns the number of values that are non-zero.
+func nonZeroCount[T comparable](values ...T) int {
+ count := 0
+ var zero T
+ for _, value := range values {
+ if value != zero {
+ count += 1
+ }
+ }
+ return count
+}
+
// Validate validates the HTTPClientConfig to check only one of BearerToken,
// BasicAuth and BearerTokenFile is configured. It also validates that ProxyURL
// is set if ProxyConnectHeader is set.
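Since http_headers is a new user-facing knob, a hedged configuration sketch may help reviewers. Header names and values here are invented; only LoadHTTPConfig and the types added in this patch are assumed.

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/config"
)

func main() {
	// X-Tenant and X-Api-Key are invented names; reserved headers such as
	// Authorization would be rejected by Headers.Validate.
	cfg, err := config.LoadHTTPConfig(`
http_headers:
  X-Tenant:
    values: ["team-a"]
  X-Api-Key:
    secrets: ["s3cr3t"]
`)
	if err != nil {
		panic(err)
	}
	rt, err := config.NewRoundTripperFromConfig(*cfg, "example")
	if err != nil {
		panic(err)
	}
	_ = rt // plug into an *http.Client as its Transport
	fmt.Println("configured headers:", len(cfg.HTTPHeaders.Headers))
}
```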
@@ -336,17 +362,17 @@ func (c *HTTPClientConfig) Validate() error {
 if (c.BasicAuth != nil || c.OAuth2 != nil) && (len(c.BearerToken) > 0 || len(c.BearerTokenFile) > 0) {
 return fmt.Errorf("at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured")
 }
- if c.BasicAuth != nil && (string(c.BasicAuth.Username) != "" && c.BasicAuth.UsernameFile != "") {
- return fmt.Errorf("at most one of basic_auth username & username_file must be configured")
+ if c.BasicAuth != nil && nonZeroCount(string(c.BasicAuth.Username) != "", c.BasicAuth.UsernameFile != "", c.BasicAuth.UsernameRef != "") > 1 {
+ return fmt.Errorf("at most one of basic_auth username, username_file & username_ref must be configured")
 }
- if c.BasicAuth != nil && (string(c.BasicAuth.Password) != "" && c.BasicAuth.PasswordFile != "") {
- return fmt.Errorf("at most one of basic_auth password & password_file must be configured")
+ if c.BasicAuth != nil && nonZeroCount(string(c.BasicAuth.Password) != "", c.BasicAuth.PasswordFile != "", c.BasicAuth.PasswordRef != "") > 1 {
+ return fmt.Errorf("at most one of basic_auth password, password_file & password_ref must be configured")
 }
 if c.Authorization != nil {
 if len(c.BearerToken) > 0 || len(c.BearerTokenFile) > 0 {
 return fmt.Errorf("authorization is not compatible with bearer_token & bearer_token_file")
 }
- if string(c.Authorization.Credentials) != "" && c.Authorization.CredentialsFile != "" {
- return fmt.Errorf("at most one of authorization credentials & credentials_file must be configured")
+ if nonZeroCount(string(c.Authorization.Credentials) != "", c.Authorization.CredentialsFile != "", c.Authorization.CredentialsRef != "") > 1 {
+ return fmt.Errorf("at most one of authorization credentials, credentials_file & credentials_ref must be configured")
 }
 c.Authorization.Type = strings.TrimSpace(c.Authorization.Type)
@@ -381,13 +407,18 @@ func (c *HTTPClientConfig) Validate() error {
 if len(c.OAuth2.TokenURL) == 0 {
 return fmt.Errorf("oauth2 token_url must be configured")
 }
- if len(c.OAuth2.ClientSecret) > 0 && len(c.OAuth2.ClientSecretFile) > 0 {
- return fmt.Errorf("at most one of oauth2 client_secret & client_secret_file must be configured")
+ if nonZeroCount(len(c.OAuth2.ClientSecret) > 0, len(c.OAuth2.ClientSecretFile) > 0, len(c.OAuth2.ClientSecretRef) > 0) > 1 {
+ return fmt.Errorf("at most one of oauth2 client_secret, client_secret_file & client_secret_ref must be configured")
 }
 }
 if err := c.ProxyConfig.Validate(); err != nil {
 return err
 }
+ if c.HTTPHeaders != nil {
+ if err := c.HTTPHeaders.Validate(); err != nil {
+ return err
+ }
+ }
 return nil
}

@@ -428,50 +459,78 @@ type httpClientOptions struct {
 idleConnTimeout time.Duration
 userAgent string
 host string
+ secretManager SecretManager
}

// HTTPClientOption defines an option that can be applied to the HTTP client.
-type HTTPClientOption func(options *httpClientOptions)
+type HTTPClientOption interface {
+ applyToHTTPClientOptions(options *httpClientOptions)
+}
+
+type httpClientOptionFunc func(options *httpClientOptions)
+
+func (f httpClientOptionFunc) applyToHTTPClientOptions(options *httpClientOptions) {
+ f(options)
+}

// WithDialContextFunc allows you to override func gets used for the actual dialing. The default is `net.Dialer.DialContext`.
func WithDialContextFunc(fn DialContextFunc) HTTPClientOption {
- return func(opts *httpClientOptions) {
+ return httpClientOptionFunc(func(opts *httpClientOptions) {
 opts.dialContextFunc = fn
- }
+ })
}

// WithKeepAlivesDisabled allows to disable HTTP keepalive.
func WithKeepAlivesDisabled() HTTPClientOption { - return func(opts *httpClientOptions) { + return httpClientOptionFunc(func(opts *httpClientOptions) { opts.keepAlivesEnabled = false - } + }) } // WithHTTP2Disabled allows to disable HTTP2. func WithHTTP2Disabled() HTTPClientOption { - return func(opts *httpClientOptions) { + return httpClientOptionFunc(func(opts *httpClientOptions) { opts.http2Enabled = false - } + }) } // WithIdleConnTimeout allows setting the idle connection timeout. func WithIdleConnTimeout(timeout time.Duration) HTTPClientOption { - return func(opts *httpClientOptions) { + return httpClientOptionFunc(func(opts *httpClientOptions) { opts.idleConnTimeout = timeout - } + }) } // WithUserAgent allows setting the user agent. func WithUserAgent(ua string) HTTPClientOption { - return func(opts *httpClientOptions) { + return httpClientOptionFunc(func(opts *httpClientOptions) { opts.userAgent = ua - } + }) } // WithHost allows setting the host header. func WithHost(host string) HTTPClientOption { - return func(opts *httpClientOptions) { + return httpClientOptionFunc(func(opts *httpClientOptions) { opts.host = host + }) +} + +type secretManagerOption struct { + secretManager SecretManager +} + +func (s *secretManagerOption) applyToHTTPClientOptions(opts *httpClientOptions) { + opts.secretManager = s.secretManager +} + +func (s *secretManagerOption) applyToTLSConfigOptions(opts *tlsConfigOptions) { + opts.secretManager = s.secretManager +} + +// WithSecretManager allows setting the secret manager. +func WithSecretManager(manager SecretManager) *secretManagerOption { + return &secretManagerOption{ + secretManager: manager, } } @@ -501,9 +560,16 @@ func NewClientFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HTTPClie // given config.HTTPClientConfig and config.HTTPClientOption. // The name is used as go-conntrack metric label. func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HTTPClientOption) (http.RoundTripper, error) { + return NewRoundTripperFromConfigWithContext(context.Background(), cfg, name, optFuncs...) +} + +// NewRoundTripperFromConfigWithContext returns a new HTTP RoundTripper configured for the +// given config.HTTPClientConfig and config.HTTPClientOption. +// The name is used as go-conntrack metric label. +func NewRoundTripperFromConfigWithContext(ctx context.Context, cfg HTTPClientConfig, name string, optFuncs ...HTTPClientOption) (http.RoundTripper, error) { opts := defaultHTTPClientOptions - for _, f := range optFuncs { - f(&opts) + for _, opt := range optFuncs { + opt.applyToHTTPClientOptions(&opts) } var dialContext func(ctx context.Context, network, addr string) (net.Conn, error) @@ -551,25 +617,45 @@ func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HT // If a authorization_credentials is provided, create a round tripper that will set the // Authorization header correctly on each request. 
- if cfg.Authorization != nil && len(cfg.Authorization.CredentialsFile) > 0 { - rt = NewAuthorizationCredentialsFileRoundTripper(cfg.Authorization.Type, cfg.Authorization.CredentialsFile, rt) - } else if cfg.Authorization != nil { - rt = NewAuthorizationCredentialsRoundTripper(cfg.Authorization.Type, cfg.Authorization.Credentials, rt) + if cfg.Authorization != nil { + credentialsSecret, err := toSecret(opts.secretManager, cfg.Authorization.Credentials, cfg.Authorization.CredentialsFile, cfg.Authorization.CredentialsRef) + if err != nil { + return nil, fmt.Errorf("unable to use credentials: %w", err) + } + rt = NewAuthorizationCredentialsRoundTripper(cfg.Authorization.Type, credentialsSecret, rt) } // Backwards compatibility, be nice with importers who would not have // called Validate(). - if len(cfg.BearerToken) > 0 { - rt = NewAuthorizationCredentialsRoundTripper("Bearer", cfg.BearerToken, rt) - } else if len(cfg.BearerTokenFile) > 0 { - rt = NewAuthorizationCredentialsFileRoundTripper("Bearer", cfg.BearerTokenFile, rt) + if len(cfg.BearerToken) > 0 || len(cfg.BearerTokenFile) > 0 { + bearerSecret, err := toSecret(opts.secretManager, cfg.BearerToken, cfg.BearerTokenFile, "") + if err != nil { + return nil, fmt.Errorf("unable to use bearer token: %w", err) + } + rt = NewAuthorizationCredentialsRoundTripper("Bearer", bearerSecret, rt) } if cfg.BasicAuth != nil { - rt = NewBasicAuthRoundTripper(cfg.BasicAuth.Username, cfg.BasicAuth.Password, cfg.BasicAuth.UsernameFile, cfg.BasicAuth.PasswordFile, rt) + usernameSecret, err := toSecret(opts.secretManager, Secret(cfg.BasicAuth.Username), cfg.BasicAuth.UsernameFile, cfg.BasicAuth.UsernameRef) + if err != nil { + return nil, fmt.Errorf("unable to use username: %w", err) + } + passwordSecret, err := toSecret(opts.secretManager, cfg.BasicAuth.Password, cfg.BasicAuth.PasswordFile, cfg.BasicAuth.PasswordRef) + if err != nil { + return nil, fmt.Errorf("unable to use password: %w", err) + } + rt = NewBasicAuthRoundTripper(usernameSecret, passwordSecret, rt) } if cfg.OAuth2 != nil { - rt = NewOAuth2RoundTripper(cfg.OAuth2, rt, &opts) + clientSecret, err := toSecret(opts.secretManager, cfg.OAuth2.ClientSecret, cfg.OAuth2.ClientSecretFile, cfg.OAuth2.ClientSecretRef) + if err != nil { + return nil, fmt.Errorf("unable to use client secret: %w", err) + } + rt = NewOAuth2RoundTripper(clientSecret, cfg.OAuth2, rt, &opts) + } + + if cfg.HTTPHeaders != nil { + rt = NewHeadersRoundTripper(cfg.HTTPHeaders, rt) } if opts.userAgent != "" { @@ -584,115 +670,187 @@ func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HT return rt, nil } - tlsConfig, err := NewTLSConfig(&cfg.TLSConfig) + tlsConfig, err := NewTLSConfig(&cfg.TLSConfig, WithSecretManager(opts.secretManager)) if err != nil { return nil, err } - if len(cfg.TLSConfig.CAFile) == 0 { + tlsSettings, err := cfg.TLSConfig.roundTripperSettings(opts.secretManager) + if err != nil { + return nil, err + } + if tlsSettings.CA == nil || tlsSettings.CA.Immutable() { // No need for a RoundTripper that reloads the CA file automatically. return newRT(tlsConfig) } - return NewTLSRoundTripper(tlsConfig, cfg.TLSConfig.roundTripperSettings(), newRT) + return NewTLSRoundTripperWithContext(ctx, tlsConfig, tlsSettings, newRT) } -type authorizationCredentialsRoundTripper struct { - authType string - authCredentials Secret - rt http.RoundTripper +// SecretManager manages secret data mapped to names known as "references" or "refs". 
+type SecretManager interface { + // Fetch returns the secret data given a secret name indicated by `secretRef`. + Fetch(ctx context.Context, secretRef string) (string, error) } -// NewAuthorizationCredentialsRoundTripper adds the provided credentials to a -// request unless the authorization header has already been set. -func NewAuthorizationCredentialsRoundTripper(authType string, authCredentials Secret, rt http.RoundTripper) http.RoundTripper { - return &authorizationCredentialsRoundTripper{authType, authCredentials, rt} +type SecretReader interface { + Fetch(ctx context.Context) (string, error) + Description() string + Immutable() bool } -func (rt *authorizationCredentialsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - if len(req.Header.Get("Authorization")) == 0 { - req = cloneRequest(req) - req.Header.Set("Authorization", fmt.Sprintf("%s %s", rt.authType, string(rt.authCredentials))) +type InlineSecret struct { + text string +} + +func NewInlineSecret(text string) *InlineSecret { + return &InlineSecret{text: text} +} + +func (s *InlineSecret) Fetch(context.Context) (string, error) { + return s.text, nil +} + +func (s *InlineSecret) Description() string { + return "inline" +} + +func (s *InlineSecret) Immutable() bool { + return true +} + +type FileSecret struct { + file string +} + +func NewFileSecret(file string) *FileSecret { + return &FileSecret{file: file} +} + +func (s *FileSecret) Fetch(ctx context.Context) (string, error) { + fileBytes, err := os.ReadFile(s.file) + if err != nil { + return "", fmt.Errorf("unable to read file %s: %w", s.file, err) } - return rt.rt.RoundTrip(req) + return strings.TrimSpace(string(fileBytes)), nil } -func (rt *authorizationCredentialsRoundTripper) CloseIdleConnections() { - if ci, ok := rt.rt.(closeIdler); ok { - ci.CloseIdleConnections() +func (s *FileSecret) Description() string { + return fmt.Sprintf("file %s", s.file) +} + +func (s *FileSecret) Immutable() bool { + return false +} + +// refSecret fetches a single secret from a SecretManager. +type refSecret struct { + ref string + manager SecretManager // manager is expected to be not nil. +} + +func (s *refSecret) Fetch(ctx context.Context) (string, error) { + return s.manager.Fetch(ctx, s.ref) +} + +func (s *refSecret) Description() string { + return fmt.Sprintf("ref %s", s.ref) +} + +func (s *refSecret) Immutable() bool { + return false +} + +// toSecret returns a SecretReader from one of the given sources, assuming exactly +// one or none of the sources are provided. +func toSecret(secretManager SecretManager, text Secret, file, ref string) (SecretReader, error) { + if text != "" { + return NewInlineSecret(string(text)), nil + } + if file != "" { + return NewFileSecret(file), nil } + if ref != "" { + if secretManager == nil { + return nil, errors.New("cannot use secret ref without manager") + } + return &refSecret{ + ref: ref, + manager: secretManager, + }, nil + } + return nil, nil } -type authorizationCredentialsFileRoundTripper struct { - authType string - authCredentialsFile string - rt http.RoundTripper +type authorizationCredentialsRoundTripper struct { + authType string + authCredentials SecretReader + rt http.RoundTripper } -// NewAuthorizationCredentialsFileRoundTripper adds the authorization -// credentials read from the provided file to a request unless the authorization -// header has already been set. This file is read for every request. 
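The SecretReader implementations above all funnel through toSecret, and the new *_ref config fields resolve through a user-supplied SecretManager. Here is a hedged sketch of wiring one in; mapSecretManager and the ref name are invented.

```go
package main

import (
	"context"
	"fmt"

	"github.com/prometheus/common/config"
)

// mapSecretManager serves secrets from memory; a real implementation would
// talk to Vault, Kubernetes, or a cloud secret store.
type mapSecretManager map[string]string

func (m mapSecretManager) Fetch(_ context.Context, ref string) (string, error) {
	s, ok := m[ref]
	if !ok {
		return "", fmt.Errorf("unknown secret ref %q", ref)
	}
	return s, nil
}

func main() {
	cfg := config.HTTPClientConfig{
		Authorization: &config.Authorization{
			Type:           "Bearer",
			CredentialsRef: "api-token", // resolved via the manager on each request
		},
	}
	rt, err := config.NewRoundTripperFromConfig(cfg, "example",
		config.WithSecretManager(mapSecretManager{"api-token": "s3cr3t"}))
	if err != nil {
		panic(err)
	}
	_ = rt // use as the Transport of an *http.Client
}
```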
-func NewAuthorizationCredentialsFileRoundTripper(authType, authCredentialsFile string, rt http.RoundTripper) http.RoundTripper { - return &authorizationCredentialsFileRoundTripper{authType, authCredentialsFile, rt} +// NewAuthorizationCredentialsRoundTripper adds the authorization credentials +// read from the provided SecretReader to a request unless the authorization header +// has already been set. +func NewAuthorizationCredentialsRoundTripper(authType string, authCredentials SecretReader, rt http.RoundTripper) http.RoundTripper { + return &authorizationCredentialsRoundTripper{authType, authCredentials, rt} } -func (rt *authorizationCredentialsFileRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - if len(req.Header.Get("Authorization")) == 0 { - b, err := os.ReadFile(rt.authCredentialsFile) +func (rt *authorizationCredentialsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if len(req.Header.Get("Authorization")) != 0 { + return rt.rt.RoundTrip(req) + } + + var authCredentials string + if rt.authCredentials != nil { + var err error + authCredentials, err = rt.authCredentials.Fetch(req.Context()) if err != nil { - return nil, fmt.Errorf("unable to read authorization credentials file %s: %w", rt.authCredentialsFile, err) + return nil, fmt.Errorf("unable to read authorization credentials: %w", err) } - authCredentials := strings.TrimSpace(string(b)) - - req = cloneRequest(req) - req.Header.Set("Authorization", fmt.Sprintf("%s %s", rt.authType, authCredentials)) } + req = cloneRequest(req) + req.Header.Set("Authorization", fmt.Sprintf("%s %s", rt.authType, authCredentials)) + return rt.rt.RoundTrip(req) } -func (rt *authorizationCredentialsFileRoundTripper) CloseIdleConnections() { +func (rt *authorizationCredentialsRoundTripper) CloseIdleConnections() { if ci, ok := rt.rt.(closeIdler); ok { ci.CloseIdleConnections() } } type basicAuthRoundTripper struct { - username string - password Secret - usernameFile string - passwordFile string - rt http.RoundTripper + username SecretReader + password SecretReader + rt http.RoundTripper } // NewBasicAuthRoundTripper will apply a BASIC auth authorization header to a request unless it has // already been set. 
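For the basic-auth constructor below, a short hedged usage sketch with the new SecretReader arguments; the file path is a made-up example.

```go
package main

import (
	"net/http"

	"github.com/prometheus/common/config"
)

func main() {
	// An inline literal for the username; the password is re-read from a
	// (made-up) file path on every request, since file secrets are mutable.
	username := config.NewInlineSecret("admin")
	password := config.NewFileSecret("/etc/app/password")

	rt := config.NewBasicAuthRoundTripper(username, password, http.DefaultTransport)
	client := &http.Client{Transport: rt}
	_ = client // requests without an Authorization header now get basic auth
}
```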
-func NewBasicAuthRoundTripper(username string, password Secret, usernameFile, passwordFile string, rt http.RoundTripper) http.RoundTripper { - return &basicAuthRoundTripper{username, password, usernameFile, passwordFile, rt} +func NewBasicAuthRoundTripper(username SecretReader, password SecretReader, rt http.RoundTripper) http.RoundTripper { + return &basicAuthRoundTripper{username, password, rt} } func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - var username string - var password string if len(req.Header.Get("Authorization")) != 0 { return rt.rt.RoundTrip(req) } - if rt.usernameFile != "" { - usernameBytes, err := os.ReadFile(rt.usernameFile) + var username string + var password string + if rt.username != nil { + var err error + username, err = rt.username.Fetch(req.Context()) if err != nil { - return nil, fmt.Errorf("unable to read basic auth username file %s: %w", rt.usernameFile, err) + return nil, fmt.Errorf("unable to read basic auth username: %w", err) } - username = strings.TrimSpace(string(usernameBytes)) - } else { - username = rt.username } - if rt.passwordFile != "" { - passwordBytes, err := os.ReadFile(rt.passwordFile) + if rt.password != nil { + var err error + password, err = rt.password.Fetch(req.Context()) if err != nil { - return nil, fmt.Errorf("unable to read basic auth password file %s: %w", rt.passwordFile, err) + return nil, fmt.Errorf("unable to read basic auth password: %w", err) } - password = strings.TrimSpace(string(passwordBytes)) - } else { - password = string(rt.password) } req = cloneRequest(req) req.SetBasicAuth(username, password) @@ -706,104 +864,118 @@ func (rt *basicAuthRoundTripper) CloseIdleConnections() { } type oauth2RoundTripper struct { - config *OAuth2 - rt http.RoundTripper - next http.RoundTripper - secret string - mtx sync.RWMutex - opts *httpClientOptions - client *http.Client + mtx sync.RWMutex + lastRT *oauth2.Transport + lastSecret string + + // Required for interaction with Oauth2 server. + config *OAuth2 + clientSecret SecretReader + opts *httpClientOptions + client *http.Client } -func NewOAuth2RoundTripper(config *OAuth2, next http.RoundTripper, opts *httpClientOptions) http.RoundTripper { +func NewOAuth2RoundTripper(clientSecret SecretReader, config *OAuth2, next http.RoundTripper, opts *httpClientOptions) http.RoundTripper { + if clientSecret == nil { + clientSecret = NewInlineSecret("") + } + return &oauth2RoundTripper{ config: config, - next: next, - opts: opts, + // A correct tokenSource will be added later on. 
+ lastRT: &oauth2.Transport{Base: next}, + opts: opts, + clientSecret: clientSecret, } } -func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - var ( - secret string - changed bool - ) +func (rt *oauth2RoundTripper) newOauth2TokenSource(req *http.Request, secret string) (client *http.Client, source oauth2.TokenSource, err error) { + tlsConfig, err := NewTLSConfig(&rt.config.TLSConfig, WithSecretManager(rt.opts.secretManager)) + if err != nil { + return nil, nil, err + } + + tlsTransport := func(tlsConfig *tls.Config) (http.RoundTripper, error) { + return &http.Transport{ + TLSClientConfig: tlsConfig, + Proxy: rt.config.ProxyConfig.Proxy(), + ProxyConnectHeader: rt.config.ProxyConfig.GetProxyConnectHeader(), + DisableKeepAlives: !rt.opts.keepAlivesEnabled, + MaxIdleConns: 20, + MaxIdleConnsPerHost: 1, // see https://github.com/golang/go/issues/13801 + IdleConnTimeout: 10 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + }, nil + } - if rt.config.ClientSecretFile != "" { - data, err := os.ReadFile(rt.config.ClientSecretFile) + var t http.RoundTripper + tlsSettings, err := rt.config.TLSConfig.roundTripperSettings(rt.opts.secretManager) + if err != nil { + return nil, nil, err + } + if tlsSettings.CA == nil || tlsSettings.CA.Immutable() { + t, _ = tlsTransport(tlsConfig) + } else { + t, err = NewTLSRoundTripperWithContext(req.Context(), tlsConfig, tlsSettings, tlsTransport) if err != nil { - return nil, fmt.Errorf("unable to read oauth2 client secret file %s: %w", rt.config.ClientSecretFile, err) + return nil, nil, err } - secret = strings.TrimSpace(string(data)) - rt.mtx.RLock() - changed = secret != rt.secret - rt.mtx.RUnlock() - } else { - // Either an inline secret or nothing (use an empty string) was provided. 
- secret = string(rt.config.ClientSecret)
 }
- if changed || rt.rt == nil {
- config := &clientcredentials.Config{
- ClientID: rt.config.ClientID,
- ClientSecret: secret,
- Scopes: rt.config.Scopes,
- TokenURL: rt.config.TokenURL,
- EndpointParams: mapToValues(rt.config.EndpointParams),
- }
+ if ua := req.UserAgent(); ua != "" {
+ t = NewUserAgentRoundTripper(ua, t)
+ }

- tlsConfig, err := NewTLSConfig(&rt.config.TLSConfig)
- if err != nil {
- return nil, err
- }
+ config := &clientcredentials.Config{
+ ClientID: rt.config.ClientID,
+ ClientSecret: secret,
+ Scopes: rt.config.Scopes,
+ TokenURL: rt.config.TokenURL,
+ EndpointParams: mapToValues(rt.config.EndpointParams),
+ }
+ client = &http.Client{Transport: t}
+ ctx := context.WithValue(context.Background(), oauth2.HTTPClient, client)
+ return client, config.TokenSource(ctx), nil
+}

- tlsTransport := func(tlsConfig *tls.Config) (http.RoundTripper, error) {
- return &http.Transport{
- TLSClientConfig: tlsConfig,
- Proxy: rt.config.ProxyConfig.Proxy(),
- ProxyConnectHeader: rt.config.ProxyConfig.GetProxyConnectHeader(),
- DisableKeepAlives: !rt.opts.keepAlivesEnabled,
- MaxIdleConns: 20,
- MaxIdleConnsPerHost: 1, // see https://github.com/golang/go/issues/13801
- IdleConnTimeout: 10 * time.Second,
- TLSHandshakeTimeout: 10 * time.Second,
- ExpectContinueTimeout: 1 * time.Second,
- }, nil
- }
+func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ var (
+ secret string
+ needsInit bool
+ )

- var t http.RoundTripper
- if len(rt.config.TLSConfig.CAFile) == 0 {
- t, _ = tlsTransport(tlsConfig)
- } else {
- t, err = NewTLSRoundTripper(tlsConfig, rt.config.TLSConfig.roundTripperSettings(), tlsTransport)
+ rt.mtx.RLock()
+ secret = rt.lastSecret
+ needsInit = rt.lastRT.Source == nil
+ rt.mtx.RUnlock()
+
+ // Fetch the secret on the first run, and on every run if the secret can change.
+ if !rt.clientSecret.Immutable() || needsInit {
+ newSecret, err := rt.clientSecret.Fetch(req.Context())
+ if err != nil {
+ return nil, fmt.Errorf("unable to read oauth2 client secret: %w", err)
+ }
+ if newSecret != secret || needsInit {
+ // Secret changed or it's a first run. Rebuild the oauth2 setup.
+ client, source, err := rt.newOauth2TokenSource(req, newSecret)
 if err != nil {
 return nil, err
 }
- }
-
- if ua := req.UserAgent(); ua != "" {
- t = NewUserAgentRoundTripper(ua, t)
- }

- client := &http.Client{Transport: t}
- ctx := context.WithValue(context.Background(), oauth2.HTTPClient, client)
- tokenSource := config.TokenSource(ctx)
-
- rt.mtx.Lock()
- rt.secret = secret
- rt.rt = &oauth2.Transport{
- Base: rt.next,
- Source: tokenSource,
- }
- if rt.client != nil {
- rt.client.CloseIdleConnections()
+ rt.mtx.Lock()
+ rt.lastSecret = newSecret
+ rt.lastRT.Source = source
+ if rt.client != nil {
+ rt.client.CloseIdleConnections()
+ }
+ rt.client = client
+ rt.mtx.Unlock()
 }
- rt.client = client
- rt.mtx.Unlock()
 }

 rt.mtx.RLock()
- currentRT := rt.rt
+ currentRT := rt.lastRT
 rt.mtx.RUnlock()
 return currentRT.RoundTrip(req)
}
@@ -812,7 +984,7 @@ func (rt *oauth2RoundTripper) CloseIdleConnections() {
 if rt.client != nil {
 rt.client.CloseIdleConnections()
 }
- if ci, ok := rt.next.(closeIdler); ok {
+ if ci, ok := rt.lastRT.Base.(closeIdler); ok {
 ci.CloseIdleConnections()
 }
}
@@ -840,8 +1012,27 @@ func cloneRequest(r *http.Request) *http.Request {
 return r2
}

+type tlsConfigOptions struct {
+ secretManager SecretManager
+}
+
+// TLSConfigOption defines an option that can be applied to the TLS configuration.
+type TLSConfigOption interface { + applyToTLSConfigOptions(options *tlsConfigOptions) +} + // NewTLSConfig creates a new tls.Config from the given TLSConfig. -func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) { +func NewTLSConfig(cfg *TLSConfig, optFuncs ...TLSConfigOption) (*tls.Config, error) { + return NewTLSConfigWithContext(context.Background(), cfg, optFuncs...) +} + +// NewTLSConfigWithContext creates a new tls.Config from the given TLSConfig. +func NewTLSConfigWithContext(ctx context.Context, cfg *TLSConfig, optFuncs ...TLSConfigOption) (*tls.Config, error) { + opts := tlsConfigOptions{} + for _, opt := range optFuncs { + opt.applyToTLSConfigOptions(&opts) + } + if err := cfg.Validate(); err != nil { return nil, err } @@ -860,17 +1051,17 @@ func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) { // If a CA cert is provided then let's read it in so we can validate the // scrape target's certificate properly. - if len(cfg.CA) > 0 { - if !updateRootCA(tlsConfig, []byte(cfg.CA)) { - return nil, fmt.Errorf("unable to use inline CA cert") - } - } else if len(cfg.CAFile) > 0 { - b, err := readCAFile(cfg.CAFile) + caSecret, err := toSecret(opts.secretManager, Secret(cfg.CA), cfg.CAFile, cfg.CARef) + if err != nil { + return nil, fmt.Errorf("unable to use CA cert: %w", err) + } + if caSecret != nil { + ca, err := caSecret.Fetch(ctx) if err != nil { - return nil, err + return nil, fmt.Errorf("unable to read CA cert: %w", err) } - if !updateRootCA(tlsConfig, b) { - return nil, fmt.Errorf("unable to use specified CA cert %s", cfg.CAFile) + if !updateRootCA(tlsConfig, []byte(ca)) { + return nil, fmt.Errorf("unable to use specified CA cert %s", caSecret.Description()) } } @@ -881,10 +1072,16 @@ func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) { // If a client cert & key is provided then configure TLS config accordingly. if cfg.usingClientCert() && cfg.usingClientKey() { // Verify that client cert and key are valid. - if _, err := cfg.getClientCertificate(nil); err != nil { + if _, err := cfg.getClientCertificate(ctx, opts.secretManager); err != nil { return nil, err } - tlsConfig.GetClientCertificate = cfg.getClientCertificate + tlsConfig.GetClientCertificate = func(cri *tls.CertificateRequestInfo) (*tls.Certificate, error) { + var ctx context.Context + if cri != nil { + ctx = cri.Context() + } + return cfg.getClientCertificate(ctx, opts.secretManager) + } } return tlsConfig, nil @@ -904,6 +1101,15 @@ type TLSConfig struct { CertFile string `yaml:"cert_file,omitempty" json:"cert_file,omitempty"` // The client key file for the targets. KeyFile string `yaml:"key_file,omitempty" json:"key_file,omitempty"` + // CARef is the name of the secret within the secret manager to use as the CA cert for the + // targets. + CARef string `yaml:"ca_ref,omitempty" json:"ca_ref,omitempty"` + // CertRef is the name of the secret within the secret manager to use as the client cert for + // the targets. + CertRef string `yaml:"cert_ref,omitempty" json:"cert_ref,omitempty"` + // KeyRef is the name of the secret within the secret manager to use as the client key for + // the targets. + KeyRef string `yaml:"key_ref,omitempty" json:"key_ref,omitempty"` // Used to verify the hostname for the targets. ServerName string `yaml:"server_name,omitempty" json:"server_name,omitempty"` // Disable target certificate validation. @@ -937,13 +1143,13 @@ func (c *TLSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { // file-based fields for the TLS CA, client certificate, and client key are // used. 
func (c *TLSConfig) Validate() error {
- if len(c.CA) > 0 && len(c.CAFile) > 0 {
- return fmt.Errorf("at most one of ca and ca_file must be configured")
+ if nonZeroCount(len(c.CA) > 0, len(c.CAFile) > 0, len(c.CARef) > 0) > 1 {
+ return fmt.Errorf("at most one of ca, ca_file & ca_ref must be configured")
 }
- if len(c.Cert) > 0 && len(c.CertFile) > 0 {
- return fmt.Errorf("at most one of cert and cert_file must be configured")
+ if nonZeroCount(len(c.Cert) > 0, len(c.CertFile) > 0, len(c.CertRef) > 0) > 1 {
+ return fmt.Errorf("at most one of cert, cert_file & cert_ref must be configured")
 }
- if len(c.Key) > 0 && len(c.KeyFile) > 0 {
- return fmt.Errorf("at most one of key and key_file must be configured")
+ if nonZeroCount(len(c.Key) > 0, len(c.KeyFile) > 0, len(c.KeyRef) > 0) > 1 {
+ return fmt.Errorf("at most one of key, key_file & key_ref must be configured")
 }
@@ -957,66 +1163,70 @@ func (c *TLSConfig) Validate() error {
}

func (c *TLSConfig) usingClientCert() bool {
- return len(c.Cert) > 0 || len(c.CertFile) > 0
+ return len(c.Cert) > 0 || len(c.CertFile) > 0 || len(c.CertRef) > 0
}

func (c *TLSConfig) usingClientKey() bool {
- return len(c.Key) > 0 || len(c.KeyFile) > 0
+ return len(c.Key) > 0 || len(c.KeyFile) > 0 || len(c.KeyRef) > 0
}

-func (c *TLSConfig) roundTripperSettings() TLSRoundTripperSettings {
- return TLSRoundTripperSettings{
- CA: c.CA,
- CAFile: c.CAFile,
- Cert: c.Cert,
- CertFile: c.CertFile,
- Key: string(c.Key),
- KeyFile: c.KeyFile,
+func (c *TLSConfig) roundTripperSettings(secretManager SecretManager) (TLSRoundTripperSettings, error) {
+ ca, err := toSecret(secretManager, Secret(c.CA), c.CAFile, c.CARef)
+ if err != nil {
+ return TLSRoundTripperSettings{}, err
+ }
+ cert, err := toSecret(secretManager, Secret(c.Cert), c.CertFile, c.CertRef)
+ if err != nil {
+ return TLSRoundTripperSettings{}, err
+ }
+ key, err := toSecret(secretManager, c.Key, c.KeyFile, c.KeyRef)
+ if err != nil {
+ return TLSRoundTripperSettings{}, err
 }
+ return TLSRoundTripperSettings{
+ CA: ca,
+ Cert: cert,
+ Key: key,
+ }, nil
}

-// getClientCertificate reads the pair of client cert and key from disk and returns a tls.Certificate.
-func (c *TLSConfig) getClientCertificate(_ *tls.CertificateRequestInfo) (*tls.Certificate, error) {
+// getClientCertificate reads the pair of client cert and key and returns a tls.Certificate.
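To see the new ca_ref plumbing end to end, here is a hedged sketch that feeds a CA certificate through a secret manager. The manager type is invented, and a throwaway self-signed certificate stands in for a real CA.

```go
package main

import (
	"context"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"math/big"
	"time"

	"github.com/prometheus/common/config"
)

// pemSecretManager is a stand-in for a real secret store.
type pemSecretManager map[string]string

func (m pemSecretManager) Fetch(_ context.Context, ref string) (string, error) {
	s, ok := m[ref]
	if !ok {
		return "", fmt.Errorf("unknown secret ref %q", ref)
	}
	return s, nil
}

func main() {
	// Generate a throwaway self-signed certificate so the sketch is runnable.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(time.Hour),
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	caPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})

	// ca_ref resolves through the manager instead of an inline value or file.
	tlsCfg := &config.TLSConfig{CARef: "ca-bundle"}
	c, err := config.NewTLSConfig(tlsCfg,
		config.WithSecretManager(pemSecretManager{"ca-bundle": string(caPEM)}))
	if err != nil {
		panic(err)
	}
	fmt.Println("root CAs configured:", c.RootCAs != nil)
}
```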
+func (c *TLSConfig) getClientCertificate(ctx context.Context, secretManager SecretManager) (*tls.Certificate, error) { var ( - certData, keyData []byte + certData, keyData string err error ) - if c.CertFile != "" { - certData, err = os.ReadFile(c.CertFile) + certSecret, err := toSecret(secretManager, Secret(c.Cert), c.CertFile, c.CertRef) + if err != nil { + return nil, fmt.Errorf("unable to use client cert: %w", err) + } + if certSecret != nil { + certData, err = certSecret.Fetch(ctx) if err != nil { - return nil, fmt.Errorf("unable to read specified client cert (%s): %w", c.CertFile, err) + return nil, fmt.Errorf("unable to read specified client cert: %w", err) } - } else { - certData = []byte(c.Cert) } - if c.KeyFile != "" { - keyData, err = os.ReadFile(c.KeyFile) + keySecret, err := toSecret(secretManager, Secret(c.Key), c.KeyFile, c.KeyRef) + if err != nil { + return nil, fmt.Errorf("unable to use client key: %w", err) + } + if keySecret != nil { + keyData, err = keySecret.Fetch(ctx) if err != nil { - return nil, fmt.Errorf("unable to read specified client key (%s): %w", c.KeyFile, err) + return nil, fmt.Errorf("unable to read specified client key: %w", err) } - } else { - keyData = []byte(c.Key) } - cert, err := tls.X509KeyPair(certData, keyData) + cert, err := tls.X509KeyPair([]byte(certData), []byte(keyData)) if err != nil { - return nil, fmt.Errorf("unable to use specified client cert (%s) & key (%s): %w", c.CertFile, c.KeyFile, err) + return nil, fmt.Errorf("unable to use specified client cert (%s) & key (%s): %w", certSecret.Description(), keySecret.Description(), err) } return &cert, nil } -// readCAFile reads the CA cert file from disk. -func readCAFile(f string) ([]byte, error) { - data, err := os.ReadFile(f) - if err != nil { - return nil, fmt.Errorf("unable to load specified CA cert %s: %w", f, err) - } - return data, nil -} - // updateRootCA parses the given byte slice as a series of PEM encoded certificates and updates tls.Config.RootCAs. 
func updateRootCA(cfg *tls.Config, b []byte) bool { caCertPool := x509.NewCertPool() @@ -1044,15 +1254,24 @@ type tlsRoundTripper struct { } type TLSRoundTripperSettings struct { - CA, CAFile string - Cert, CertFile string - Key, KeyFile string + CA SecretReader + Cert SecretReader + Key SecretReader } func NewTLSRoundTripper( cfg *tls.Config, settings TLSRoundTripperSettings, newRT func(*tls.Config) (http.RoundTripper, error), +) (http.RoundTripper, error) { + return NewTLSRoundTripperWithContext(context.Background(), cfg, settings, newRT) +} + +func NewTLSRoundTripperWithContext( + ctx context.Context, + cfg *tls.Config, + settings TLSRoundTripperSettings, + newRT func(*tls.Config) (http.RoundTripper, error), ) (http.RoundTripper, error) { t := &tlsRoundTripper{ settings: settings, @@ -1065,7 +1284,7 @@ func NewTLSRoundTripper( return nil, err } t.rt = rt - _, t.hashCAData, t.hashCertData, t.hashKeyData, err = t.getTLSDataWithHash() + _, t.hashCAData, t.hashCertData, t.hashKeyData, err = t.getTLSDataWithHash(ctx) if err != nil { return nil, err } @@ -1073,38 +1292,31 @@ func NewTLSRoundTripper( return t, nil } -func (t *tlsRoundTripper) getTLSDataWithHash() ([]byte, []byte, []byte, []byte, error) { - var ( - caBytes, certBytes, keyBytes []byte - - err error - ) +func (t *tlsRoundTripper) getTLSDataWithHash(ctx context.Context) ([]byte, []byte, []byte, []byte, error) { + var caBytes, certBytes, keyBytes []byte - if t.settings.CAFile != "" { - caBytes, err = os.ReadFile(t.settings.CAFile) + if t.settings.CA != nil { + ca, err := t.settings.CA.Fetch(ctx) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, fmt.Errorf("unable to read CA cert: %w", err) } - } else if t.settings.CA != "" { - caBytes = []byte(t.settings.CA) + caBytes = []byte(ca) } - if t.settings.CertFile != "" { - certBytes, err = os.ReadFile(t.settings.CertFile) + if t.settings.Cert != nil { + cert, err := t.settings.Cert.Fetch(ctx) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, fmt.Errorf("unable to read client cert: %w", err) } - } else if t.settings.Cert != "" { - certBytes = []byte(t.settings.Cert) + certBytes = []byte(cert) } - if t.settings.KeyFile != "" { - keyBytes, err = os.ReadFile(t.settings.KeyFile) + if t.settings.Key != nil { + key, err := t.settings.Key.Fetch(ctx) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, fmt.Errorf("unable to read client key: %w", err) } - } else if t.settings.Key != "" { - keyBytes = []byte(t.settings.Key) + keyBytes = []byte(key) } var caHash, certHash, keyHash [32]byte @@ -1124,7 +1336,7 @@ func (t *tlsRoundTripper) getTLSDataWithHash() ([]byte, []byte, []byte, []byte, // RoundTrip implements the http.RoundTrip interface. func (t *tlsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - caData, caHash, certHash, keyHash, err := t.getTLSDataWithHash() + caData, caHash, certHash, keyHash, err := t.getTLSDataWithHash(req.Context()) if err != nil { return nil, err } @@ -1145,7 +1357,7 @@ func (t *tlsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { // using GetClientCertificate. tlsConfig := t.tlsConfig.Clone() if !updateRootCA(tlsConfig, caData) { - return nil, fmt.Errorf("unable to use specified CA cert %s", t.settings.CAFile) + return nil, fmt.Errorf("unable to use specified CA cert %s", t.settings.CA.Description()) } rt, err = t.newRT(tlsConfig) if err != nil { @@ -1236,7 +1448,7 @@ type ProxyConfig struct { // proxies during CONNECT requests. 
Assume that at least _some_ of // these headers are going to contain secrets and use Secret as the // value type instead of string. - ProxyConnectHeader Header `yaml:"proxy_connect_header,omitempty" json:"proxy_connect_header,omitempty"` + ProxyConnectHeader ProxyHeader `yaml:"proxy_connect_header,omitempty" json:"proxy_connect_header,omitempty"` proxyFunc func(*http.Request) (*url.URL, error) } diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index 7f6cbe7d298..ff5ef7a9d92 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -21,9 +21,10 @@ import ( "google.golang.org/protobuf/encoding/protodelim" "google.golang.org/protobuf/encoding/prototext" - "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" "github.com/prometheus/common/model" + "github.com/munnerz/goautoneg" + dto "github.com/prometheus/client_model/go" ) diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt deleted file mode 100644 index 7723656d58d..00000000000 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt +++ /dev/null @@ -1,67 +0,0 @@ -PACKAGE - -package goautoneg -import "bitbucket.org/ww/goautoneg" - -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -FUNCTIONS - -func Negotiate(header string, alternatives []string) (content_type string) -Negotiate the most appropriate content_type given the accept header -and a list of alternatives. 
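The expfmt hunk above swaps the long-vendored bitbucket.org/ww/goautoneg copy (whose deletion follows) for github.com/munnerz/goautoneg. A minimal usage sketch of the Negotiate helper, under the assumption that the replacement module keeps the signature documented here:

package main

import (
	"fmt"

	"github.com/munnerz/goautoneg"
)

func main() {
	// The client prefers HTML at q=0.9; application/json carries the
	// implicit default q=1.0, so it sorts first among matching clauses.
	header := "text/html;q=0.9, application/json"
	ct := goautoneg.Negotiate(header, []string{"text/html", "application/json"})
	fmt.Println(ct) // application/json
}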
- -func ParseAccept(header string) (accept []Accept) -Parse an Accept Header string returning a sorted list -of clauses - - -TYPES - -type Accept struct { - Type, SubType string - Q float32 - Params map[string]string -} -Structure to represent a clause in an HTTP Accept Header - - -SUBDIRECTORIES - - .hg diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go deleted file mode 100644 index a21b9d15dd8..00000000000 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ /dev/null @@ -1,160 +0,0 @@ -/* -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ -package goautoneg - -import ( - "sort" - "strconv" - "strings" -) - -// Structure to represent a clause in an HTTP Accept Header -type Accept struct { - Type, SubType string - Q float64 - Params map[string]string -} - -// For internal use, so that we can use the sort interface -type accept_slice []Accept - -func (accept accept_slice) Len() int { - slice := []Accept(accept) - return len(slice) -} - -func (accept accept_slice) Less(i, j int) bool { - slice := []Accept(accept) - ai, aj := slice[i], slice[j] - if ai.Q > aj.Q { - return true - } - if ai.Type != "*" && aj.Type == "*" { - return true - } - if ai.SubType != "*" && aj.SubType == "*" { - return true - } - return false -} - -func (accept accept_slice) Swap(i, j int) { - slice := []Accept(accept) - slice[i], slice[j] = slice[j], slice[i] -} - -// Parse an Accept Header string returning a sorted list -// of clauses -func ParseAccept(header string) (accept []Accept) { - parts := strings.Split(header, ",") - accept = make([]Accept, 0, len(parts)) - for _, part := range parts { - part := strings.Trim(part, " ") - - a := Accept{} - a.Params = make(map[string]string) - a.Q = 1.0 - - mrp := strings.Split(part, ";") - - media_range := mrp[0] - sp := strings.Split(media_range, "/") - a.Type = strings.Trim(sp[0], " ") - - switch { - case len(sp) == 1 && a.Type == "*": - a.SubType = "*" - case len(sp) == 2: - a.SubType = strings.Trim(sp[1], " ") - default: - continue - } - - if len(mrp) == 1 { - accept = append(accept, a) - continue - } - - for _, param := range mrp[1:] { - sp := strings.SplitN(param, "=", 2) - if len(sp) != 2 { - continue - } - token := strings.Trim(sp[0], " ") - if token == "q" { - a.Q, _ = strconv.ParseFloat(sp[1], 32) - } else { - a.Params[token] = strings.Trim(sp[1], " ") - } - } - - accept = append(accept, a) - } - - slice := accept_slice(accept) - sort.Sort(slice) - - return -} - -// Negotiate the most appropriate content_type given the accept header -// and a list of alternatives. 
-func Negotiate(header string, alternatives []string) (content_type string) { - asp := make([][]string, 0, len(alternatives)) - for _, ctype := range alternatives { - asp = append(asp, strings.SplitN(ctype, "/", 2)) - } - for _, clause := range ParseAccept(header) { - for i, ctsp := range asp { - if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { - content_type = alternatives[i] - return - } - if clause.Type == ctsp[0] && clause.SubType == "*" { - content_type = alternatives[i] - return - } - if clause.Type == "*" && clause.SubType == "*" { - content_type = alternatives[i] - return - } - } - } - return -} diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml index c24864a9273..126df9e67ac 100644 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -1,9 +1,16 @@ --- linters: enable: + - errcheck - godot + - gosimple + - govet + - ineffassign - misspell - revive + - staticcheck + - testifylint + - unused linter-settings: godot: diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 0e9ace29b40..16172923506 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.56.2 +GOLANGCI_LINT_VERSION ?= v1.59.0 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go deleted file mode 100644 index a77b4dbb762..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go +++ /dev/null @@ -1,92 +0,0 @@ -//go:build aix && !cgo -// +build aix,!cgo - -package cpu - -import ( - "context" - "strconv" - "strings" - - "github.com/shirou/gopsutil/v3/internal/common" -) - -func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { - if percpu { - return []TimesStat{}, common.ErrNotImplementedError - } else { - out, err := invoke.CommandWithContext(ctx, "sar", "-u", "10", "1") - if err != nil { - return nil, err - } - lines := strings.Split(string(out), "\n") - if len(lines) < 5 { - return []TimesStat{}, common.ErrNotImplementedError - } - - ret := TimesStat{CPU: "cpu-total"} - h := strings.Fields(lines[len(lines)-3]) // headers - v := strings.Fields(lines[len(lines)-2]) // values - for i, header := range h { - if t, err := strconv.ParseFloat(v[i], 64); err == nil { - switch header { - case `%usr`: - ret.User = t - case `%sys`: - ret.System = t - case `%wio`: - ret.Iowait = t - case `%idle`: - ret.Idle = t - } - } - } - - return []TimesStat{ret}, nil - } -} - -func InfoWithContext(ctx context.Context) ([]InfoStat, error) { - out, err := invoke.CommandWithContext(ctx, "prtconf") - if err != nil { - return nil, err - } - - ret := InfoStat{} - for _, line := range strings.Split(string(out), "\n") { - if strings.HasPrefix(line, "Number Of Processors:") { - p := strings.Fields(line) - if len(p) > 3 { - if t, err := strconv.ParseUint(p[3], 10, 64); err == nil { - ret.Cores = int32(t) - } - } - } else if strings.HasPrefix(line, "Processor Clock Speed:") { - p 
:= strings.Fields(line) - if len(p) > 4 { - if t, err := strconv.ParseFloat(p[3], 64); err == nil { - switch strings.ToUpper(p[4]) { - case "MHZ": - ret.Mhz = t - case "GHZ": - ret.Mhz = t * 1000.0 - case "KHZ": - ret.Mhz = t / 1000.0 - default: - ret.Mhz = t - } - } - } - break - } - } - return []InfoStat{ret}, nil -} - -func CountsWithContext(ctx context.Context, logical bool) (int, error) { - info, err := InfoWithContext(ctx) - if err == nil { - return int(info[0].Cores), nil - } - return 0, err -} diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_linux_111.go b/vendor/github.com/shirou/gopsutil/v3/net/net_linux_111.go deleted file mode 100644 index bd5c9587137..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_linux_111.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build !go1.16 -// +build !go1.16 - -package net - -import ( - "os" -) - -func readDir(f *os.File, max int) ([]os.FileInfo, error) { - return f.Readdir(max) -} diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_linux_116.go b/vendor/github.com/shirou/gopsutil/v3/net/net_linux_116.go deleted file mode 100644 index a45072e924a..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_linux_116.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build go1.16 -// +build go1.16 - -package net - -import ( - "os" -) - -func readDir(f *os.File, max int) ([]os.DirEntry, error) { - return f.ReadDir(max) -} diff --git a/vendor/github.com/shirou/gopsutil/v3/LICENSE b/vendor/github.com/shirou/gopsutil/v4/LICENSE similarity index 100% rename from vendor/github.com/shirou/gopsutil/v3/LICENSE rename to vendor/github.com/shirou/gopsutil/v4/LICENSE diff --git a/vendor/github.com/shirou/gopsutil/v3/common/env.go b/vendor/github.com/shirou/gopsutil/v4/common/env.go similarity index 94% rename from vendor/github.com/shirou/gopsutil/v3/common/env.go rename to vendor/github.com/shirou/gopsutil/v4/common/env.go index 4b5f4980c21..4acad1fd1e8 100644 --- a/vendor/github.com/shirou/gopsutil/v3/common/env.go +++ b/vendor/github.com/shirou/gopsutil/v4/common/env.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package common type EnvKeyType string diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu.go index 83bc23d45ed..56f53c3a1ac 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu import ( @@ -11,7 +12,7 @@ import ( "sync" "time" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) // TimesStat contains the amounts of time the CPU has spent performing different diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix.go similarity index 85% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix.go index 1439d1d7939..bc766bd4fe9 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix -// +build aix package cpu diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_cgo.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_cgo.go similarity index 96% rename from 
vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_cgo.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_cgo.go index 9c1e70b1730..559dc5feafd 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_cgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_cgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix && cgo -// +build aix,cgo package cpu diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go new file mode 100644 index 00000000000..329ef833666 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build aix && !cgo + +package cpu + +import ( + "context" + "strconv" + "strings" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + var ret []TimesStat + if percpu { + per_out, err := invoke.CommandWithContext(ctx, "sar", "-u", "-P", "ALL", "10", "1") + if err != nil { + return nil, err + } + lines := strings.Split(string(per_out), "\n") + if len(lines) < 6 { + return []TimesStat{}, common.ErrNotImplementedError + } + + hp := strings.Fields(lines[5]) // headers + for l := 6; l < len(lines)-1; l++ { + ct := &TimesStat{} + v := strings.Fields(lines[l]) // values + for i, header := range hp { + // We're done in any of these use cases + if i >= len(v) || v[0] == "-" { + break + } + + // Position variable for v + pos := i + // There is a missing field at the beginning of all but the first line + // so adjust the position + if l > 6 { + pos = i - 1 + } + // We don't want invalid positions + if pos < 0 { + continue + } + + if t, err := strconv.ParseFloat(v[pos], 64); err == nil { + switch header { + case `cpu`: + ct.CPU = strconv.FormatFloat(t, 'f', -1, 64) + case `%usr`: + ct.User = t + case `%sys`: + ct.System = t + case `%wio`: + ct.Iowait = t + case `%idle`: + ct.Idle = t + } + } + } + // Valid CPU data, so append it + ret = append(ret, *ct) + } + } else { + out, err := invoke.CommandWithContext(ctx, "sar", "-u", "10", "1") + if err != nil { + return nil, err + } + lines := strings.Split(string(out), "\n") + if len(lines) < 5 { + return []TimesStat{}, common.ErrNotImplementedError + } + + ct := &TimesStat{CPU: "cpu-total"} + h := strings.Fields(lines[len(lines)-3]) // headers + v := strings.Fields(lines[len(lines)-2]) // values + for i, header := range h { + if t, err := strconv.ParseFloat(v[i], 64); err == nil { + switch header { + case `%usr`: + ct.User = t + case `%sys`: + ct.System = t + case `%wio`: + ct.Iowait = t + case `%idle`: + ct.Idle = t + } + } + } + + ret = append(ret, *ct) + } + + return ret, nil +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + out, err := invoke.CommandWithContext(ctx, "prtconf") + if err != nil { + return nil, err + } + + ret := InfoStat{} + for _, line := range strings.Split(string(out), "\n") { + if strings.HasPrefix(line, "Number Of Processors:") { + p := strings.Fields(line) + if len(p) > 3 { + if t, err := strconv.ParseUint(p[3], 10, 64); err == nil { + ret.Cores = int32(t) + } + } + } else if strings.HasPrefix(line, "Processor Clock Speed:") { + p := strings.Fields(line) + if len(p) > 4 { + if t, err := strconv.ParseFloat(p[3], 64); err == nil { + switch strings.ToUpper(p[4]) { + case "MHZ": + ret.Mhz = t + case "GHZ": + ret.Mhz = t * 1000.0 + case "KHZ": + ret.Mhz = t / 1000.0 + default: + ret.Mhz = t + } + } + } + break + } else if 
strings.HasPrefix(line, "System Model:") { + p := strings.Split(string(line), ":") + if p != nil { + ret.VendorID = strings.TrimSpace(p[1]) + } + } else if strings.HasPrefix(line, "Processor Type:") { + p := strings.Split(string(line), ":") + if p != nil { + c := strings.Split(string(p[1]), "_") + if c != nil { + ret.Family = strings.TrimSpace(c[0]) + ret.Model = strings.TrimSpace(c[1]) + } + } + } + } + return []InfoStat{ret}, nil +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + info, err := InfoWithContext(ctx) + if err == nil { + return int(info[0].Cores), nil + } + return 0, err +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go index 41f395e5e05..79a458b8e21 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build darwin -// +build darwin package cpu diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_cgo.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_cgo.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_cgo.go index 1d5f0772ed7..3a02024c5be 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_cgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_cgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build darwin && cgo -// +build darwin,cgo package cpu diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_nocgo.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_nocgo.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_nocgo.go index e067e99f980..1af8566a67b 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_nocgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_nocgo.go @@ -1,9 +1,9 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build darwin && !cgo -// +build darwin,!cgo package cpu -import "github.com/shirou/gopsutil/v3/internal/common" +import "github.com/shirou/gopsutil/v4/internal/common" func perCPUTimes() ([]TimesStat, error) { return []TimesStat{}, common.ErrNotImplementedError diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly.go index fef53e5dcc1..19b1e9dd3ef 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu import ( @@ -10,7 +11,7 @@ import ( "strings" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly_amd64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly_amd64.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly_amd64.go rename to 
vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly_amd64.go index 57e14528db1..25ececa680e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_fallback.go similarity index 83% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_fallback.go index 089f603c8fc..245c1ec98b8 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_fallback.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build !darwin && !linux && !freebsd && !openbsd && !netbsd && !solaris && !windows && !dragonfly && !plan9 && !aix -// +build !darwin,!linux,!freebsd,!openbsd,!netbsd,!solaris,!windows,!dragonfly,!plan9,!aix package cpu @@ -7,7 +7,7 @@ import ( "context" "runtime" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func Times(percpu bool) ([]TimesStat, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd.go index d3f47353cfc..c68d6bff0f6 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu import ( @@ -10,7 +11,7 @@ import ( "strings" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_386.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_386.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_386.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_386.go index 8b7f4c321eb..e4799bcf5c4 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_386.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_386.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_amd64.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_amd64.go index 57e14528db1..25ececa680e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm.go index 8b7f4c321eb..e4799bcf5c4 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm.go +++ 
b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm64.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm64.go index 57e14528db1..25ececa680e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go similarity index 99% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go index da467e2dd20..f78c61a25b6 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux -// +build linux package cpu @@ -13,7 +13,7 @@ import ( "github.com/tklauser/go-sysconf" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) var ClocksPerSec = float64(100) diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd.go index 1f66be34255..2cda5cd2437 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build netbsd -// +build netbsd package cpu @@ -9,7 +9,7 @@ import ( "runtime" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_amd64.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_amd64.go index 57e14528db1..25ececa680e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm64.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm64.go index 57e14528db1..25ececa680e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd.go 
rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd.go index fe33290300e..33233d3c74a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd -// +build openbsd package cpu @@ -9,7 +9,7 @@ import ( "runtime" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_386.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_386.go similarity index 73% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_386.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_386.go index 5e878399a38..40a6f43e498 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_386.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_386.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_amd64.go similarity index 73% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_amd64.go index d659058cd5e..464156d5402 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm.go similarity index 73% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm.go index 5e878399a38..40a6f43e498 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm64.go similarity index 73% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm64.go index d659058cd5e..464156d5402 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_riscv64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_riscv64.go similarity index 73% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_riscv64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_riscv64.go index d659058cd5e..464156d5402 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_riscv64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_riscv64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_plan9.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_plan9.go similarity index 91% rename from 
vendor/github.com/shirou/gopsutil/v3/cpu/cpu_plan9.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_plan9.go index a2e99d8c05e..bff2e0c7584 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_plan9.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_plan9.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build plan9 -// +build plan9 package cpu @@ -9,7 +9,7 @@ import ( "runtime" stats "github.com/lufia/plan9stats" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func Times(percpu bool) ([]TimesStat, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_solaris.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_solaris.go similarity index 99% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_solaris.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_solaris.go index 4231ad16819..d8ba1d3242e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_solaris.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu import ( diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_windows.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_windows.go index e10612fd19d..4476b91cb5f 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_windows.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build windows -// +build windows package cpu @@ -8,14 +8,12 @@ import ( "fmt" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "github.com/yusufpapurcu/wmi" "golang.org/x/sys/windows" ) -var ( - procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo") -) +var procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo") type win32_Processor struct { Family uint16 diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/binary.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/binary.go similarity index 99% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/binary.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/binary.go index 5e8d43db835..6e75e74b018 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/binary.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/binary.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package common // Copyright 2009 The Go Authors. All rights reserved. 
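The long run of renames above and below is the gopsutil v3 → v4 major-version bump: the code moves wholesale to a new module path and gains SPDX headers, so consumers mostly change import paths. A minimal sketch of a caller after the bump, assuming the v4 packages keep the entry points visible in this diff (cpu.Times, mem.VirtualMemory):

package main

import (
	"fmt"
	"log"

	"github.com/shirou/gopsutil/v4/cpu" // was github.com/shirou/gopsutil/v3/cpu
	"github.com/shirou/gopsutil/v4/mem" // was github.com/shirou/gopsutil/v3/mem
)

func main() {
	// percpu=false returns a single aggregate "cpu-total" entry.
	times, err := cpu.Times(false)
	if err != nil {
		log.Fatal(err)
	}
	if len(times) > 0 {
		fmt.Printf("user=%.1fs system=%.1fs idle=%.1fs\n",
			times[0].User, times[0].System, times[0].Idle)
	}

	vm, err := mem.VirtualMemory()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("total=%d bytes, used=%.1f%%\n", vm.Total, vm.UsedPercent)
}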
diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common.go similarity index 99% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common.go index 5e25e507b48..642aabc5583 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package common // @@ -25,7 +26,7 @@ import ( "strings" "time" - "github.com/shirou/gopsutil/v3/common" + "github.com/shirou/gopsutil/v4/common" ) var ( diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go index f1a78459777..53f9ae8d9a5 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build darwin -// +build darwin package common diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_freebsd.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_freebsd.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_freebsd.go index f590e2e67e6..53cdceeb6d4 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_freebsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd || openbsd -// +build freebsd openbsd package common diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go similarity index 99% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go index a429e16a2cc..85802dcb097 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux -// +build linux package common diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_netbsd.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_netbsd.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_netbsd.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_netbsd.go index efbc710a5ef..206532126c9 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_netbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_netbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build netbsd -// +build netbsd package common diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_openbsd.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_openbsd.go rename to 
vendor/github.com/shirou/gopsutil/v4/internal/common/common_openbsd.go index 58d76f334e2..00fa19a2fb4 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_openbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd -// +build openbsd package common diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_unix.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_unix.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_unix.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_unix.go index 4af7e5c2aa8..2715b890bef 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_unix.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_unix.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux || freebsd || darwin || openbsd -// +build linux freebsd darwin openbsd package common diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_windows.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_windows.go similarity index 99% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_windows.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_windows.go index 301b2315bb4..766ed2fcba3 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_windows.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build windows -// +build windows package common diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/endian.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/endian.go similarity index 88% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/endian.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/endian.go index 147cfdc4b71..113ff2e9f42 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/endian.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/endian.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package common import "unsafe" diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/sleep.go similarity index 89% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/sleep.go index 94cedfd3438..504f13ffd98 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/sleep.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package common import ( diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/warnings.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/warnings.go similarity index 92% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/warnings.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/warnings.go index a4aaadaf54d..888cc57faee 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/warnings.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/warnings.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package common import "fmt" diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go b/vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go new file mode 
100644 index 00000000000..0a12fe2fe34 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build linux + +package mem + +import ( + "context" + "encoding/json" +) + +type ExVirtualMemory struct { + ActiveFile uint64 `json:"activefile"` + InactiveFile uint64 `json:"inactivefile"` + ActiveAnon uint64 `json:"activeanon"` + InactiveAnon uint64 `json:"inactiveanon"` + Unevictable uint64 `json:"unevictable"` +} + +func (v ExVirtualMemory) String() string { + s, _ := json.Marshal(v) + return string(s) +} + +type ExLinux struct{} + +func NewExLinux() *ExLinux { + return &ExLinux{} +} + +func (ex *ExLinux) VirtualMemory() (*ExVirtualMemory, error) { + return ex.VirtualMemoryWithContext(context.Background()) +} + +func (ex *ExLinux) VirtualMemoryWithContext(ctx context.Context) (*ExVirtualMemory, error) { + _, vmEx, err := fillFromMeminfoWithContext(ctx) + if err != nil { + return nil, err + } + return vmEx, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/ex_windows.go b/vendor/github.com/shirou/gopsutil/v4/mem/ex_windows.go new file mode 100644 index 00000000000..4f1573b3c5b --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/ex_windows.go @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build windows + +package mem + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +// ExVirtualMemory represents Windows specific information +// https://learn.microsoft.com/en-us/windows/win32/api/sysinfoapi/ns-sysinfoapi-memorystatusex +type ExVirtualMemory struct { + VirtualTotal uint64 `json:"virtualTotal"` + VirtualAvail uint64 `json:"virtualAvail"` +} + +type ExWindows struct{} + +func NewExWindows() *ExWindows { + return &ExWindows{} +} + +func (e *ExWindows) VirtualMemory() (*ExVirtualMemory, error) { + var memInfo memoryStatusEx + memInfo.cbSize = uint32(unsafe.Sizeof(memInfo)) + mem, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(&memInfo))) + if mem == 0 { + return nil, windows.GetLastError() + } + + ret := &ExVirtualMemory{ + VirtualTotal: memInfo.ullTotalVirtual, + VirtualAvail: memInfo.ullAvailVirtual, + } + + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem.go index edaf268bbf4..0da71a98863 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem.go @@ -1,9 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause package mem import ( "encoding/json" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) var invoke common.Invoker = common.Invoke{} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix.go similarity index 86% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_aix.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_aix.go index 22a6a4e9265..916bff30df3 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix -// +build aix package mem diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_cgo.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_cgo.go similarity index 97% rename from 
vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_cgo.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_cgo.go index 67e11dff886..2d03dd0c3f7 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_cgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_cgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix && cgo -// +build aix,cgo package mem diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_nocgo.go similarity index 83% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_nocgo.go index cc6a76d2f0d..bc3c0ed3b4a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_nocgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix && !cgo -// +build aix,!cgo package mem @@ -8,11 +8,11 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { - vmem, swap, err := callSVMon(ctx) + vmem, swap, err := callSVMon(ctx, true) if err != nil { return nil, err } @@ -25,7 +25,7 @@ func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { } func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { - _, swap, err := callSVMon(ctx) + _, swap, err := callSVMon(ctx, false) if err != nil { return nil, err } @@ -35,7 +35,7 @@ func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { return swap, nil } -func callSVMon(ctx context.Context) (*VirtualMemoryStat, *SwapMemoryStat, error) { +func callSVMon(ctx context.Context, virt bool) (*VirtualMemoryStat, *SwapMemoryStat, error) { out, err := invoke.CommandWithContext(ctx, "svmon", "-G") if err != nil { return nil, nil, err @@ -45,7 +45,7 @@ func callSVMon(ctx context.Context) (*VirtualMemoryStat, *SwapMemoryStat, error) vmem := &VirtualMemoryStat{} swap := &SwapMemoryStat{} for _, line := range strings.Split(string(out), "\n") { - if strings.HasPrefix(line, "memory") { + if virt && strings.HasPrefix(line, "memory") { p := strings.Fields(line) if len(p) > 2 { if t, err := strconv.ParseUint(p[1], 10, 64); err == nil { diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_bsd.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_bsd.go index ef867d74223..4f3e57c038a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_bsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd || openbsd || netbsd -// +build freebsd openbsd netbsd package mem diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go similarity index 94% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go index a05a0faba0b..a33c5f125a2 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build darwin -// +build darwin package mem @@ -10,7 +10,7 @@ import ( "golang.org/x/sys/unix" - 
"github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func getHwMemsize() (uint64, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin_cgo.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin_cgo.go index e5da7dcdb23..cc6657d045c 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin_cgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build darwin && cgo -// +build darwin,cgo package mem diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin_nocgo.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin_nocgo.go index c9393168032..097a93e63e4 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin_nocgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build darwin && !cgo -// +build darwin,!cgo package mem diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_fallback.go similarity index 86% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_fallback.go index 697fd870963..ba882c8bee9 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_fallback.go @@ -1,12 +1,12 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build !darwin && !linux && !freebsd && !openbsd && !solaris && !windows && !plan9 && !aix && !netbsd -// +build !darwin,!linux,!freebsd,!openbsd,!solaris,!windows,!plan9,!aix,!netbsd package mem import ( "context" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func VirtualMemory() (*VirtualMemoryStat, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_freebsd.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_freebsd.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_freebsd.go index 9a56785b319..d9cae7116b2 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_freebsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd -// +build freebsd package mem @@ -8,7 +8,7 @@ import ( "errors" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_linux.go similarity index 93% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_linux.go index 214a91e47f6..05bfdaf4e1a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_linux.go @@ -1,12 +1,11 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux -// +build linux package mem import ( "bufio" "context" - "encoding/json" "fmt" "io" "math" @@ -16,22 +15,9 @@ import ( 
"golang.org/x/sys/unix" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) -type VirtualMemoryExStat struct { - ActiveFile uint64 `json:"activefile"` - InactiveFile uint64 `json:"inactivefile"` - ActiveAnon uint64 `json:"activeanon"` - InactiveAnon uint64 `json:"inactiveanon"` - Unevictable uint64 `json:"unevictable"` -} - -func (v VirtualMemoryExStat) String() string { - s, _ := json.Marshal(v) - return string(s) -} - func VirtualMemory() (*VirtualMemoryStat, error) { return VirtualMemoryWithContext(context.Background()) } @@ -44,19 +30,7 @@ func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { return vm, nil } -func VirtualMemoryEx() (*VirtualMemoryExStat, error) { - return VirtualMemoryExWithContext(context.Background()) -} - -func VirtualMemoryExWithContext(ctx context.Context) (*VirtualMemoryExStat, error) { - _, vmEx, err := fillFromMeminfoWithContext(ctx) - if err != nil { - return nil, err - } - return vmEx, nil -} - -func fillFromMeminfoWithContext(ctx context.Context) (*VirtualMemoryStat, *VirtualMemoryExStat, error) { +func fillFromMeminfoWithContext(ctx context.Context) (*VirtualMemoryStat, *ExVirtualMemory, error) { filename := common.HostProcWithContext(ctx, "meminfo") lines, _ := common.ReadLines(filename) @@ -67,7 +41,7 @@ func fillFromMeminfoWithContext(ctx context.Context) (*VirtualMemoryStat, *Virtu sReclaimable := false // "Sreclaimable:" not available: 2.6.19 / Nov 2006 ret := &VirtualMemoryStat{} - retEx := &VirtualMemoryExStat{} + retEx := &ExVirtualMemory{} for _, line := range lines { fields := strings.Split(line, ":") @@ -409,7 +383,7 @@ func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { // calculateAvailVmem is a fallback under kernel 3.14 where /proc/meminfo does not provide // "MemAvailable:" column. 
It reimplements an algorithm from the link below // https://github.com/giampaolo/psutil/pull/890 -func calculateAvailVmem(ctx context.Context, ret *VirtualMemoryStat, retEx *VirtualMemoryExStat) uint64 { +func calculateAvailVmem(ctx context.Context, ret *VirtualMemoryStat, retEx *ExVirtualMemory) uint64 { var watermarkLow uint64 fn := common.HostProcWithContext(ctx, "zoneinfo") diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_netbsd.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_netbsd.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_netbsd.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_netbsd.go index d1f54ecaf3f..0a41b3e340e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_netbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_netbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build netbsd -// +build netbsd package mem diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd.go index e37d5abe0dd..2510bb0d3aa 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd -// +build openbsd package mem @@ -10,7 +10,7 @@ import ( "errors" "fmt" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_386.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_386.go similarity index 93% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_386.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_386.go index de2b26ca40a..552e93f4a28 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_386.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_386.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && 386 -// +build openbsd,386 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs mem/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_amd64.go similarity index 92% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_amd64.go index d187abf01fa..73e5b72aa67 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm.go similarity index 93% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm.go index 2488f18517c..57b5861de5b 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && arm -// +build openbsd,arm // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs mem/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm64.go similarity index 93% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm64.go index 3661b16fb1b..f39a6456b73 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && arm64 -// +build openbsd,arm64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs mem/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_riscv64.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_riscv64.go similarity index 94% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_riscv64.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_riscv64.go index 7a7b480384e..f9f838f54ed 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_riscv64.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_riscv64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && riscv64 -// +build openbsd,riscv64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs mem/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_plan9.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_plan9.go similarity index 95% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_plan9.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_plan9.go index b5259f8446e..c17a102ee62 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_plan9.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_plan9.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build plan9 -// +build plan9 package mem @@ -8,7 +8,7 @@ import ( "os" stats "github.com/lufia/plan9stats" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func SwapMemory() (*SwapMemoryStat, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_solaris.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_solaris.go index c911267e1ef..06d0d9a006b 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_solaris.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build solaris -// +build solaris package mem @@ -11,7 +11,7 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "github.com/tklauser/go-sysconf" ) diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_windows.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_windows.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_windows.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_windows.go index 8c7fb1a1353..4666cbd01e8 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_windows.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build windows -// +build windows package mem @@ -9,7 +9,7 @@ import ( "syscall" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" + 
"github.com/shirou/gopsutil/v4/internal/common" "golang.org/x/sys/windows" ) diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net.go b/vendor/github.com/shirou/gopsutil/v4/net/net.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/net/net.go rename to vendor/github.com/shirou/gopsutil/v4/net/net.go index 0f3a62f39c5..3890eda5308 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package net import ( @@ -5,7 +6,7 @@ import ( "encoding/json" "net" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) var invoke common.Invoker = common.Invoke{} diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_aix.go b/vendor/github.com/shirou/gopsutil/v4/net/net_aix.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/net/net_aix.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_aix.go index 81feaa8d7a0..df59abecbe1 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_aix.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_aix.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix -// +build aix package net @@ -11,7 +11,7 @@ import ( "strings" "syscall" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func IOCounters(pernic bool) ([]IOCountersStat, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_aix_cgo.go b/vendor/github.com/shirou/gopsutil/v4/net/net_aix_cgo.go similarity index 95% rename from vendor/github.com/shirou/gopsutil/v3/net/net_aix_cgo.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_aix_cgo.go index 8c34f881c0b..a45a5b75cce 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_aix_cgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_aix_cgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix && cgo -// +build aix,cgo package net diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/net/net_aix_nocgo.go similarity index 95% rename from vendor/github.com/shirou/gopsutil/v3/net/net_aix_nocgo.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_aix_nocgo.go index e3fce9021ce..f63a21e73bc 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_aix_nocgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_aix_nocgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix && !cgo -// +build aix,!cgo package net @@ -9,7 +9,7 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func parseNetstatI(output string) ([]IOCountersStat, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go b/vendor/github.com/shirou/gopsutil/v4/net/net_darwin.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_darwin.go index 8a7b6374438..f86b7bf9e3b 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_darwin.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build darwin -// +build darwin package net @@ -12,7 +12,7 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) var ( diff --git 
a/vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go b/vendor/github.com/shirou/gopsutil/v4/net/net_fallback.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_fallback.go index e136be1bace..e62deeeed36 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_fallback.go @@ -1,12 +1,12 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build !aix && !darwin && !linux && !freebsd && !openbsd && !windows && !solaris -// +build !aix,!darwin,!linux,!freebsd,!openbsd,!windows,!solaris package net import ( "context" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func IOCounters(pernic bool) ([]IOCountersStat, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/net/net_freebsd.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_freebsd.go index bf8baf09495..155a49c4045 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_freebsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd -// +build freebsd package net @@ -8,7 +8,7 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func IOCounters(pernic bool) ([]IOCountersStat, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go b/vendor/github.com/shirou/gopsutil/v4/net/net_linux.go similarity index 99% rename from vendor/github.com/shirou/gopsutil/v3/net/net_linux.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_linux.go index 20ca5470a49..a46f1b9dcf3 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_linux.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux -// +build linux package net @@ -16,7 +16,7 @@ import ( "strings" "syscall" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) const ( // Conntrack Column numbers @@ -552,7 +552,7 @@ func getProcInodes(root string, pid int32, max int) (map[string][]inodeMap, erro return ret, err } defer f.Close() - dirEntries, err := readDir(f, max) + dirEntries, err := f.ReadDir(max) if err != nil { return ret, err } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/net/net_openbsd.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/net/net_openbsd.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_openbsd.go index 25bbe49ca1f..b6c31dd35d1 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_openbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd -// +build openbsd package net @@ -12,13 +12,14 @@ import ( "strings" "syscall" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) var portMatch = regexp.MustCompile(`(.*)\.(\d+)$`) func ParseNetstat(output string, mode string, - iocs map[string]IOCountersStat) error { + iocs map[string]IOCountersStat, +) error { lines := strings.Split(output, "\n") exists := make([]string, 0, len(lines)-1) diff --git 
a/vendor/github.com/shirou/gopsutil/v3/net/net_solaris.go b/vendor/github.com/shirou/gopsutil/v4/net/net_solaris.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/net/net_solaris.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_solaris.go index 79d8ac30e24..b886066e82e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_solaris.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build solaris -// +build solaris package net @@ -11,7 +11,7 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) // NetIOCounters returns network I/O statistics for every network diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_unix.go b/vendor/github.com/shirou/gopsutil/v4/net/net_unix.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/net/net_unix.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_unix.go index cb846e28a6d..71fc3b972a2 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_unix.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_unix.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd || darwin -// +build freebsd darwin package net @@ -11,7 +11,7 @@ import ( "strings" "syscall" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) // Return a list of network connections opened. diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_windows.go b/vendor/github.com/shirou/gopsutil/v4/net/net_windows.go similarity index 99% rename from vendor/github.com/shirou/gopsutil/v3/net/net_windows.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_windows.go index 5d384342f87..12f62cda05b 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_windows.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build windows -// +build windows package net @@ -11,7 +11,7 @@ import ( "syscall" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "golang.org/x/sys/windows" ) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process.go b/vendor/github.com/shirou/gopsutil/v4/process/process.go similarity index 94% rename from vendor/github.com/shirou/gopsutil/v3/process/process.go rename to vendor/github.com/shirou/gopsutil/v4/process/process.go index 1bb27abf8e8..ba27662523c 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package process import ( @@ -9,10 +10,10 @@ import ( "sync" "time" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/mem" - "github.com/shirou/gopsutil/v3/net" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/mem" + "github.com/shirou/gopsutil/v4/net" ) var ( @@ -29,9 +30,9 @@ type Process struct { parent int32 parentMutex sync.RWMutex // for windows ppid cache numCtxSwitches *NumCtxSwitchesStat - uids []int32 - gids []int32 - groups []int32 + uids []uint32 + gids []uint32 + groups []uint32 numThreads int32 memInfo *MemoryInfoStat sigInfo *SignalInfoStat @@ -102,10 +103,18 @@ type RlimitStat struct { } type IOCountersStat struct { - ReadCount uint64 `json:"readCount"`
+ // ReadCount is a number of read I/O operations such as syscalls. + ReadCount uint64 `json:"readCount"` + // WriteCount is a number of write I/O operations such as syscalls. WriteCount uint64 `json:"writeCount"` - ReadBytes uint64 `json:"readBytes"` + // ReadBytes is a number of all I/O read in bytes. This includes disk I/O on Linux and Windows. + ReadBytes uint64 `json:"readBytes"` + // WriteBytes is a number of all I/O write in bytes. This includes disk I/O on Linux and Windows. WriteBytes uint64 `json:"writeBytes"` + // DiskReadBytes is a number of disk I/O read in bytes. Currently only Linux has this value. + DiskReadBytes uint64 `json:"diskReadBytes"` + // DiskWriteBytes is a number of disk I/O write in bytes. Currently only Linux has this value. + DiskWriteBytes uint64 `json:"diskWriteBytes"` } type NumCtxSwitchesStat struct { @@ -368,7 +377,7 @@ func (p *Process) CPUPercentWithContext(ctx context.Context) (float64, error) { } // Groups returns all group IDs (including supplementary groups) of the process as a slice of integers -func (p *Process) Groups() ([]int32, error) { +func (p *Process) Groups() ([]uint32, error) { return p.GroupsWithContext(context.Background()) } @@ -433,12 +442,12 @@ func (p *Process) Foreground() (bool, error) { } // Uids returns user ids of the process as a slice of integers -func (p *Process) Uids() ([]int32, error) { +func (p *Process) Uids() ([]uint32, error) { return p.UidsWithContext(context.Background()) } // Gids returns group ids of the process as a slice of integers -func (p *Process) Gids() ([]int32, error) { +func (p *Process) Gids() ([]uint32, error) { return p.GidsWithContext(context.Background()) } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_bsd.go b/vendor/github.com/shirou/gopsutil/v4/process/process_bsd.go similarity index 94% rename from vendor/github.com/shirou/gopsutil/v3/process/process_bsd.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_bsd.go index 263829ffae5..dcc056101a0 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_bsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_bsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build darwin || freebsd || openbsd -// +build darwin freebsd openbsd package process @@ -8,8 +8,8 @@ import ( "context" "encoding/binary" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" ) type MemoryInfoExStat struct{} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go similarity index 92% rename from vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go index 176661cbd6b..5231007c3c3 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build darwin -// +build darwin package process @@ -13,8 +13,8 @@ import ( "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" ) // copied from sys/sysctl.h @@ -117,31 +117,31 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { return
strings.IndexByte(string(out), '+') != -1, nil } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } // See: http://unix.superglobalmegacorp.com/Net2/newsrc/sys/ucred.h.html - userEffectiveUID := int32(k.Eproc.Ucred.Uid) + userEffectiveUID := uint32(k.Eproc.Ucred.Uid) - return []int32{userEffectiveUID}, nil + return []uint32{userEffectiveUID}, nil } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - gids := make([]int32, 0, 3) - gids = append(gids, int32(k.Eproc.Pcred.P_rgid), int32(k.Eproc.Pcred.P_rgid), int32(k.Eproc.Pcred.P_svgid)) + gids := make([]uint32, 0, 3) + gids = append(gids, uint32(k.Eproc.Pcred.P_rgid), uint32(k.Eproc.Pcred.P_rgid), uint32(k.Eproc.Pcred.P_svgid)) return gids, nil } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError // k, err := p.getKProc() // if err != nil { diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_amd64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_darwin_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go index b353e5eac7b..a13522473a1 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_darwin.go diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_arm64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_darwin_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go index cbd6bdc793c..f1f3df365d9 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build darwin && arm64 -// +build darwin,arm64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs process/types_darwin.go diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_cgo.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_darwin_cgo.go index 858f08e7a41..bbdfc963ebb 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_cgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build darwin && cgo -// +build darwin,cgo package process @@ -20,7 +20,7 @@ import ( "syscall" "unsafe" - "github.com/shirou/gopsutil/v3/cpu" + "github.com/shirou/gopsutil/v4/cpu" ) var ( diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_nocgo.go similarity index 92% rename from vendor/github.com/shirou/gopsutil/v3/process/process_darwin_nocgo.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_darwin_nocgo.go index bc1d357df8c..129bb609873 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_nocgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_nocgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build darwin && !cgo -// +build darwin,!cgo package process @@ -9,8 +9,8 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" ) func (p *Process) CwdWithContext(ctx context.Context) (string, error) { @@ -24,14 +24,21 @@ func (p *Process) ExeWithContext(ctx context.Context) (string, error) { } txtFound := 0 lines := strings.Split(string(out), "\n") + fallback := "" for i := 1; i < len(lines); i++ { if lines[i] == "ftxt" { txtFound++ + if txtFound == 1 { + fallback = lines[i-1][1:] + } if txtFound == 2 { return lines[i-1][1:], nil } } } + if fallback != "" { + return fallback, nil + } return "", fmt.Errorf("missing txt data returned by lsof") } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_fallback.go b/vendor/github.com/shirou/gopsutil/v4/process/process_fallback.go similarity index 93% rename from vendor/github.com/shirou/gopsutil/v3/process/process_fallback.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_fallback.go index 1a5d0c4b4a7..23793e92c50 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_fallback.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_fallback.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build !darwin && !linux && !freebsd && !openbsd && !windows && !solaris && !plan9 -// +build !darwin,!linux,!freebsd,!openbsd,!windows,!solaris,!plan9 package process @@ -7,9 +7,9 @@ import ( "context" "syscall" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" ) type Signal = syscall.Signal @@ -82,15 +82,15 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { return false, common.ErrNotImplementedError } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { return nil, 
common.ErrNotImplementedError } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go similarity index 90% rename from vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go index 40b10e14fcd..3d21183d656 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd -// +build freebsd package process @@ -10,9 +10,9 @@ import ( "strconv" "strings" - cpu "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - net "github.com/shirou/gopsutil/v3/net" + cpu "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + net "github.com/shirou/gopsutil/v4/net" "golang.org/x/sys/unix" ) @@ -157,40 +157,40 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { return strings.IndexByte(string(out), '+') != -1, nil } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - uids := make([]int32, 0, 3) + uids := make([]uint32, 0, 3) - uids = append(uids, int32(k.Ruid), int32(k.Uid), int32(k.Svuid)) + uids = append(uids, uint32(k.Ruid), uint32(k.Uid), uint32(k.Svuid)) return uids, nil } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - gids := make([]int32, 0, 3) - gids = append(gids, int32(k.Rgid), int32(k.Ngroups), int32(k.Svgid)) + gids := make([]uint32, 0, 3) + gids = append(gids, uint32(k.Rgid), uint32(k.Ngroups), uint32(k.Svgid)) return gids, nil } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - groups := make([]int32, k.Ngroups) + groups := make([]uint32, k.Ngroups) for i := int16(0); i < k.Ngroups; i++ { - groups[i] = int32(k.Groups[i]) + groups[i] = uint32(k.Groups[i]) } return groups, nil diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_386.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_386.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_386.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_386.go index 08ab333b435..279ba9fbb40 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_386.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_386.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_freebsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_amd64.go 
b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_amd64.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_amd64.go index 560e627d249..f3b70ec1bec 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_freebsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm.go index 81ae0b9a8d4..75ed3063050 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_freebsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm64.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm64.go index 73ac08201a0..3dc301c027f 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd && arm64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
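Context for the process_linux.go hunks that follow: /proc/<pid>/io distinguishes rchar/wchar (all read/write traffic, including page-cache hits) from read_bytes/write_bytes (bytes that actually reached the storage layer). With this vendor bump, rchar/wchar now populate ReadBytes/WriteBytes while read_bytes/write_bytes move to the new DiskReadBytes/DiskWriteBytes fields, so the meaning of ReadBytes/WriteBytes changes between v3 and v4. A minimal standalone sketch of the same mapping, independent of gopsutil (Linux only):

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

func main() {
	// /proc/self/io has "field: value" lines; apply the v4 field mapping.
	data, err := os.ReadFile("/proc/self/io")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	var readBytes, writeBytes, diskReadBytes, diskWriteBytes uint64
	for _, line := range strings.Split(string(data), "\n") {
		field, value, ok := strings.Cut(line, ": ")
		if !ok {
			continue
		}
		n, err := strconv.ParseUint(value, 10, 64)
		if err != nil {
			continue
		}
		switch field {
		case "rchar": // all bytes read, including page-cache hits
			readBytes = n
		case "wchar": // all bytes written, including cached writes
			writeBytes = n
		case "read_bytes": // bytes actually fetched from storage
			diskReadBytes = n
		case "write_bytes": // bytes actually sent to storage
			diskWriteBytes = n
		}
	}
	fmt.Println(readBytes, writeBytes, diskReadBytes, diskWriteBytes)
}

The same bump also widens Uids/Gids/Groups from []int32 to []uint32 across platforms (matching the unsigned uid_t/gid_t kernel types), so callers that stored these IDs as int32 need a corresponding type change when switching the import path to v4.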
diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go b/vendor/github.com/shirou/gopsutil/v4/process/process_linux.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/process/process_linux.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_linux.go index 557435b345d..8f1d6c89c9d 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_linux.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux -// +build linux package process @@ -18,9 +18,9 @@ import ( "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" ) var pageSize = uint64(os.Getpagesize()) @@ -148,26 +148,26 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { return pgid == tpgid, nil } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { err := p.fillFromStatusWithContext(ctx) if err != nil { - return []int32{}, err + return []uint32{}, err } return p.uids, nil } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { err := p.fillFromStatusWithContext(ctx) if err != nil { - return []int32{}, err + return []uint32{}, err } return p.gids, nil } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { err := p.fillFromStatusWithContext(ctx) if err != nil { - return []int32{}, err + return []uint32{}, err } return p.groups, nil } @@ -727,8 +727,12 @@ func (p *Process) fillFromIOWithContext(ctx context.Context) (*IOCountersStat, e case "syscw": ret.WriteCount = t case "read_bytes": - ret.ReadBytes = t + ret.DiskReadBytes = t case "write_bytes": + ret.DiskWriteBytes = t + case "rchar": + ret.ReadBytes = t + case "wchar": ret.WriteBytes = t } } @@ -866,32 +870,32 @@ func (p *Process) fillFromStatusWithContext(ctx context.Context) error { } p.tgid = int32(pval) case "Uid": - p.uids = make([]int32, 0, 4) + p.uids = make([]uint32, 0, 4) for _, i := range strings.Split(value, "\t") { v, err := strconv.ParseInt(i, 10, 32) if err != nil { return err } - p.uids = append(p.uids, int32(v)) + p.uids = append(p.uids, uint32(v)) } case "Gid": - p.gids = make([]int32, 0, 4) + p.gids = make([]uint32, 0, 4) for _, i := range strings.Split(value, "\t") { v, err := strconv.ParseInt(i, 10, 32) if err != nil { return err } - p.gids = append(p.gids, int32(v)) + p.gids = append(p.gids, uint32(v)) } case "Groups": groups := strings.Fields(value) - p.groups = make([]int32, 0, len(groups)) + p.groups = make([]uint32, 0, len(groups)) for _, i := range groups { - v, err := strconv.ParseInt(i, 10, 32) + v, err := strconv.ParseUint(i, 10, 32) if err != nil { return err } - p.groups = append(p.groups, int32(v)) + p.groups = append(p.groups, uint32(v)) } case "Threads": v, err := strconv.ParseInt(value, 10, 32) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go similarity index 90% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go 
rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go index a58c5eb113c..7f85facb528 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd -// +build openbsd package process @@ -14,10 +14,10 @@ import ( "strings" "unsafe" - cpu "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - mem "github.com/shirou/gopsutil/v3/mem" - net "github.com/shirou/gopsutil/v3/net" + cpu "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + mem "github.com/shirou/gopsutil/v4/mem" + net "github.com/shirou/gopsutil/v4/net" "golang.org/x/sys/unix" ) @@ -68,7 +68,12 @@ func (p *Process) NameWithContext(ctx context.Context) (string, error) { } func (p *Process) CwdWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError + mib := []int32{CTLKern, KernProcCwd, p.Pid} + buf, _, err := common.CallSyscall(mib) + if err != nil { + return "", err + } + return common.ByteToString(buf), nil } func (p *Process) ExeWithContext(ctx context.Context) (string, error) { @@ -171,40 +176,40 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { return strings.IndexByte(string(out), '+') != -1, nil } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - uids := make([]int32, 0, 3) + uids := make([]uint32, 0, 3) - uids = append(uids, int32(k.Ruid), int32(k.Uid), int32(k.Svuid)) + uids = append(uids, uint32(k.Ruid), uint32(k.Uid), uint32(k.Svuid)) return uids, nil } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - gids := make([]int32, 0, 3) - gids = append(gids, int32(k.Rgid), int32(k.Ngroups), int32(k.Svgid)) + gids := make([]uint32, 0, 3) + gids = append(gids, uint32(k.Rgid), uint32(k.Ngroups), uint32(k.Svgid)) return gids, nil } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - groups := make([]int32, k.Ngroups) + groups := make([]uint32, k.Ngroups) for i := int16(0); i < k.Ngroups; i++ { - groups[i] = int32(k.Groups[i]) + groups[i] = uint32(k.Groups[i]) } return groups, nil diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_386.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_386.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_386.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_386.go index f4ed0249172..5b84706a7cf 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_386.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_386.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && 386 -// +build openbsd,386 // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs process/types_openbsd.go @@ -14,6 +14,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 55 + KernProcCwd = 78 KernProcArgv = 1 KernProcEnv = 3 ) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_amd64.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_amd64.go index 8607422b5f5..3229bb32c28 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_openbsd.go @@ -11,6 +12,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 55 + KernProcCwd = 78 KernProcArgv = 1 KernProcEnv = 3 ) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm.go index b94429f2e96..6f74ce75637 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && arm -// +build openbsd,arm // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs process/types_openbsd.go @@ -14,6 +14,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 55 + KernProcCwd = 78 KernProcArgv = 1 KernProcEnv = 3 ) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm64.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm64.go index a3291b8caf1..91045456258 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && arm64 -// +build openbsd,arm64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs process/types_openbsd.go @@ -14,6 +14,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 55 + KernProcCwd = 78 KernProcArgv = 1 KernProcEnv = 3 ) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_riscv64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_riscv64.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_riscv64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_riscv64.go index 076f095eaae..e3e0d36a09e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_riscv64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_riscv64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && riscv64 -// +build openbsd,riscv64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs process/types_openbsd.go @@ -14,6 +14,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 55 + KernProcCwd = 78 KernProcArgv = 1 KernProcEnv = 3 ) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_plan9.go b/vendor/github.com/shirou/gopsutil/v4/process/process_plan9.go similarity index 93% rename from vendor/github.com/shirou/gopsutil/v3/process/process_plan9.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_plan9.go index bc4bc062a99..726758cae95 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_plan9.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_plan9.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build plan9 -// +build plan9 package process @@ -7,9 +7,9 @@ import ( "context" "syscall" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" ) type Signal = syscall.Note @@ -82,15 +82,15 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { return false, common.ErrNotImplementedError } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_posix.go b/vendor/github.com/shirou/gopsutil/v4/process/process_posix.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/process/process_posix.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_posix.go index a01f9ecfc0d..caa9d3f7c03 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_posix.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_posix.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux || freebsd || openbsd || darwin || solaris -// +build linux freebsd openbsd darwin solaris package process @@ -16,7 +16,7 @@ import ( "golang.org/x/sys/unix" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) type Signal = syscall.Signal diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go b/vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go similarity index 95% rename from vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go index dd4bd4760bd..04f86f16b5e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package process import ( @@ -7,9 +8,9 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" ) type 
MemoryMapsStat struct { @@ -95,15 +96,15 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { return false, common.ErrNotImplementedError } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go b/vendor/github.com/shirou/gopsutil/v4/process/process_windows.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_windows.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_windows.go index f2053d98553..f3111649a6c 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_windows.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build windows -// +build windows package process @@ -18,9 +18,9 @@ import ( "unicode/utf16" "unsafe" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" "golang.org/x/sys/windows" ) @@ -466,15 +466,15 @@ func (p *Process) UsernameWithContext(ctx context.Context) (string, error) { return domain + "\\" + user, err } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_windows_32bit.go b/vendor/github.com/shirou/gopsutil/v4/process/process_windows_32bit.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/process/process_windows_32bit.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_windows_32bit.go index db4d453349c..2b231c79d04 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_windows_32bit.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_windows_32bit.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build (windows && 386) || (windows && arm) -// +build windows,386 windows,arm package process @@ -8,7 +8,7 @@ import ( "syscall" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "golang.org/x/sys/windows" ) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_windows_64bit.go b/vendor/github.com/shirou/gopsutil/v4/process/process_windows_64bit.go similarity index 95% rename from vendor/github.com/shirou/gopsutil/v3/process/process_windows_64bit.go rename to 
vendor/github.com/shirou/gopsutil/v4/process/process_windows_64bit.go index 74c6212cfde..befe5213900 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_windows_64bit.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_windows_64bit.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build (windows && amd64) || (windows && arm64) -// +build windows,amd64 windows,arm64 package process @@ -7,7 +7,7 @@ import ( "syscall" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "golang.org/x/sys/windows" ) diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index a618ec24d84..2c8f4808c1a 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -26,33 +26,28 @@ linters: - errcheck #- exhaustive #- funlen - - gas #- gochecknoinits - goconst - #- gocritic + - gocritic #- gocyclo - #- gofmt + - gofmt - goimports - - golint #- gomnd #- goprintffuncname - #- gosec - #- gosimple + - gosec + - gosimple - govet - ineffassign - - interfacer #- lll - - maligned - - megacheck - #- misspell + - misspell #- nakedret #- noctx - #- nolintlint + - nolintlint #- rowserrcheck #- scopelint - #- staticcheck + - staticcheck #- structcheck ! deprecated since v1.49.0; replaced by 'unused' - #- stylecheck + - stylecheck #- typecheck - unconvert #- unparam diff --git a/vendor/github.com/spf13/cobra/active_help.go b/vendor/github.com/spf13/cobra/active_help.go index 5f965e057f2..25c30e3ccc3 100644 --- a/vendor/github.com/spf13/cobra/active_help.go +++ b/vendor/github.com/spf13/cobra/active_help.go @@ -17,21 +17,17 @@ package cobra import ( "fmt" "os" - "regexp" - "strings" ) const ( activeHelpMarker = "_activeHelp_ " // The below values should not be changed: programs will be using them explicitly // in their user documentation, and users will be using them explicitly. - activeHelpEnvVarSuffix = "_ACTIVE_HELP" - activeHelpGlobalEnvVar = "COBRA_ACTIVE_HELP" + activeHelpEnvVarSuffix = "ACTIVE_HELP" + activeHelpGlobalEnvVar = configEnvVarGlobalPrefix + "_" + activeHelpEnvVarSuffix activeHelpGlobalDisable = "0" ) -var activeHelpEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`) - // AppendActiveHelp adds the specified string to the specified array to be used as ActiveHelp. // Such strings will be processed by the completion script and will be shown as ActiveHelp // to the user. @@ -60,8 +56,5 @@ func GetActiveHelpConfig(cmd *Command) string { // variable. It has the format <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the // root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`. func activeHelpEnvVar(name string) string { - // This format should not be changed: users will be using it explicitly. - activeHelpEnvVar := strings.ToUpper(fmt.Sprintf("%s%s", name, activeHelpEnvVarSuffix)) - activeHelpEnvVar = activeHelpEnvVarPrefixSubstRegexp.ReplaceAllString(activeHelpEnvVar, "_") - return activeHelpEnvVar + return configEnvVar(name, activeHelpEnvVarSuffix) } diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go index e79ec33a81d..ed1e70ceaa4 100644 --- a/vendor/github.com/spf13/cobra/args.go +++ b/vendor/github.com/spf13/cobra/args.go @@ -52,9 +52,9 @@ func OnlyValidArgs(cmd *Command, args []string) error { if len(cmd.ValidArgs) > 0 { // Remove any description that may be included in ValidArgs. // A description follows a tab character.
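Context on this OnlyValidArgs change (the hunk resumes below): each ValidArgs entry may carry a description after a tab character, and completion-aware code strips it before matching. A minimal sketch of that convention, with a hypothetical "serve" entry, showing why the patch prefers SplitN with a limit of 2 over Split:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical ValidArgs entry in cobra's "value\tdescription" form.
	arg := "serve\tStart the HTTP server"
	// SplitN stops after the first tab, so only two substrings are ever
	// allocated, and a tab inside the description cannot add more fields.
	value := strings.SplitN(arg, "\t", 2)[0]
	fmt.Println(value) // serve
}

The same Split-to-SplitN substitution appears in bash_completions.go and completions.go below, for the same reason.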
- var validArgs []string + validArgs := make([]string, 0, len(cmd.ValidArgs)) for _, v := range cmd.ValidArgs { - validArgs = append(validArgs, strings.Split(v, "\t")[0]) + validArgs = append(validArgs, strings.SplitN(v, "\t", 2)[0]) } for _, v := range args { if !stringInSlice(v, validArgs) { diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go index 8a531518409..f4d198cbcbd 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -597,19 +597,16 @@ func writeRequiredFlag(buf io.StringWriter, cmd *Command) { if nonCompletableFlag(flag) { return } - for key := range flag.Annotations { - switch key { - case BashCompOneRequiredFlag: - format := " must_have_one_flag+=(\"--%s" - if flag.Value.Type() != "bool" { - format += "=" - } - format += cbn - WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) - - if len(flag.Shorthand) > 0 { - WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) - } + if _, ok := flag.Annotations[BashCompOneRequiredFlag]; ok { + format := " must_have_one_flag+=(\"--%s" + if flag.Value.Type() != "bool" { + format += "=" + } + format += cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) + + if len(flag.Shorthand) > 0 { + WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) } } }) @@ -621,7 +618,7 @@ func writeRequiredNouns(buf io.StringWriter, cmd *Command) { for _, value := range cmd.ValidArgs { // Remove any description that may be included following a tab character. // Descriptions are not supported by bash completion. - value = strings.Split(value, "\t")[0] + value = strings.SplitN(value, "\t", 2)[0] WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) } if cmd.ValidArgsFunction != nil { diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go index a6b160ce53c..e0b0947b04c 100644 --- a/vendor/github.com/spf13/cobra/cobra.go +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -193,8 +193,6 @@ func ld(s, t string, ignoreCase bool) int { d := make([][]int, len(s)+1) for i := range d { d[i] = make([]int, len(t)+1) - } - for i := range d { d[i][0] = i } for j := range d[0] { diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 2fbe6c131a7..54748fc67eb 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -154,8 +154,10 @@ type Command struct { // pflags contains persistent flags. pflags *flag.FlagSet // lflags contains local flags. + // This field does not represent internal state, it's used as a cache to optimise LocalFlags function call lflags *flag.FlagSet // iflags contains inherited flags. + // This field does not represent internal state, it's used as a cache to optimise InheritedFlags function call iflags *flag.FlagSet // parentsPflags is all persistent flags of cmd's parents. parentsPflags *flag.FlagSet @@ -706,7 +708,7 @@ Loop: // This is not a flag or a flag value. Check to see if it matches what we're looking for, and if so, // return the args, excluding the one at this position. if s == x { - ret := []string{} + ret := make([]string, 0, len(args)-1) ret = append(ret, args[:pos]...) ret = append(ret, args[pos+1:]...) 
return ret @@ -754,14 +756,14 @@ func (c *Command) findSuggestions(arg string) string { if c.SuggestionsMinimumDistance <= 0 { c.SuggestionsMinimumDistance = 2 } - suggestionsString := "" + var sb strings.Builder if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 { - suggestionsString += "\n\nDid you mean this?\n" + sb.WriteString("\n\nDid you mean this?\n") for _, s := range suggestions { - suggestionsString += fmt.Sprintf("\t%v\n", s) + _, _ = fmt.Fprintf(&sb, "\t%v\n", s) } } - return suggestionsString + return sb.String() } func (c *Command) findNext(next string) *Command { @@ -873,7 +875,7 @@ func (c *Command) ArgsLenAtDash() int { func (c *Command) execute(a []string) (err error) { if c == nil { - return fmt.Errorf("Called Execute() on a nil Command") + return fmt.Errorf("called Execute() on a nil Command") } if len(c.Deprecated) > 0 { @@ -1187,10 +1189,11 @@ func (c *Command) InitDefaultHelpFlag() { c.mergePersistentFlags() if c.Flags().Lookup("help") == nil { usage := "help for " - if c.Name() == "" { + name := c.displayName() + if name == "" { usage += "this command" } else { - usage += c.Name() + usage += name } c.Flags().BoolP("help", "h", false, usage) _ = c.Flags().SetAnnotation("help", FlagSetByCobraAnnotation, []string{"true"}) @@ -1236,7 +1239,7 @@ func (c *Command) InitDefaultHelpCmd() { Use: "help [command]", Short: "Help about any command", Long: `Help provides help for any command in the application. -Simply type ` + c.Name() + ` help [path to command] for full details.`, +Simply type ` + c.displayName() + ` help [path to command] for full details.`, ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]string, ShellCompDirective) { var completions []string cmd, _, e := c.Root().Find(args) @@ -1427,6 +1430,10 @@ func (c *Command) CommandPath() string { if c.HasParent() { return c.Parent().CommandPath() + " " + c.Name() } + return c.displayName() +} + +func (c *Command) displayName() string { if displayName, ok := c.Annotations[CommandDisplayNameAnnotation]; ok { return displayName } @@ -1436,10 +1443,11 @@ // UseLine puts out the full usage for a given command (including parents). func (c *Command) UseLine() string { var useline string + use := strings.Replace(c.Use, c.Name(), c.displayName(), 1) if c.HasParent() { - useline = c.parent.CommandPath() + " " + c.Use + useline = c.parent.CommandPath() + " " + use } else { - useline = c.Use + useline = use } if c.DisableFlagsInUseLine { return useline @@ -1452,7 +1460,6 @@ // DebugFlags used to determine which flags have been assigned to which commands // and which persist. -// nolint:goconst func (c *Command) DebugFlags() { c.Println("DebugFlags called on", c.Name()) var debugflags func(*Command) @@ -1642,7 +1649,7 @@ func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) f // to this command (local and persistent declared here and by all parents). func (c *Command) Flags() *flag.FlagSet { if c.flags == nil { - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1653,10 +1660,11 @@ } // LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands. +// This function does not modify the flags of the current command, its purpose is to return the current state.
func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { persistentFlags := c.PersistentFlags() - out := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + out := flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.LocalFlags().VisitAll(func(f *flag.Flag) { if persistentFlags.Lookup(f.Name) == nil { out.AddFlag(f) @@ -1666,11 +1674,12 @@ func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { } // LocalFlags returns the local FlagSet specifically set in the current command. +// This function does not modify the flags of the current command, it's purpose is to return the current state. func (c *Command) LocalFlags() *flag.FlagSet { c.mergePersistentFlags() if c.lflags == nil { - c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.lflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1693,11 +1702,12 @@ func (c *Command) LocalFlags() *flag.FlagSet { } // InheritedFlags returns all flags which were inherited from parent commands. +// This function does not modify the flags of the current command, it's purpose is to return the current state. func (c *Command) InheritedFlags() *flag.FlagSet { c.mergePersistentFlags() if c.iflags == nil { - c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.iflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1718,6 +1728,7 @@ func (c *Command) InheritedFlags() *flag.FlagSet { } // NonInheritedFlags returns all flags which were not inherited from parent commands. +// This function does not modify the flags of the current command, it's purpose is to return the current state. func (c *Command) NonInheritedFlags() *flag.FlagSet { return c.LocalFlags() } @@ -1725,7 +1736,7 @@ func (c *Command) NonInheritedFlags() *flag.FlagSet { // PersistentFlags returns the persistent FlagSet specifically set in the current command. func (c *Command) PersistentFlags() *flag.FlagSet { if c.pflags == nil { - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.pflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1738,9 +1749,9 @@ func (c *Command) PersistentFlags() *flag.FlagSet { func (c *Command) ResetFlags() { c.flagErrorBuf = new(bytes.Buffer) c.flagErrorBuf.Reset() - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.flags.SetOutput(c.flagErrorBuf) - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.pflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.pflags.SetOutput(c.flagErrorBuf) c.lflags = nil @@ -1857,7 +1868,7 @@ func (c *Command) mergePersistentFlags() { // If c.parentsPflags == nil, it makes new. 
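The new `displayName()` helper is what routes `CommandDisplayNameAnnotation` into `CommandPath`, `UseLine`, the default help text, and the names of the lazily built flag sets above. A sketch of how a caller opts in; the plugin names are illustrative:

```go
package main

import "github.com/spf13/cobra"

func main() {
	// Hypothetical plugin: the binary is "kubectl-myplugin", but users invoke
	// it as "kubectl myplugin", so help output should show the latter.
	root := &cobra.Command{
		Use: "kubectl-myplugin",
		Annotations: map[string]string{
			cobra.CommandDisplayNameAnnotation: "kubectl myplugin",
		},
		Run: func(cmd *cobra.Command, args []string) {},
	}
	// CommandPath, UseLine and the default help command now report
	// "kubectl myplugin" instead of Name().
	_ = root.Execute()
}
```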
func (c *Command) updateParentsPflags() { if c.parentsPflags == nil { - c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.parentsPflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.parentsPflags.SetOutput(c.flagErrorBuf) c.parentsPflags.SortFlags = false } diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go index b60f6b20007..c0c08b05721 100644 --- a/vendor/github.com/spf13/cobra/completions.go +++ b/vendor/github.com/spf13/cobra/completions.go @@ -17,6 +17,8 @@ package cobra import ( "fmt" "os" + "regexp" + "strconv" "strings" "sync" @@ -211,24 +213,29 @@ func (c *Command) initCompleteCmd(args []string) { // 2- Even without completions, we need to print the directive } - noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd) + noDescriptions := cmd.CalledAs() == ShellCompNoDescRequestCmd + if !noDescriptions { + if doDescriptions, err := strconv.ParseBool(getEnvConfig(cmd, configEnvVarSuffixDescriptions)); err == nil { + noDescriptions = !doDescriptions + } + } + noActiveHelp := GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable + out := finalCmd.OutOrStdout() for _, comp := range completions { - if GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable { - // Remove all activeHelp entries in this case - if strings.HasPrefix(comp, activeHelpMarker) { - continue - } + if noActiveHelp && strings.HasPrefix(comp, activeHelpMarker) { + // Remove all activeHelp entries if it's disabled. + continue } if noDescriptions { // Remove any description that may be included following a tab character. - comp = strings.Split(comp, "\t")[0] + comp = strings.SplitN(comp, "\t", 2)[0] } // Make sure we only write the first line to the output. // This is needed if a description contains a linebreak. // Otherwise the shell scripts will interpret the other lines as new flags // and could therefore provide a wrong completion. - comp = strings.SplitN(comp, "\n", 2)[0] // Finally trim the completion. This is especially important to get rid // of a trailing tab when there are no description following it. @@ -237,14 +244,14 @@ func (c *Command) initCompleteCmd(args []string) { // although there is no description). comp = strings.TrimSpace(comp) - // Print each possible completion to stdout for the completion script to consume. - fmt.Fprintln(finalCmd.OutOrStdout(), comp) + // Print each possible completion to the output for the completion script to consume. + fmt.Fprintln(out, comp) } // As the last printout, print the completion directive for the completion script to parse. // The directive integer must be that last character following a single colon (:). // The completion script expects :<directive> - fmt.Fprintf(finalCmd.OutOrStdout(), ":%d\n", directive) + fmt.Fprintf(out, ":%d\n", directive) // Print some helpful info to stderr for the user to understand. // Output from stderr must be ignored by the completion script. @@ -291,7 +298,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi } if err != nil { // Unable to find the real command.
E.g., someInvalidCmd - return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs) + return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("unable to find a command for arguments: %v", trimmedArgs) } finalCmd.ctx = c.ctx @@ -899,3 +906,34 @@ func CompError(msg string) { func CompErrorln(msg string) { CompError(fmt.Sprintf("%s\n", msg)) } + +// These values should not be changed: users will be using them explicitly. +const ( + configEnvVarGlobalPrefix = "COBRA" + configEnvVarSuffixDescriptions = "COMPLETION_DESCRIPTIONS" +) + +var configEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`) + +// configEnvVar returns the name of the program-specific configuration environment +// variable. It has the format <PROGRAM>_<SUFFIX> where <PROGRAM> is the name of the +// root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`. +func configEnvVar(name, suffix string) string { + // This format should not be changed: users will be using it explicitly. + v := strings.ToUpper(fmt.Sprintf("%s_%s", name, suffix)) + v = configEnvVarPrefixSubstRegexp.ReplaceAllString(v, "_") + return v +} + +// getEnvConfig returns the value of the configuration environment variable +// <PROGRAM>_<SUFFIX> where <PROGRAM> is the name of the root command in upper +// case, with all non-ASCII-alphanumeric characters replaced by `_`. +// If the value is empty or not set, the value of the environment variable +// COBRA_<SUFFIX> is returned instead. +func getEnvConfig(cmd *Command, suffix string) string { + v := os.Getenv(configEnvVar(cmd.Root().Name(), suffix)) + if v == "" { + v = os.Getenv(configEnvVar(configEnvVarGlobalPrefix, suffix)) + } + return v +} diff --git a/vendor/github.com/spf13/cobra/flag_groups.go b/vendor/github.com/spf13/cobra/flag_groups.go index 0671ec5f202..560612fd338 100644 --- a/vendor/github.com/spf13/cobra/flag_groups.go +++ b/vendor/github.com/spf13/cobra/flag_groups.go @@ -23,9 +23,9 @@ import ( ) const ( - requiredAsGroup = "cobra_annotation_required_if_others_set" - oneRequired = "cobra_annotation_one_required" - mutuallyExclusive = "cobra_annotation_mutually_exclusive" + requiredAsGroupAnnotation = "cobra_annotation_required_if_others_set" + oneRequiredAnnotation = "cobra_annotation_one_required" + mutuallyExclusiveAnnotation = "cobra_annotation_mutually_exclusive" ) // MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors @@ -37,7 +37,7 @@ func (c *Command) MarkFlagsRequiredTogether(flagNames ...string) { if f == nil { panic(fmt.Sprintf("Failed to find flag %q and mark it as being required in a flag group", v)) } - if err := c.Flags().SetAnnotation(v, requiredAsGroup, append(f.Annotations[requiredAsGroup], strings.Join(flagNames, " "))); err != nil { + if err := c.Flags().SetAnnotation(v, requiredAsGroupAnnotation, append(f.Annotations[requiredAsGroupAnnotation], strings.Join(flagNames, " "))); err != nil { // Only errs if the flag isn't found. panic(err) } @@ -53,7 +53,7 @@ func (c *Command) MarkFlagsOneRequired(flagNames ...string) { if f == nil { panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a one-required flag group", v)) } - if err := c.Flags().SetAnnotation(v, oneRequired, append(f.Annotations[oneRequired], strings.Join(flagNames, " "))); err != nil { + if err := c.Flags().SetAnnotation(v, oneRequiredAnnotation, append(f.Annotations[oneRequiredAnnotation], strings.Join(flagNames, " "))); err != nil { // Only errs if the flag isn't found.
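With `getEnvConfig` wired into `initCompleteCmd` above, completion descriptions can be toggled per program via `<PROGRAM>_COMPLETION_DESCRIPTIONS`, falling back to the global `COBRA_COMPLETION_DESCRIPTIONS`. A small sketch of the name derivation, mirroring `configEnvVar`; the helper name here is illustrative:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var nonAlnum = regexp.MustCompile(`[^A-Z0-9_]`)

// completionDescriptionsVar mirrors configEnvVar for the
// COMPLETION_DESCRIPTIONS suffix.
func completionDescriptionsVar(rootName string) string {
	v := strings.ToUpper(rootName + "_COMPLETION_DESCRIPTIONS")
	return nonAlnum.ReplaceAllString(v, "_")
}

func main() {
	fmt.Println(completionDescriptionsVar("my-app")) // MY_APP_COMPLETION_DESCRIPTIONS
	fmt.Println("COBRA_COMPLETION_DESCRIPTIONS")     // the global fallback
}
```

Setting either variable to something `strconv.ParseBool` rejects leaves the shell's own no-descriptions request in charge.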
panic(err) } @@ -70,7 +70,7 @@ func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) { panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a mutually exclusive flag group", v)) } // Each time this is called is a single new entry; this allows it to be a member of multiple groups if needed. - if err := c.Flags().SetAnnotation(v, mutuallyExclusive, append(f.Annotations[mutuallyExclusive], strings.Join(flagNames, " "))); err != nil { + if err := c.Flags().SetAnnotation(v, mutuallyExclusiveAnnotation, append(f.Annotations[mutuallyExclusiveAnnotation], strings.Join(flagNames, " "))); err != nil { panic(err) } } @@ -91,9 +91,9 @@ func (c *Command) ValidateFlagGroups() error { oneRequiredGroupStatus := map[string]map[string]bool{} mutuallyExclusiveGroupStatus := map[string]map[string]bool{} flags.VisitAll(func(pflag *flag.Flag) { - processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) - processFlagForGroupAnnotation(flags, pflag, oneRequired, oneRequiredGroupStatus) - processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) + processFlagForGroupAnnotation(flags, pflag, requiredAsGroupAnnotation, groupStatus) + processFlagForGroupAnnotation(flags, pflag, oneRequiredAnnotation, oneRequiredGroupStatus) + processFlagForGroupAnnotation(flags, pflag, mutuallyExclusiveAnnotation, mutuallyExclusiveGroupStatus) }) if err := validateRequiredFlagGroups(groupStatus); err != nil { @@ -130,7 +130,7 @@ func processFlagForGroupAnnotation(flags *flag.FlagSet, pflag *flag.Flag, annota continue } - groupStatus[group] = map[string]bool{} + groupStatus[group] = make(map[string]bool, len(flagnames)) for _, name := range flagnames { groupStatus[group][name] = false } @@ -232,9 +232,9 @@ func (c *Command) enforceFlagGroupsForCompletion() { oneRequiredGroupStatus := map[string]map[string]bool{} mutuallyExclusiveGroupStatus := map[string]map[string]bool{} c.Flags().VisitAll(func(pflag *flag.Flag) { - processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) - processFlagForGroupAnnotation(flags, pflag, oneRequired, oneRequiredGroupStatus) - processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) + processFlagForGroupAnnotation(flags, pflag, requiredAsGroupAnnotation, groupStatus) + processFlagForGroupAnnotation(flags, pflag, oneRequiredAnnotation, oneRequiredGroupStatus) + processFlagForGroupAnnotation(flags, pflag, mutuallyExclusiveAnnotation, mutuallyExclusiveGroupStatus) }) // If a flag that is part of a group is present, we make all the other flags @@ -253,17 +253,17 @@ func (c *Command) enforceFlagGroupsForCompletion() { // If none of the flags of a one-required group are present, we make all the flags // of that group required so that the shell completion suggests them automatically for flagList, flagnameAndStatus := range oneRequiredGroupStatus { - set := 0 + isSet := false - for _, isSet := range flagnameAndStatus { + for _, isSet = range flagnameAndStatus { if isSet { - set++ + break } } // None of the flags of the group are set, mark all flags in the group // as required - if set == 0 { + if !isSet { for _, fName := range strings.Split(flagList, " ") { _ = c.MarkFlagRequired(fName) } diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go index 55195193944..a830b7bcad2 100644 --- a/vendor/github.com/spf13/cobra/powershell_completions.go +++ b/vendor/github.com/spf13/cobra/powershell_completions.go @@ -28,8 
+28,8 @@ import ( func genPowerShellComp(buf io.StringWriter, name string, includeDesc bool) { // Variables should not contain a '-' or ':' character nameForVar := name - nameForVar = strings.Replace(nameForVar, "-", "_", -1) - nameForVar = strings.Replace(nameForVar, ":", "_", -1) + nameForVar = strings.ReplaceAll(nameForVar, "-", "_") + nameForVar = strings.ReplaceAll(nameForVar, ":", "_") compCmd := ShellCompRequestCmd if !includeDesc { diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md index b96180b3b9d..3fc7d84f16c 100644 --- a/vendor/github.com/spf13/viper/README.md +++ b/vendor/github.com/spf13/viper/README.md @@ -11,7 +11,7 @@ [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/viper/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI) [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/viper?style=flat-square)](https://goreportcard.com/report/github.com/spf13/viper) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.19-61CFDD.svg?style=flat-square) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.20-61CFDD.svg?style=flat-square) [![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/viper)](https://pkg.go.dev/mod/github.com/spf13/viper) **Go configuration with fangs!** @@ -39,7 +39,7 @@ Many Go projects are built using Viper including: go get github.com/spf13/viper ``` -**Note:** Viper uses [Go Modules](https://github.com/golang/go/wiki/Modules) to manage dependencies. +**Note:** Viper uses [Go Modules](https://go.dev/wiki/Modules) to manage dependencies. ## What is Viper? @@ -420,7 +420,7 @@ flags, or environment variables. Viper supports multiple hosts. To use, pass a list of endpoints separated by `;`. For example `http://127.0.0.1:4001;http://127.0.0.1:4002`. -Viper uses [crypt](https://github.com/bketelsen/crypt) to retrieve +Viper uses [crypt](https://github.com/sagikazarmark/crypt) to retrieve configuration from the K/V store, which means that you can store your configuration values encrypted and have them automatically decrypted if you have the correct gpg keyring. Encryption is optional. @@ -432,7 +432,7 @@ independently of it. K/V store. `crypt` defaults to etcd on http://127.0.0.1:4001. ```bash -$ go get github.com/bketelsen/crypt/bin/crypt +$ go get github.com/sagikazarmark/crypt/bin/crypt $ crypt set -plaintext /config/hugo.json /Users/hugo/settings/config.json ``` diff --git a/vendor/github.com/spf13/viper/TROUBLESHOOTING.md b/vendor/github.com/spf13/viper/TROUBLESHOOTING.md index c4e36c68603..b68993d4123 100644 --- a/vendor/github.com/spf13/viper/TROUBLESHOOTING.md +++ b/vendor/github.com/spf13/viper/TROUBLESHOOTING.md @@ -15,10 +15,10 @@ cannot find package "github.com/hashicorp/hcl/tree/hcl1" in any of: ``` As the error message suggests, Go tries to look up dependencies in `GOPATH` mode (as it's commonly called) from the `GOPATH`. -Viper opted to use [Go Modules](https://github.com/golang/go/wiki/Modules) to manage its dependencies. While in many cases the two methods are interchangeable, once a dependency releases new (major) versions, `GOPATH` mode is no longer able to decide which version to use, so it'll either use one that's already present or pick a version (usually the `master` branch). 
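For context on the crypt-backed remote providers the README references, a sketch of reading JSON configuration from etcd; the endpoint and key path are the README's examples, and this fails at runtime unless such an etcd is actually reachable:

```go
package main

import (
	"fmt"

	"github.com/spf13/viper"
	// The blank import registers the crypt-backed remote provider support.
	_ "github.com/spf13/viper/remote"
)

func main() {
	v := viper.New()
	if err := v.AddRemoteProvider("etcd", "http://127.0.0.1:4001", "/config/hugo.json"); err != nil {
		panic(err)
	}
	v.SetConfigType("json") // remote payloads need an explicit format
	if err := v.ReadRemoteConfig(); err != nil {
		panic(err)
	}
	fmt.Println(v.AllSettings())
}
```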
+Viper opted to use [Go Modules](https://go.dev/wiki/Modules) to manage its dependencies. While in many cases the two methods are interchangeable, once a dependency releases new (major) versions, `GOPATH` mode is no longer able to decide which version to use, so it'll either use one that's already present or pick a version (usually the `master` branch). The solution is easy: switch to using Go Modules. -Please refer to the [wiki](https://github.com/golang/go/wiki/Modules) on how to do that. +Please refer to the [wiki](https://go.dev/wiki/Modules) on how to do that. **tl;dr* `export GO111MODULE=on` diff --git a/vendor/github.com/spf13/viper/flake.lock b/vendor/github.com/spf13/viper/flake.lock index 78da51090b5..3840614fa20 100644 --- a/vendor/github.com/spf13/viper/flake.lock +++ b/vendor/github.com/spf13/viper/flake.lock @@ -8,11 +8,11 @@ "pre-commit-hooks": "pre-commit-hooks" }, "locked": { - "lastModified": 1687972261, - "narHash": "sha256-+mxvZfwMVoaZYETmuQWqTi/7T9UKoAE+WpdSQkOVJ2g=", + "lastModified": 1707817777, + "narHash": "sha256-vHyIs1OULQ3/91wD6xOiuayfI71JXALGA5KLnDKAcy0=", "owner": "cachix", "repo": "devenv", - "rev": "e85df562088573305e55906eaa964341f8cb0d9f", + "rev": "5a30b9e5ac7c6167e61b1f4193d5130bb9f8defa", "type": "github" }, "original": { @@ -42,11 +42,11 @@ "nixpkgs-lib": "nixpkgs-lib" }, "locked": { - "lastModified": 1687762428, - "narHash": "sha256-DIf7mi45PKo+s8dOYF+UlXHzE0Wl/+k3tXUyAoAnoGE=", + "lastModified": 1706830856, + "narHash": "sha256-a0NYyp+h9hlb7ddVz4LUn1vT/PLwqfrWYcHMvFB1xYg=", "owner": "hercules-ci", "repo": "flake-parts", - "rev": "37dd7bb15791c86d55c5121740a1887ab55ee836", + "rev": "b253292d9c0a5ead9bc98c4e9a26c6312e27d69f", "type": "github" }, "original": { @@ -56,12 +56,15 @@ } }, "flake-utils": { + "inputs": { + "systems": "systems" + }, "locked": { - "lastModified": 1667395993, - "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=", + "lastModified": 1685518550, + "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=", "owner": "numtide", "repo": "flake-utils", - "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f", + "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef", "type": "github" }, "original": { @@ -151,11 +154,11 @@ "nixpkgs-lib": { "locked": { "dir": "lib", - "lastModified": 1685564631, - "narHash": "sha256-8ywr3AkblY4++3lIVxmrWZFzac7+f32ZEhH/A8pNscI=", + "lastModified": 1706550542, + "narHash": "sha256-UcsnCG6wx++23yeER4Hg18CXWbgNpqNXcHIo5/1Y+hc=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "4f53efe34b3a8877ac923b9350c874e3dcd5dc0a", + "rev": "97b17f32362e475016f942bbdfda4a4a72a8a652", "type": "github" }, "original": { @@ -184,27 +187,27 @@ }, "nixpkgs-stable": { "locked": { - "lastModified": 1678872516, - "narHash": "sha256-/E1YwtMtFAu2KUQKV/1+KFuReYPANM2Rzehk84VxVoc=", + "lastModified": 1685801374, + "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "9b8e5abb18324c7fe9f07cb100c3cd4a29cda8b8", + "rev": "c37ca420157f4abc31e26f436c1145f8951ff373", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-22.11", + "ref": "nixos-23.05", "repo": "nixpkgs", "type": "github" } }, "nixpkgs_2": { "locked": { - "lastModified": 1687886075, - "narHash": "sha256-PeayJDDDy+uw1Ats4moZnRdL1OFuZm1Tj+KiHlD67+o=", + "lastModified": 1707939175, + "narHash": "sha256-D1xan0lgxbmXDyzVqXTiSYHLmAMrMRdD+alKzEO/p3w=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "a565059a348422af5af9026b5174dc5c0dcefdae", + "rev": "f7e8132daca31b1e3859ac0fb49741754375ac3d", "type": 
"github" }, "original": { @@ -229,11 +232,11 @@ "nixpkgs-stable": "nixpkgs-stable" }, "locked": { - "lastModified": 1686050334, - "narHash": "sha256-R0mczWjDzBpIvM3XXhO908X5e2CQqjyh/gFbwZk/7/Q=", + "lastModified": 1704725188, + "narHash": "sha256-qq8NbkhRZF1vVYQFt1s8Mbgo8knj+83+QlL5LBnYGpI=", "owner": "cachix", "repo": "pre-commit-hooks.nix", - "rev": "6881eb2ae5d8a3516e34714e7a90d9d95914c4dc", + "rev": "ea96f0c05924341c551a797aaba8126334c505d2", "type": "github" }, "original": { @@ -248,6 +251,21 @@ "flake-parts": "flake-parts", "nixpkgs": "nixpkgs_2" } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } } }, "root": "root", diff --git a/vendor/github.com/spf13/viper/flake.nix b/vendor/github.com/spf13/viper/flake.nix index 9b26c3fcf5a..0230668cff6 100644 --- a/vendor/github.com/spf13/viper/flake.nix +++ b/vendor/github.com/spf13/viper/flake.nix @@ -20,6 +20,7 @@ default = { languages = { go.enable = true; + go.package = pkgs.go_1_22; }; pre-commit.hooks = { diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go index 20eb4da177e..da68d9944cc 100644 --- a/vendor/github.com/spf13/viper/viper.go +++ b/vendor/github.com/spf13/viper/viper.go @@ -624,7 +624,7 @@ func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error { // To retrieve a config file called myapp.json from /configs/myapp.json // you should set path to /configs and set config name (SetConfigName()) to // "myapp". -// Secure Remote Providers are implemented with github.com/bketelsen/crypt. +// Secure Remote Providers are implemented with github.com/sagikazarmark/crypt. func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring) } @@ -1791,12 +1791,6 @@ func (v *Viper) writeConfig(filename string, force bool) error { return f.Sync() } -// Unmarshal a Reader into a map. -// Should probably be an unexported function. -func unmarshalReader(in io.Reader, c map[string]any) error { - return v.unmarshalReader(in, c) -} - func (v *Viper) unmarshalReader(in io.Reader, c map[string]any) error { buf := new(bytes.Buffer) buf.ReadFrom(in) diff --git a/vendor/go.etcd.io/etcd/api/v3/version/version.go b/vendor/go.etcd.io/etcd/api/v3/version/version.go index 1819ead7290..4858a08bfe3 100644 --- a/vendor/go.etcd.io/etcd/api/v3/version/version.go +++ b/vendor/go.etcd.io/etcd/api/v3/version/version.go @@ -26,7 +26,7 @@ import ( var ( // MinClusterVersion is the min cluster version this etcd binary is compatible with. 
MinClusterVersion = "3.0.0" - Version = "3.5.10" + Version = "3.5.12" APIVersion = "unknown" // Git SHA Value will be set during build diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go index f4492009d6c..b314e068fea 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go @@ -25,18 +25,24 @@ import ( ) func PurgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error { - return purgeFile(lg, dirname, suffix, max, interval, stop, nil, nil) + return purgeFile(lg, dirname, suffix, max, interval, stop, nil, nil, true) } func PurgeFileWithDoneNotify(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) (<-chan struct{}, <-chan error) { doneC := make(chan struct{}) - errC := purgeFile(lg, dirname, suffix, max, interval, stop, nil, doneC) + errC := purgeFile(lg, dirname, suffix, max, interval, stop, nil, doneC, true) + return doneC, errC +} + +func PurgeFileWithoutFlock(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) (<-chan struct{}, <-chan error) { + doneC := make(chan struct{}) + errC := purgeFile(lg, dirname, suffix, max, interval, stop, nil, doneC, false) return doneC, errC } // purgeFile is the internal implementation for PurgeFile which can post purged files to purgec if non-nil. // if donec is non-nil, the function closes it to notify its exit. -func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string, donec chan<- struct{}) <-chan error { +func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string, donec chan<- struct{}, flock bool) <-chan error { if lg == nil { lg = zap.NewNop() } @@ -67,20 +73,25 @@ func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval fnames = newfnames for len(newfnames) > int(max) { f := filepath.Join(dirname, newfnames[0]) - l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode) - if err != nil { - lg.Warn("failed to lock file", zap.String("path", f), zap.Error(err)) - break + var l *LockedFile + if flock { + l, err = TryLockFile(f, os.O_WRONLY, PrivateFileMode) + if err != nil { + lg.Warn("failed to lock file", zap.String("path", f), zap.Error(err)) + break + } } if err = os.Remove(f); err != nil { lg.Error("failed to remove file", zap.String("path", f), zap.Error(err)) errC <- err return } - if err = l.Close(); err != nil { - lg.Error("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err)) - errC <- err - return + if flock { + if err = l.Close(); err != nil { + lg.Error("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err)) + errC <- err + return + } } lg.Info("purged", zap.String("path", f)) newfnames = newfnames[1:] diff --git a/vendor/go.opentelemetry.io/collector/component/component.go b/vendor/go.opentelemetry.io/collector/component/component.go index 794fc9235a9..f5f68b57290 100644 --- a/vendor/go.opentelemetry.io/collector/component/component.go +++ b/vendor/go.opentelemetry.io/collector/component/component.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -// Package component outlines the abstraction of components within the OpenTelemetry Collector. 
It provides details on the component +// Package component outlines the abstraction of components within the OpenTelemetry Collector. It provides details on the component // lifecycle as well as defining the interface that components must fulfill. package component // import "go.opentelemetry.io/collector/component" @@ -11,12 +11,12 @@ import ( ) var ( - // ErrDataTypeIsNotSupported can be returned by receiver, exporter or processor factory funcs that create the - // Component if the particular telemetry data type is not supported by the receiver, exporter or processor. + // ErrDataTypeIsNotSupported can be returned by receiver, exporter, processor or connector factory funcs that create the + // Component if the particular telemetry data type is not supported by the receiver, exporter, processor or connector factory. ErrDataTypeIsNotSupported = errors.New("telemetry type is not supported") ) -// Component is either a receiver, exporter, processor, or an extension. +// Component is either a receiver, exporter, processor, connector, or an extension. // // A component's lifecycle has the following phases: // @@ -174,7 +174,7 @@ type Factory interface { // CreateDefaultConfig creates the default configuration for the Component. // This method can be called multiple times depending on the pipeline - // configuration and should not cause side-effects that prevent the creation + // configuration and should not cause side effects that prevent the creation // of multiple instances of the Component. // The object returned by this method needs to pass the checks implemented by // 'componenttest.CheckConfigStruct'. It is recommended to have these checks in the diff --git a/vendor/go.opentelemetry.io/collector/component/componenttest/obsreporttest.go b/vendor/go.opentelemetry.io/collector/component/componenttest/obsreporttest.go index f342879a533..ba076c65905 100644 --- a/vendor/go.opentelemetry.io/collector/component/componenttest/obsreporttest.go +++ b/vendor/go.opentelemetry.io/collector/component/componenttest/obsreporttest.go @@ -78,20 +78,20 @@ func (tts *TestTelemetry) CheckExporterMetricGauge(metric string, val int64) err // CheckProcessorTraces checks that for the current exported values for trace exporter metrics match given values. // When this function is called it is required to also call SetupTelemetry as first thing. -func (tts *TestTelemetry) CheckProcessorTraces(acceptedSpans, refusedSpans, droppedSpans int64) error { - return tts.prometheusChecker.checkProcessorTraces(tts.id, acceptedSpans, refusedSpans, droppedSpans) +func (tts *TestTelemetry) CheckProcessorTraces(acceptedSpans, refusedSpans, droppedSpans, insertedSpans int64) error { + return tts.prometheusChecker.checkProcessorTraces(tts.id, acceptedSpans, refusedSpans, droppedSpans, insertedSpans) } // CheckProcessorMetrics checks that for the current exported values for metrics exporter metrics match given values. // When this function is called it is required to also call SetupTelemetry as first thing. 
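Every caller of the `CheckProcessor*` helpers now has to supply the extra `inserted` counter. A sketch of an updated test, assuming the componenttest API of this collector version and an illustrative processor ID; it is skipped because it needs a processor actually reporting obsreport metrics:

```go
package myprocessor_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/component/componenttest"
)

func TestProcessorCounters(t *testing.T) {
	t.Skip("sketch only: requires a processor emitting obsreport metrics")

	tts, err := componenttest.SetupTelemetry(component.MustNewID("myprocessor"))
	require.NoError(t, err)
	t.Cleanup(func() { require.NoError(t, tts.Shutdown(context.Background())) })

	// Previously: tts.CheckProcessorTraces(13, 7, 5)
	// The trailing 0 is the new insertedSpans argument.
	require.NoError(t, tts.CheckProcessorTraces(13, 7, 5, 0))
}
```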
-func (tts *TestTelemetry) CheckProcessorMetrics(acceptedMetricPoints, refusedMetricPoints, droppedMetricPoints int64) error { - return tts.prometheusChecker.checkProcessorMetrics(tts.id, acceptedMetricPoints, refusedMetricPoints, droppedMetricPoints) +func (tts *TestTelemetry) CheckProcessorMetrics(acceptedMetricPoints, refusedMetricPoints, droppedMetricPoints, insertedMetricPoints int64) error { + return tts.prometheusChecker.checkProcessorMetrics(tts.id, acceptedMetricPoints, refusedMetricPoints, droppedMetricPoints, insertedMetricPoints) } // CheckProcessorLogs checks that for the current exported values for logs exporter metrics match given values. // When this function is called it is required to also call SetupTelemetry as first thing. -func (tts *TestTelemetry) CheckProcessorLogs(acceptedLogRecords, refusedLogRecords, droppedLogRecords int64) error { - return tts.prometheusChecker.checkProcessorLogs(tts.id, acceptedLogRecords, refusedLogRecords, droppedLogRecords) +func (tts *TestTelemetry) CheckProcessorLogs(acceptedLogRecords, refusedLogRecords, droppedLogRecords, insertedLogRecords int64) error { + return tts.prometheusChecker.checkProcessorLogs(tts.id, acceptedLogRecords, refusedLogRecords, droppedLogRecords, insertedLogRecords) } // CheckReceiverTraces checks that for the current exported values for trace receiver metrics match given values. diff --git a/vendor/go.opentelemetry.io/collector/component/componenttest/otelprometheuschecker.go b/vendor/go.opentelemetry.io/collector/component/componenttest/otelprometheuschecker.go index 5beef52373a..9dbe60384aa 100644 --- a/vendor/go.opentelemetry.io/collector/component/componenttest/otelprometheuschecker.go +++ b/vendor/go.opentelemetry.io/collector/component/componenttest/otelprometheuschecker.go @@ -48,24 +48,26 @@ func (pc *prometheusChecker) checkReceiver(receiver component.ID, datatype, prot pc.checkCounter(fmt.Sprintf("receiver_refused_%s", datatype), droppedMetricPoints, receiverAttrs)) } -func (pc *prometheusChecker) checkProcessorTraces(processor component.ID, accepted, refused, dropped int64) error { - return pc.checkProcessor(processor, "spans", accepted, refused, dropped) +func (pc *prometheusChecker) checkProcessorTraces(processor component.ID, accepted, refused, dropped, inserted int64) error { + return pc.checkProcessor(processor, "spans", accepted, refused, dropped, inserted) } -func (pc *prometheusChecker) checkProcessorMetrics(processor component.ID, accepted, refused, dropped int64) error { - return pc.checkProcessor(processor, "metric_points", accepted, refused, dropped) +func (pc *prometheusChecker) checkProcessorMetrics(processor component.ID, accepted, refused, dropped, inserted int64) error { + return pc.checkProcessor(processor, "metric_points", accepted, refused, dropped, inserted) } -func (pc *prometheusChecker) checkProcessorLogs(processor component.ID, accepted, refused, dropped int64) error { - return pc.checkProcessor(processor, "log_records", accepted, refused, dropped) +func (pc *prometheusChecker) checkProcessorLogs(processor component.ID, accepted, refused, dropped, inserted int64) error { + return pc.checkProcessor(processor, "log_records", accepted, refused, dropped, inserted) } -func (pc *prometheusChecker) checkProcessor(processor component.ID, datatype string, accepted, refused, dropped int64) error { +func (pc *prometheusChecker) checkProcessor(processor component.ID, datatype string, accepted, refused, dropped, inserted int64) error { processorAttrs := 
attributesForProcessorMetrics(processor) return multierr.Combine( pc.checkCounter(fmt.Sprintf("processor_accepted_%s", datatype), accepted, processorAttrs), pc.checkCounter(fmt.Sprintf("processor_refused_%s", datatype), refused, processorAttrs), - pc.checkCounter(fmt.Sprintf("processor_dropped_%s", datatype), dropped, processorAttrs)) + pc.checkCounter(fmt.Sprintf("processor_dropped_%s", datatype), dropped, processorAttrs), + pc.checkCounter(fmt.Sprintf("processor_inserted_%s", datatype), inserted, processorAttrs), + ) } func (pc *prometheusChecker) checkExporterTraces(exporter component.ID, sent, sendFailed int64) error { diff --git a/vendor/go.opentelemetry.io/collector/component/config.go b/vendor/go.opentelemetry.io/collector/component/config.go index b53ff872fda..b961d1fdb1f 100644 --- a/vendor/go.opentelemetry.io/collector/component/config.go +++ b/vendor/go.opentelemetry.io/collector/component/config.go @@ -9,8 +9,6 @@ import ( "regexp" "go.uber.org/multierr" - - "go.opentelemetry.io/collector/confmap" ) // Config defines the configuration for a component.Component. @@ -26,12 +24,6 @@ type Config any // for an interface type Foo is to use a *Foo value. var configValidatorType = reflect.TypeOf((*ConfigValidator)(nil)).Elem() -// UnmarshalConfig helper function to UnmarshalConfig a Config. -// Deprecated: [v0.101.0] Use conf.Unmarshal(&intoCfg) -func UnmarshalConfig(conf *confmap.Conf, intoCfg Config) error { - return conf.Unmarshal(intoCfg) -} - // ConfigValidator defines an optional interface for configurations to implement to do validation. type ConfigValidator interface { // Validate the configuration and returns an error if invalid. @@ -91,7 +83,7 @@ func callValidateIfPossible(v reflect.Value) error { } // If the pointer type implements ConfigValidator call Validate on the pointer to the current value. - if reflect.PtrTo(v.Type()).Implements(configValidatorType) { + if reflect.PointerTo(v.Type()).Implements(configValidatorType) { // If not addressable, then create a new *V pointer and set the value to current v. if !v.CanAddr() { pv := reflect.New(reflect.PtrTo(v.Type()).Elem()) diff --git a/vendor/go.opentelemetry.io/collector/component/host.go b/vendor/go.opentelemetry.io/collector/component/host.go index 50472a6d88c..4c98ee70f96 100644 --- a/vendor/go.opentelemetry.io/collector/component/host.go +++ b/vendor/go.opentelemetry.io/collector/component/host.go @@ -20,7 +20,7 @@ type Host interface { GetFactory(kind Kind, componentType Type) Factory // GetExtensions returns the map of extensions. Only enabled and created extensions will be returned. - // Typically is used to find an extension by type or by full config name. Both cases + // Typically, it is used to find an extension by type or by full config name. Both cases // can be done by iterating the returned map. There are typically very few extensions, // so there are no performance implications due to iteration. // diff --git a/vendor/go.opentelemetry.io/collector/component/telemetry.go b/vendor/go.opentelemetry.io/collector/component/telemetry.go index 17ca3dcab67..6f8f0ec84d3 100644 --- a/vendor/go.opentelemetry.io/collector/component/telemetry.go +++ b/vendor/go.opentelemetry.io/collector/component/telemetry.go @@ -15,8 +15,8 @@ import ( // TelemetrySettings provides components with APIs to report telemetry. // // Note: there is a service version of this struct, servicetelemetry.TelemetrySettings, that mirrors -// this struct with the exception of ReportStatus. 
When adding or removing anything from -// this struct consider whether or not the same should be done for the service version. +// this struct except ReportStatus. When adding or removing anything from +// this struct consider whether the same should be done for the service version. type TelemetrySettings struct { // Logger that the factory can use during creation and can pass to the created // component to be used later as well. @@ -37,6 +37,7 @@ type TelemetrySettings struct { // ReportStatus allows a component to report runtime changes in status. The service // will automatically report status for a component during startup and shutdown. Components can - // use this method to report status after start and before shutdown. + // use this method to report status after start and before shutdown. For more details about + // component status reporting see: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-status.md ReportStatus func(*StatusEvent) } diff --git a/vendor/go.opentelemetry.io/collector/config/configauth/configauth.go b/vendor/go.opentelemetry.io/collector/config/configauth/configauth.go index aa7002c270b..dc275f705e0 100644 --- a/vendor/go.opentelemetry.io/collector/config/configauth/configauth.go +++ b/vendor/go.opentelemetry.io/collector/config/configauth/configauth.go @@ -7,6 +7,7 @@ package configauth // import "go.opentelemetry.io/collector/config/configauth" import ( + "context" "errors" "fmt" @@ -33,7 +34,15 @@ func NewDefaultAuthentication() *Authentication { // GetServerAuthenticator attempts to select the appropriate auth.Server from the list of extensions, // based on the requested extension name. If an authenticator is not found, an error is returned. +// +// Deprecated: [v0.103.0] Use GetServerAuthenticatorContext instead. func (a Authentication) GetServerAuthenticator(extensions map[component.ID]component.Component) (auth.Server, error) { + return a.GetServerAuthenticatorContext(context.Background(), extensions) +} + +// GetServerAuthenticatorContext attempts to select the appropriate auth.Server from the list of extensions, +// based on the requested extension name. If an authenticator is not found, an error is returned. +func (a Authentication) GetServerAuthenticatorContext(_ context.Context, extensions map[component.ID]component.Component) (auth.Server, error) { if ext, found := extensions[a.AuthenticatorID]; found { if server, ok := ext.(auth.Server); ok { return server, nil @@ -44,10 +53,15 @@ func (a Authentication) GetServerAuthenticator(extensions map[component.ID]compo return nil, fmt.Errorf("failed to resolve authenticator %q: %w", a.AuthenticatorID, errAuthenticatorNotFound) } -// GetClientAuthenticator attempts to select the appropriate auth.Client from the list of extensions, +// Deprecated: [v0.103.0] Use GetClientAuthenticatorContext instead. +func (a Authentication) GetClientAuthenticator(extensions map[component.ID]component.Component) (auth.Client, error) { + return a.GetClientAuthenticatorContext(context.Background(), extensions) +} + +// GetClientAuthenticatorContext attempts to select the appropriate auth.Client from the list of extensions, // based on the component id of the extension. If an authenticator is not found, an error is returned. // This should be only used by HTTP clients. 
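Callers resolving authenticators should move to the context-taking variants; the context parameter is unused for now (`_ context.Context`) but keeps the signatures stable for later use. A minimal sketch of the client-side migration:

```go
package myexporter

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/config/configauth"
	"go.opentelemetry.io/collector/extension/auth"
)

// resolveClientAuth threads the caller's context through; before this change
// it would have used the now-deprecated GetClientAuthenticator(extensions).
func resolveClientAuth(ctx context.Context, a configauth.Authentication, host component.Host) (auth.Client, error) {
	return a.GetClientAuthenticatorContext(ctx, host.GetExtensions())
}
```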
-func (a Authentication) GetClientAuthenticator(extensions map[component.ID]component.Component) (auth.Client, error) { +func (a Authentication) GetClientAuthenticatorContext(_ context.Context, extensions map[component.ID]component.Component) (auth.Client, error) { if ext, found := extensions[a.AuthenticatorID]; found { if client, ok := ext.(auth.Client); ok { return client, nil diff --git a/vendor/go.opentelemetry.io/collector/config/configgrpc/configgrpc.go b/vendor/go.opentelemetry.io/collector/config/configgrpc/configgrpc.go index b57a199461c..87e7b83d766 100644 --- a/vendor/go.opentelemetry.io/collector/config/configgrpc/configgrpc.go +++ b/vendor/go.opentelemetry.io/collector/config/configgrpc/configgrpc.go @@ -12,6 +12,7 @@ import ( "time" "github.com/mostynb/go-grpc-compression/nonclobbering/snappy" + "github.com/mostynb/go-grpc-compression/nonclobbering/zstd" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "go.opentelemetry.io/otel" "google.golang.org/grpc" @@ -27,7 +28,6 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configauth" "go.opentelemetry.io/collector/config/configcompression" - grpcInternal "go.opentelemetry.io/collector/config/configgrpc/internal" "go.opentelemetry.io/collector/config/confignet" "go.opentelemetry.io/collector/config/configopaque" "go.opentelemetry.io/collector/config/configtelemetry" @@ -218,8 +218,8 @@ func (gcs *ClientConfig) isSchemeHTTPS() bool { // a non-blocking dial (the function won't wait for connections to be // established, and connecting happens in the background). To make it a blocking // dial, use grpc.WithBlock() dial option. -func (gcs *ClientConfig) ToClientConn(_ context.Context, host component.Host, settings component.TelemetrySettings, extraOpts ...grpc.DialOption) (*grpc.ClientConn, error) { - opts, err := gcs.toDialOptions(host, settings) +func (gcs *ClientConfig) ToClientConn(ctx context.Context, host component.Host, settings component.TelemetrySettings, extraOpts ...grpc.DialOption) (*grpc.ClientConn, error) { + opts, err := gcs.toDialOptions(ctx, host, settings) if err != nil { return nil, err } @@ -227,7 +227,7 @@ func (gcs *ClientConfig) ToClientConn(_ context.Context, host component.Host, se return grpc.NewClient(gcs.sanitizedEndpoint(), opts...) } -func (gcs *ClientConfig) toDialOptions(host component.Host, settings component.TelemetrySettings) ([]grpc.DialOption, error) { +func (gcs *ClientConfig) toDialOptions(ctx context.Context, host component.Host, settings component.TelemetrySettings) ([]grpc.DialOption, error) { var opts []grpc.DialOption if gcs.Compression.IsCompressed() { cp, err := getGRPCCompressionName(gcs.Compression) @@ -237,7 +237,7 @@ func (gcs *ClientConfig) toDialOptions(host component.Host, settings component.T opts = append(opts, grpc.WithDefaultCallOptions(grpc.UseCompressor(cp))) } - tlsCfg, err := gcs.TLSSetting.LoadTLSConfig(context.Background()) + tlsCfg, err := gcs.TLSSetting.LoadTLSConfig(ctx) if err != nil { return nil, err } @@ -271,7 +271,7 @@ func (gcs *ClientConfig) toDialOptions(host component.Host, settings component.T return nil, errors.New("no extensions configuration available") } - grpcAuthenticator, cerr := gcs.Auth.GetClientAuthenticator(host.GetExtensions()) + grpcAuthenticator, cerr := gcs.Auth.GetClientAuthenticatorContext(ctx, host.GetExtensions()) if cerr != nil { return nil, cerr } @@ -387,7 +387,7 @@ func (gss *ServerConfig) toServerOption(host component.Host, settings component. 
var sInterceptors []grpc.StreamServerInterceptor if gss.Auth != nil { - authenticator, err := gss.Auth.GetServerAuthenticator(host.GetExtensions()) + authenticator, err := gss.Auth.GetServerAuthenticatorContext(context.Background(), host.GetExtensions()) if err != nil { return nil, err } @@ -426,7 +426,7 @@ func getGRPCCompressionName(compressionType configcompression.Type) (string, err case configcompression.TypeSnappy: return snappy.Name, nil case configcompression.TypeZstd: - return grpcInternal.ZstdName, nil + return zstd.Name, nil default: return "", fmt.Errorf("unsupported compression type %q", compressionType) } diff --git a/vendor/go.opentelemetry.io/collector/config/configgrpc/internal/zstd.go b/vendor/go.opentelemetry.io/collector/config/configgrpc/internal/zstd.go deleted file mode 100644 index 0718b73535f..00000000000 --- a/vendor/go.opentelemetry.io/collector/config/configgrpc/internal/zstd.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright The OpenTelemetry Authors -// Copyright 2017 gRPC authors -// SPDX-License-Identifier: Apache-2.0 - -package internal // import "go.opentelemetry.io/collector/config/configgrpc/internal" - -import ( - "errors" - "io" - "sync" - - "github.com/klauspost/compress/zstd" - "google.golang.org/grpc/encoding" -) - -const ZstdName = "zstd" - -func init() { - encoding.RegisterCompressor(NewZstdCodec()) -} - -type writer struct { - *zstd.Encoder - pool *sync.Pool -} - -func NewZstdCodec() encoding.Compressor { - c := &compressor{} - c.poolCompressor.New = func() any { - zw, _ := zstd.NewWriter(nil, zstd.WithEncoderConcurrency(1), zstd.WithWindowSize(512*1024)) - return &writer{Encoder: zw, pool: &c.poolCompressor} - } - return c -} - -func (c *compressor) Compress(w io.Writer) (io.WriteCloser, error) { - z := c.poolCompressor.Get().(*writer) - z.Encoder.Reset(w) - return z, nil -} - -func (z *writer) Close() error { - defer z.pool.Put(z) - return z.Encoder.Close() -} - -type reader struct { - *zstd.Decoder - pool *sync.Pool -} - -func (c *compressor) Decompress(r io.Reader) (io.Reader, error) { - z, inPool := c.poolDecompressor.Get().(*reader) - if !inPool { - newZ, err := zstd.NewReader(r) - if err != nil { - return nil, err - } - return &reader{Decoder: newZ, pool: &c.poolDecompressor}, nil - } - if err := z.Reset(r); err != nil { - c.poolDecompressor.Put(z) - return nil, err - } - return z, nil -} - -func (z *reader) Read(p []byte) (n int, err error) { - n, err = z.Decoder.Read(p) - if errors.Is(err, io.EOF) { - z.pool.Put(z) - } - return n, err -} - -func (c *compressor) Name() string { - return ZstdName -} - -type compressor struct { - poolCompressor sync.Pool - poolDecompressor sync.Pool -} diff --git a/vendor/go.opentelemetry.io/collector/config/confighttp/README.md b/vendor/go.opentelemetry.io/collector/config/confighttp/README.md index a0227c2402b..2a6b0e7fd64 100644 --- a/vendor/go.opentelemetry.io/collector/config/confighttp/README.md +++ b/vendor/go.opentelemetry.io/collector/config/confighttp/README.md @@ -34,6 +34,8 @@ README](../configtls/README.md). - [`disable_keep_alives`](https://golang.org/pkg/net/http/#Transport) - [`http2_read_idle_timeout`](https://pkg.go.dev/golang.org/x/net/http2#Transport) - [`http2_ping_timeout`](https://pkg.go.dev/golang.org/x/net/http2#Transport) +- [`cookies`](https://pkg.go.dev/net/http#CookieJar) + - [`enabled`] if enabled, the client will store cookies from server responses and reuse them in subsequent requests. 
Example: @@ -51,6 +53,8 @@ exporter: test1: "value1" "test 2": "value 2" compression: zstd + cookies: + enabled: true ``` ## Server Configuration @@ -75,6 +79,7 @@ will not be enabled. not set, browsers use a default of 5 seconds. - `endpoint`: Valid value syntax available [here](https://github.com/grpc/grpc/blob/master/doc/naming.md) - `max_request_body_size`: configures the maximum allowed body size in bytes for a single request. Default: `0` (no restriction) +- `compression_algorithms`: configures the list of compression algorithms the server can accept. Default: ["", "gzip", "zstd", "zlib", "snappy", "deflate"] - [`tls`](../configtls/README.md) - [`auth`](../configauth/README.md) @@ -98,6 +103,7 @@ receivers: - Example-Header max_age: 7200 endpoint: 0.0.0.0:55690 + compression_algorithms: ["", "gzip"] processors: attributes: actions: diff --git a/vendor/go.opentelemetry.io/collector/config/confighttp/compression.go b/vendor/go.opentelemetry.io/collector/config/confighttp/compression.go index a700bec845b..4498fefe864 100644 --- a/vendor/go.opentelemetry.io/collector/config/confighttp/compression.go +++ b/vendor/go.opentelemetry.io/collector/config/confighttp/compression.go @@ -25,6 +25,53 @@ type compressRoundTripper struct { compressor *compressor } +var availableDecoders = map[string]func(body io.ReadCloser) (io.ReadCloser, error){ + "": func(io.ReadCloser) (io.ReadCloser, error) { + // Not a compressed payload. Nothing to do. + return nil, nil + }, + "gzip": func(body io.ReadCloser) (io.ReadCloser, error) { + gr, err := gzip.NewReader(body) + if err != nil { + return nil, err + } + return gr, nil + }, + "zstd": func(body io.ReadCloser) (io.ReadCloser, error) { + zr, err := zstd.NewReader( + body, + // Concurrency 1 disables async decoding. We don't need async decoding, it is pointless + // for our use-case (a server accepting decoding http requests). + // Disabling async improves performance (I benchmarked it previously when working + // on https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/23257). + zstd.WithDecoderConcurrency(1), + ) + if err != nil { + return nil, err + } + return zr.IOReadCloser(), nil + }, + "zlib": func(body io.ReadCloser) (io.ReadCloser, error) { + zr, err := zlib.NewReader(body) + if err != nil { + return nil, err + } + return zr, nil + }, + "snappy": func(body io.ReadCloser) (io.ReadCloser, error) { + sr := snappy.NewReader(body) + sb := new(bytes.Buffer) + _, err := io.Copy(sb, sr) + if err != nil { + return nil, err + } + if err = body.Close(); err != nil { + return nil, err + } + return io.NopCloser(sb), nil + }, +} + func newCompressRoundTripper(rt http.RoundTripper, compressionType configcompression.Type) (*compressRoundTripper, error) { encoder, err := newCompressor(compressionType) if err != nil { @@ -77,64 +124,27 @@ type decompressor struct { // by identifying the compression format in the "Content-Encoding" header and re-writing // request body so that the handlers further in the chain can work on decompressed data. // It supports gzip and deflate/zlib compression. 
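The new `cookies::enabled` option maps to `ClientConfig.Cookies`; when enabled, `ToClient` attaches a `cookiejar.Jar` backed by the public suffix list. A sketch with an assumed endpoint, using the nop test host and telemetry settings:

```go
package main

import (
	"context"

	"go.opentelemetry.io/collector/component/componenttest"
	"go.opentelemetry.io/collector/config/confighttp"
)

func main() {
	cfg := confighttp.ClientConfig{
		Endpoint: "http://localhost:4318", // assumed endpoint
		Cookies:  &confighttp.CookiesConfig{Enabled: true},
	}
	client, err := cfg.ToClient(context.Background(), componenttest.NewNopHost(), componenttest.NewNopTelemetrySettings())
	if err != nil {
		panic(err)
	}
	// client.Jar is non-nil here: Set-Cookie headers from responses are
	// replayed on subsequent requests to the same server.
	_ = client
}
```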
-func httpContentDecompressor(h http.Handler, maxRequestBodySize int64, eh func(w http.ResponseWriter, r *http.Request, errorMsg string, statusCode int), decoders map[string]func(body io.ReadCloser) (io.ReadCloser, error)) http.Handler { +func httpContentDecompressor(h http.Handler, maxRequestBodySize int64, eh func(w http.ResponseWriter, r *http.Request, errorMsg string, statusCode int), enableDecoders []string, decoders map[string]func(body io.ReadCloser) (io.ReadCloser, error)) http.Handler { errHandler := defaultErrorHandler if eh != nil { errHandler = eh } + enabled := map[string]func(body io.ReadCloser) (io.ReadCloser, error){} + for _, dec := range enableDecoders { + enabled[dec] = availableDecoders[dec] + + if dec == "deflate" { + enabled["deflate"] = availableDecoders["zlib"] + } + } + d := &decompressor{ maxRequestBodySize: maxRequestBodySize, errHandler: errHandler, base: h, - decoders: map[string]func(body io.ReadCloser) (io.ReadCloser, error){ - "": func(io.ReadCloser) (io.ReadCloser, error) { - // Not a compressed payload. Nothing to do. - return nil, nil - }, - "gzip": func(body io.ReadCloser) (io.ReadCloser, error) { - gr, err := gzip.NewReader(body) - if err != nil { - return nil, err - } - return gr, nil - }, - "zstd": func(body io.ReadCloser) (io.ReadCloser, error) { - zr, err := zstd.NewReader( - body, - // Concurrency 1 disables async decoding. We don't need async decoding, it is pointless - // for our use-case (a server accepting decoding http requests). - // Disabling async improves performance (I benchmarked it previously when working - // on https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/23257). - zstd.WithDecoderConcurrency(1), - ) - if err != nil { - return nil, err - } - return zr.IOReadCloser(), nil - }, - "zlib": func(body io.ReadCloser) (io.ReadCloser, error) { - zr, err := zlib.NewReader(body) - if err != nil { - return nil, err - } - return zr, nil - }, - "snappy": func(body io.ReadCloser) (io.ReadCloser, error) { - sr := snappy.NewReader(body) - sb := new(bytes.Buffer) - _, err := io.Copy(sb, sr) - if err != nil { - return nil, err - } - if err = body.Close(); err != nil { - return nil, err - } - return io.NopCloser(sb), nil - }, - }, + decoders: enabled, } - d.decoders["deflate"] = d.decoders["zlib"] for key, dec := range decoders { d.decoders[key] = dec diff --git a/vendor/go.opentelemetry.io/collector/config/confighttp/confighttp.go b/vendor/go.opentelemetry.io/collector/config/confighttp/confighttp.go index 71b2f17ee2f..fa6ff795331 100644 --- a/vendor/go.opentelemetry.io/collector/config/confighttp/confighttp.go +++ b/vendor/go.opentelemetry.io/collector/config/confighttp/confighttp.go @@ -11,6 +11,7 @@ import ( "io" "net" "net/http" + "net/http/cookiejar" "net/url" "time" @@ -18,6 +19,7 @@ import ( "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "go.opentelemetry.io/otel" "golang.org/x/net/http2" + "golang.org/x/net/publicsuffix" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configauth" @@ -31,6 +33,7 @@ import ( const headerContentEncoding = "Content-Encoding" const defaultMaxRequestBodySize = 20 * 1024 * 1024 // 20MiB +var defaultCompressionAlgorithms = []string{"", "gzip", "zstd", "zlib", "snappy", "deflate"} // ClientConfig defines settings for creating an HTTP client. 
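On the server side the decoder set is no longer hardwired: `CompressionAlgorithms` picks entries out of `availableDecoders`, with `deflate` aliased to the `zlib` decoder. A sketch restricting a server to identity and gzip request bodies; the listen address is illustrative:

```go
package main

import "go.opentelemetry.io/collector/config/confighttp"

func main() {
	cfg := confighttp.ServerConfig{
		Endpoint: "0.0.0.0:4318", // assumed listen address
		// "" admits uncompressed bodies; any other Content-Encoding is
		// rejected through the decompressor's error handler.
		CompressionAlgorithms: []string{"", "gzip"},
	}
	_ = cfg
}
```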
type ClientConfig struct { @@ -58,7 +61,10 @@ type ClientConfig struct { Headers map[string]configopaque.String `mapstructure:"headers"` // Custom Round Tripper to allow for individual components to intercept HTTP requests - CustomRoundTripper func(next http.RoundTripper) (http.RoundTripper, error) + // + // Deprecated: [v0.103.0] Set (*http.Client).Transport on the *http.Client returned from ToClient + // to configure this. + CustomRoundTripper func(next http.RoundTripper) (http.RoundTripper, error) `mapstructure:"-"` // Auth configuration for outgoing HTTP calls. Auth *configauth.Authentication `mapstructure:"auth"` @@ -100,6 +106,14 @@ type ClientConfig struct { // HTTP2PingTimeout if there's no response to the ping within the configured value, the connection will be closed. // If not set or set to 0, it defaults to 15s. HTTP2PingTimeout time.Duration `mapstructure:"http2_ping_timeout"` + // Cookies configures the cookie management of the HTTP client. + Cookies *CookiesConfig `mapstructure:"cookies"` +} + +// CookiesConfig defines the configuration of the HTTP client regarding cookies served by the server. +type CookiesConfig struct { + // Enabled if true, cookies from HTTP responses will be reused in further HTTP requests with the same server. + Enabled bool `mapstructure:"enabled"` } // NewDefaultClientConfig returns ClientConfig type object with @@ -181,7 +195,7 @@ func (hcs *ClientConfig) ToClient(ctx context.Context, host component.Host, sett return nil, errors.New("extensions configuration not found") } - httpCustomAuthRoundTripper, aerr := hcs.Auth.GetClientAuthenticator(ext) + httpCustomAuthRoundTripper, aerr := hcs.Auth.GetClientAuthenticatorContext(ctx, ext) if aerr != nil { return nil, aerr } @@ -228,9 +242,18 @@ func (hcs *ClientConfig) ToClient(ctx context.Context, host component.Host, sett } } + var jar http.CookieJar + if hcs.Cookies != nil && hcs.Cookies.Enabled { + jar, err = cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List}) + if err != nil { + return nil, err + } + } + return &http.Client{ Transport: clientTransport, Timeout: hcs.Timeout, + Jar: jar, }, nil } @@ -280,6 +303,9 @@ type ServerConfig struct { // Additional headers attached to each HTTP response sent to the client. // Header values are opaque since they may be sensitive. ResponseHeaders map[string]configopaque.String `mapstructure:"response_headers"` + + // CompressionAlgorithms configures the list of compression algorithms the server can accept. Default: ["", "gzip", "zstd", "zlib", "snappy", "deflate"] + CompressionAlgorithms []string `mapstructure:"compression_algorithms"` } // ToListener creates a net.Listener. 
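With `CustomRoundTripper` deprecated, interception moves to the caller: build the client, then wrap the transport it was given. A sketch with an illustrative round tripper:

```go
package myexporter

import (
	"context"
	"net/http"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/config/confighttp"
)

// headerTransport stands in for whatever CustomRoundTripper used to inject.
type headerTransport struct{ next http.RoundTripper }

func (t headerTransport) RoundTrip(r *http.Request) (*http.Response, error) {
	r2 := r.Clone(r.Context())      // RoundTrippers must not mutate the request
	r2.Header.Set("X-Example", "1") // assumed header, for illustration
	return t.next.RoundTrip(r2)
}

func newClient(ctx context.Context, cfg confighttp.ClientConfig, host component.Host, set component.TelemetrySettings) (*http.Client, error) {
	client, err := cfg.ToClient(ctx, host, set)
	if err != nil {
		return nil, err
	}
	client.Transport = headerTransport{next: client.Transport}
	return client, nil
}
```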
@@ -345,14 +371,18 @@ func (hss *ServerConfig) ToServer(_ context.Context, host component.Host, settin hss.MaxRequestBodySize = defaultMaxRequestBodySize } - handler = httpContentDecompressor(handler, hss.MaxRequestBodySize, serverOpts.errHandler, serverOpts.decoders) + if hss.CompressionAlgorithms == nil { + hss.CompressionAlgorithms = defaultCompressionAlgorithms + } + + handler = httpContentDecompressor(handler, hss.MaxRequestBodySize, serverOpts.errHandler, hss.CompressionAlgorithms, serverOpts.decoders) if hss.MaxRequestBodySize > 0 { handler = maxRequestBodySizeInterceptor(handler, hss.MaxRequestBodySize) } if hss.Auth != nil { - server, err := hss.Auth.GetServerAuthenticator(host.GetExtensions()) + server, err := hss.Auth.GetServerAuthenticatorContext(context.Background(), host.GetExtensions()) if err != nil { return nil, err } diff --git a/vendor/go.opentelemetry.io/collector/config/configtls/README.md b/vendor/go.opentelemetry.io/collector/config/configtls/README.md index aecfc284b41..cdc8d31663b 100644 --- a/vendor/go.opentelemetry.io/collector/config/configtls/README.md +++ b/vendor/go.opentelemetry.io/collector/config/configtls/README.md @@ -122,6 +122,7 @@ Beyond TLS configuration, the following setting can optionally be configured client certificate. (optional) This sets the ClientCAs and ClientAuth to RequireAndVerifyClientCert in the TLSConfig. Please refer to https://godoc.org/crypto/tls#Config for more information. +- `client_ca_file_reload` (default = false): Reload the ClientCAs file when it is modified. Example: diff --git a/vendor/go.opentelemetry.io/collector/confmap/README.md b/vendor/go.opentelemetry.io/collector/confmap/README.md index 42dcf548d89..bfa24c8bcc0 100644 --- a/vendor/go.opentelemetry.io/collector/confmap/README.md +++ b/vendor/go.opentelemetry.io/collector/confmap/README.md @@ -102,3 +102,60 @@ configuration retrieved via the `Provider` used to retrieve the “initial” co ``` The `Resolver` does that by passing an `onChange` func to each `Provider.Retrieve` call and capturing all watch events. + +## Troubleshooting + +### Null Maps + +Due to how our underlying merge library, [koanf](https://github.com/knadh/koanf), behaves, configuration resolution +will treat configuration such as + +```yaml +processors: +``` + +as null, which is a valid value. As a result if you have configuration `A`: + +```yaml +receivers: + nop: + +processors: + nop: + +exporters: + nop: + +extensions: + nop: + +service: + extensions: [nop] + pipelines: + traces: + receivers: [nop] + processors: [nop] + exporters: [nop] +``` + +and configuration `B`: + +```yaml +processors: +``` + +and do `./otelcorecol --config A.yaml --config B.yaml` + +The result will be an error: + +``` +Error: invalid configuration: service::pipelines::traces: references processor "nop" which is not configured +2024/06/10 14:37:14 collector server run finished with error: invalid configuration: service::pipelines::traces: references processor "nop" which is not configured +``` + +This happens because configuration `B` sets `processors` to null, removing the `nop` processor defined in configuration `A`, +so the `nop` processor referenced in configuration `A`'s pipeline no longer exists. + +This situation can be remedied 2 ways: +1. Use `{}` when you want to represent an empty map, such as `processors: {}` instead of `processors:`. +2. Omit configuration like `processors:` from your configuration. 
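The null-map pitfall is easy to reproduce programmatically; a sketch of the merge behavior the troubleshooting entry describes, driving `confmap` directly:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/confmap"
)

func main() {
	// Config A declares a nop processor...
	a := confmap.NewFromStringMap(map[string]any{
		"processors": map[string]any{"nop": nil},
	})
	// ...config B is the bare `processors:` line, i.e. an explicit null.
	b := confmap.NewFromStringMap(map[string]any{
		"processors": nil,
	})
	if err := a.Merge(b); err != nil {
		panic(err)
	}
	// The later null wins, wiping the nop processor from the merged result.
	fmt.Println(a.Get("processors")) // <nil>
}
```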
diff --git a/vendor/go.opentelemetry.io/collector/confmap/confmap.go b/vendor/go.opentelemetry.io/collector/confmap/confmap.go index 8a32216a2df..f24d12de17f 100644 --- a/vendor/go.opentelemetry.io/collector/confmap/confmap.go +++ b/vendor/go.opentelemetry.io/collector/confmap/confmap.go @@ -16,6 +16,7 @@ import ( "github.com/knadh/koanf/providers/confmap" "github.com/knadh/koanf/v2" + "go.opentelemetry.io/collector/confmap/internal" encoder "go.opentelemetry.io/collector/confmap/internal/mapstructure" ) @@ -156,7 +157,7 @@ func decodeConfig(m *Conf, result any, errorUnused bool, skipTopLevelUnmarshaler ErrorUnused: errorUnused, Result: result, TagName: "mapstructure", - WeaklyTypedInput: true, + WeaklyTypedInput: !internal.StrictlyTypedInputGate.IsEnabled(), MatchName: caseSensitiveMatchName, DecodeHook: mapstructure.ComposeDecodeHookFunc( expandNilStructPointersHookFunc(), @@ -191,6 +192,7 @@ func decodeConfig(m *Conf, result any, errorUnused bool, skipTopLevelUnmarshaler func encoderConfig(rawVal any) *encoder.EncoderConfig { return &encoder.EncoderConfig{ EncodeHook: mapstructure.ComposeDecodeHookFunc( + encoder.YamlMarshalerHookFunc(), encoder.TextMarshalerHookFunc(), marshalerHookFunc(rawVal), ), diff --git a/vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/LICENSE b/vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/LICENSE deleted file mode 100644 index d6456956733..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
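Stepping back to the `confmap.go` hunk above: the `confmap.strictlyTypedInput` gate (registered later in this diff) turns mapstructure's `WeaklyTypedInput` off, so implicit coercions such as string-to-int stop working during config unmarshaling. A minimal sketch of the difference, using the same `github.com/go-viper/mapstructure/v2` package the confmap code imports (the struct and field names are illustrative only):

```go
package main

import (
	"fmt"

	"github.com/go-viper/mapstructure/v2"
)

type endpointCfg struct {
	Port int `mapstructure:"port"`
}

func decode(weak bool, input map[string]any) (endpointCfg, error) {
	var out endpointCfg
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		WeaklyTypedInput: weak, // this is what the feature gate toggles
		Result:           &out,
	})
	if err != nil {
		return out, err
	}
	return out, dec.Decode(input)
}

func main() {
	input := map[string]any{"port": "4317"} // a string where an int is expected

	weakCfg, weakErr := decode(true, input)
	fmt.Println(weakCfg.Port, weakErr) // 4317 <nil>: weak typing coerces the string

	_, strictErr := decode(false, input)
	fmt.Println(strictErr != nil) // true: strict typing rejects the mismatch
}
```

Since the gate is registered at alpha stage, it is off by default; it would typically be enabled at runtime via the collector's `--feature-gates=confmap.strictlyTypedInput` flag.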
diff --git a/vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/Makefile b/vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/Makefile deleted file mode 100644 index bdd863a203b..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/Makefile +++ /dev/null @@ -1 +0,0 @@ -include ../../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/expand.go b/vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/expand.go deleted file mode 100644 index 029b48eaf17..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/expand.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package expandconverter // import "go.opentelemetry.io/collector/confmap/converter/expandconverter" - -import ( - "context" - "fmt" - "os" - "regexp" - - "go.uber.org/zap" - - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/confmap/internal/envvar" -) - -type converter struct { - logger *zap.Logger - - // Record of which env vars we have logged a warning for - loggedDeprecations map[string]struct{} -} - -// NewFactory returns a factory for a confmap.Converter, -// which expands all environment variables for a given confmap.Conf. -func NewFactory() confmap.ConverterFactory { - return confmap.NewConverterFactory(newConverter) -} - -func newConverter(set confmap.ConverterSettings) confmap.Converter { - return converter{ - loggedDeprecations: make(map[string]struct{}), - logger: set.Logger, - } -} - -func (c converter) Convert(_ context.Context, conf *confmap.Conf) error { - var err error - out := make(map[string]any) - for _, k := range conf.AllKeys() { - out[k], err = c.expandStringValues(conf.Get(k)) - if err != nil { - return err - } - } - return conf.Merge(confmap.NewFromStringMap(out)) -} - -func (c converter) expandStringValues(value any) (any, error) { - var err error - switch v := value.(type) { - case string: - return c.expandEnv(v) - case []any: - nslice := make([]any, 0, len(v)) - for _, vint := range v { - var nv any - nv, err = c.expandStringValues(vint) - if err != nil { - return nil, err - } - nslice = append(nslice, nv) - } - return nslice, nil - case map[string]any: - nmap := map[string]any{} - for mk, mv := range v { - nmap[mk], err = c.expandStringValues(mv) - if err != nil { - return nil, err - } - } - return nmap, nil - default: - return v, nil - } -} - -func (c converter) expandEnv(s string) (string, error) { - var err error - res := os.Expand(s, func(str string) string { - // Matches on $VAR style environment variables - // in order to make sure we don't log a warning for ${VAR} - var regex = regexp.MustCompile(fmt.Sprintf(`\$%s`, regexp.QuoteMeta(str))) - if _, exists := c.loggedDeprecations[str]; !exists && regex.MatchString(s) { - msg := fmt.Sprintf("Variable substitution using $VAR will be deprecated in favor of ${VAR} and ${env:VAR}, please update $%s", str) - c.logger.Warn(msg, zap.String("variable", str)) - c.loggedDeprecations[str] = struct{}{} - } - // This allows escaping environment variable substitution via $$, e.g. 
- // - $FOO will be substituted with env var FOO - // - $$FOO will be replaced with $FOO - // - $$$FOO will be replaced with $ + substituted env var FOO - if str == "$" { - return "$" - } - // For $ENV style environment variables os.Expand returns once it hits a character that isn't an underscore or - // an alphanumeric character - so we cannot detect those malformed environment variables. - // For ${ENV} style variables we can detect those kinds of env var names! - if !envvar.ValidationRegexp.MatchString(str) { - err = fmt.Errorf("environment variable %q has invalid name: must match regex %s", str, envvar.ValidationPattern) - return "" - } - val, exists := os.LookupEnv(str) - if !exists { - c.logger.Warn("Configuration references unset environment variable", zap.String("name", str)) - } else if len(val) == 0 { - c.logger.Info("Configuration references empty environment variable", zap.String("name", str)) - } - return val - }) - return res, err -} diff --git a/vendor/go.opentelemetry.io/collector/confmap/expand.go b/vendor/go.opentelemetry.io/collector/confmap/expand.go index 768395f76fd..efc4bb95911 100644 --- a/vendor/go.opentelemetry.io/collector/confmap/expand.go +++ b/vendor/go.opentelemetry.io/collector/confmap/expand.go @@ -11,6 +11,8 @@ import ( "regexp" "strconv" "strings" + + "go.opentelemetry.io/collector/confmap/internal" ) // schemePattern defines the regexp pattern for scheme names. @@ -111,22 +113,42 @@ func (mr *Resolver) findAndExpandURI(ctx context.Context, input string) (any, bo if uri == input { // If the value is a single URI, then the return value can be anything. // This is the case `foo: ${file:some_extra_config.yml}`. - return mr.expandURI(ctx, input) + ret, err := mr.expandURI(ctx, input) + if err != nil { + return input, false, err + } + + expanded, err := ret.AsRaw() + if err != nil { + return input, false, err + } + return expanded, true, err } - expanded, changed, err := mr.expandURI(ctx, uri) + expanded, err := mr.expandURI(ctx, uri) if err != nil { return input, false, err } - repl, err := toString(expanded) + + var repl string + if internal.StrictlyTypedInputGate.IsEnabled() { + repl, err = expanded.AsString() + } else { + repl, err = toString(expanded) + } if err != nil { return input, false, fmt.Errorf("expanding %v: %w", uri, err) } - return strings.ReplaceAll(input, uri, repl), changed, err + return strings.ReplaceAll(input, uri, repl), true, err } // toString attempts to convert input to a string. -func toString(input any) (string, error) { +func toString(ret *Retrieved) (string, error) { // This list must be kept in sync with checkRawConfType. 
+ input, err := ret.AsRaw() + if err != nil { + return "", err + } + val := reflect.ValueOf(input) switch val.Kind() { case reflect.String: @@ -142,7 +164,7 @@ func toString(input any) (string, error) { } } -func (mr *Resolver) expandURI(ctx context.Context, input string) (any, bool, error) { +func (mr *Resolver) expandURI(ctx context.Context, input string) (*Retrieved, error) { // strip ${ and } uri := input[2 : len(input)-1] @@ -152,19 +174,18 @@ func (mr *Resolver) expandURI(ctx context.Context, input string) (any, bool, err lURI, err := newLocation(uri) if err != nil { - return nil, false, err + return nil, err } if strings.Contains(lURI.opaqueValue, "$") { - return nil, false, fmt.Errorf("the uri %q contains unsupported characters ('$')", lURI.asString()) + return nil, fmt.Errorf("the uri %q contains unsupported characters ('$')", lURI.asString()) } ret, err := mr.retrieveValue(ctx, lURI) if err != nil { - return nil, false, err + return nil, err } mr.closers = append(mr.closers, ret.Close) - val, err := ret.AsRaw() - return val, true, err + return ret, nil } type location struct { diff --git a/vendor/go.opentelemetry.io/collector/confmap/internal/envvar/pattern.go b/vendor/go.opentelemetry.io/collector/confmap/internal/envvar/pattern.go deleted file mode 100644 index 6ce639ec275..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/internal/envvar/pattern.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package envvar // import "go.opentelemetry.io/collector/confmap/internal/envvar" - -import "regexp" - -const ValidationPattern = `^[a-zA-Z_][a-zA-Z0-9_]*$` - -var ValidationRegexp = regexp.MustCompile(ValidationPattern) diff --git a/vendor/go.opentelemetry.io/collector/confmap/internal/featuregate.go b/vendor/go.opentelemetry.io/collector/confmap/internal/featuregate.go new file mode 100644 index 00000000000..6e9b9ea8745 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/confmap/internal/featuregate.go @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/confmap/internal" + +import "go.opentelemetry.io/collector/featuregate" + +const StrictlyTypedInputID = "confmap.strictlyTypedInput" + +var StrictlyTypedInputGate = featuregate.GlobalRegistry().MustRegister(StrictlyTypedInputID, + featuregate.StageAlpha, + featuregate.WithRegisterFromVersion("v0.103.0"), + featuregate.WithRegisterDescription("Makes type casting rules during configuration unmarshaling stricter. See https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/rfcs/env-vars.md for more details."), +) diff --git a/vendor/go.opentelemetry.io/collector/confmap/internal/mapstructure/encoder.go b/vendor/go.opentelemetry.io/collector/confmap/internal/mapstructure/encoder.go index d0e222e08b6..994ad038fd3 100644 --- a/vendor/go.opentelemetry.io/collector/confmap/internal/mapstructure/encoder.go +++ b/vendor/go.opentelemetry.io/collector/confmap/internal/mapstructure/encoder.go @@ -11,6 +11,7 @@ import ( "strings" "github.com/go-viper/mapstructure/v2" + "gopkg.in/yaml.v3" ) const ( @@ -228,3 +229,35 @@ func TextMarshalerHookFunc() mapstructure.DecodeHookFuncValue { return string(out), nil } } + +// YamlMarshalerHookFunc returns a DecodeHookFuncValue that checks for structs +// that have yaml tags but no mapstructure tags. 
If found, it will convert the struct +// to map[string]any using the yaml package, which respects the yaml tags. Ultimately, +// this allows mapstructure to later marshal the map[string]any in a generic way. +func YamlMarshalerHookFunc() mapstructure.DecodeHookFuncValue { + return func(from reflect.Value, _ reflect.Value) (any, error) { + if from.Kind() == reflect.Struct { + for i := 0; i < from.NumField(); i++ { + if _, ok := from.Type().Field(i).Tag.Lookup("mapstructure"); ok { + // The struct has at least one mapstructure tag so don't do anything. + return from.Interface(), nil + } + + if _, ok := from.Type().Field(i).Tag.Lookup("yaml"); ok { + // The struct has at least one yaml tag, so convert it to map[string]any using yaml. + yamlBytes, err := yaml.Marshal(from.Interface()) + if err != nil { + return nil, err + } + var m map[string]any + err = yaml.Unmarshal(yamlBytes, &m) + if err != nil { + return nil, err + } + return m, nil + } + } + } + return from.Interface(), nil + } +} diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider.go index 192577ed4d8..f774cca3834 100644 --- a/vendor/go.opentelemetry.io/collector/confmap/provider.go +++ b/vendor/go.opentelemetry.io/collector/confmap/provider.go @@ -8,6 +8,7 @@ import ( "fmt" "go.uber.org/zap" + "gopkg.in/yaml.v3" ) // ProviderSettings are the settings to initialize a Provider. @@ -99,10 +100,15 @@ type ChangeEvent struct { type Retrieved struct { rawConf any closeFunc CloseFunc + + stringRepresentation string + isSetString bool } type retrievedSettings struct { - closeFunc CloseFunc + stringRepresentation string + isSetString bool + closeFunc CloseFunc } // RetrievedOption options to customize Retrieved values. @@ -116,6 +122,32 @@ func WithRetrievedClose(closeFunc CloseFunc) RetrievedOption { } } +func withStringRepresentation(stringRepresentation string) RetrievedOption { + return func(settings *retrievedSettings) { + settings.stringRepresentation = stringRepresentation + settings.isSetString = true + } +} + +// NewRetrievedFromYAML returns a new Retrieved instance that contains the deserialized data from the yaml bytes. +// * yamlBytes the yaml bytes that will be deserialized. +// * opts specifies options associated with this Retrieved value, such as CloseFunc. +func NewRetrievedFromYAML(yamlBytes []byte, opts ...RetrievedOption) (*Retrieved, error) { + var rawConf any + if err := yaml.Unmarshal(yamlBytes, &rawConf); err != nil { + return nil, err + } + + switch v := rawConf.(type) { + case string: + opts = append(opts, withStringRepresentation(v)) + case int, int32, int64, float32, float64, bool: + opts = append(opts, withStringRepresentation(string(yamlBytes))) + } + + return NewRetrieved(rawConf, opts...) +} + // NewRetrieved returns a new Retrieved instance that contains the data from the raw deserialized config. // The rawConf can be one of the following types: // - Primitives: int, int32, int64, float32, float64, bool, string; @@ -129,7 +161,12 @@ func NewRetrieved(rawConf any, opts ...RetrievedOption) (*Retrieved, error) { for _, opt := range opts { opt(&set) } - return &Retrieved{rawConf: rawConf, closeFunc: set.closeFunc}, nil + return &Retrieved{ + rawConf: rawConf, + closeFunc: set.closeFunc, + stringRepresentation: set.stringRepresentation, + isSetString: set.isSetString, + }, nil } // AsConf returns the retrieved configuration parsed as a Conf. 
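The `stringRepresentation` bookkeeping added to `Retrieved` above exists so that scalar YAML values keep their original spelling. A short usage sketch of `NewRetrievedFromYAML` together with the `AsString` accessor added in the next hunk:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/confmap"
)

func main() {
	// A scalar keeps its literal text, so ${uri} references expanded in
	// inline position can reproduce the original spelling via AsString.
	r, err := confmap.NewRetrievedFromYAML([]byte("true"))
	if err != nil {
		panic(err)
	}
	raw, _ := r.AsRaw()    // bool true, after YAML parsing
	str, _ := r.AsString() // "true", the preserved literal
	fmt.Println(raw, str)

	// A map has no unambiguous string form, so AsString returns an error.
	m, err := confmap.NewRetrievedFromYAML([]byte("a: 1"))
	if err != nil {
		panic(err)
	}
	if _, err := m.AsString(); err != nil {
		fmt.Println("not usable inline:", err)
	}
}
```

This is what lets `findAndExpandURI` (rewritten earlier in this diff) substitute a resolved value into the middle of a string only when the value has an unambiguous textual form.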
@@ -152,6 +189,20 @@ func (r *Retrieved) AsRaw() (any, error) { return r.rawConf, nil } +// AsString returns the retrieved configuration as a string. +// If the retrieved configuration is not convertible to a string unambiguously, an error is returned. +// If the retrieved configuration is a string, the string is returned. +// This method is used to resolve ${} references in inline position. +func (r *Retrieved) AsString() (string, error) { + if !r.isSetString { + if str, ok := r.rawConf.(string); ok { + return str, nil + } + return "", fmt.Errorf("retrieved value does not have unambiguous string representation: %v", r.rawConf) + } + return r.stringRepresentation, nil +} + // Close and release any watchers that Provider.Retrieve may have created. // // Should block until all resources are closed, and guarantee that `onChange` is not diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/envprovider/Makefile b/vendor/go.opentelemetry.io/collector/confmap/provider/envprovider/Makefile deleted file mode 100644 index bdd863a203b..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/envprovider/Makefile +++ /dev/null @@ -1 +0,0 @@ -include ../../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/envprovider/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider/envprovider/provider.go deleted file mode 100644 index 50192b5d994..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/envprovider/provider.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package envprovider // import "go.opentelemetry.io/collector/confmap/provider/envprovider" - -import ( - "context" - "fmt" - "os" - "strings" - - "go.uber.org/zap" - - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/confmap/internal/envvar" - "go.opentelemetry.io/collector/confmap/provider/internal" -) - -const ( - schemeName = "env" -) - -type provider struct { - logger *zap.Logger -} - -// NewFactory returns a factory for a confmap.Provider that reads the configuration from the given environment variable. 
-// -// This Provider supports "env" scheme, and can be called with a selector: -// `env:NAME_OF_ENVIRONMENT_VARIABLE` -func NewFactory() confmap.ProviderFactory { - return confmap.NewProviderFactory(newProvider) -} - -func newProvider(ps confmap.ProviderSettings) confmap.Provider { - return &provider{ - logger: ps.Logger, - } -} - -func (emp *provider) Retrieve(_ context.Context, uri string, _ confmap.WatcherFunc) (*confmap.Retrieved, error) { - if !strings.HasPrefix(uri, schemeName+":") { - return nil, fmt.Errorf("%q uri is not supported by %q provider", uri, schemeName) - } - envVarName := uri[len(schemeName)+1:] - if !envvar.ValidationRegexp.MatchString(envVarName) { - return nil, fmt.Errorf("environment variable %q has invalid name: must match regex %s", envVarName, envvar.ValidationPattern) - - } - val, exists := os.LookupEnv(envVarName) - if !exists { - emp.logger.Warn("Configuration references unset environment variable", zap.String("name", envVarName)) - } else if len(val) == 0 { - emp.logger.Info("Configuration references empty environment variable", zap.String("name", envVarName)) - } - - return internal.NewRetrievedFromYAML([]byte(val)) -} - -func (*provider) Scheme() string { - return schemeName -} - -func (*provider) Shutdown(context.Context) error { - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/LICENSE b/vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/LICENSE deleted file mode 100644 index d6456956733..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
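For reference, the envprovider removed above and the expansion code earlier in this diff both validated environment-variable names against the pattern from the (also removed) `confmap/internal/envvar` package. A standalone sketch of that same check:

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as the removed confmap/internal/envvar package:
// a letter or underscore followed by letters, digits, or underscores.
var validName = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*$`)

func main() {
	for _, name := range []string{"OTEL_CONFIG", "_private", "1BAD", "with-dash"} {
		fmt.Printf("%-10s valid=%v\n", name, validName.MatchString(name))
	}
}
```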
diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/Makefile b/vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/Makefile deleted file mode 100644 index bdd863a203b..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/Makefile +++ /dev/null @@ -1 +0,0 @@ -include ../../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/provider.go deleted file mode 100644 index fe958280cfb..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/provider.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package fileprovider // import "go.opentelemetry.io/collector/confmap/provider/fileprovider" - -import ( - "context" - "fmt" - "os" - "path/filepath" - "strings" - - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/confmap/provider/internal" -) - -const schemeName = "file" - -type provider struct{} - -// NewFactory returns a factory for a confmap.Provider that reads the configuration from a file. -// -// This Provider supports "file" scheme, and can be called with a "uri" that follows: -// -// file-uri = "file:" local-path -// local-path = [ drive-letter ] file-path -// drive-letter = ALPHA ":" -// -// The "file-path" can be relative or absolute, and it can be any OS supported format. -// -// Examples: -// `file:path/to/file` - relative path (unix, windows) -// `file:/path/to/file` - absolute path (unix, windows) -// `file:c:/path/to/file` - absolute path including drive-letter (windows) -// `file:c:\path\to\file` - absolute path including drive-letter (windows) -func NewFactory() confmap.ProviderFactory { - return confmap.NewProviderFactory(newProvider) -} - -func newProvider(confmap.ProviderSettings) confmap.Provider { - return &provider{} -} - -func (fmp *provider) Retrieve(_ context.Context, uri string, _ confmap.WatcherFunc) (*confmap.Retrieved, error) { - if !strings.HasPrefix(uri, schemeName+":") { - return nil, fmt.Errorf("%q uri is not supported by %q provider", uri, schemeName) - } - - // Clean the path before using it. - content, err := os.ReadFile(filepath.Clean(uri[len(schemeName)+1:])) - if err != nil { - return nil, fmt.Errorf("unable to read the file %v: %w", uri, err) - } - - return internal.NewRetrievedFromYAML(content) -} - -func (*provider) Scheme() string { - return schemeName -} - -func (*provider) Shutdown(context.Context) error { - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/LICENSE b/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/LICENSE deleted file mode 100644 index d6456956733..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/Makefile b/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/Makefile deleted file mode 100644 index bdd863a203b..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/Makefile +++ /dev/null @@ -1 +0,0 @@ -include ../../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/README.md b/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/README.md deleted file mode 100644 index 673fd2ccd2e..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/README.md +++ /dev/null @@ -1,13 +0,0 @@ -What is this new component httpprovider? -- An implementation of `confmap.Provider` for HTTP (httpprovider) allows OTEL Collector the ability to load configuration for itself by fetching and reading config files stored in HTTP servers. - -How this new component httpprovider works? -- It will be called by `confmap.Resolver` to load configurations for OTEL Collector. -- By giving a config URI starting with prefix 'http://', this httpprovider will be used to download config files from given HTTP URIs, and then used the downloaded config files to deploy the OTEL Collector. -- In our code, we check the validity scheme and string pattern of HTTP URIs. And also check if there are any problems on config downloading and config deserialization. - -Expected URI format: -- http://... - -Prerequistes: -- Need to setup a HTTP server ahead, which returns with a config files according to the given URI \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/provider.go deleted file mode 100644 index c47c1df5d99..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/provider.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package httpprovider // import "go.opentelemetry.io/collector/confmap/provider/httpprovider" - -import ( - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/confmap/provider/internal/configurablehttpprovider" -) - -// NewFactory returns a factory for a confmap.Provider that reads the configuration from a http server. -// -// This Provider supports "http" scheme. -// -// One example for HTTP URI is: http://localhost:3333/getConfig -func NewFactory() confmap.ProviderFactory { - return confmap.NewProviderFactory(newProvider) -} - -func newProvider(set confmap.ProviderSettings) confmap.Provider { - return configurablehttpprovider.New(configurablehttpprovider.HTTPScheme, set) -} diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/LICENSE b/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/LICENSE deleted file mode 100644 index d6456956733..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
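(Context for the two providers removed above: the scheme prefix of a config URI selects which provider the resolver uses, so a collector build registers one factory per scheme it wants to support. The sketch below is not part of this patch; it assumes the confmap API as it stood around collector v0.103, and the URI shown is purely illustrative.)

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/collector/confmap"
	"go.opentelemetry.io/collector/confmap/provider/httpprovider"
	"go.opentelemetry.io/collector/confmap/provider/httpsprovider"
)

func main() {
	// Each factory handles one URI scheme ("http" or "https"); the resolver
	// dispatches to the provider whose Scheme() matches the URI prefix.
	resolver, err := confmap.NewResolver(confmap.ResolverSettings{
		URIs: []string{"https://localhost:3333/getConfig"}, // illustrative URI
		ProviderFactories: []confmap.ProviderFactory{
			httpprovider.NewFactory(),
			httpsprovider.NewFactory(),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Resolve fetches the config over HTTP(S) and unmarshals the YAML body,
	// mirroring the Retrieve path of configurablehttpprovider shown below.
	conf, err := resolver.Resolve(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("loaded config with %d top-level keys", len(conf.AllKeys()))
}
```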
diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/Makefile b/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/Makefile deleted file mode 100644 index bdd863a203b..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/Makefile +++ /dev/null @@ -1 +0,0 @@ -include ../../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/README.md b/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/README.md deleted file mode 100644 index 270dd1b23dd..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/README.md +++ /dev/null @@ -1,18 +0,0 @@ -### What is the httpsprovider? - -An implementation of `confmap.Provider` for HTTPS (httpsprovider) that allows the OTEL Collector to use the HTTPS protocol to -load configuration files stored on web servers. - -Expected URI format: -- https://... - -### Prerequisites - -You need to set up an HTTP server with HTTPS support. The server must have a certificate that can be validated on the -host running the collector using system root certificates. - -### Configuration - -At this moment, this component only supports communicating with servers whose certificate can be verified using the root -CA certificates installed on the system. The process of adding more root CA certificates to the system is operating -system dependent. For Linux, please refer to the `update-ca-trust` command. diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/provider.go deleted file mode 100644 index 579c15babf2..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/provider.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package httpsprovider // import "go.opentelemetry.io/collector/confmap/provider/httpsprovider" - -import ( - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/confmap/provider/internal/configurablehttpprovider" -) - -// NewFactory returns a factory for a confmap.Provider that reads the configuration from an HTTPS server. -// -// This Provider supports the "https" scheme. One example of an HTTPS URI is: https://localhost:3333/getConfig -// -// To add extra CA certificates you need to install certificates in the system pool. This procedure is operating system -// dependent. E.g.: on Linux please refer to the `update-ca-trust` command.
-func NewFactory() confmap.ProviderFactory { - return confmap.NewProviderFactory(newProvider) -} - -func newProvider(set confmap.ProviderSettings) confmap.Provider { - return configurablehttpprovider.New(configurablehttpprovider.HTTPSScheme, set) -} diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/internal/configurablehttpprovider/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider/internal/configurablehttpprovider/provider.go deleted file mode 100644 index c683d3d9a4e..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/internal/configurablehttpprovider/provider.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package configurablehttpprovider // import "go.opentelemetry.io/collector/confmap/provider/internal/configurablehttpprovider" - -import ( - "context" - "crypto/tls" - "crypto/x509" - "fmt" - "io" - "net/http" - "os" - "path/filepath" - "strings" - - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/confmap/provider/internal" -) - -type SchemeType string - -const ( - HTTPScheme SchemeType = "http" - HTTPSScheme SchemeType = "https" -) - -type provider struct { - scheme SchemeType - caCertPath string // Used for tests - insecureSkipVerify bool // Used for tests -} - -// New returns a new provider that reads the configuration from an HTTP server using the configured transport mechanism, -// depending on the selected scheme. -// There are two types of transport supported: PlainText (HTTPScheme) and TLS (HTTPSScheme). -// -// One example of an HTTP URI: http://localhost:3333/getConfig -// One example of an HTTPS URI: https://localhost:3333/getConfig -// This is used by the http and https external implementations. -func New(scheme SchemeType, _ confmap.ProviderSettings) confmap.Provider { - return &provider{scheme: scheme} -} - -// Create the client based on the type of scheme that was selected.
-func (fmp *provider) createClient() (*http.Client, error) { - switch fmp.scheme { - case HTTPScheme: - return &http.Client{}, nil - case HTTPSScheme: - pool, err := x509.SystemCertPool() - - if err != nil { - return nil, fmt.Errorf("unable to create a cert pool: %w", err) - } - - if fmp.caCertPath != "" { - cert, err := os.ReadFile(filepath.Clean(fmp.caCertPath)) - - if err != nil { - return nil, fmt.Errorf("unable to read CA from %q URI: %w", fmp.caCertPath, err) - } - - if ok := pool.AppendCertsFromPEM(cert); !ok { - return nil, fmt.Errorf("unable to add CA from uri: %s into the cert pool", fmp.caCertPath) - } - } - - return &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: fmp.insecureSkipVerify, - RootCAs: pool, - }, - }, - }, nil - default: - return nil, fmt.Errorf("invalid scheme type: %s", fmp.scheme) - } -} - -func (fmp *provider) Retrieve(_ context.Context, uri string, _ confmap.WatcherFunc) (*confmap.Retrieved, error) { - - if !strings.HasPrefix(uri, string(fmp.scheme)+":") { - return nil, fmt.Errorf("%q uri is not supported by %q provider", uri, string(fmp.scheme)) - } - - client, err := fmp.createClient() - - if err != nil { - return nil, fmt.Errorf("unable to configure http transport layer: %w", err) - } - - // send a HTTP GET request - resp, err := client.Get(uri) - if err != nil { - return nil, fmt.Errorf("unable to download the file via HTTP GET for uri %q: %w ", uri, err) - } - defer resp.Body.Close() - - // check the HTTP status code - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("failed to load resource from uri %q. status code: %d", uri, resp.StatusCode) - } - - // read the response body - body, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("fail to read the response body from uri %q: %w", uri, err) - } - - return internal.NewRetrievedFromYAML(body) -} - -func (fmp *provider) Scheme() string { - return string(fmp.scheme) -} - -func (*provider) Shutdown(context.Context) error { - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/internal/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider/internal/provider.go deleted file mode 100644 index 5a378997529..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/internal/provider.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package internal // import "go.opentelemetry.io/collector/confmap/provider/internal" - -import ( - "gopkg.in/yaml.v3" - - "go.opentelemetry.io/collector/confmap" -) - -// NewRetrievedFromYAML returns a new Retrieved instance that contains the deserialized data from the yaml bytes. -// * yamlBytes the yaml bytes that will be deserialized. -// * opts specifies options associated with this Retrieved value, such as CloseFunc. -func NewRetrievedFromYAML(yamlBytes []byte, opts ...confmap.RetrievedOption) (*confmap.Retrieved, error) { - var rawConf any - if err := yaml.Unmarshal(yamlBytes, &rawConf); err != nil { - return nil, err - } - return confmap.NewRetrieved(rawConf, opts...) 
-} diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/LICENSE b/vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/LICENSE deleted file mode 100644 index d6456956733..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/Makefile b/vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/Makefile deleted file mode 100644 index bdd863a203b..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/Makefile +++ /dev/null @@ -1 +0,0 @@ -include ../../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/provider.go deleted file mode 100644 index 949fc71bf7d..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/provider.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package yamlprovider // import "go.opentelemetry.io/collector/confmap/provider/yamlprovider" - -import ( - "context" - "fmt" - "strings" - - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/confmap/provider/internal" -) - -const schemeName = "yaml" - -type provider struct{} - -// NewFactory returns a factory for a confmap.Provider that allows to provide yaml bytes. -// -// This Provider supports "yaml" scheme, and can be called with a "uri" that follows: -// -// bytes-uri = "yaml:" yaml-bytes -// -// Examples: -// `yaml:processors::batch::timeout: 2s` -// `yaml:processors::batch/foo::timeout: 3s` -func NewFactory() confmap.ProviderFactory { - return confmap.NewProviderFactory(newProvider) -} - -func newProvider(confmap.ProviderSettings) confmap.Provider { - return &provider{} -} - -func (s *provider) Retrieve(_ context.Context, uri string, _ confmap.WatcherFunc) (*confmap.Retrieved, error) { - if !strings.HasPrefix(uri, schemeName+":") { - return nil, fmt.Errorf("%q uri is not supported by %q provider", uri, schemeName) - } - - return internal.NewRetrievedFromYAML([]byte(uri[len(schemeName)+1:])) -} - -func (*provider) Scheme() string { - return schemeName -} - -func (s *provider) Shutdown(context.Context) error { - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/connector/connector.go b/vendor/go.opentelemetry.io/collector/connector/connector.go index 94927092117..48f2e6b089d 100644 --- a/vendor/go.opentelemetry.io/collector/connector/connector.go +++ b/vendor/go.opentelemetry.io/collector/connector/connector.go @@ -67,7 +67,12 @@ type Logs interface { } // CreateSettings configures Connector creators. -type CreateSettings struct { +// +// Deprecated: [v0.103.0] Use connector.Settings instead. +type CreateSettings = Settings + +// Settings configures Connector creators. +type Settings struct { // ID returns the ID of the component that will be created. 
ID component.ID @@ -93,17 +98,17 @@ type Factory interface { // tests of any implementation of the Factory interface. CreateDefaultConfig() component.Config - CreateTracesToTraces(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Traces) (Traces, error) - CreateTracesToMetrics(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Metrics) (Traces, error) - CreateTracesToLogs(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Logs) (Traces, error) + CreateTracesToTraces(ctx context.Context, set Settings, cfg component.Config, nextConsumer consumer.Traces) (Traces, error) + CreateTracesToMetrics(ctx context.Context, set Settings, cfg component.Config, nextConsumer consumer.Metrics) (Traces, error) + CreateTracesToLogs(ctx context.Context, set Settings, cfg component.Config, nextConsumer consumer.Logs) (Traces, error) - CreateMetricsToTraces(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Traces) (Metrics, error) - CreateMetricsToMetrics(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Metrics) (Metrics, error) - CreateMetricsToLogs(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Logs) (Metrics, error) + CreateMetricsToTraces(ctx context.Context, set Settings, cfg component.Config, nextConsumer consumer.Traces) (Metrics, error) + CreateMetricsToMetrics(ctx context.Context, set Settings, cfg component.Config, nextConsumer consumer.Metrics) (Metrics, error) + CreateMetricsToLogs(ctx context.Context, set Settings, cfg component.Config, nextConsumer consumer.Logs) (Metrics, error) - CreateLogsToTraces(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Traces) (Logs, error) - CreateLogsToMetrics(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Metrics) (Logs, error) - CreateLogsToLogs(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Logs) (Logs, error) + CreateLogsToTraces(ctx context.Context, set Settings, cfg component.Config, nextConsumer consumer.Traces) (Logs, error) + CreateLogsToMetrics(ctx context.Context, set Settings, cfg component.Config, nextConsumer consumer.Metrics) (Logs, error) + CreateLogsToLogs(ctx context.Context, set Settings, cfg component.Config, nextConsumer consumer.Logs) (Logs, error) TracesToTracesStability() component.StabilityLevel TracesToMetricsStability() component.StabilityLevel @@ -136,12 +141,12 @@ func (f factoryOptionFunc) apply(o *factory) { } // CreateTracesToTracesFunc is the equivalent of Factory.CreateTracesToTraces(). -type CreateTracesToTracesFunc func(context.Context, CreateSettings, component.Config, consumer.Traces) (Traces, error) +type CreateTracesToTracesFunc func(context.Context, Settings, component.Config, consumer.Traces) (Traces, error) // CreateTracesToTraces implements Factory.CreateTracesToTraces(). func (f CreateTracesToTracesFunc) CreateTracesToTraces( ctx context.Context, - set CreateSettings, + set Settings, cfg component.Config, nextConsumer consumer.Traces) (Traces, error) { if f == nil { @@ -151,12 +156,12 @@ func (f CreateTracesToTracesFunc) CreateTracesToTraces( } // CreateTracesToMetricsFunc is the equivalent of Factory.CreateTracesToMetrics(). 
-type CreateTracesToMetricsFunc func(context.Context, CreateSettings, component.Config, consumer.Metrics) (Traces, error) +type CreateTracesToMetricsFunc func(context.Context, Settings, component.Config, consumer.Metrics) (Traces, error) // CreateTracesToMetrics implements Factory.CreateTracesToMetrics(). func (f CreateTracesToMetricsFunc) CreateTracesToMetrics( ctx context.Context, - set CreateSettings, + set Settings, cfg component.Config, nextConsumer consumer.Metrics, ) (Traces, error) { @@ -167,12 +172,12 @@ func (f CreateTracesToMetricsFunc) CreateTracesToMetrics( } // CreateTracesToLogsFunc is the equivalent of Factory.CreateTracesToLogs(). -type CreateTracesToLogsFunc func(context.Context, CreateSettings, component.Config, consumer.Logs) (Traces, error) +type CreateTracesToLogsFunc func(context.Context, Settings, component.Config, consumer.Logs) (Traces, error) // CreateTracesToLogs implements Factory.CreateTracesToLogs(). func (f CreateTracesToLogsFunc) CreateTracesToLogs( ctx context.Context, - set CreateSettings, + set Settings, cfg component.Config, nextConsumer consumer.Logs, ) (Traces, error) { @@ -183,12 +188,12 @@ func (f CreateTracesToLogsFunc) CreateTracesToLogs( } // CreateMetricsToTracesFunc is the equivalent of Factory.CreateMetricsToTraces(). -type CreateMetricsToTracesFunc func(context.Context, CreateSettings, component.Config, consumer.Traces) (Metrics, error) +type CreateMetricsToTracesFunc func(context.Context, Settings, component.Config, consumer.Traces) (Metrics, error) // CreateMetricsToTraces implements Factory.CreateMetricsToTraces(). func (f CreateMetricsToTracesFunc) CreateMetricsToTraces( ctx context.Context, - set CreateSettings, + set Settings, cfg component.Config, nextConsumer consumer.Traces, ) (Metrics, error) { @@ -199,12 +204,12 @@ func (f CreateMetricsToTracesFunc) CreateMetricsToTraces( } // CreateMetricsToMetricsFunc is the equivalent of Factory.CreateMetricsToTraces(). -type CreateMetricsToMetricsFunc func(context.Context, CreateSettings, component.Config, consumer.Metrics) (Metrics, error) +type CreateMetricsToMetricsFunc func(context.Context, Settings, component.Config, consumer.Metrics) (Metrics, error) // CreateMetricsToMetrics implements Factory.CreateMetricsToTraces(). func (f CreateMetricsToMetricsFunc) CreateMetricsToMetrics( ctx context.Context, - set CreateSettings, + set Settings, cfg component.Config, nextConsumer consumer.Metrics, ) (Metrics, error) { @@ -215,12 +220,12 @@ func (f CreateMetricsToMetricsFunc) CreateMetricsToMetrics( } // CreateMetricsToLogsFunc is the equivalent of Factory.CreateMetricsToLogs(). -type CreateMetricsToLogsFunc func(context.Context, CreateSettings, component.Config, consumer.Logs) (Metrics, error) +type CreateMetricsToLogsFunc func(context.Context, Settings, component.Config, consumer.Logs) (Metrics, error) // CreateMetricsToLogs implements Factory.CreateMetricsToLogs(). func (f CreateMetricsToLogsFunc) CreateMetricsToLogs( ctx context.Context, - set CreateSettings, + set Settings, cfg component.Config, nextConsumer consumer.Logs, ) (Metrics, error) { @@ -231,12 +236,12 @@ func (f CreateMetricsToLogsFunc) CreateMetricsToLogs( } // CreateLogsToTracesFunc is the equivalent of Factory.CreateLogsToTraces(). -type CreateLogsToTracesFunc func(context.Context, CreateSettings, component.Config, consumer.Traces) (Logs, error) +type CreateLogsToTracesFunc func(context.Context, Settings, component.Config, consumer.Traces) (Logs, error) // CreateLogsToTraces implements Factory.CreateLogsToTraces(). 
func (f CreateLogsToTracesFunc) CreateLogsToTraces( ctx context.Context, - set CreateSettings, + set Settings, cfg component.Config, nextConsumer consumer.Traces, ) (Logs, error) { @@ -247,12 +252,12 @@ func (f CreateLogsToTracesFunc) CreateLogsToTraces( } // CreateLogsToMetricsFunc is the equivalent of Factory.CreateLogsToMetrics(). -type CreateLogsToMetricsFunc func(context.Context, CreateSettings, component.Config, consumer.Metrics) (Logs, error) +type CreateLogsToMetricsFunc func(context.Context, Settings, component.Config, consumer.Metrics) (Logs, error) // CreateLogsToMetrics implements Factory.CreateLogsToMetrics(). func (f CreateLogsToMetricsFunc) CreateLogsToMetrics( ctx context.Context, - set CreateSettings, + set Settings, cfg component.Config, nextConsumer consumer.Metrics, ) (Logs, error) { @@ -263,12 +268,12 @@ func (f CreateLogsToMetricsFunc) CreateLogsToMetrics( } // CreateLogsToLogsFunc is the equivalent of Factory.CreateLogsToLogs(). -type CreateLogsToLogsFunc func(context.Context, CreateSettings, component.Config, consumer.Logs) (Logs, error) +type CreateLogsToLogsFunc func(context.Context, Settings, component.Config, consumer.Logs) (Logs, error) // CreateLogsToLogs implements Factory.CreateLogsToLogs(). func (f CreateLogsToLogsFunc) CreateLogsToLogs( ctx context.Context, - set CreateSettings, + set Settings, cfg component.Config, nextConsumer consumer.Logs, ) (Logs, error) { @@ -460,7 +465,7 @@ func NewBuilder(cfgs map[component.ID]component.Config, factories map[component. } // CreateTracesToTraces creates a Traces connector based on the settings and config. -func (b *Builder) CreateTracesToTraces(ctx context.Context, set CreateSettings, next consumer.Traces) (Traces, error) { +func (b *Builder) CreateTracesToTraces(ctx context.Context, set Settings, next consumer.Traces) (Traces, error) { if next == nil { return nil, errNilNextConsumer } @@ -479,7 +484,7 @@ func (b *Builder) CreateTracesToTraces(ctx context.Context, set CreateSettings, } // CreateTracesToMetrics creates a Traces connector based on the settings and config. -func (b *Builder) CreateTracesToMetrics(ctx context.Context, set CreateSettings, next consumer.Metrics) (Traces, error) { +func (b *Builder) CreateTracesToMetrics(ctx context.Context, set Settings, next consumer.Metrics) (Traces, error) { if next == nil { return nil, errNilNextConsumer } @@ -498,7 +503,7 @@ func (b *Builder) CreateTracesToMetrics(ctx context.Context, set CreateSettings, } // CreateTracesToLogs creates a Traces connector based on the settings and config. -func (b *Builder) CreateTracesToLogs(ctx context.Context, set CreateSettings, next consumer.Logs) (Traces, error) { +func (b *Builder) CreateTracesToLogs(ctx context.Context, set Settings, next consumer.Logs) (Traces, error) { if next == nil { return nil, errNilNextConsumer } @@ -517,7 +522,7 @@ func (b *Builder) CreateTracesToLogs(ctx context.Context, set CreateSettings, ne } // CreateMetricsToTraces creates a Metrics connector based on the settings and config. -func (b *Builder) CreateMetricsToTraces(ctx context.Context, set CreateSettings, next consumer.Traces) (Metrics, error) { +func (b *Builder) CreateMetricsToTraces(ctx context.Context, set Settings, next consumer.Traces) (Metrics, error) { if next == nil { return nil, errNilNextConsumer } @@ -536,7 +541,7 @@ func (b *Builder) CreateMetricsToTraces(ctx context.Context, set CreateSettings, } // CreateMetricsToMetrics creates a Metrics connector based on the settings and config. 
-func (b *Builder) CreateMetricsToMetrics(ctx context.Context, set CreateSettings, next consumer.Metrics) (Metrics, error) { +func (b *Builder) CreateMetricsToMetrics(ctx context.Context, set Settings, next consumer.Metrics) (Metrics, error) { if next == nil { return nil, errNilNextConsumer } @@ -555,7 +560,7 @@ func (b *Builder) CreateMetricsToMetrics(ctx context.Context, set CreateSettings } // CreateMetricsToLogs creates a Metrics connector based on the settings and config. -func (b *Builder) CreateMetricsToLogs(ctx context.Context, set CreateSettings, next consumer.Logs) (Metrics, error) { +func (b *Builder) CreateMetricsToLogs(ctx context.Context, set Settings, next consumer.Logs) (Metrics, error) { if next == nil { return nil, errNilNextConsumer } @@ -574,7 +579,7 @@ func (b *Builder) CreateMetricsToLogs(ctx context.Context, set CreateSettings, n } // CreateLogsToTraces creates a Logs connector based on the settings and config. -func (b *Builder) CreateLogsToTraces(ctx context.Context, set CreateSettings, next consumer.Traces) (Logs, error) { +func (b *Builder) CreateLogsToTraces(ctx context.Context, set Settings, next consumer.Traces) (Logs, error) { if next == nil { return nil, errNilNextConsumer } @@ -593,7 +598,7 @@ func (b *Builder) CreateLogsToTraces(ctx context.Context, set CreateSettings, ne } // CreateLogsToMetrics creates a Logs connector based on the settings and config. -func (b *Builder) CreateLogsToMetrics(ctx context.Context, set CreateSettings, next consumer.Metrics) (Logs, error) { +func (b *Builder) CreateLogsToMetrics(ctx context.Context, set Settings, next consumer.Metrics) (Logs, error) { if next == nil { return nil, errNilNextConsumer } @@ -612,7 +617,7 @@ func (b *Builder) CreateLogsToMetrics(ctx context.Context, set CreateSettings, n } // CreateLogsToLogs creates a Logs connector based on the settings and config. -func (b *Builder) CreateLogsToLogs(ctx context.Context, set CreateSettings, next consumer.Logs) (Logs, error) { +func (b *Builder) CreateLogsToLogs(ctx context.Context, set Settings, next consumer.Logs) (Logs, error) { if next == nil { return nil, errNilNextConsumer } diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporter.go b/vendor/go.opentelemetry.io/collector/exporter/exporter.go index 818fa3ecac3..d78e952dd7a 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporter.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporter.go @@ -31,8 +31,13 @@ type Logs interface { consumer.Logs } -// CreateSettings configures exporter creators. -type CreateSettings struct { +// CreateSettings configures Exporter creators. +// +// Deprecated: [v0.103.0] Use exporter.Settings instead. +type CreateSettings = Settings + +// Settings configures exporter creators. +type Settings struct { // ID returns the ID of the component that will be created. ID component.ID @@ -52,7 +57,7 @@ type Factory interface { // CreateTracesExporter creates a TracesExporter based on this config. // If the exporter type does not support tracing or if the config is not valid, // an error will be returned instead. - CreateTracesExporter(ctx context.Context, set CreateSettings, cfg component.Config) (Traces, error) + CreateTracesExporter(ctx context.Context, set Settings, cfg component.Config) (Traces, error) // TracesExporterStability gets the stability level of the TracesExporter. TracesExporterStability() component.StabilityLevel @@ -60,7 +65,7 @@ type Factory interface { // CreateMetricsExporter creates a MetricsExporter based on this config. 
// If the exporter type does not support metrics or if the config is not valid, // an error will be returned instead. - CreateMetricsExporter(ctx context.Context, set CreateSettings, cfg component.Config) (Metrics, error) + CreateMetricsExporter(ctx context.Context, set Settings, cfg component.Config) (Metrics, error) // MetricsExporterStability gets the stability level of the MetricsExporter. MetricsExporterStability() component.StabilityLevel @@ -68,7 +73,7 @@ type Factory interface { // CreateLogsExporter creates a LogsExporter based on the config. // If the exporter type does not support logs or if the config is not valid, // an error will be returned instead. - CreateLogsExporter(ctx context.Context, set CreateSettings, cfg component.Config) (Logs, error) + CreateLogsExporter(ctx context.Context, set Settings, cfg component.Config) (Logs, error) // LogsExporterStability gets the stability level of the LogsExporter. LogsExporterStability() component.StabilityLevel @@ -92,10 +97,10 @@ func (f factoryOptionFunc) applyExporterFactoryOption(o *factory) { } // CreateTracesFunc is the equivalent of Factory.CreateTraces. -type CreateTracesFunc func(context.Context, CreateSettings, component.Config) (Traces, error) +type CreateTracesFunc func(context.Context, Settings, component.Config) (Traces, error) // CreateTracesExporter implements ExporterFactory.CreateTracesExporter(). -func (f CreateTracesFunc) CreateTracesExporter(ctx context.Context, set CreateSettings, cfg component.Config) (Traces, error) { +func (f CreateTracesFunc) CreateTracesExporter(ctx context.Context, set Settings, cfg component.Config) (Traces, error) { if f == nil { return nil, component.ErrDataTypeIsNotSupported } @@ -103,10 +108,10 @@ func (f CreateTracesFunc) CreateTracesExporter(ctx context.Context, set CreateSe } // CreateMetricsFunc is the equivalent of Factory.CreateMetrics. -type CreateMetricsFunc func(context.Context, CreateSettings, component.Config) (Metrics, error) +type CreateMetricsFunc func(context.Context, Settings, component.Config) (Metrics, error) // CreateMetricsExporter implements ExporterFactory.CreateMetricsExporter(). -func (f CreateMetricsFunc) CreateMetricsExporter(ctx context.Context, set CreateSettings, cfg component.Config) (Metrics, error) { +func (f CreateMetricsFunc) CreateMetricsExporter(ctx context.Context, set Settings, cfg component.Config) (Metrics, error) { if f == nil { return nil, component.ErrDataTypeIsNotSupported } @@ -114,10 +119,10 @@ func (f CreateMetricsFunc) CreateMetricsExporter(ctx context.Context, set Create } // CreateLogsFunc is the equivalent of Factory.CreateLogs. -type CreateLogsFunc func(context.Context, CreateSettings, component.Config) (Logs, error) +type CreateLogsFunc func(context.Context, Settings, component.Config) (Logs, error) // CreateLogsExporter implements Factory.CreateLogsExporter(). -func (f CreateLogsFunc) CreateLogsExporter(ctx context.Context, set CreateSettings, cfg component.Config) (Logs, error) { +func (f CreateLogsFunc) CreateLogsExporter(ctx context.Context, set Settings, cfg component.Config) (Logs, error) { if f == nil { return nil, component.ErrDataTypeIsNotSupported } @@ -214,7 +219,7 @@ func NewBuilder(cfgs map[component.ID]component.Config, factories map[component. } // CreateTraces creates a Traces exporter based on the settings and config. 
-func (b *Builder) CreateTraces(ctx context.Context, set CreateSettings) (Traces, error) { +func (b *Builder) CreateTraces(ctx context.Context, set Settings) (Traces, error) { cfg, existsCfg := b.cfgs[set.ID] if !existsCfg { return nil, fmt.Errorf("exporter %q is not configured", set.ID) @@ -230,7 +235,7 @@ func (b *Builder) CreateTraces(ctx context.Context, set CreateSettings) (Traces, } // CreateMetrics creates a Metrics exporter based on the settings and config. -func (b *Builder) CreateMetrics(ctx context.Context, set CreateSettings) (Metrics, error) { +func (b *Builder) CreateMetrics(ctx context.Context, set Settings) (Metrics, error) { cfg, existsCfg := b.cfgs[set.ID] if !existsCfg { return nil, fmt.Errorf("exporter %q is not configured", set.ID) @@ -246,7 +251,7 @@ func (b *Builder) CreateMetrics(ctx context.Context, set CreateSettings) (Metric } // CreateLogs creates a Logs exporter based on the settings and config. -func (b *Builder) CreateLogs(ctx context.Context, set CreateSettings) (Logs, error) { +func (b *Builder) CreateLogs(ctx context.Context, set Settings) (Logs, error) { cfg, existsCfg := b.cfgs[set.ID] if !existsCfg { return nil, fmt.Errorf("exporter %q is not configured", set.ID) diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/batch_sender.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/batch_sender.go index 086c9724aa3..4d9635195e2 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/batch_sender.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/batch_sender.go @@ -30,13 +30,12 @@ type batchSender struct { // concurrencyLimit is the maximum number of goroutines that can be blocked by the batcher. // If this number is reached and all the goroutines are busy, the batch will be sent right away. // Populated from the number of queue consumers if queue is enabled. - concurrencyLimit uint64 - activeRequests atomic.Uint64 - - resetTimerCh chan struct{} + concurrencyLimit int64 + activeRequests atomic.Int64 mu sync.Mutex activeBatch *batch + lastFlushed time.Time logger *zap.Logger @@ -46,7 +45,7 @@ type batchSender struct { } // newBatchSender returns a new batch consumer component. 
-func newBatchSender(cfg exporterbatcher.Config, set exporter.CreateSettings, +func newBatchSender(cfg exporterbatcher.Config, set exporter.Settings, mf exporterbatcher.BatchMergeFunc[Request], msf exporterbatcher.BatchMergeSplitFunc[Request]) *batchSender { bs := &batchSender{ activeBatch: newEmptyBatch(), @@ -54,15 +53,15 @@ func newBatchSender(cfg exporterbatcher.Config, set exporter.CreateSettings, logger: set.Logger, mergeFunc: mf, mergeSplitFunc: msf, - shutdownCh: make(chan struct{}), + shutdownCh: nil, shutdownCompleteCh: make(chan struct{}), stopped: &atomic.Bool{}, - resetTimerCh: make(chan struct{}), } return bs } func (bs *batchSender) Start(_ context.Context, _ component.Host) error { + bs.shutdownCh = make(chan struct{}) timer := time.NewTimer(bs.cfg.FlushTimeout) go func() { for { @@ -84,16 +83,17 @@ func (bs *batchSender) Start(_ context.Context, _ component.Host) error { return case <-timer.C: bs.mu.Lock() + nextFlush := bs.cfg.FlushTimeout if bs.activeBatch.request != nil { - bs.exportActiveBatch() + sinceLastFlush := time.Since(bs.lastFlushed) + if sinceLastFlush >= bs.cfg.FlushTimeout { + bs.exportActiveBatch() + } else { + nextFlush = bs.cfg.FlushTimeout - sinceLastFlush + } } bs.mu.Unlock() - timer.Reset(bs.cfg.FlushTimeout) - case <-bs.resetTimerCh: - if !timer.Stop() { - <-timer.C - } - timer.Reset(bs.cfg.FlushTimeout) + timer.Reset(nextFlush) } } }() @@ -106,6 +106,10 @@ type batch struct { request Request done chan struct{} err error + + // requestsBlocked is the number of requests blocked in this batch + // that can be immediately released from activeRequests when batch sending completes. + requestsBlocked int64 } func newEmptyBatch() *batch { @@ -119,18 +123,14 @@ func newEmptyBatch() *batch { // Caller must hold the lock. func (bs *batchSender) exportActiveBatch() { go func(b *batch) { - b.err = b.request.Export(b.ctx) + b.err = bs.nextSender.send(b.ctx, b.request) close(b.done) + bs.activeRequests.Add(-b.requestsBlocked) }(bs.activeBatch) + bs.lastFlushed = time.Now() bs.activeBatch = newEmptyBatch() } -func (bs *batchSender) resetTimer() { - if !bs.stopped.Load() { - bs.resetTimerCh <- struct{}{} - } -} - // isActiveBatchReady returns true if the active batch is ready to be exported. // The batch is ready if it has reached the minimum size or the concurrency limit is reached. // Caller must hold the lock. @@ -154,20 +154,25 @@ func (bs *batchSender) send(ctx context.Context, req Request) error { // sendMergeSplitBatch sends the request to the batch which may be split into multiple requests. 
func (bs *batchSender) sendMergeSplitBatch(ctx context.Context, req Request) error { bs.mu.Lock() - bs.activeRequests.Add(1) - defer bs.activeRequests.Add(^uint64(0)) reqs, err := bs.mergeSplitFunc(ctx, bs.cfg.MaxSizeConfig, bs.activeBatch.request, req) if err != nil || len(reqs) == 0 { bs.mu.Unlock() return err } + + bs.activeRequests.Add(1) + if len(reqs) == 1 { + bs.activeBatch.requestsBlocked++ + } else { + // if there was a split, we want to make sure that bs.activeRequests is released once all of the parts are sent instead of using batch.requestsBlocked + defer bs.activeRequests.Add(-1) + } if len(reqs) == 1 || bs.activeBatch.request != nil { bs.updateActiveBatch(ctx, reqs[0]) batch := bs.activeBatch if bs.isActiveBatchReady() || len(reqs) > 1 { bs.exportActiveBatch() - bs.resetTimer() } bs.mu.Unlock() <-batch.done @@ -182,7 +187,7 @@ func (bs *batchSender) sendMergeSplitBatch(ctx context.Context, req Request) err // Intentionally do not put the last request in the active batch to not block it. // TODO: Consider including the partial request in the error to avoid double publishing. for _, r := range reqs { - if err := r.Export(ctx); err != nil { + if err := bs.nextSender.send(ctx, r); err != nil { return err } } @@ -192,8 +197,6 @@ func (bs *batchSender) sendMergeSplitBatch(ctx context.Context, req Request) err // sendMergeBatch sends the request to the batch and waits for the batch to be exported. func (bs *batchSender) sendMergeBatch(ctx context.Context, req Request) error { bs.mu.Lock() - bs.activeRequests.Add(1) - defer bs.activeRequests.Add(^uint64(0)) if bs.activeBatch.request != nil { var err error @@ -203,11 +206,13 @@ func (bs *batchSender) sendMergeBatch(ctx context.Context, req Request) error { return err } } + + bs.activeRequests.Add(1) bs.updateActiveBatch(ctx, req) batch := bs.activeBatch + batch.requestsBlocked++ if bs.isActiveBatchReady() { bs.exportActiveBatch() - bs.resetTimer() } bs.mu.Unlock() <-batch.done @@ -227,7 +232,9 @@ func (bs *batchSender) updateActiveBatch(ctx context.Context, req Request) { func (bs *batchSender) Shutdown(context.Context) error { bs.stopped.Store(true) - close(bs.shutdownCh) - <-bs.shutdownCompleteCh + if bs.shutdownCh != nil { + close(bs.shutdownCh) + <-bs.shutdownCompleteCh + } return nil } diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/common.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/common.go index aa84e9019f2..4016c14e7c9 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/common.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/common.go @@ -110,7 +110,7 @@ func WithQueue(config QueueSettings) Option { NumConsumers: config.NumConsumers, QueueSize: config.QueueSize, }) - o.queueSender = newQueueSender(q, o.set, config.NumConsumers, o.exportFailureMessage) + o.queueSender = newQueueSender(q, o.set, config.NumConsumers, o.exportFailureMessage, o.obsrep.telemetryBuilder) return nil } } @@ -132,7 +132,7 @@ func WithRequestQueue(cfg exporterqueue.Config, queueFactory exporterqueue.Facto DataType: o.signal, ExporterSettings: o.set, } - o.queueSender = newQueueSender(queueFactory(context.Background(), set, cfg), o.set, cfg.NumConsumers, o.exportFailureMessage) + o.queueSender = newQueueSender(queueFactory(context.Background(), set, cfg), o.set, cfg.NumConsumers, o.exportFailureMessage, o.obsrep.telemetryBuilder) return nil } } @@ -231,7 +231,7 @@ type baseExporter struct { marshaler exporterqueue.Marshaler[Request] unmarshaler 
exporterqueue.Unmarshaler[Request] - set exporter.CreateSettings + set exporter.Settings obsrep *ObsReport // Message for the user to be added with an export failure message. @@ -249,7 +249,7 @@ type baseExporter struct { consumerOptions []consumer.Option } -func newBaseExporter(set exporter.CreateSettings, signal component.DataType, osf obsrepSenderFactory, options ...Option) (*baseExporter, error) { +func newBaseExporter(set exporter.Settings, signal component.DataType, osf obsrepSenderFactory, options ...Option) (*baseExporter, error) { obsReport, err := NewObsReport(ObsReportSettings{ExporterID: set.ID, ExporterCreateSettings: set}) if err != nil { return nil, err @@ -280,7 +280,7 @@ func newBaseExporter(set exporter.CreateSettings, signal component.DataType, osf if bs, ok := be.batchSender.(*batchSender); ok { // If queue sender is enabled assign to the batch sender the same number of workers. if qs, ok := be.queueSender.(*queueSender); ok { - bs.concurrencyLimit = uint64(qs.numConsumers) + bs.concurrencyLimit = int64(qs.numConsumers) } // Batcher sender mutates the data. be.consumerOptions = append(be.consumerOptions, consumer.WithCapabilities(consumer.Capabilities{MutatesData: true})) diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/documentation.md b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/documentation.md index ac974e01dd2..df11c1af81e 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/documentation.md +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/documentation.md @@ -30,6 +30,22 @@ Number of spans failed to be added to the sending queue. | ---- | ----------- | ---------- | --------- | | 1 | Sum | Int | true | +### exporter_queue_capacity + +Fixed capacity of the retry queue (in batches) + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### exporter_queue_size + +Current size of the retry queue (in batches) + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + ### exporter_send_failed_log_records Number of log records in failed attempts to send to destination. diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata/generated_telemetry.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata/generated_telemetry.go index 481cc2b27ec..4383809885e 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata/generated_telemetry.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata/generated_telemetry.go @@ -3,8 +3,10 @@ package metadata import ( + "context" "errors" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/noop" "go.opentelemetry.io/otel/trace" @@ -24,9 +26,12 @@ func Tracer(settings component.TelemetrySettings) trace.Tracer { // TelemetryBuilder provides an interface for components to report telemetry // as defined in metadata and user config. 
type TelemetryBuilder struct { + meter metric.Meter ExporterEnqueueFailedLogRecords metric.Int64Counter ExporterEnqueueFailedMetricPoints metric.Int64Counter ExporterEnqueueFailedSpans metric.Int64Counter + ExporterQueueCapacity metric.Int64ObservableGauge + ExporterQueueSize metric.Int64ObservableGauge ExporterSendFailedLogRecords metric.Int64Counter ExporterSendFailedMetricPoints metric.Int64Counter ExporterSendFailedSpans metric.Int64Counter @@ -34,6 +39,7 @@ type TelemetryBuilder struct { ExporterSentMetricPoints metric.Int64Counter ExporterSentSpans metric.Int64Counter level configtelemetry.Level + attributeSet attribute.Set } // telemetryBuilderOption applies changes to default builder. @@ -46,6 +52,43 @@ func WithLevel(lvl configtelemetry.Level) telemetryBuilderOption { } } +// WithAttributeSet applies a set of attributes for asynchronous instruments. +func WithAttributeSet(set attribute.Set) telemetryBuilderOption { + return func(builder *TelemetryBuilder) { + builder.attributeSet = set + } +} + +// InitExporterQueueCapacity configures the ExporterQueueCapacity metric. +func (builder *TelemetryBuilder) InitExporterQueueCapacity(cb func() int64) error { + var err error + builder.ExporterQueueCapacity, err = builder.meter.Int64ObservableGauge( + "exporter_queue_capacity", + metric.WithDescription("Fixed capacity of the retry queue (in batches)"), + metric.WithUnit("1"), + metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error { + o.Observe(cb(), metric.WithAttributeSet(builder.attributeSet)) + return nil + }), + ) + return err +} + +// InitExporterQueueSize configures the ExporterQueueSize metric. +func (builder *TelemetryBuilder) InitExporterQueueSize(cb func() int64) error { + var err error + builder.ExporterQueueSize, err = builder.meter.Int64ObservableGauge( + "exporter_queue_size", + metric.WithDescription("Current size of the retry queue (in batches)"), + metric.WithUnit("1"), + metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error { + o.Observe(cb(), metric.WithAttributeSet(builder.attributeSet)) + return nil + }), + ) + return err +} + // NewTelemetryBuilder provides a struct with methods to update all internal telemetry // for a component func NewTelemetryBuilder(settings component.TelemetrySettings, options ...telemetryBuilderOption) (*TelemetryBuilder, error) { @@ -53,64 +96,61 @@ func NewTelemetryBuilder(settings component.TelemetrySettings, options ...teleme for _, op := range options { op(&builder) } - var ( - err, errs error - meter metric.Meter - ) + var err, errs error if builder.level >= configtelemetry.LevelBasic { - meter = Meter(settings) + builder.meter = Meter(settings) } else { - meter = noop.Meter{} + builder.meter = noop.Meter{} } - builder.ExporterEnqueueFailedLogRecords, err = meter.Int64Counter( + builder.ExporterEnqueueFailedLogRecords, err = builder.meter.Int64Counter( "exporter_enqueue_failed_log_records", metric.WithDescription("Number of log records failed to be added to the sending queue."), metric.WithUnit("1"), ) errs = errors.Join(errs, err) - builder.ExporterEnqueueFailedMetricPoints, err = meter.Int64Counter( + builder.ExporterEnqueueFailedMetricPoints, err = builder.meter.Int64Counter( "exporter_enqueue_failed_metric_points", metric.WithDescription("Number of metric points failed to be added to the sending queue."), metric.WithUnit("1"), ) errs = errors.Join(errs, err) - builder.ExporterEnqueueFailedSpans, err = meter.Int64Counter( + builder.ExporterEnqueueFailedSpans, err = 
builder.meter.Int64Counter( "exporter_enqueue_failed_spans", metric.WithDescription("Number of spans failed to be added to the sending queue."), metric.WithUnit("1"), ) errs = errors.Join(errs, err) - builder.ExporterSendFailedLogRecords, err = meter.Int64Counter( + builder.ExporterSendFailedLogRecords, err = builder.meter.Int64Counter( "exporter_send_failed_log_records", metric.WithDescription("Number of log records in failed attempts to send to destination."), metric.WithUnit("1"), ) errs = errors.Join(errs, err) - builder.ExporterSendFailedMetricPoints, err = meter.Int64Counter( + builder.ExporterSendFailedMetricPoints, err = builder.meter.Int64Counter( "exporter_send_failed_metric_points", metric.WithDescription("Number of metric points in failed attempts to send to destination."), metric.WithUnit("1"), ) errs = errors.Join(errs, err) - builder.ExporterSendFailedSpans, err = meter.Int64Counter( + builder.ExporterSendFailedSpans, err = builder.meter.Int64Counter( "exporter_send_failed_spans", metric.WithDescription("Number of spans in failed attempts to send to destination."), metric.WithUnit("1"), ) errs = errors.Join(errs, err) - builder.ExporterSentLogRecords, err = meter.Int64Counter( + builder.ExporterSentLogRecords, err = builder.meter.Int64Counter( "exporter_sent_log_records", metric.WithDescription("Number of log record successfully sent to destination."), metric.WithUnit("1"), ) errs = errors.Join(errs, err) - builder.ExporterSentMetricPoints, err = meter.Int64Counter( + builder.ExporterSentMetricPoints, err = builder.meter.Int64Counter( "exporter_sent_metric_points", metric.WithDescription("Number of metric points successfully sent to destination."), metric.WithUnit("1"), ) errs = errors.Join(errs, err) - builder.ExporterSentSpans, err = meter.Int64Counter( + builder.ExporterSentSpans, err = builder.meter.Int64Counter( "exporter_sent_spans", metric.WithDescription("Number of spans successfully sent to destination."), metric.WithUnit("1"), diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/logs.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/logs.go index 2706ec7fe92..c8505f5b97c 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/logs.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/logs.go @@ -71,7 +71,7 @@ type logsExporter struct { // NewLogsExporter creates an exporter.Logs that records observability metrics and wraps every request with a Span. func NewLogsExporter( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, pusher consumer.ConsumeLogsFunc, options ...Option, @@ -106,7 +106,7 @@ func requestFromLogs(pusher consumer.ConsumeLogsFunc) RequestFromLogsFunc { // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. 
func NewLogsRequestExporter( _ context.Context, - set exporter.CreateSettings, + set exporter.Settings, converter RequestFromLogsFunc, options ...Option, ) (exporter.Logs, error) { diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metadata.yaml b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metadata.yaml index 26b06a37536..0703902f6f4 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metadata.yaml +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metadata.yaml @@ -80,3 +80,21 @@ telemetry: sum: value_type: int monotonic: true + + exporter_queue_size: + enabled: true + description: Current size of the retry queue (in batches) + unit: 1 + optional: true + gauge: + value_type: int + async: true + + exporter_queue_capacity: + enabled: true + description: Fixed capacity of the retry queue (in batches) + unit: 1 + optional: true + gauge: + value_type: int + async: true diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metrics.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metrics.go index 83db18229dd..683dc576f7a 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metrics.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metrics.go @@ -71,7 +71,7 @@ type metricsExporter struct { // NewMetricsExporter creates an exporter.Metrics that records observability metrics and wraps every request with a Span. func NewMetricsExporter( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, pusher consumer.ConsumeMetricsFunc, options ...Option, @@ -106,7 +106,7 @@ func requestFromMetrics(pusher consumer.ConsumeMetricsFunc) RequestFromMetricsFu // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. func NewMetricsRequestExporter( _ context.Context, - set exporter.CreateSettings, + set exporter.Settings, converter RequestFromMetricsFunc, options ...Option, ) (exporter.Metrics, error) { diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/obsexporter.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/obsexporter.go index e3a78c34b04..1e4d1179f4d 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/obsexporter.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/obsexporter.go @@ -10,7 +10,6 @@ import ( "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configtelemetry" @@ -24,7 +23,6 @@ type ObsReport struct { level configtelemetry.Level spanNamePrefix string tracer trace.Tracer - logger *zap.Logger otelAttrs []attribute.KeyValue telemetryBuilder *metadata.TelemetryBuilder @@ -33,7 +31,7 @@ type ObsReport struct { // ObsReportSettings are settings for creating an ObsReport. type ObsReportSettings struct { ExporterID component.ID - ExporterCreateSettings exporter.CreateSettings + ExporterCreateSettings exporter.Settings } // NewObsReport creates a new Exporter. 
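The `exporter.CreateSettings` → `exporter.Settings` rename threaded through these hunks is non-breaking: the old name survives as a deprecated type alias, the same pattern the extension.go hunk further below makes explicit (`type CreateSettings = Settings`). Assuming the exporter package carries the equivalent alias, a minimal sketch of the downstream migration — the `myexporter` package and its stub body are hypothetical:

```go
package myexporter // hypothetical downstream package, for illustration only

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/exporter"
)

// Before v0.103.0 this signature read exporter.CreateSettings. Assuming the
// deprecated name remains as a type alias of exporter.Settings, both
// spellings compile during the deprecation window; only the name changes.
func createTracesExporter(_ context.Context, set exporter.Settings, _ component.Config) (exporter.Traces, error) {
	_ = set.ID // set still exposes ID, TelemetrySettings, and BuildInfo as before
	return nil, nil
}
```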
@@ -42,7 +40,9 @@ func NewObsReport(cfg ObsReportSettings) (*ObsReport, error) { } func newExporter(cfg ObsReportSettings) (*ObsReport, error) { - telemetryBuilder, err := metadata.NewTelemetryBuilder(cfg.ExporterCreateSettings.TelemetrySettings) + telemetryBuilder, err := metadata.NewTelemetryBuilder(cfg.ExporterCreateSettings.TelemetrySettings, + metadata.WithAttributeSet(attribute.NewSet(attribute.String(obsmetrics.ExporterKey, cfg.ExporterID.String()))), + ) if err != nil { return nil, err } @@ -51,7 +51,6 @@ func newExporter(cfg ObsReportSettings) (*ObsReport, error) { level: cfg.ExporterCreateSettings.TelemetrySettings.MetricsLevel, spanNamePrefix: obsmetrics.ExporterPrefix + cfg.ExporterID.String(), tracer: cfg.ExporterCreateSettings.TracerProvider.Tracer(cfg.ExporterID.String()), - logger: cfg.ExporterCreateSettings.Logger, otelAttrs: []attribute.KeyValue{ attribute.String(obsmetrics.ExporterKey, cfg.ExporterID.String()), @@ -70,7 +69,7 @@ func (or *ObsReport) StartTracesOp(ctx context.Context) context.Context { // EndTracesOp completes the export operation that was started with StartTracesOp. func (or *ObsReport) EndTracesOp(ctx context.Context, numSpans int, err error) { numSent, numFailedToSend := toNumItems(numSpans, err) - or.recordMetrics(noCancellationContext{Context: ctx}, component.DataTypeTraces, numSent, numFailedToSend) + or.recordMetrics(context.WithoutCancel(ctx), component.DataTypeTraces, numSent, numFailedToSend) endSpan(ctx, err, numSent, numFailedToSend, obsmetrics.SentSpansKey, obsmetrics.FailedToSendSpansKey) } @@ -85,7 +84,7 @@ func (or *ObsReport) StartMetricsOp(ctx context.Context) context.Context { // StartMetricsOp. func (or *ObsReport) EndMetricsOp(ctx context.Context, numMetricPoints int, err error) { numSent, numFailedToSend := toNumItems(numMetricPoints, err) - or.recordMetrics(noCancellationContext{Context: ctx}, component.DataTypeMetrics, numSent, numFailedToSend) + or.recordMetrics(context.WithoutCancel(ctx), component.DataTypeMetrics, numSent, numFailedToSend) endSpan(ctx, err, numSent, numFailedToSend, obsmetrics.SentMetricPointsKey, obsmetrics.FailedToSendMetricPointsKey) } @@ -99,7 +98,7 @@ func (or *ObsReport) StartLogsOp(ctx context.Context) context.Context { // EndLogsOp completes the export operation that was started with StartLogsOp. 
func (or *ObsReport) EndLogsOp(ctx context.Context, numLogRecords int, err error) { numSent, numFailedToSend := toNumItems(numLogRecords, err) - or.recordMetrics(noCancellationContext{Context: ctx}, component.DataTypeLogs, numSent, numFailedToSend) + or.recordMetrics(context.WithoutCancel(ctx), component.DataTypeLogs, numSent, numFailedToSend) endSpan(ctx, err, numSent, numFailedToSend, obsmetrics.SentLogRecordsKey, obsmetrics.FailedToSendLogRecordsKey) } diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/queue_sender.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/queue_sender.go index 3e539d7f9a0..f7817128b39 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/queue_sender.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/queue_sender.go @@ -6,16 +6,15 @@ package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporte import ( "context" "errors" - "time" "go.opentelemetry.io/otel/attribute" - otelmetric "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" "go.uber.org/multierr" "go.uber.org/zap" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata" "go.opentelemetry.io/collector/exporter/exporterqueue" "go.opentelemetry.io/collector/exporter/internal/queue" "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" @@ -23,10 +22,6 @@ import ( const defaultQueueSize = 1000 -var ( - scopeName = "go.opentelemetry.io/collector/exporterhelper" -) - // QueueSettings defines configuration for queueing batches before sending to the consumerSender. type QueueSettings struct { // Enabled indicates whether to not enqueue batches before sending to the consumerSender. 
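Several hunks above swap the hand-rolled `noCancellationContext` (deleted from queue_sender.go below) for the standard library's `context.WithoutCancel`, added in Go 1.21. The semantics are identical: the derived context keeps the parent's values but never reports cancellation or a deadline, which is what `recordMetrics` and the queue need once the inbound request context may already be cancelled. A self-contained sketch of that behavior:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

type tenantKey struct{}

func main() {
	parent, cancel := context.WithTimeout(
		context.WithValue(context.Background(), tenantKey{}, "tenant-a"),
		time.Millisecond,
	)
	defer cancel()

	detached := context.WithoutCancel(parent)
	time.Sleep(5 * time.Millisecond) // let the parent deadline expire

	fmt.Println(parent.Err())                // context deadline exceeded
	fmt.Println(detached.Err())              // <nil>: cancellation does not propagate
	fmt.Println(detached.Value(tenantKey{})) // tenant-a: values still flow through
}
```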
@@ -73,27 +68,21 @@ func (qCfg *QueueSettings) Validate() error { type queueSender struct { baseRequestSender - fullName string queue exporterqueue.Queue[Request] numConsumers int traceAttribute attribute.KeyValue - logger *zap.Logger - meter otelmetric.Meter consumers *queue.Consumers[Request] - metricCapacity otelmetric.Int64ObservableGauge - metricSize otelmetric.Int64ObservableGauge + telemetryBuilder *metadata.TelemetryBuilder } -func newQueueSender(q exporterqueue.Queue[Request], set exporter.CreateSettings, numConsumers int, - exportFailureMessage string) *queueSender { +func newQueueSender(q exporterqueue.Queue[Request], set exporter.Settings, numConsumers int, + exportFailureMessage string, telemetryBuilder *metadata.TelemetryBuilder) *queueSender { qs := &queueSender{ - fullName: set.ID.String(), - queue: q, - numConsumers: numConsumers, - traceAttribute: attribute.String(obsmetrics.ExporterKey, set.ID.String()), - logger: set.TelemetrySettings.Logger, - meter: set.TelemetrySettings.MeterProvider.Meter(scopeName), + queue: q, + numConsumers: numConsumers, + traceAttribute: attribute.String(obsmetrics.ExporterKey, set.ID.String()), + telemetryBuilder: telemetryBuilder, } consumeFunc := func(ctx context.Context, req Request) error { err := qs.nextSender.send(ctx, req) @@ -113,32 +102,10 @@ func (qs *queueSender) Start(ctx context.Context, host component.Host) error { return err } - var err, errs error - - attrs := otelmetric.WithAttributeSet(attribute.NewSet(attribute.String(obsmetrics.ExporterKey, qs.fullName))) - - qs.metricSize, err = qs.meter.Int64ObservableGauge( - obsmetrics.ExporterKey+"/queue_size", - otelmetric.WithDescription("Current size of the retry queue (in batches)"), - otelmetric.WithUnit("1"), - otelmetric.WithInt64Callback(func(_ context.Context, o otelmetric.Int64Observer) error { - o.Observe(int64(qs.queue.Size()), attrs) - return nil - }), + return multierr.Append( + qs.telemetryBuilder.InitExporterQueueSize(func() int64 { return int64(qs.queue.Size()) }), + qs.telemetryBuilder.InitExporterQueueCapacity(func() int64 { return int64(qs.queue.Capacity()) }), ) - errs = multierr.Append(errs, err) - - qs.metricCapacity, err = qs.meter.Int64ObservableGauge( - obsmetrics.ExporterKey+"/queue_capacity", - otelmetric.WithDescription("Fixed capacity of the retry queue (in batches)"), - otelmetric.WithUnit("1"), - otelmetric.WithInt64Callback(func(_ context.Context, o otelmetric.Int64Observer) error { - o.Observe(int64(qs.queue.Capacity()), attrs) - return nil - })) - - errs = multierr.Append(errs, err) - return errs } // Shutdown is invoked during service shutdown. @@ -152,7 +119,7 @@ func (qs *queueSender) Shutdown(ctx context.Context) error { func (qs *queueSender) send(ctx context.Context, req Request) error { // Prevent cancellation and deadline to propagate to the context stored in the queue. // The grpc/http based receivers will cancel the request context after this function returns. 
- c := noCancellationContext{Context: ctx} + c := context.WithoutCancel(ctx) span := trace.SpanFromContext(c) if err := qs.queue.Offer(c, req); err != nil { @@ -163,19 +130,3 @@ func (qs *queueSender) send(ctx context.Context, req Request) error { span.AddEvent("Enqueued item.", trace.WithAttributes(qs.traceAttribute)) return nil } - -type noCancellationContext struct { - context.Context -} - -func (noCancellationContext) Deadline() (deadline time.Time, ok bool) { - return -} - -func (noCancellationContext) Done() <-chan struct{} { - return nil -} - -func (noCancellationContext) Err() error { - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/retry_sender.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/retry_sender.go index 6e8a36f9ef4..03b1dba80cc 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/retry_sender.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/retry_sender.go @@ -51,7 +51,7 @@ type retrySender struct { logger *zap.Logger } -func newRetrySender(config configretry.BackOffConfig, set exporter.CreateSettings) *retrySender { +func newRetrySender(config configretry.BackOffConfig, set exporter.Settings) *retrySender { return &retrySender{ traceAttribute: attribute.String(obsmetrics.ExporterKey, set.ID.String()), cfg: config, diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/traces.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/traces.go index 6f5f39f3de5..e510bae3dd3 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/traces.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/traces.go @@ -71,7 +71,7 @@ type traceExporter struct { // NewTracesExporter creates an exporter.Traces that records observability metrics and wraps every request with a Span. func NewTracesExporter( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, pusher consumer.ConsumeTracesFunc, options ...Option, @@ -106,7 +106,7 @@ func requestFromTraces(pusher consumer.ConsumeTracesFunc) RequestFromTracesFunc // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. func NewTracesRequestExporter( _ context.Context, - set exporter.CreateSettings, + set exporter.Settings, converter RequestFromTracesFunc, options ...Option, ) (exporter.Traces, error) { diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/queue.go b/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/queue.go index 5b568851b66..f47196ba124 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/queue.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/queue.go @@ -25,7 +25,7 @@ type Queue[T any] queue.Queue[T] // Settings defines settings for creating a queue. type Settings struct { DataType component.DataType - ExporterSettings exporter.CreateSettings + ExporterSettings exporter.Settings } // Marshaler is a function that can marshal a request into bytes. 
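The queue_sender.go rewrite above moves the queue-size and queue-capacity gauges from hand-written `Start` code into the generated `TelemetryBuilder`, but the underlying OpenTelemetry pattern is unchanged: an `Int64ObservableGauge` whose callback samples the queue on each metric collection, instead of the queue pushing updates. A condensed sketch of that pattern — the `sizer` interface and the meter scope name are placeholders, not collector API:

```go
package queuetelemetry

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

// sizer stands in for anything exposing a current depth, e.g. Queue[T].Size().
type sizer interface{ Size() int }

func registerQueueSizeGauge(s sizer) (metric.Int64ObservableGauge, error) {
	meter := otel.Meter("example/queuetelemetry") // placeholder scope name
	return meter.Int64ObservableGauge(
		"exporter_queue_size",
		metric.WithDescription("Current size of the retry queue (in batches)"),
		metric.WithUnit("1"),
		// The callback runs on every collection, so the gauge always reports
		// the instantaneous size without any bookkeeping in the hot path.
		metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error {
			o.Observe(int64(s.Size()))
			return nil
		}),
	)
}
```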
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exportertest/contract_checker.go b/vendor/go.opentelemetry.io/collector/exporter/exportertest/contract_checker.go index 7b3f55c1902..0ae8f6b5c9a 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exportertest/contract_checker.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exportertest/contract_checker.go @@ -86,17 +86,17 @@ func checkConsumeContractScenario(t *testing.T, params CheckConsumeContractParam mockConsumerInstance := newMockConsumer(decisionFunc) switch params.DataType { case component.DataTypeLogs: - r, err := params.ReceiverFactory.CreateLogsReceiver(context.Background(), receivertest.NewNopCreateSettings(), params.ReceiverConfig, &mockConsumerInstance) + r, err := params.ReceiverFactory.CreateLogsReceiver(context.Background(), receivertest.NewNopSettings(), params.ReceiverConfig, &mockConsumerInstance) require.NoError(t, err) require.NoError(t, r.Start(context.Background(), componenttest.NewNopHost())) checkLogs(t, params, r, &mockConsumerInstance, checkIfTestPassed) case component.DataTypeTraces: - r, err := params.ReceiverFactory.CreateTracesReceiver(context.Background(), receivertest.NewNopCreateSettings(), params.ReceiverConfig, &mockConsumerInstance) + r, err := params.ReceiverFactory.CreateTracesReceiver(context.Background(), receivertest.NewNopSettings(), params.ReceiverConfig, &mockConsumerInstance) require.NoError(t, err) require.NoError(t, r.Start(context.Background(), componenttest.NewNopHost())) checkTraces(t, params, r, &mockConsumerInstance, checkIfTestPassed) case component.DataTypeMetrics: - r, err := params.ReceiverFactory.CreateMetricsReceiver(context.Background(), receivertest.NewNopCreateSettings(), params.ReceiverConfig, &mockConsumerInstance) + r, err := params.ReceiverFactory.CreateMetricsReceiver(context.Background(), receivertest.NewNopSettings(), params.ReceiverConfig, &mockConsumerInstance) require.NoError(t, err) require.NoError(t, r.Start(context.Background(), componenttest.NewNopHost())) checkMetrics(t, params, r, &mockConsumerInstance, checkIfTestPassed) @@ -110,7 +110,7 @@ func checkMetrics(t *testing.T, params CheckConsumeContractParams, mockReceiver ctx := context.Background() var exp exporter.Metrics var err error - exp, err = params.ExporterFactory.CreateMetricsExporter(ctx, NewNopCreateSettings(), params.ExporterConfig) + exp, err = params.ExporterFactory.CreateMetricsExporter(ctx, NewNopSettings(), params.ExporterConfig) require.NoError(t, err) require.NotNil(t, exp) @@ -150,7 +150,7 @@ func checkTraces(t *testing.T, params CheckConsumeContractParams, mockReceiver c ctx := context.Background() var exp exporter.Traces var err error - exp, err = params.ExporterFactory.CreateTracesExporter(ctx, NewNopCreateSettings(), params.ExporterConfig) + exp, err = params.ExporterFactory.CreateTracesExporter(ctx, NewNopSettings(), params.ExporterConfig) require.NoError(t, err) require.NotNil(t, exp) @@ -190,7 +190,7 @@ func checkLogs(t *testing.T, params CheckConsumeContractParams, mockReceiver com ctx := context.Background() var exp exporter.Logs var err error - exp, err = params.ExporterFactory.CreateLogsExporter(ctx, NewNopCreateSettings(), params.ExporterConfig) + exp, err = params.ExporterFactory.CreateLogsExporter(ctx, NewNopSettings(), params.ExporterConfig) require.NoError(t, err) require.NotNil(t, exp) diff --git a/vendor/go.opentelemetry.io/collector/exporter/exportertest/nop_exporter.go b/vendor/go.opentelemetry.io/collector/exporter/exportertest/nop_exporter.go index 
6589d867bec..34beae0c32a 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exportertest/nop_exporter.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exportertest/nop_exporter.go @@ -17,8 +17,15 @@ import ( var nopType = component.MustNewType("nop") // NewNopCreateSettings returns a new nop settings for Create*Exporter functions. -func NewNopCreateSettings() exporter.CreateSettings { - return exporter.CreateSettings{ +// +// Deprecated: [v0.103.0] Use exportertest.NewNopSettings instead. +func NewNopCreateSettings() exporter.Settings { + return NewNopSettings() +} + +// NewNopSettings returns a new nop settings for Create*Exporter functions. +func NewNopSettings() exporter.Settings { + return exporter.Settings{ ID: component.NewIDWithName(nopType, uuid.NewString()), TelemetrySettings: componenttest.NewNopTelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo(), @@ -36,15 +43,15 @@ func NewNopFactory() exporter.Factory { ) } -func createTracesExporter(context.Context, exporter.CreateSettings, component.Config) (exporter.Traces, error) { +func createTracesExporter(context.Context, exporter.Settings, component.Config) (exporter.Traces, error) { return nopInstance, nil } -func createMetricsExporter(context.Context, exporter.CreateSettings, component.Config) (exporter.Metrics, error) { +func createMetricsExporter(context.Context, exporter.Settings, component.Config) (exporter.Metrics, error) { return nopInstance, nil } -func createLogsExporter(context.Context, exporter.CreateSettings, component.Config) (exporter.Logs, error) { +func createLogsExporter(context.Context, exporter.Settings, component.Config) (exporter.Logs, error) { return nopInstance, nil } diff --git a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/persistent_queue.go b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/persistent_queue.go index 07b0d1fb628..7dd646c6ef3 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/persistent_queue.go +++ b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/persistent_queue.go @@ -89,7 +89,7 @@ type PersistentQueueSettings[T any] struct { StorageID component.ID Marshaler func(req T) ([]byte, error) Unmarshaler func([]byte) (T, error) - ExporterSettings exporter.CreateSettings + ExporterSettings exporter.Settings } // NewPersistentQueue creates a new queue backed by file storage; name and signal must be a unique combination that identifies the queue storage diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/README.md b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/README.md index 8926a14369b..b4a5c7b055d 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/README.md +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/README.md @@ -5,13 +5,14 @@ | ------------- |-----------| | Stability | [beta]: logs | | | [stable]: traces, metrics | -| Distributions | [core], [contrib] | +| Distributions | [core], [contrib], [k8s] | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fotlp%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fotlp) [![Closed 
issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fotlp%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fotlp) | [beta]: https://github.com/open-telemetry/opentelemetry-collector#beta [stable]: https://github.com/open-telemetry/opentelemetry-collector#stable [core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib +[k8s]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s Export data via gRPC using [OTLP]( diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/config.go b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/config.go index 62956f293ed..f1d5b668e54 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/config.go +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/config.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "net" + "regexp" "strconv" "strings" @@ -49,8 +50,9 @@ func (c *Config) sanitizedEndpoint() string { return strings.TrimPrefix(c.Endpoint, "http://") case strings.HasPrefix(c.Endpoint, "https://"): return strings.TrimPrefix(c.Endpoint, "https://") - case strings.HasPrefix(c.Endpoint, "dns:///"): - return strings.TrimPrefix(c.Endpoint, "dns:///") + case strings.HasPrefix(c.Endpoint, "dns://"): + r := regexp.MustCompile("^dns://[/]?") + return r.ReplaceAllString(c.Endpoint, "") default: return c.Endpoint } diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/factory.go b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/factory.go index 8a72aab8cdc..c06a6c605b2 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/factory.go +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/factory.go @@ -45,7 +45,7 @@ func createDefaultConfig() component.Config { func createTracesExporter( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, ) (exporter.Traces, error) { oce := newExporter(cfg, set) @@ -62,7 +62,7 @@ func createTracesExporter( func createMetricsExporter( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, ) (exporter.Metrics, error) { oce := newExporter(cfg, set) @@ -80,7 +80,7 @@ func createMetricsExporter( func createLogsExporter( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, ) (exporter.Logs, error) { oce := newExporter(cfg, set) diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/metadata.yaml b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/metadata.yaml index f24e40e1f91..0d517c86116 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/metadata.yaml +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/metadata.yaml @@ -5,7 +5,7 @@ status: stability: stable: [traces, metrics] beta: [logs] - distributions: [core, contrib] + distributions: [core, contrib, k8s] tests: config: diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/otlp.go b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/otlp.go index 21864d7723a..b8d6dee2a75 100644 --- 
a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/otlp.go +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/otlp.go @@ -46,7 +46,7 @@ type baseExporter struct { userAgent string } -func newExporter(cfg component.Config, set exporter.CreateSettings) *baseExporter { +func newExporter(cfg component.Config, set exporter.Settings) *baseExporter { oCfg := cfg.(*Config) userAgent := fmt.Sprintf("%s/%s (%s/%s)", @@ -151,8 +151,7 @@ func processError(err error) error { return nil } - // Now, this is this a real error. - + // Now, this is a real error. retryInfo := getRetryInfo(st) if !shouldRetry(st.Code(), retryInfo) { @@ -168,7 +167,6 @@ func processError(err error) error { } // Need to retry. - return err } diff --git a/vendor/go.opentelemetry.io/collector/extension/extension.go b/vendor/go.opentelemetry.io/collector/extension/extension.go index 292358b33ba..8ee61a422cc 100644 --- a/vendor/go.opentelemetry.io/collector/extension/extension.go +++ b/vendor/go.opentelemetry.io/collector/extension/extension.go @@ -63,7 +63,12 @@ type StatusWatcher interface { } // CreateSettings is passed to Factory.Create(...) function. -type CreateSettings struct { +// +// Deprecated: [v0.103.0] Use extension.Settings instead. +type CreateSettings = Settings + +// Settings is passed to Factory.Create(...) function. +type Settings struct { // ID returns the ID of the component that will be created. ID component.ID @@ -74,10 +79,10 @@ type CreateSettings struct { } // CreateFunc is the equivalent of Factory.Create(...) function. -type CreateFunc func(context.Context, CreateSettings, component.Config) (Extension, error) +type CreateFunc func(context.Context, Settings, component.Config) (Extension, error) // CreateExtension implements Factory.Create. -func (f CreateFunc) CreateExtension(ctx context.Context, set CreateSettings, cfg component.Config) (Extension, error) { +func (f CreateFunc) CreateExtension(ctx context.Context, set Settings, cfg component.Config) (Extension, error) { return f(ctx, set, cfg) } @@ -85,7 +90,7 @@ type Factory interface { component.Factory // CreateExtension creates an extension based on the given config. - CreateExtension(ctx context.Context, set CreateSettings, cfg component.Config) (Extension, error) + CreateExtension(ctx context.Context, set Settings, cfg component.Config) (Extension, error) // ExtensionStability gets the stability level of the Extension. ExtensionStability() component.StabilityLevel @@ -149,7 +154,7 @@ func NewBuilder(cfgs map[component.ID]component.Config, factories map[component. } // Create creates an extension based on the settings and configs available. 
-func (b *Builder) Create(ctx context.Context, set CreateSettings) (Extension, error) { +func (b *Builder) Create(ctx context.Context, set Settings) (Extension, error) { cfg, existsCfg := b.cfgs[set.ID] if !existsCfg { return nil, fmt.Errorf("extension %q is not configured", set.ID) diff --git a/vendor/go.opentelemetry.io/collector/internal/featuregates/featuregates.go b/vendor/go.opentelemetry.io/collector/internal/featuregates/featuregates.go new file mode 100644 index 00000000000..e74c75fe481 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/internal/featuregates/featuregates.go @@ -0,0 +1,11 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package featuregates // import "go.opentelemetry.io/collector/internal/featuregates" + +import "go.opentelemetry.io/collector/featuregate" + +var UseUnifiedEnvVarExpansionRules = featuregate.GlobalRegistry().MustRegister("confmap.unifyEnvVarExpansion", + featuregate.StageBeta, + featuregate.WithRegisterFromVersion("v0.103.0"), + featuregate.WithRegisterDescription("`${FOO}` will now be expanded as if it was `${env:FOO}` and no longer expands $ENV syntax. See https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/rfcs/env-vars.md for more details. When this feature gate is stable, expandconverter will be removed.")) diff --git a/vendor/go.opentelemetry.io/collector/internal/localhostgate/featuregate.go b/vendor/go.opentelemetry.io/collector/internal/localhostgate/featuregate.go index 78c19622967..7c8ee7aeb49 100644 --- a/vendor/go.opentelemetry.io/collector/internal/localhostgate/featuregate.go +++ b/vendor/go.opentelemetry.io/collector/internal/localhostgate/featuregate.go @@ -23,7 +23,7 @@ const UseLocalHostAsDefaultHostID = "component.UseLocalHostAsDefaultHost" var UseLocalHostAsDefaultHostfeatureGate = mustRegisterOrLoad( featuregate.GlobalRegistry(), UseLocalHostAsDefaultHostID, - featuregate.StageAlpha, + featuregate.StageBeta, featuregate.WithRegisterDescription("controls whether server-like receivers and extensions such as the OTLP receiver use localhost as the default host for their endpoints"), ) @@ -60,8 +60,8 @@ func EndpointForPort(port int) string { // LogAboutUseLocalHostAsDefault logs about the upcoming change from 0.0.0.0 to localhost on server-like components. func LogAboutUseLocalHostAsDefault(logger *zap.Logger) { if !UseLocalHostAsDefaultHostfeatureGate.IsEnabled() { - logger.Warn( - "The default endpoints for all servers in components will change to use localhost instead of 0.0.0.0 in a future version. Use the feature gate to preview the new default.", + logger.Info( + "The default endpoints for all servers in components have changed to use localhost instead of 0.0.0.0. Use the feature gate to temporarily revert to the previous default.", zap.String("feature gate ID", UseLocalHostAsDefaultHostID), ) } diff --git a/vendor/go.opentelemetry.io/collector/otelcol/command.go b/vendor/go.opentelemetry.io/collector/otelcol/command.go index 648b3a5ffda..8516ec831ff 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/command.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/command.go @@ -10,12 +10,14 @@ import ( "github.com/spf13/cobra" "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/internal/featuregates" ) // NewCommand constructs a new cobra.Command using the given CollectorSettings. 
// Any URIs specified in CollectorSettings.ConfigProviderSettings.ResolverSettings.URIs // are considered defaults and will be overwritten by config flags passed as // command-line arguments to the executable. +// At least one Provider must be set. func NewCommand(set CollectorSettings) *cobra.Command { flagSet := flags(featuregate.GlobalRegistry()) rootCmd := &cobra.Command{ @@ -41,6 +43,17 @@ func NewCommand(set CollectorSettings) *cobra.Command { return rootCmd } +// NewCommandMustSetProvider constructs a new cobra.Command using the given CollectorSettings. +// Any URIs specified in CollectorSettings.ConfigProviderSettings.ResolverSettings.URIs +// are considered defaults and will be overwritten by config flags passed as +// command-line arguments to the executable. +// At least one Provider must be supplied via CollectorSettings.ConfigProviderSettings.ResolverSettings.ProviderFactories. +// +// Deprecated: [v0.104.0] use NewCommand instead +func NewCommandMustSetProvider(set CollectorSettings) *cobra.Command { + return NewCommand(set) +} + // Puts command line flags from flags into the CollectorSettings, to be used during config resolution. func updateSettingsUsingFlags(set *CollectorSettings, flags *flag.FlagSet) error { resolverSet := &set.ConfigProviderSettings.ResolverSettings @@ -52,11 +65,13 @@ func updateSettingsUsingFlags(set *CollectorSettings, flags *flag.FlagSet) error if len(resolverSet.URIs) == 0 { return errors.New("at least one config flag must be provided") } - // Provide a default set of providers and converters if none have been specified. - // TODO: Remove this after CollectorSettings.ConfigProvider is removed and instead - // do it in the builder. - if len(resolverSet.ProviderFactories) == 0 && len(resolverSet.ConverterFactories) == 0 { - set.ConfigProviderSettings = newDefaultConfigProviderSettings(resolverSet.URIs) + + if featuregates.UseUnifiedEnvVarExpansionRules.IsEnabled() && set.ConfigProviderSettings.ResolverSettings.DefaultScheme == "" { + set.ConfigProviderSettings.ResolverSettings.DefaultScheme = "env" + } + + if len(resolverSet.ProviderFactories) == 0 { + return errors.New("at least one Provider must be supplied") } return nil } diff --git a/vendor/go.opentelemetry.io/collector/otelcol/configprovider.go b/vendor/go.opentelemetry.io/collector/otelcol/configprovider.go index 6360b536509..ea5ab1b2026 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/configprovider.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/configprovider.go @@ -8,12 +8,6 @@ import ( "fmt" "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/confmap/converter/expandconverter" - "go.opentelemetry.io/collector/confmap/provider/envprovider" - "go.opentelemetry.io/collector/confmap/provider/fileprovider" - "go.opentelemetry.io/collector/confmap/provider/httpprovider" - "go.opentelemetry.io/collector/confmap/provider/httpsprovider" - "go.opentelemetry.io/collector/confmap/provider/yamlprovider" ) // ConfigProvider provides the service configuration. 
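With the implicit fallback removed (the `newDefaultConfigProviderSettings` helper is deleted in the next hunk), `NewCommand` now returns an error unless the embedding distribution lists its providers explicitly. A minimal sketch of that wiring, reusing two of the factories the deleted helper registered — the selection is illustrative, not a recommended set:

```go
package distro

import (
	"go.opentelemetry.io/collector/confmap"
	"go.opentelemetry.io/collector/confmap/provider/envprovider"
	"go.opentelemetry.io/collector/confmap/provider/fileprovider"
	"go.opentelemetry.io/collector/otelcol"
)

// newCollectorSettings prepares settings that satisfy the new requirement;
// the remaining fields (Factories, BuildInfo, ...) are omitted for brevity.
func newCollectorSettings(uris []string) otelcol.CollectorSettings {
	return otelcol.CollectorSettings{
		ConfigProviderSettings: otelcol.ConfigProviderSettings{
			ResolverSettings: confmap.ResolverSettings{
				URIs: uris,
				// Explicit now: without at least one factory here,
				// config resolution fails at startup.
				ProviderFactories: []confmap.ProviderFactory{
					fileprovider.NewFactory(),
					envprovider.NewFactory(),
				},
			},
		},
	}
}
```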
@@ -130,19 +124,3 @@ func (cm *configProvider) GetConfmap(ctx context.Context) (*confmap.Conf, error) return conf, nil } - -func newDefaultConfigProviderSettings(uris []string) ConfigProviderSettings { - return ConfigProviderSettings{ - ResolverSettings: confmap.ResolverSettings{ - URIs: uris, - ProviderFactories: []confmap.ProviderFactory{ - fileprovider.NewFactory(), - envprovider.NewFactory(), - yamlprovider.NewFactory(), - httpprovider.NewFactory(), - httpsprovider.NewFactory(), - }, - ConverterFactories: []confmap.ConverterFactory{expandconverter.NewFactory()}, - }, - } -} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1experimental/profiles_service.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1experimental/profiles_service.pb.go new file mode 100644 index 00000000000..f998cd3f8c5 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1experimental/profiles_service.pb.go @@ -0,0 +1,845 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto + +package v1experimental + +import ( + context "context" + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + + v1experimental "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type ExportProfilesServiceRequest struct { + // An array of ResourceProfiles. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. 
+ ResourceProfiles []*v1experimental.ResourceProfiles `protobuf:"bytes,1,rep,name=resource_profiles,json=resourceProfiles,proto3" json:"resource_profiles,omitempty"` +} + +func (m *ExportProfilesServiceRequest) Reset() { *m = ExportProfilesServiceRequest{} } +func (m *ExportProfilesServiceRequest) String() string { return proto.CompactTextString(m) } +func (*ExportProfilesServiceRequest) ProtoMessage() {} +func (*ExportProfilesServiceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_3d903b74e05b443d, []int{0} +} +func (m *ExportProfilesServiceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExportProfilesServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExportProfilesServiceRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExportProfilesServiceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportProfilesServiceRequest.Merge(m, src) +} +func (m *ExportProfilesServiceRequest) XXX_Size() int { + return m.Size() +} +func (m *ExportProfilesServiceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExportProfilesServiceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportProfilesServiceRequest proto.InternalMessageInfo + +func (m *ExportProfilesServiceRequest) GetResourceProfiles() []*v1experimental.ResourceProfiles { + if m != nil { + return m.ResourceProfiles + } + return nil +} + +type ExportProfilesServiceResponse struct { + // The details of a partially successful export request. + // + // If the request is only partially accepted + // (i.e. when the server accepts only parts of the data and rejects the rest) + // the server MUST initialize the `partial_success` field and MUST + // set the `rejected_` with the number of items it rejected. + // + // Servers MAY also make use of the `partial_success` field to convey + // warnings/suggestions to senders even when the request was fully accepted. + // In such cases, the `rejected_` MUST have a value of `0` and + // the `error_message` MUST be non-empty. + // + // A `partial_success` message with an empty value (rejected_ = 0 and + // `error_message` = "") is equivalent to it not being set/present. Senders + // SHOULD interpret it the same way as in the full success case. 
+ PartialSuccess ExportProfilesPartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success"` +} + +func (m *ExportProfilesServiceResponse) Reset() { *m = ExportProfilesServiceResponse{} } +func (m *ExportProfilesServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ExportProfilesServiceResponse) ProtoMessage() {} +func (*ExportProfilesServiceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_3d903b74e05b443d, []int{1} +} +func (m *ExportProfilesServiceResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExportProfilesServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExportProfilesServiceResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExportProfilesServiceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportProfilesServiceResponse.Merge(m, src) +} +func (m *ExportProfilesServiceResponse) XXX_Size() int { + return m.Size() +} +func (m *ExportProfilesServiceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExportProfilesServiceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportProfilesServiceResponse proto.InternalMessageInfo + +func (m *ExportProfilesServiceResponse) GetPartialSuccess() ExportProfilesPartialSuccess { + if m != nil { + return m.PartialSuccess + } + return ExportProfilesPartialSuccess{} +} + +type ExportProfilesPartialSuccess struct { + // The number of rejected profiles. + // + // A `rejected_` field holding a `0` value indicates that the + // request was fully accepted. + RejectedProfiles int64 `protobuf:"varint,1,opt,name=rejected_profiles,json=rejectedProfiles,proto3" json:"rejected_profiles,omitempty"` + // A developer-facing human-readable message in English. It should be used + // either to explain why the server rejected parts of the data during a partial + // success or to convey warnings/suggestions during a full success. The message + // should offer guidance on how users can address such issues. + // + // error_message is an optional field. An error_message with an empty value + // is equivalent to it not being set. 
+ ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (m *ExportProfilesPartialSuccess) Reset() { *m = ExportProfilesPartialSuccess{} } +func (m *ExportProfilesPartialSuccess) String() string { return proto.CompactTextString(m) } +func (*ExportProfilesPartialSuccess) ProtoMessage() {} +func (*ExportProfilesPartialSuccess) Descriptor() ([]byte, []int) { + return fileDescriptor_3d903b74e05b443d, []int{2} +} +func (m *ExportProfilesPartialSuccess) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExportProfilesPartialSuccess) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExportProfilesPartialSuccess.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExportProfilesPartialSuccess) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportProfilesPartialSuccess.Merge(m, src) +} +func (m *ExportProfilesPartialSuccess) XXX_Size() int { + return m.Size() +} +func (m *ExportProfilesPartialSuccess) XXX_DiscardUnknown() { + xxx_messageInfo_ExportProfilesPartialSuccess.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportProfilesPartialSuccess proto.InternalMessageInfo + +func (m *ExportProfilesPartialSuccess) GetRejectedProfiles() int64 { + if m != nil { + return m.RejectedProfiles + } + return 0 +} + +func (m *ExportProfilesPartialSuccess) GetErrorMessage() string { + if m != nil { + return m.ErrorMessage + } + return "" +} + +func init() { + proto.RegisterType((*ExportProfilesServiceRequest)(nil), "opentelemetry.proto.collector.profiles.v1experimental.ExportProfilesServiceRequest") + proto.RegisterType((*ExportProfilesServiceResponse)(nil), "opentelemetry.proto.collector.profiles.v1experimental.ExportProfilesServiceResponse") + proto.RegisterType((*ExportProfilesPartialSuccess)(nil), "opentelemetry.proto.collector.profiles.v1experimental.ExportProfilesPartialSuccess") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto", fileDescriptor_3d903b74e05b443d) +} + +var fileDescriptor_3d903b74e05b443d = []byte{ + // 437 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x53, 0x3f, 0xcb, 0xd3, 0x40, + 0x18, 0xcf, 0xf5, 0x95, 0x17, 0xbc, 0xaa, 0xd5, 0xd0, 0xa1, 0x14, 0x8d, 0x25, 0x2e, 0x01, 0xe1, + 0x42, 0x2b, 0x05, 0x11, 0x5c, 0x2a, 0xdd, 0x14, 0x43, 0x5a, 0x1c, 0x44, 0x08, 0x31, 0x7d, 0x0c, + 0x29, 0x69, 0xee, 0xbc, 0xbb, 0x96, 0xba, 0x89, 0xa3, 0x93, 0x1f, 0xc2, 0xc9, 0xdd, 0xef, 0x50, + 0xb7, 0x8e, 0x4e, 0x22, 0xed, 0x17, 0x79, 0x49, 0xae, 0x09, 0x4d, 0x68, 0x29, 0x94, 0x6e, 0x77, + 0xbf, 0xe3, 0xf7, 0xe7, 0xf9, 0x1d, 0x0f, 0x7e, 0x4d, 0x19, 0x24, 0x12, 0x62, 0x98, 0x81, 0xe4, + 0x5f, 0x6c, 0xc6, 0xa9, 0xa4, 0x76, 0x40, 0xe3, 0x18, 0x02, 0x49, 0x79, 0x7a, 0xff, 0x14, 0xc5, + 0x20, 0xec, 0x45, 0x17, 0x96, 0x0c, 0x78, 0x34, 0x83, 0x44, 0xfa, 0x71, 0x81, 0x7b, 0x02, 0xf8, + 0x22, 0x0a, 0x80, 0x64, 0x44, 0xbd, 0x5f, 0x52, 0x53, 0x20, 0x29, 0xd4, 0x48, 0xce, 0x22, 0x65, + 0xb5, 0x76, 0x33, 0xa4, 0x21, 0x55, 0xd6, 0xe9, 0x49, 0xf1, 0xda, 0x2f, 0x0e, 0x45, 0x3b, 0x15, + 0x48, 0x71, 0xcd, 0xef, 0x08, 0x3f, 0x1c, 0x2e, 0x19, 0xe5, 0xd2, 0xd9, 0x3d, 0x8c, 0x54, 0x50, + 0x17, 0x3e, 0xcf, 0x41, 0x48, 0x7d, 0x8a, 0x1f, 0x70, 0x10, 0x74, 0xce, 0x03, 0xf0, 0x72, 0x6e, + 0x0b, 0x75, 0xae, 0xac, 0x7a, 0xef, 0x25, 0x39, 0x34, 0xc5, 
0x91, 0xec, 0xc4, 0xdd, 0xa9, 0xe4, + 0x3e, 0xee, 0x7d, 0x5e, 0x41, 0xcc, 0x9f, 0x08, 0x3f, 0x3a, 0x12, 0x46, 0x30, 0x9a, 0x08, 0xd0, + 0xbf, 0x21, 0xdc, 0x60, 0x3e, 0x97, 0x91, 0x1f, 0x7b, 0x62, 0x1e, 0x04, 0x20, 0xd2, 0x30, 0xc8, + 0xaa, 0xf7, 0x46, 0xe4, 0xac, 0x4a, 0x49, 0xd9, 0xcf, 0x51, 0xda, 0x23, 0x25, 0x3d, 0xb8, 0xb5, + 0xfa, 0xf7, 0x58, 0x73, 0xef, 0xb1, 0x12, 0x6a, 0xb2, 0x6a, 0x65, 0x65, 0x96, 0xfe, 0x34, 0xad, + 0x6c, 0x0a, 0x81, 0x84, 0xc9, 0x7e, 0x65, 0xc8, 0xba, 0x4a, 0x67, 0x56, 0x0f, 0x39, 0x55, 0x7f, + 0x82, 0xef, 0x02, 0xe7, 0x94, 0x7b, 0x33, 0x10, 0xc2, 0x0f, 0xa1, 0x55, 0xeb, 0x20, 0xeb, 0xb6, + 0x7b, 0x27, 0x03, 0xdf, 0x28, 0xac, 0xf7, 0x07, 0xe1, 0x46, 0xa5, 0x12, 0xfd, 0x37, 0xc2, 0xd7, + 0x2a, 0x86, 0x7e, 0x99, 0xd9, 0xcb, 0x1f, 0xdf, 0x1e, 0x5f, 0x56, 0x54, 0x7d, 0xa0, 0xa9, 0x0d, + 0xbe, 0xd6, 0x56, 0x1b, 0x03, 0xad, 0x37, 0x06, 0xfa, 0xbf, 0x31, 0xd0, 0x8f, 0xad, 0xa1, 0xad, + 0xb7, 0x86, 0xf6, 0x77, 0x6b, 0x68, 0xf8, 0x79, 0x44, 0xcf, 0x33, 0x1d, 0x34, 0x2b, 0x7e, 0x4e, + 0xca, 0x73, 0xd0, 0xfb, 0x0f, 0x61, 0x55, 0x31, 0x2a, 0x6d, 0xed, 0xc4, 0x97, 0xbe, 0x1d, 0x25, + 0x12, 0x78, 0xe2, 0xc7, 0x76, 0x76, 0xcb, 0x2c, 0x43, 0x48, 0x4e, 0x2f, 0xf7, 0xaf, 0x5a, 0xff, + 0x2d, 0x83, 0x64, 0x5c, 0x68, 0x67, 0xae, 0xe4, 0x55, 0x91, 0x36, 0x0f, 0x45, 0xde, 0x75, 0x87, + 0x7b, 0xbc, 0x8f, 0xd7, 0x99, 0xc7, 0xb3, 0x9b, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8e, 0x3d, 0x57, + 0xb0, 0x54, 0x04, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ProfilesServiceClient is the client API for ProfilesService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ProfilesServiceClient interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, in *ExportProfilesServiceRequest, opts ...grpc.CallOption) (*ExportProfilesServiceResponse, error) +} + +type profilesServiceClient struct { + cc *grpc.ClientConn +} + +func NewProfilesServiceClient(cc *grpc.ClientConn) ProfilesServiceClient { + return &profilesServiceClient{cc} +} + +func (c *profilesServiceClient) Export(ctx context.Context, in *ExportProfilesServiceRequest, opts ...grpc.CallOption) (*ExportProfilesServiceResponse, error) { + out := new(ExportProfilesServiceResponse) + err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.profiles.v1experimental.ProfilesService/Export", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ProfilesServiceServer is the server API for ProfilesService service. +type ProfilesServiceServer interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(context.Context, *ExportProfilesServiceRequest) (*ExportProfilesServiceResponse, error) +} + +// UnimplementedProfilesServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedProfilesServiceServer struct { +} + +func (*UnimplementedProfilesServiceServer) Export(ctx context.Context, req *ExportProfilesServiceRequest) (*ExportProfilesServiceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") +} + +func RegisterProfilesServiceServer(s *grpc.Server, srv ProfilesServiceServer) { + s.RegisterService(&_ProfilesService_serviceDesc, srv) +} + +func _ProfilesService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExportProfilesServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProfilesServiceServer).Export(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/opentelemetry.proto.collector.profiles.v1experimental.ProfilesService/Export", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProfilesServiceServer).Export(ctx, req.(*ExportProfilesServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ProfilesService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "opentelemetry.proto.collector.profiles.v1experimental.ProfilesService", + HandlerType: (*ProfilesServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Export", + Handler: _ProfilesService_Export_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto", +} + +func (m *ExportProfilesServiceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExportProfilesServiceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExportProfilesServiceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourceProfiles) > 0 { + for iNdEx := len(m.ResourceProfiles) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceProfiles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfilesService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ExportProfilesServiceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExportProfilesServiceResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExportProfilesServiceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.PartialSuccess.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfilesService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExportProfilesPartialSuccess) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExportProfilesPartialSuccess) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExportProfilesPartialSuccess) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ErrorMessage) > 0 { + i -= len(m.ErrorMessage) + copy(dAtA[i:], m.ErrorMessage) + i = encodeVarintProfilesService(dAtA, i, uint64(len(m.ErrorMessage))) + i-- + dAtA[i] = 0x12 + } + if m.RejectedProfiles != 0 { + i = encodeVarintProfilesService(dAtA, i, uint64(m.RejectedProfiles)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintProfilesService(dAtA []byte, offset int, v uint64) int { + offset -= sovProfilesService(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ExportProfilesServiceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResourceProfiles) > 0 { + for _, e := range m.ResourceProfiles { + l = e.Size() + n += 1 + l + sovProfilesService(uint64(l)) + } + } + return n +} + +func (m *ExportProfilesServiceResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PartialSuccess.Size() + n += 1 + l + sovProfilesService(uint64(l)) + return n +} + +func (m *ExportProfilesPartialSuccess) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RejectedProfiles != 0 { + n += 1 + sovProfilesService(uint64(m.RejectedProfiles)) + } + l = len(m.ErrorMessage) + if l > 0 { + n += 1 + l + sovProfilesService(uint64(l)) + } + return n +} + +func sovProfilesService(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozProfilesService(x uint64) (n int) { + return sovProfilesService(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ExportProfilesServiceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfilesService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExportProfilesServiceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExportProfilesServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceProfiles", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfilesService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfilesService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfilesService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceProfiles = append(m.ResourceProfiles, &v1experimental.ResourceProfiles{}) + if err := m.ResourceProfiles[len(m.ResourceProfiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProfilesService(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProfilesService + } + if (iNdEx + skippy) > l { + 
return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExportProfilesServiceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfilesService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExportProfilesServiceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExportProfilesServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfilesService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfilesService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfilesService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PartialSuccess.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProfilesService(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProfilesService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExportProfilesPartialSuccess) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfilesService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExportProfilesPartialSuccess: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExportProfilesPartialSuccess: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RejectedProfiles", wireType) + } + m.RejectedProfiles = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfilesService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RejectedProfiles |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfilesService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthProfilesService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProfilesService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ErrorMessage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProfilesService(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProfilesService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipProfilesService(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProfilesService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProfilesService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProfilesService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthProfilesService + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupProfilesService + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthProfilesService + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthProfilesService = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowProfilesService = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupProfilesService = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental/pprofextended.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental/pprofextended.pb.go new file mode 100644 index 00000000000..64c91b3221e --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental/pprofextended.pb.go @@ -0,0 +1,4919 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/profiles/v1experimental/pprofextended.proto + +package v1experimental + +import ( + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + + go_opentelemetry_io_collector_pdata_internal_data "go.opentelemetry.io/collector/pdata/internal/data" + v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Specifies the method of aggregating metric values, either DELTA (change since last report) +// or CUMULATIVE (total since a fixed start time). +type AggregationTemporality int32 + +const ( + // UNSPECIFIED is the default AggregationTemporality, it MUST not be used. + AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED AggregationTemporality = 0 + //* DELTA is an AggregationTemporality for a profiler which reports + //changes since last report time. Successive metrics contain aggregation of + //values from continuous and non-overlapping intervals. + // + //The values for a DELTA metric are based only on the time interval + //associated with one measurement cycle. There is no dependency on + //previous measurements like is the case for CUMULATIVE metrics. + // + //For example, consider a system measuring the number of requests that + //it receives and reports the sum of these requests every second as a + //DELTA metric: + // + //1. The system starts receiving at time=t_0. + //2. A request is received, the system measures 1 request. + //3. A request is received, the system measures 1 request. + //4. A request is received, the system measures 1 request. + //5. The 1 second collection cycle ends. A metric is exported for the + //number of requests received over the interval of time t_0 to + //t_0+1 with a value of 3. + //6. A request is received, the system measures 1 request. + //7. A request is received, the system measures 1 request. + //8. The 1 second collection cycle ends. A metric is exported for the + //number of requests received over the interval of time t_0+1 to + //t_0+2 with a value of 2. + AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA AggregationTemporality = 1 + //* CUMULATIVE is an AggregationTemporality for a profiler which + //reports changes since a fixed start time. This means that current values + //of a CUMULATIVE metric depend on all previous measurements since the + //start time. Because of this, the sender is required to retain this state + //in some form. If this state is lost or invalidated, the CUMULATIVE metric + //values MUST be reset and a new fixed start time following the last + //reported measurement time sent MUST be used. + // + //For example, consider a system measuring the number of requests that + //it receives and reports the sum of these requests every second as a + //CUMULATIVE metric: + // + //1. The system starts receiving at time=t_0. + //2. A request is received, the system measures 1 request. + //3. A request is received, the system measures 1 request. + //4. A request is received, the system measures 1 request. + //5. The 1 second collection cycle ends. A metric is exported for the + //number of requests received over the interval of time t_0 to + //t_0+1 with a value of 3. + //6. A request is received, the system measures 1 request. + //7. A request is received, the system measures 1 request. + //8. The 1 second collection cycle ends. A metric is exported for the + //number of requests received over the interval of time t_0 to + //t_0+2 with a value of 5. + //9. The system experiences a fault and loses state. + //10. The system recovers and resumes receiving at time=t_1. + //11. A request is received, the system measures 1 request. + //12. The 1 second collection cycle ends. 
A metric is exported for the + //number of requests received over the interval of time t_1 to + //t_0+1 with a value of 1. + // + //Note: Even though, when reporting changes since last report time, using + //CUMULATIVE is valid, it is not recommended. + AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE AggregationTemporality = 2 +) + +var AggregationTemporality_name = map[int32]string{ + 0: "AGGREGATION_TEMPORALITY_UNSPECIFIED", + 1: "AGGREGATION_TEMPORALITY_DELTA", + 2: "AGGREGATION_TEMPORALITY_CUMULATIVE", +} + +var AggregationTemporality_value = map[string]int32{ + "AGGREGATION_TEMPORALITY_UNSPECIFIED": 0, + "AGGREGATION_TEMPORALITY_DELTA": 1, + "AGGREGATION_TEMPORALITY_CUMULATIVE": 2, +} + +func (x AggregationTemporality) String() string { + return proto.EnumName(AggregationTemporality_name, int32(x)) +} + +func (AggregationTemporality) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_05f9ce3fdbeb046f, []int{0} +} + +// Indicates the semantics of the build_id field. +type BuildIdKind int32 + +const ( + // Linker-generated build ID, stored in the ELF binary notes. + BuildIdKind_BUILD_ID_LINKER BuildIdKind = 0 + // Build ID based on the content hash of the binary. Currently no particular + // hashing approach is standardized, so a given producer needs to define it + // themselves and thus unlike BUILD_ID_LINKER this kind of hash is producer-specific. + // We may choose to provide a standardized stable hash recommendation later. + BuildIdKind_BUILD_ID_BINARY_HASH BuildIdKind = 1 +) + +var BuildIdKind_name = map[int32]string{ + 0: "BUILD_ID_LINKER", + 1: "BUILD_ID_BINARY_HASH", +} + +var BuildIdKind_value = map[string]int32{ + "BUILD_ID_LINKER": 0, + "BUILD_ID_BINARY_HASH": 1, +} + +func (x BuildIdKind) String() string { + return proto.EnumName(BuildIdKind_name, int32(x)) +} + +func (BuildIdKind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_05f9ce3fdbeb046f, []int{1} +} + +// Represents a complete profile, including sample types, samples, +// mappings to binaries, locations, functions, string table, and additional metadata. +type Profile struct { + // A description of the samples associated with each Sample.value. + // For a cpu profile this might be: + // [["cpu","nanoseconds"]] or [["wall","seconds"]] or [["syscall","count"]] + // For a heap profile, this might be: + // [["allocations","count"], ["space","bytes"]], + // If one of the values represents the number of events represented + // by the sample, by convention it should be at index 0 and use + // sample_type.unit == "count". + SampleType []ValueType `protobuf:"bytes,1,rep,name=sample_type,json=sampleType,proto3" json:"sample_type"` + // The set of samples recorded in this profile. + Sample []Sample `protobuf:"bytes,2,rep,name=sample,proto3" json:"sample"` + // Mapping from address ranges to the image/binary/library mapped + // into that address range. mapping[0] will be the main binary. + Mapping []Mapping `protobuf:"bytes,3,rep,name=mapping,proto3" json:"mapping"` + // Locations referenced by samples via location_indices. + Location []Location `protobuf:"bytes,4,rep,name=location,proto3" json:"location"` + // Array of locations referenced by samples. + LocationIndices []int64 `protobuf:"varint,15,rep,packed,name=location_indices,json=locationIndices,proto3" json:"location_indices,omitempty"` + // Functions referenced by locations. + Function []Function `protobuf:"bytes,5,rep,name=function,proto3" json:"function"` + // Lookup table for attributes. 
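+ // Referencing messages (samples, mappings, locations) store indices
+ // into this table rather than embedding KeyValues directly. A minimal
+ // lookup sketch, assuming a populated Profile p and a Sample s (both
+ // hypothetical values):
+ //
+ //	for _, idx := range s.Attributes {
+ //		kv := p.AttributeTable[idx]
+ //		_ = kv.Key // the attribute key; kv.Value holds the AnyValue
+ //	}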
+ AttributeTable []v1.KeyValue `protobuf:"bytes,16,rep,name=attribute_table,json=attributeTable,proto3" json:"attribute_table"` + // Represents a mapping between Attribute Keys and Units. + AttributeUnits []AttributeUnit `protobuf:"bytes,17,rep,name=attribute_units,json=attributeUnits,proto3" json:"attribute_units"` + // Lookup table for links. + LinkTable []Link `protobuf:"bytes,18,rep,name=link_table,json=linkTable,proto3" json:"link_table"` + // A common table for strings referenced by various messages. + // string_table[0] must always be "". + StringTable []string `protobuf:"bytes,6,rep,name=string_table,json=stringTable,proto3" json:"string_table,omitempty"` + // frames with Function.function_name fully matching the following + // regexp will be dropped from the samples, along with their successors. + DropFrames int64 `protobuf:"varint,7,opt,name=drop_frames,json=dropFrames,proto3" json:"drop_frames,omitempty"` + // frames with Function.function_name fully matching the following + // regexp will be kept, even if it matches drop_frames. + KeepFrames int64 `protobuf:"varint,8,opt,name=keep_frames,json=keepFrames,proto3" json:"keep_frames,omitempty"` + // Time of collection (UTC) represented as nanoseconds past the epoch. + TimeNanos int64 `protobuf:"varint,9,opt,name=time_nanos,json=timeNanos,proto3" json:"time_nanos,omitempty"` + // Duration of the profile, if a duration makes sense. + DurationNanos int64 `protobuf:"varint,10,opt,name=duration_nanos,json=durationNanos,proto3" json:"duration_nanos,omitempty"` + // The kind of events between sampled occurrences. + // e.g [ "cpu","cycles" ] or [ "heap","bytes" ] + PeriodType ValueType `protobuf:"bytes,11,opt,name=period_type,json=periodType,proto3" json:"period_type"` + // The number of events between sampled occurrences. + Period int64 `protobuf:"varint,12,opt,name=period,proto3" json:"period,omitempty"` + // Free-form text associated with the profile. The text is displayed as is + // to the user by the tools that read profiles (e.g. by pprof). This field + // should not be used to store any machine-readable information, it is only + // for human-friendly content. The profile must stay functional if this field + // is cleaned. + Comment []int64 `protobuf:"varint,13,rep,packed,name=comment,proto3" json:"comment,omitempty"` + // Index into the string table of the type of the preferred sample + // value. If unset, clients should default to the last sample value. 
+ DefaultSampleType int64 `protobuf:"varint,14,opt,name=default_sample_type,json=defaultSampleType,proto3" json:"default_sample_type,omitempty"` +} + +func (m *Profile) Reset() { *m = Profile{} } +func (m *Profile) String() string { return proto.CompactTextString(m) } +func (*Profile) ProtoMessage() {} +func (*Profile) Descriptor() ([]byte, []int) { + return fileDescriptor_05f9ce3fdbeb046f, []int{0} +} +func (m *Profile) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Profile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Profile.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Profile) XXX_Merge(src proto.Message) { + xxx_messageInfo_Profile.Merge(m, src) +} +func (m *Profile) XXX_Size() int { + return m.Size() +} +func (m *Profile) XXX_DiscardUnknown() { + xxx_messageInfo_Profile.DiscardUnknown(m) +} + +var xxx_messageInfo_Profile proto.InternalMessageInfo + +func (m *Profile) GetSampleType() []ValueType { + if m != nil { + return m.SampleType + } + return nil +} + +func (m *Profile) GetSample() []Sample { + if m != nil { + return m.Sample + } + return nil +} + +func (m *Profile) GetMapping() []Mapping { + if m != nil { + return m.Mapping + } + return nil +} + +func (m *Profile) GetLocation() []Location { + if m != nil { + return m.Location + } + return nil +} + +func (m *Profile) GetLocationIndices() []int64 { + if m != nil { + return m.LocationIndices + } + return nil +} + +func (m *Profile) GetFunction() []Function { + if m != nil { + return m.Function + } + return nil +} + +func (m *Profile) GetAttributeTable() []v1.KeyValue { + if m != nil { + return m.AttributeTable + } + return nil +} + +func (m *Profile) GetAttributeUnits() []AttributeUnit { + if m != nil { + return m.AttributeUnits + } + return nil +} + +func (m *Profile) GetLinkTable() []Link { + if m != nil { + return m.LinkTable + } + return nil +} + +func (m *Profile) GetStringTable() []string { + if m != nil { + return m.StringTable + } + return nil +} + +func (m *Profile) GetDropFrames() int64 { + if m != nil { + return m.DropFrames + } + return 0 +} + +func (m *Profile) GetKeepFrames() int64 { + if m != nil { + return m.KeepFrames + } + return 0 +} + +func (m *Profile) GetTimeNanos() int64 { + if m != nil { + return m.TimeNanos + } + return 0 +} + +func (m *Profile) GetDurationNanos() int64 { + if m != nil { + return m.DurationNanos + } + return 0 +} + +func (m *Profile) GetPeriodType() ValueType { + if m != nil { + return m.PeriodType + } + return ValueType{} +} + +func (m *Profile) GetPeriod() int64 { + if m != nil { + return m.Period + } + return 0 +} + +func (m *Profile) GetComment() []int64 { + if m != nil { + return m.Comment + } + return nil +} + +func (m *Profile) GetDefaultSampleType() int64 { + if m != nil { + return m.DefaultSampleType + } + return 0 +} + +// Represents a mapping between Attribute Keys and Units. +type AttributeUnit struct { + // Index into string table. + AttributeKey int64 `protobuf:"varint,1,opt,name=attribute_key,json=attributeKey,proto3" json:"attribute_key,omitempty"` + // Index into string table. 
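+ // Both fields resolve through Profile.StringTable. A minimal sketch,
+ // assuming a populated Profile p and an AttributeUnit au (hypothetical
+ // values):
+ //
+ //	key := p.StringTable[au.AttributeKey]
+ //	unit := p.StringTable[au.Unit]
+ //	_ = key + " is measured in " + unit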
+ Unit int64 `protobuf:"varint,2,opt,name=unit,proto3" json:"unit,omitempty"` +} + +func (m *AttributeUnit) Reset() { *m = AttributeUnit{} } +func (m *AttributeUnit) String() string { return proto.CompactTextString(m) } +func (*AttributeUnit) ProtoMessage() {} +func (*AttributeUnit) Descriptor() ([]byte, []int) { + return fileDescriptor_05f9ce3fdbeb046f, []int{1} +} +func (m *AttributeUnit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AttributeUnit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AttributeUnit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AttributeUnit) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttributeUnit.Merge(m, src) +} +func (m *AttributeUnit) XXX_Size() int { + return m.Size() +} +func (m *AttributeUnit) XXX_DiscardUnknown() { + xxx_messageInfo_AttributeUnit.DiscardUnknown(m) +} + +var xxx_messageInfo_AttributeUnit proto.InternalMessageInfo + +func (m *AttributeUnit) GetAttributeKey() int64 { + if m != nil { + return m.AttributeKey + } + return 0 +} + +func (m *AttributeUnit) GetUnit() int64 { + if m != nil { + return m.Unit + } + return 0 +} + +// A pointer from a profile Sample to a trace Span. +// Connects a profile sample to a trace span, identified by unique trace and span IDs. +type Link struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceId go_opentelemetry_io_collector_pdata_internal_data.TraceID `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.TraceID" json:"trace_id"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"` +} + +func (m *Link) Reset() { *m = Link{} } +func (m *Link) String() string { return proto.CompactTextString(m) } +func (*Link) ProtoMessage() {} +func (*Link) Descriptor() ([]byte, []int) { + return fileDescriptor_05f9ce3fdbeb046f, []int{2} +} +func (m *Link) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Link.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Link) XXX_Merge(src proto.Message) { + xxx_messageInfo_Link.Merge(m, src) +} +func (m *Link) XXX_Size() int { + return m.Size() +} +func (m *Link) XXX_DiscardUnknown() { + xxx_messageInfo_Link.DiscardUnknown(m) +} + +var xxx_messageInfo_Link proto.InternalMessageInfo + +// ValueType describes the type and units of a value, with an optional aggregation temporality. 
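+// Type and Unit are indices into Profile.StringTable. A minimal
+// construction sketch, assuming hypothetical int64 string-table indices
+// typeIdx and unitIdx that point at, say, "cpu" and "nanoseconds":
+//
+//	vt := ValueType{
+//		Type:                   typeIdx,
+//		Unit:                   unitIdx,
+//		AggregationTemporality: AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA,
+//	}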
+type ValueType struct { + Type int64 `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"` + Unit int64 `protobuf:"varint,2,opt,name=unit,proto3" json:"unit,omitempty"` + AggregationTemporality AggregationTemporality `protobuf:"varint,3,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.profiles.v1experimental.AggregationTemporality" json:"aggregation_temporality,omitempty"` +} + +func (m *ValueType) Reset() { *m = ValueType{} } +func (m *ValueType) String() string { return proto.CompactTextString(m) } +func (*ValueType) ProtoMessage() {} +func (*ValueType) Descriptor() ([]byte, []int) { + return fileDescriptor_05f9ce3fdbeb046f, []int{3} +} +func (m *ValueType) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValueType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ValueType.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ValueType) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValueType.Merge(m, src) +} +func (m *ValueType) XXX_Size() int { + return m.Size() +} +func (m *ValueType) XXX_DiscardUnknown() { + xxx_messageInfo_ValueType.DiscardUnknown(m) +} + +var xxx_messageInfo_ValueType proto.InternalMessageInfo + +func (m *ValueType) GetType() int64 { + if m != nil { + return m.Type + } + return 0 +} + +func (m *ValueType) GetUnit() int64 { + if m != nil { + return m.Unit + } + return 0 +} + +func (m *ValueType) GetAggregationTemporality() AggregationTemporality { + if m != nil { + return m.AggregationTemporality + } + return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED +} + +// Each Sample records values encountered in some program +// context. The program context is typically a stack trace, perhaps +// augmented with auxiliary information like the thread-id, some +// indicator of a higher level request being handled etc. +type Sample struct { + // The indices recorded here correspond to locations in Profile.location. + // The leaf is at location_index[0]. [deprecated, superseded by locations_start_index / locations_length] + LocationIndex []uint64 `protobuf:"varint,1,rep,packed,name=location_index,json=locationIndex,proto3" json:"location_index,omitempty"` + // locations_start_index along with locations_length refers to a slice of locations in Profile.location. + // Supersedes location_index. + LocationsStartIndex uint64 `protobuf:"varint,7,opt,name=locations_start_index,json=locationsStartIndex,proto3" json:"locations_start_index,omitempty"` + // locations_length along with locations_start_index refers to a slice of locations in Profile.location. + // Supersedes location_index. + LocationsLength uint64 `protobuf:"varint,8,opt,name=locations_length,json=locationsLength,proto3" json:"locations_length,omitempty"` + // A 128bit id that uniquely identifies this stacktrace, globally. Index into string table. [optional] + StacktraceIdIndex uint32 `protobuf:"varint,9,opt,name=stacktrace_id_index,json=stacktraceIdIndex,proto3" json:"stacktrace_id_index,omitempty"` + // The type and unit of each value are defined by the corresponding + // entry in Profile.sample_type. All samples must have the same + // number of values, the same as the length of Profile.sample_type.
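+ // For instance, with sample_type [["cpu","nanoseconds"]] every sample
+ // carries exactly one value; with [["allocations","count"],
+ // ["space","bytes"]] it carries two, in that order. A minimal read
+ // sketch, assuming a populated Profile p (hypothetical):
+ //
+ //	for _, s := range p.Sample {
+ //		for j, v := range s.Value {
+ //			_ = p.StringTable[p.SampleType[j].Type] // value's type, e.g. "cpu"
+ //			_ = v
+ //		}
+ //	}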
+ // When aggregating multiple samples into a single sample, the + // result has a list of values that is the element-wise sum of the + // lists of the originals. + Value []int64 `protobuf:"varint,2,rep,packed,name=value,proto3" json:"value,omitempty"` + // label includes additional context for this sample. It can include + // things like a thread id, allocation size, etc. + // + // NOTE: While possible, having multiple values for the same label key is + // strongly discouraged and should never be used. Most tools (e.g. pprof) do + // not have good (or any) support for multi-value labels. And an even more + // discouraged case is having a string label and a numeric label of the same + // name on a sample. Again, possible to express, but should not be used. + // [deprecated, superseded by attributes] + Label []Label `protobuf:"bytes,3,rep,name=label,proto3" json:"label"` + // References to attributes in Profile.attribute_table. [optional] + Attributes []uint64 `protobuf:"varint,10,rep,packed,name=attributes,proto3" json:"attributes,omitempty"` + // Reference to link in Profile.link_table. [optional] + Link uint64 `protobuf:"varint,12,opt,name=link,proto3" json:"link,omitempty"` + // Timestamps associated with Sample represented in nanoseconds. These timestamps are expected + // to fall within the Profile's time range. [optional] + TimestampsUnixNano []uint64 `protobuf:"varint,13,rep,packed,name=timestamps_unix_nano,json=timestampsUnixNano,proto3" json:"timestamps_unix_nano,omitempty"` +} + +func (m *Sample) Reset() { *m = Sample{} } +func (m *Sample) String() string { return proto.CompactTextString(m) } +func (*Sample) ProtoMessage() {} +func (*Sample) Descriptor() ([]byte, []int) { + return fileDescriptor_05f9ce3fdbeb046f, []int{4} +} +func (m *Sample) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Sample.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Sample) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sample.Merge(m, src) +} +func (m *Sample) XXX_Size() int { + return m.Size() +} +func (m *Sample) XXX_DiscardUnknown() { + xxx_messageInfo_Sample.DiscardUnknown(m) +} + +var xxx_messageInfo_Sample proto.InternalMessageInfo + +func (m *Sample) GetLocationIndex() []uint64 { + if m != nil { + return m.LocationIndex + } + return nil +} + +func (m *Sample) GetLocationsStartIndex() uint64 { + if m != nil { + return m.LocationsStartIndex + } + return 0 +} + +func (m *Sample) GetLocationsLength() uint64 { + if m != nil { + return m.LocationsLength + } + return 0 +} + +func (m *Sample) GetStacktraceIdIndex() uint32 { + if m != nil { + return m.StacktraceIdIndex + } + return 0 +} + +func (m *Sample) GetValue() []int64 { + if m != nil { + return m.Value + } + return nil +} + +func (m *Sample) GetLabel() []Label { + if m != nil { + return m.Label + } + return nil +} + +func (m *Sample) GetAttributes() []uint64 { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Sample) GetLink() uint64 { + if m != nil { + return m.Link + } + return 0 +} + +func (m *Sample) GetTimestampsUnixNano() []uint64 { + if m != nil { + return m.TimestampsUnixNano + } + return nil +} + +// Provides additional context for a sample, +// such as thread ID or allocation size, with optional units. 
[deprecated] +type Label struct { + Key int64 `protobuf:"varint,1,opt,name=key,proto3" json:"key,omitempty"` + // At most one of the following must be present + Str int64 `protobuf:"varint,2,opt,name=str,proto3" json:"str,omitempty"` + Num int64 `protobuf:"varint,3,opt,name=num,proto3" json:"num,omitempty"` + // Should only be present when num is present. + // Specifies the units of num. + // Use arbitrary string (for example, "requests") as a custom count unit. + // If no unit is specified, consumer may apply heuristic to deduce the unit. + // Consumers may also interpret units like "bytes" and "kilobytes" as memory + // units and units like "seconds" and "nanoseconds" as time units, + // and apply appropriate unit conversions to these. + NumUnit int64 `protobuf:"varint,4,opt,name=num_unit,json=numUnit,proto3" json:"num_unit,omitempty"` +} + +func (m *Label) Reset() { *m = Label{} } +func (m *Label) String() string { return proto.CompactTextString(m) } +func (*Label) ProtoMessage() {} +func (*Label) Descriptor() ([]byte, []int) { + return fileDescriptor_05f9ce3fdbeb046f, []int{5} +} +func (m *Label) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Label) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Label.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Label) XXX_Merge(src proto.Message) { + xxx_messageInfo_Label.Merge(m, src) +} +func (m *Label) XXX_Size() int { + return m.Size() +} +func (m *Label) XXX_DiscardUnknown() { + xxx_messageInfo_Label.DiscardUnknown(m) +} + +var xxx_messageInfo_Label proto.InternalMessageInfo + +func (m *Label) GetKey() int64 { + if m != nil { + return m.Key + } + return 0 +} + +func (m *Label) GetStr() int64 { + if m != nil { + return m.Str + } + return 0 +} + +func (m *Label) GetNum() int64 { + if m != nil { + return m.Num + } + return 0 +} + +func (m *Label) GetNumUnit() int64 { + if m != nil { + return m.NumUnit + } + return 0 +} + +// Describes the mapping of a binary in memory, including its address range, +// file offset, and metadata like build ID +type Mapping struct { + // Unique nonzero id for the mapping. [deprecated] + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // Address at which the binary (or DLL) is loaded into memory. + MemoryStart uint64 `protobuf:"varint,2,opt,name=memory_start,json=memoryStart,proto3" json:"memory_start,omitempty"` + // The limit of the address range occupied by this mapping. + MemoryLimit uint64 `protobuf:"varint,3,opt,name=memory_limit,json=memoryLimit,proto3" json:"memory_limit,omitempty"` + // Offset in the binary that corresponds to the first mapped address. + FileOffset uint64 `protobuf:"varint,4,opt,name=file_offset,json=fileOffset,proto3" json:"file_offset,omitempty"` + // The object this entry is loaded from. This can be a filename on + // disk for the main binary and shared libraries, or virtual + // abstractions like "[vdso]". + Filename int64 `protobuf:"varint,5,opt,name=filename,proto3" json:"filename,omitempty"` + // A string that uniquely identifies a particular program version + // with high probability. E.g., for binaries generated by GNU tools, + // it could be the contents of the .note.gnu.build-id field. + BuildId int64 `protobuf:"varint,6,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` + // Specifies the kind of build id. 
See BuildIdKind enum for more details [optional] + BuildIdKind BuildIdKind `protobuf:"varint,11,opt,name=build_id_kind,json=buildIdKind,proto3,enum=opentelemetry.proto.profiles.v1experimental.BuildIdKind" json:"build_id_kind,omitempty"` + // References to attributes in Profile.attribute_table. [optional] + Attributes []uint64 `protobuf:"varint,12,rep,packed,name=attributes,proto3" json:"attributes,omitempty"` + // The following fields indicate the resolution of symbolic info. + HasFunctions bool `protobuf:"varint,7,opt,name=has_functions,json=hasFunctions,proto3" json:"has_functions,omitempty"` + HasFilenames bool `protobuf:"varint,8,opt,name=has_filenames,json=hasFilenames,proto3" json:"has_filenames,omitempty"` + HasLineNumbers bool `protobuf:"varint,9,opt,name=has_line_numbers,json=hasLineNumbers,proto3" json:"has_line_numbers,omitempty"` + HasInlineFrames bool `protobuf:"varint,10,opt,name=has_inline_frames,json=hasInlineFrames,proto3" json:"has_inline_frames,omitempty"` +} + +func (m *Mapping) Reset() { *m = Mapping{} } +func (m *Mapping) String() string { return proto.CompactTextString(m) } +func (*Mapping) ProtoMessage() {} +func (*Mapping) Descriptor() ([]byte, []int) { + return fileDescriptor_05f9ce3fdbeb046f, []int{6} +} +func (m *Mapping) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Mapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Mapping.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Mapping) XXX_Merge(src proto.Message) { + xxx_messageInfo_Mapping.Merge(m, src) +} +func (m *Mapping) XXX_Size() int { + return m.Size() +} +func (m *Mapping) XXX_DiscardUnknown() { + xxx_messageInfo_Mapping.DiscardUnknown(m) +} + +var xxx_messageInfo_Mapping proto.InternalMessageInfo + +func (m *Mapping) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Mapping) GetMemoryStart() uint64 { + if m != nil { + return m.MemoryStart + } + return 0 +} + +func (m *Mapping) GetMemoryLimit() uint64 { + if m != nil { + return m.MemoryLimit + } + return 0 +} + +func (m *Mapping) GetFileOffset() uint64 { + if m != nil { + return m.FileOffset + } + return 0 +} + +func (m *Mapping) GetFilename() int64 { + if m != nil { + return m.Filename + } + return 0 +} + +func (m *Mapping) GetBuildId() int64 { + if m != nil { + return m.BuildId + } + return 0 +} + +func (m *Mapping) GetBuildIdKind() BuildIdKind { + if m != nil { + return m.BuildIdKind + } + return BuildIdKind_BUILD_ID_LINKER +} + +func (m *Mapping) GetAttributes() []uint64 { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Mapping) GetHasFunctions() bool { + if m != nil { + return m.HasFunctions + } + return false +} + +func (m *Mapping) GetHasFilenames() bool { + if m != nil { + return m.HasFilenames + } + return false +} + +func (m *Mapping) GetHasLineNumbers() bool { + if m != nil { + return m.HasLineNumbers + } + return false +} + +func (m *Mapping) GetHasInlineFrames() bool { + if m != nil { + return m.HasInlineFrames + } + return false +} + +// Describes function and line table debug information. +type Location struct { + // Unique nonzero id for the location. A profile could use + // instruction addresses or any integer sequence as ids. [deprecated] + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // The index of the corresponding profile.Mapping for this location. 
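+ // (Resolution sketch, assuming a populated Profile p and a Location
+ // loc, both hypothetical: mapping := p.Mapping[loc.MappingIndex].)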
+ // It can be unset if the mapping is unknown or not applicable for + // this profile type. + MappingIndex uint64 `protobuf:"varint,2,opt,name=mapping_index,json=mappingIndex,proto3" json:"mapping_index,omitempty"` + // The instruction address for this location, if available. It + // should be within [Mapping.memory_start...Mapping.memory_limit] + // for the corresponding mapping. A non-leaf address may be in the + // middle of a call instruction. It is up to display tools to find + // the beginning of the instruction if necessary. + Address uint64 `protobuf:"varint,3,opt,name=address,proto3" json:"address,omitempty"` + // Multiple line indicates this location has inlined functions, + // where the last entry represents the caller into which the + // preceding entries were inlined. + // + // E.g., if memcpy() is inlined into printf: + // line[0].function_name == "memcpy" + // line[1].function_name == "printf" + Line []Line `protobuf:"bytes,4,rep,name=line,proto3" json:"line"` + // Provides an indication that multiple symbols map to this location's + // address, for example due to identical code folding by the linker. In that + // case the line information above represents one of the multiple + // symbols. This field must be recomputed when the symbolization state of the + // profile changes. + IsFolded bool `protobuf:"varint,5,opt,name=is_folded,json=isFolded,proto3" json:"is_folded,omitempty"` + // Type of frame (e.g. kernel, native, python, hotspot, php). Index into string table. + TypeIndex uint32 `protobuf:"varint,6,opt,name=type_index,json=typeIndex,proto3" json:"type_index,omitempty"` + // References to attributes in Profile.attribute_table. [optional] + Attributes []uint64 `protobuf:"varint,7,rep,packed,name=attributes,proto3" json:"attributes,omitempty"` +} + +func (m *Location) Reset() { *m = Location{} } +func (m *Location) String() string { return proto.CompactTextString(m) } +func (*Location) ProtoMessage() {} +func (*Location) Descriptor() ([]byte, []int) { + return fileDescriptor_05f9ce3fdbeb046f, []int{7} +} +func (m *Location) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Location.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_Location.Merge(m, src) +} +func (m *Location) XXX_Size() int { + return m.Size() +} +func (m *Location) XXX_DiscardUnknown() { + xxx_messageInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_Location proto.InternalMessageInfo + +func (m *Location) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Location) GetMappingIndex() uint64 { + if m != nil { + return m.MappingIndex + } + return 0 +} + +func (m *Location) GetAddress() uint64 { + if m != nil { + return m.Address + } + return 0 +} + +func (m *Location) GetLine() []Line { + if m != nil { + return m.Line + } + return nil +} + +func (m *Location) GetIsFolded() bool { + if m != nil { + return m.IsFolded + } + return false +} + +func (m *Location) GetTypeIndex() uint32 { + if m != nil { + return m.TypeIndex + } + return 0 +} + +func (m *Location) GetAttributes() []uint64 { + if m != nil { + return m.Attributes + } + return nil +} + +// Details a specific line in a source code, linked to a function. 
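+// Together with Location.Line this encodes inlining: line[0] is the
+// innermost (inlined) frame and the last entry is the caller it was
+// inlined into. A minimal resolution sketch, assuming a populated
+// Profile p and a Location loc (hypothetical values):
+//
+//	for _, ln := range loc.Line {
+//		fn := p.Function[ln.FunctionIndex]
+//		_ = p.StringTable[fn.Name] // human-readable function name
+//	}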
+type Line struct { + // The index of the corresponding profile.Function for this line. + FunctionIndex uint64 `protobuf:"varint,1,opt,name=function_index,json=functionIndex,proto3" json:"function_index,omitempty"` + // Line number in source code. + Line int64 `protobuf:"varint,2,opt,name=line,proto3" json:"line,omitempty"` + // Column number in source code. + Column int64 `protobuf:"varint,3,opt,name=column,proto3" json:"column,omitempty"` +} + +func (m *Line) Reset() { *m = Line{} } +func (m *Line) String() string { return proto.CompactTextString(m) } +func (*Line) ProtoMessage() {} +func (*Line) Descriptor() ([]byte, []int) { + return fileDescriptor_05f9ce3fdbeb046f, []int{8} +} +func (m *Line) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Line) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Line.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Line) XXX_Merge(src proto.Message) { + xxx_messageInfo_Line.Merge(m, src) +} +func (m *Line) XXX_Size() int { + return m.Size() +} +func (m *Line) XXX_DiscardUnknown() { + xxx_messageInfo_Line.DiscardUnknown(m) +} + +var xxx_messageInfo_Line proto.InternalMessageInfo + +func (m *Line) GetFunctionIndex() uint64 { + if m != nil { + return m.FunctionIndex + } + return 0 +} + +func (m *Line) GetLine() int64 { + if m != nil { + return m.Line + } + return 0 +} + +func (m *Line) GetColumn() int64 { + if m != nil { + return m.Column + } + return 0 +} + +// Describes a function, including its human-readable name, system name, +// source file, and starting line number in the source. +type Function struct { + // Unique nonzero id for the function. [deprecated] + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // Name of the function, in human-readable form if available. + Name int64 `protobuf:"varint,2,opt,name=name,proto3" json:"name,omitempty"` + // Name of the function, as identified by the system. + // For instance, it can be a C++ mangled name. + SystemName int64 `protobuf:"varint,3,opt,name=system_name,json=systemName,proto3" json:"system_name,omitempty"` + // Source file containing the function. + Filename int64 `protobuf:"varint,4,opt,name=filename,proto3" json:"filename,omitempty"` + // Line number in source file. 
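+ // (Note that Name, SystemName and Filename above are likewise indices
+ // into Profile.StringTable, so index 0, the mandatory empty string,
+ // means unset. E.g., assuming a populated Profile p and a Function fn,
+ // both hypothetical: _ = p.StringTable[fn.SystemName].)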
+ StartLine int64 `protobuf:"varint,5,opt,name=start_line,json=startLine,proto3" json:"start_line,omitempty"` +} + +func (m *Function) Reset() { *m = Function{} } +func (m *Function) String() string { return proto.CompactTextString(m) } +func (*Function) ProtoMessage() {} +func (*Function) Descriptor() ([]byte, []int) { + return fileDescriptor_05f9ce3fdbeb046f, []int{9} +} +func (m *Function) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Function) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Function.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Function) XXX_Merge(src proto.Message) { + xxx_messageInfo_Function.Merge(m, src) +} +func (m *Function) XXX_Size() int { + return m.Size() +} +func (m *Function) XXX_DiscardUnknown() { + xxx_messageInfo_Function.DiscardUnknown(m) +} + +var xxx_messageInfo_Function proto.InternalMessageInfo + +func (m *Function) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Function) GetName() int64 { + if m != nil { + return m.Name + } + return 0 +} + +func (m *Function) GetSystemName() int64 { + if m != nil { + return m.SystemName + } + return 0 +} + +func (m *Function) GetFilename() int64 { + if m != nil { + return m.Filename + } + return 0 +} + +func (m *Function) GetStartLine() int64 { + if m != nil { + return m.StartLine + } + return 0 +} + +func init() { + proto.RegisterEnum("opentelemetry.proto.profiles.v1experimental.AggregationTemporality", AggregationTemporality_name, AggregationTemporality_value) + proto.RegisterEnum("opentelemetry.proto.profiles.v1experimental.BuildIdKind", BuildIdKind_name, BuildIdKind_value) + proto.RegisterType((*Profile)(nil), "opentelemetry.proto.profiles.v1experimental.Profile") + proto.RegisterType((*AttributeUnit)(nil), "opentelemetry.proto.profiles.v1experimental.AttributeUnit") + proto.RegisterType((*Link)(nil), "opentelemetry.proto.profiles.v1experimental.Link") + proto.RegisterType((*ValueType)(nil), "opentelemetry.proto.profiles.v1experimental.ValueType") + proto.RegisterType((*Sample)(nil), "opentelemetry.proto.profiles.v1experimental.Sample") + proto.RegisterType((*Label)(nil), "opentelemetry.proto.profiles.v1experimental.Label") + proto.RegisterType((*Mapping)(nil), "opentelemetry.proto.profiles.v1experimental.Mapping") + proto.RegisterType((*Location)(nil), "opentelemetry.proto.profiles.v1experimental.Location") + proto.RegisterType((*Line)(nil), "opentelemetry.proto.profiles.v1experimental.Line") + proto.RegisterType((*Function)(nil), "opentelemetry.proto.profiles.v1experimental.Function") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/profiles/v1experimental/pprofextended.proto", fileDescriptor_05f9ce3fdbeb046f) +} + +var fileDescriptor_05f9ce3fdbeb046f = []byte{ + // 1483 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0xcd, 0x4f, 0x1b, 0x47, + 0x1b, 0xc7, 0x1f, 0xf8, 0xe3, 0x31, 0x06, 0x33, 0xe1, 0xe5, 0xdd, 0x37, 0xaf, 0x02, 0xc4, 0xa8, + 0x0d, 0x25, 0x92, 0x29, 0xa4, 0xad, 0xd2, 0xaa, 0x52, 0x6b, 0x82, 0x49, 0x56, 0x38, 0x86, 0x2e, + 0x86, 0x96, 0x2a, 0xd1, 0x6a, 0xf1, 0x0e, 0x66, 0xc4, 0xee, 0xec, 0x6a, 0x77, 0x8c, 0xb0, 0xd4, + 0x53, 0x8f, 0x51, 0x0f, 0x3d, 0xf7, 0x4f, 0xe8, 0xad, 0x7f, 0x41, 0xaf, 0x39, 0xe6, 0x52, 0xa9, + 0xea, 0x21, 0xaa, 0x92, 0xbf, 0xa1, 0xf7, 0x6a, 0x9e, 0x99, 0xb5, 0xcd, 
0x47, 0x0e, 0x6e, 0x2f, + 0x68, 0x9e, 0xdf, 0xfc, 0xe6, 0x37, 0xcf, 0xec, 0xf3, 0x65, 0xe0, 0x8b, 0x20, 0xa4, 0x5c, 0x50, + 0x8f, 0xfa, 0x54, 0x44, 0xfd, 0xb5, 0x30, 0x0a, 0x44, 0x20, 0xff, 0x9e, 0x30, 0x8f, 0xc6, 0x6b, + 0xe7, 0xeb, 0xf4, 0x22, 0xa4, 0x11, 0xf3, 0x29, 0x17, 0x8e, 0xb7, 0x16, 0xca, 0x0d, 0x7a, 0x21, + 0x28, 0x77, 0xa9, 0x5b, 0x43, 0x2e, 0xb9, 0x7f, 0x49, 0x40, 0x81, 0xb5, 0x44, 0xa0, 0x76, 0x59, + 0xe0, 0xf6, 0x5c, 0x37, 0xe8, 0x06, 0xea, 0x0e, 0xb9, 0x52, 0xec, 0xdb, 0xab, 0x37, 0xf9, 0xd0, + 0x09, 0x7c, 0x3f, 0xe0, 0x6b, 0xe7, 0xeb, 0x7a, 0xa5, 0xb8, 0xd5, 0xbf, 0x0a, 0x90, 0xdf, 0x53, + 0xea, 0xe4, 0x39, 0x94, 0x62, 0xc7, 0x0f, 0x3d, 0x6a, 0x8b, 0x7e, 0x48, 0x8d, 0xd4, 0x52, 0x66, + 0xa5, 0xb4, 0xf1, 0x49, 0x6d, 0x0c, 0x87, 0x6a, 0x87, 0x8e, 0xd7, 0xa3, 0xed, 0x7e, 0x48, 0x37, + 0xb3, 0x2f, 0x5f, 0x2f, 0x4e, 0x58, 0xa0, 0x04, 0x25, 0x42, 0xbe, 0x82, 0x9c, 0xb2, 0x8c, 0x34, + 0x2a, 0x3f, 0x18, 0x4b, 0x79, 0x1f, 0x8f, 0x6a, 0x59, 0x2d, 0x44, 0xda, 0x90, 0xf7, 0x9d, 0x30, + 0x64, 0xbc, 0x6b, 0x64, 0x50, 0xf3, 0xa3, 0xb1, 0x34, 0x9f, 0xaa, 0xb3, 0x5a, 0x34, 0x91, 0x22, + 0x5f, 0x43, 0xc1, 0x0b, 0x3a, 0x8e, 0x60, 0x01, 0x37, 0xb2, 0x28, 0xfb, 0xf1, 0x58, 0xb2, 0x4d, + 0x7d, 0x58, 0xeb, 0x0e, 0xc4, 0xc8, 0x07, 0x50, 0x49, 0xd6, 0x36, 0xe3, 0x2e, 0xeb, 0xd0, 0xd8, + 0x98, 0x59, 0xca, 0xac, 0x64, 0xac, 0x99, 0x04, 0x37, 0x15, 0x2c, 0x7d, 0x38, 0xe9, 0xf1, 0x0e, + 0xfa, 0x30, 0xf9, 0x0f, 0x7c, 0xd8, 0xd6, 0x87, 0x13, 0x1f, 0x12, 0x31, 0x72, 0x08, 0x33, 0x8e, + 0x10, 0x11, 0x3b, 0xee, 0x09, 0x6a, 0x0b, 0xe7, 0xd8, 0xa3, 0x46, 0x05, 0xf5, 0xef, 0xdd, 0xa8, + 0xaf, 0x93, 0xe5, 0x7c, 0xbd, 0xb6, 0x43, 0xfb, 0x18, 0x5d, 0xad, 0x38, 0x3d, 0x50, 0x69, 0x4b, + 0x11, 0xc2, 0x46, 0x75, 0x7b, 0x9c, 0x89, 0xd8, 0x98, 0x45, 0xdd, 0xcf, 0xc6, 0xf2, 0xbb, 0x9e, + 0x68, 0x1c, 0x70, 0x26, 0xae, 0x5d, 0x25, 0xc1, 0x98, 0x1c, 0x02, 0x78, 0x8c, 0x9f, 0x69, 0xef, + 0x09, 0xde, 0xb2, 0x3e, 0x5e, 0x84, 0x18, 0x3f, 0xd3, 0xe2, 0x45, 0x29, 0xa5, 0x9e, 0x70, 0x17, + 0xa6, 0x62, 0x11, 0x31, 0xde, 0xd5, 0xca, 0xb9, 0xa5, 0xcc, 0x4a, 0xd1, 0x2a, 0x29, 0x4c, 0x51, + 0x16, 0xa1, 0xe4, 0x46, 0x41, 0x68, 0x9f, 0x44, 0x8e, 0x4f, 0x63, 0x23, 0xbf, 0x94, 0x5a, 0xc9, + 0x58, 0x20, 0xa1, 0x6d, 0x44, 0x24, 0xe1, 0x8c, 0xd2, 0x01, 0xa1, 0xa0, 0x08, 0x12, 0xd2, 0x84, + 0x3b, 0x00, 0x82, 0xf9, 0xd4, 0xe6, 0x0e, 0x0f, 0x62, 0xa3, 0x88, 0xfb, 0x45, 0x89, 0xb4, 0x24, + 0x40, 0xde, 0x83, 0x69, 0xb7, 0x17, 0xa9, 0x14, 0x51, 0x14, 0x40, 0x4a, 0x39, 0x41, 0x15, 0xed, + 0x39, 0x94, 0xe4, 0x73, 0x02, 0x57, 0x95, 0x6a, 0x69, 0x29, 0xf5, 0xef, 0x4b, 0x55, 0x09, 0x62, + 0xa9, 0xce, 0x43, 0x4e, 0x59, 0xc6, 0x14, 0xde, 0xae, 0x2d, 0x62, 0x40, 0x5e, 0x26, 0x04, 0xe5, + 0xc2, 0x28, 0x63, 0xde, 0x26, 0x26, 0xa9, 0xc1, 0x2d, 0x97, 0x9e, 0x38, 0x3d, 0x4f, 0xd8, 0xa3, + 0x3d, 0x64, 0x1a, 0x8f, 0xcf, 0xea, 0xad, 0xfd, 0x41, 0x33, 0xa8, 0x3e, 0x81, 0xf2, 0xa5, 0x50, + 0x93, 0x65, 0x28, 0x0f, 0xf3, 0xe7, 0x8c, 0xf6, 0x8d, 0x14, 0x1e, 0x9d, 0x1a, 0x80, 0x3b, 0xb4, + 0x4f, 0x08, 0x64, 0x65, 0x6a, 0x19, 0x69, 0xdc, 0xc3, 0x75, 0xf5, 0xd7, 0x14, 0x64, 0x65, 0x3c, + 0xc9, 0x33, 0x28, 0x88, 0xc8, 0xe9, 0x50, 0x9b, 0xb9, 0x78, 0x78, 0x6a, 0xb3, 0x2e, 0x1f, 0xf6, + 0xc7, 0xeb, 0xc5, 0x4f, 0xbb, 0xc1, 0x95, 0x4f, 0xc3, 0x64, 0x43, 0xf4, 0x3c, 0xda, 0x11, 0x41, + 0xb4, 0x16, 0xba, 0x8e, 0x70, 0xd6, 0x18, 0x17, 0x34, 0xe2, 0x8e, 0xb7, 0x26, 0xad, 0x5a, 0x5b, + 0x2a, 0x99, 0x5b, 0x56, 0x1e, 0x25, 0x4d, 0x97, 0x1c, 0x41, 0x3e, 0x0e, 0x1d, 0x2e, 0xc5, 0xd3, + 0x28, 0xfe, 0xa5, 0x16, 0x7f, 0x38, 0xbe, 0xf8, 0x7e, 0xe8, 0x70, 0x73, 0xcb, 0xca, 0x49, 0x41, + 
0xd3, 0xad, 0xfe, 0x92, 0x82, 0xe2, 0x20, 0x1a, 0xf2, 0x8d, 0xba, 0xfd, 0xe2, 0x1b, 0x85, 0xc6, + 0xae, 0xbe, 0x9b, 0x7c, 0x07, 0xff, 0x75, 0xba, 0xdd, 0x88, 0x76, 0x55, 0xb2, 0x08, 0xea, 0x87, + 0x41, 0xe4, 0x78, 0x4c, 0xf4, 0x8d, 0xcc, 0x52, 0x6a, 0x65, 0x7a, 0xe3, 0xd1, 0x78, 0x85, 0x37, + 0xd4, 0x6a, 0x0f, 0xa5, 0xac, 0x79, 0xe7, 0x46, 0xbc, 0xfa, 0x22, 0x03, 0x39, 0x15, 0x4e, 0x99, + 0xb2, 0xa3, 0x5d, 0x8d, 0x5e, 0xe0, 0xe4, 0xc8, 0x5a, 0xe5, 0x91, 0x9e, 0x46, 0x2f, 0xc8, 0x06, + 0xfc, 0x27, 0x01, 0x62, 0x3b, 0x16, 0x4e, 0x24, 0x34, 0x5b, 0x16, 0x51, 0xd6, 0xba, 0x35, 0xd8, + 0xdc, 0x97, 0x7b, 0xea, 0xcc, 0x48, 0xc3, 0x8c, 0x6d, 0x8f, 0xf2, 0xae, 0x38, 0xc5, 0x92, 0xca, + 0x0e, 0x1b, 0x66, 0xdc, 0x44, 0x58, 0x26, 0x60, 0x2c, 0x9c, 0xce, 0x59, 0x92, 0x02, 0x5a, 0x5c, + 0x16, 0x58, 0xd9, 0x9a, 0x1d, 0x6e, 0x99, 0xae, 0x92, 0x9e, 0x83, 0xc9, 0x73, 0xf9, 0xcd, 0x71, + 0x18, 0x65, 0x2c, 0x65, 0x90, 0x16, 0x4c, 0x7a, 0xce, 0x31, 0xf5, 0xf4, 0x38, 0xd9, 0x18, 0xaf, + 0xab, 0xc8, 0x93, 0xba, 0x9a, 0x94, 0x0c, 0x59, 0x00, 0x18, 0x24, 0xb0, 0x2c, 0x65, 0xf9, 0x5d, + 0x46, 0x10, 0x19, 0x58, 0xd9, 0x7f, 0xb0, 0xcc, 0xb2, 0x16, 0xae, 0xc9, 0x87, 0x30, 0x27, 0xfb, + 0x41, 0x2c, 0x1c, 0x3f, 0x8c, 0x65, 0x2b, 0xbd, 0xc0, 0x4e, 0x80, 0x15, 0x97, 0xb5, 0xc8, 0x70, + 0xef, 0x80, 0xb3, 0x0b, 0xd9, 0x0e, 0xaa, 0xdf, 0xc0, 0x24, 0xde, 0x4d, 0x2a, 0x90, 0x19, 0x96, + 0x8e, 0x5c, 0x4a, 0x24, 0x16, 0x91, 0x4e, 0x1c, 0xb9, 0x94, 0x08, 0xef, 0xf9, 0x98, 0x23, 0x19, + 0x4b, 0x2e, 0xc9, 0xff, 0xa0, 0xc0, 0x7b, 0x3e, 0x36, 0x6d, 0x23, 0x8b, 0x70, 0x9e, 0xf7, 0x7c, + 0x59, 0x95, 0xd5, 0xdf, 0x32, 0x90, 0xd7, 0x53, 0x92, 0x4c, 0x43, 0x5a, 0x57, 0x56, 0xd6, 0x4a, + 0x33, 0x57, 0xb6, 0x4b, 0x9f, 0xfa, 0x41, 0xd4, 0x57, 0xd1, 0xc4, 0x3b, 0xb2, 0x56, 0x49, 0x61, + 0x18, 0xc4, 0x11, 0x8a, 0xc7, 0x7c, 0x26, 0xf0, 0xd2, 0x01, 0xa5, 0x29, 0x21, 0xd9, 0x30, 0xe5, + 0xc7, 0xb4, 0x83, 0x93, 0x93, 0x98, 0xaa, 0xfb, 0xb3, 0x16, 0x48, 0x68, 0x17, 0x11, 0x72, 0x1b, + 0x0a, 0xd2, 0xe2, 0x8e, 0x4f, 0x8d, 0x49, 0xf4, 0x6e, 0x60, 0x4b, 0xcf, 0x8f, 0x7b, 0xcc, 0x73, + 0x65, 0x55, 0xe6, 0x94, 0xe7, 0x68, 0x9b, 0x2e, 0x79, 0x06, 0xe5, 0x64, 0xcb, 0x3e, 0x63, 0xdc, + 0xc5, 0x1e, 0x39, 0xbd, 0xf1, 0x70, 0xac, 0x88, 0x6e, 0x2a, 0xb1, 0x1d, 0xc6, 0x5d, 0xab, 0x74, + 0x3c, 0x34, 0xae, 0xc4, 0x75, 0xea, 0x5a, 0x5c, 0x97, 0xa1, 0x7c, 0xea, 0xc4, 0x76, 0x32, 0x75, + 0xd5, 0xa4, 0x28, 0x58, 0x53, 0xa7, 0x4e, 0x9c, 0x4c, 0xe6, 0x21, 0x49, 0xbf, 0x46, 0x4d, 0x0b, + 0x4d, 0x4a, 0x30, 0xb2, 0x02, 0x15, 0x49, 0xf2, 0x18, 0xa7, 0x36, 0xef, 0xf9, 0xc7, 0x34, 0x52, + 0x53, 0xa3, 0x60, 0x4d, 0x9f, 0x3a, 0x71, 0x93, 0x71, 0xda, 0x52, 0x28, 0x59, 0x85, 0x59, 0xc9, + 0x64, 0x1c, 0xb9, 0x7a, 0x00, 0x01, 0x52, 0x67, 0x4e, 0x9d, 0xd8, 0x44, 0x5c, 0x4d, 0xa1, 0xea, + 0xf7, 0x69, 0x28, 0x24, 0x3f, 0x53, 0xae, 0x05, 0x76, 0x19, 0xca, 0xfa, 0xa7, 0x90, 0x2e, 0x22, + 0x15, 0xd9, 0x29, 0x0d, 0xaa, 0xfa, 0x31, 0x20, 0xef, 0xb8, 0x6e, 0x44, 0xe3, 0x58, 0x47, 0x35, + 0x31, 0xc9, 0x0e, 0xe6, 0x34, 0xd5, 0x3f, 0x9d, 0xc6, 0x1e, 0xcc, 0xc9, 0x3c, 0x42, 0x11, 0xf2, + 0x7f, 0x28, 0xb2, 0xd8, 0x3e, 0x09, 0x3c, 0x97, 0xba, 0x18, 0xfe, 0x82, 0x55, 0x60, 0xf1, 0x36, + 0xda, 0x38, 0x4b, 0xfb, 0x21, 0xd5, 0x5e, 0xe6, 0xb0, 0xd4, 0x8b, 0x12, 0x51, 0x2e, 0x5e, 0x0e, + 0x52, 0xfe, 0x6a, 0x90, 0xaa, 0x47, 0x38, 0x38, 0xb0, 0x81, 0x25, 0x81, 0x1a, 0x34, 0x30, 0xf9, + 0xa2, 0x72, 0x82, 0x2a, 0x39, 0xa2, 0xdf, 0xa5, 0x9b, 0x30, 0xba, 0x37, 0x0f, 0xb9, 0x4e, 0xe0, + 0xf5, 0x7c, 0xae, 0xeb, 0x49, 0x5b, 0xd5, 0x17, 0x29, 0x28, 0x24, 0x81, 0xbe, 0xf6, 0x7d, 0x09, + 0x64, 0x31, 0x9b, 0xb5, 
0x10, 0x66, 0xf2, 0x22, 0x94, 0xe2, 0x7e, 0x2c, 0xa8, 0x6f, 0xe3, 0x96, + 0x52, 0x03, 0x05, 0xb5, 0x24, 0x61, 0xb4, 0x0c, 0xb2, 0x57, 0xca, 0xe0, 0x0e, 0x80, 0x6a, 0xa8, + 0xe8, 0x9f, 0x2a, 0x92, 0x22, 0x22, 0xf2, 0x7d, 0xab, 0x3f, 0xa4, 0x60, 0xfe, 0xe6, 0xf6, 0x4e, + 0xee, 0xc1, 0x72, 0xfd, 0xf1, 0x63, 0xab, 0xf1, 0xb8, 0xde, 0x36, 0x77, 0x5b, 0x76, 0xbb, 0xf1, + 0x74, 0x6f, 0xd7, 0xaa, 0x37, 0xcd, 0xf6, 0x91, 0x7d, 0xd0, 0xda, 0xdf, 0x6b, 0x3c, 0x32, 0xb7, + 0xcd, 0xc6, 0x56, 0x65, 0x82, 0xdc, 0x85, 0x3b, 0xef, 0x22, 0x6e, 0x35, 0x9a, 0xed, 0x7a, 0x25, + 0x45, 0xde, 0x87, 0xea, 0xbb, 0x28, 0x8f, 0x0e, 0x9e, 0x1e, 0x34, 0xeb, 0x6d, 0xf3, 0xb0, 0x51, + 0x49, 0xaf, 0x7e, 0x0e, 0xa5, 0x91, 0xba, 0x22, 0xb7, 0x60, 0x66, 0xf3, 0xc0, 0x6c, 0x6e, 0xd9, + 0xe6, 0x96, 0xdd, 0x34, 0x5b, 0x3b, 0x0d, 0xab, 0x32, 0x41, 0x0c, 0x98, 0x1b, 0x80, 0x9b, 0x66, + 0xab, 0x6e, 0x1d, 0xd9, 0x4f, 0xea, 0xfb, 0x4f, 0x2a, 0xa9, 0xcd, 0x9f, 0x52, 0x2f, 0xdf, 0x2c, + 0xa4, 0x5e, 0xbd, 0x59, 0x48, 0xfd, 0xf9, 0x66, 0x21, 0xf5, 0xe3, 0xdb, 0x85, 0x89, 0x57, 0x6f, + 0x17, 0x26, 0x7e, 0x7f, 0xbb, 0x30, 0xf1, 0xad, 0x35, 0xf6, 0x24, 0x56, 0xff, 0x1b, 0x75, 0x29, + 0x7f, 0xd7, 0xbf, 0x68, 0x3f, 0xa7, 0xef, 0xef, 0x86, 0x94, 0xb7, 0x07, 0x8a, 0x7b, 0x98, 0xbe, + 0x7b, 0x49, 0xfa, 0x1e, 0xae, 0x37, 0x46, 0xd8, 0xc7, 0x39, 0xd4, 0x7b, 0xf0, 0x77, 0x00, 0x00, + 0x00, 0xff, 0xff, 0xef, 0x03, 0x47, 0x6d, 0x06, 0x0e, 0x00, 0x00, +} + +func (m *Profile) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Profile) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Profile) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LinkTable) > 0 { + for iNdEx := len(m.LinkTable) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.LinkTable[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPprofextended(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + } + if len(m.AttributeUnits) > 0 { + for iNdEx := len(m.AttributeUnits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AttributeUnits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPprofextended(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + } + if len(m.AttributeTable) > 0 { + for iNdEx := len(m.AttributeTable) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AttributeTable[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPprofextended(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + } + if len(m.LocationIndices) > 0 { + dAtA2 := make([]byte, len(m.LocationIndices)*10) + var j1 int + for _, num1 := range m.LocationIndices { + num := uint64(num1) + for num >= 1<<7 { + dAtA2[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA2[j1] = uint8(num) + j1++ + } + i -= j1 + copy(dAtA[i:], dAtA2[:j1]) + i = encodeVarintPprofextended(dAtA, i, uint64(j1)) + i-- + dAtA[i] = 0x7a + } + if m.DefaultSampleType != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.DefaultSampleType)) + i-- + dAtA[i] = 0x70 + } + if len(m.Comment) > 0 { + dAtA4 := make([]byte, len(m.Comment)*10) + var j3 int + for _, num1 := range m.Comment { + num := uint64(num1) + for num >= 1<<7 { 
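+ // Standard protobuf base-128 varint: write the low seven bits with
+ // the continuation bit (0x80) set while higher bits remain. For
+ // example, 300 (0b1_0010_1100) encodes as 0xAC 0x02.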
+ dAtA4[j3] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j3++ + } + dAtA4[j3] = uint8(num) + j3++ + } + i -= j3 + copy(dAtA[i:], dAtA4[:j3]) + i = encodeVarintPprofextended(dAtA, i, uint64(j3)) + i-- + dAtA[i] = 0x6a + } + if m.Period != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.Period)) + i-- + dAtA[i] = 0x60 + } + { + size, err := m.PeriodType.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPprofextended(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + if m.DurationNanos != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.DurationNanos)) + i-- + dAtA[i] = 0x50 + } + if m.TimeNanos != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.TimeNanos)) + i-- + dAtA[i] = 0x48 + } + if m.KeepFrames != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.KeepFrames)) + i-- + dAtA[i] = 0x40 + } + if m.DropFrames != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.DropFrames)) + i-- + dAtA[i] = 0x38 + } + if len(m.StringTable) > 0 { + for iNdEx := len(m.StringTable) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.StringTable[iNdEx]) + copy(dAtA[i:], m.StringTable[iNdEx]) + i = encodeVarintPprofextended(dAtA, i, uint64(len(m.StringTable[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Function) > 0 { + for iNdEx := len(m.Function) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Function[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPprofextended(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.Location) > 0 { + for iNdEx := len(m.Location) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Location[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPprofextended(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Mapping) > 0 { + for iNdEx := len(m.Mapping) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Mapping[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPprofextended(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Sample) > 0 { + for iNdEx := len(m.Sample) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Sample[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPprofextended(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.SampleType) > 0 { + for iNdEx := len(m.SampleType) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SampleType[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPprofextended(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *AttributeUnit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AttributeUnit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AttributeUnit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Unit != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.Unit)) + i-- + dAtA[i] = 0x10 + } + if m.AttributeKey != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.AttributeKey)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Link) Marshal() (dAtA []byte, err error) { + size := m.Size() 
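+ // Allocate exactly Size() bytes up front; MarshalToSizedBuffer then fills the buffer back to front.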
+ dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Link) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Link) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size := m.SpanId.Size() + i -= size + if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintPprofextended(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size := m.TraceId.Size() + i -= size + if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintPprofextended(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValueType) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValueType) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValueType) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AggregationTemporality != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.AggregationTemporality)) + i-- + dAtA[i] = 0x18 + } + if m.Unit != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.Unit)) + i-- + dAtA[i] = 0x10 + } + if m.Type != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Sample) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sample) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TimestampsUnixNano) > 0 { + dAtA7 := make([]byte, len(m.TimestampsUnixNano)*10) + var j6 int + for _, num := range m.TimestampsUnixNano { + for num >= 1<<7 { + dAtA7[j6] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j6++ + } + dAtA7[j6] = uint8(num) + j6++ + } + i -= j6 + copy(dAtA[i:], dAtA7[:j6]) + i = encodeVarintPprofextended(dAtA, i, uint64(j6)) + i-- + dAtA[i] = 0x6a + } + if m.Link != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.Link)) + i-- + dAtA[i] = 0x60 + } + if len(m.Attributes) > 0 { + dAtA9 := make([]byte, len(m.Attributes)*10) + var j8 int + for _, num := range m.Attributes { + for num >= 1<<7 { + dAtA9[j8] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j8++ + } + dAtA9[j8] = uint8(num) + j8++ + } + i -= j8 + copy(dAtA[i:], dAtA9[:j8]) + i = encodeVarintPprofextended(dAtA, i, uint64(j8)) + i-- + dAtA[i] = 0x52 + } + if m.StacktraceIdIndex != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.StacktraceIdIndex)) + i-- + dAtA[i] = 0x48 + } + if m.LocationsLength != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.LocationsLength)) + i-- + dAtA[i] = 0x40 + } + if m.LocationsStartIndex != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.LocationsStartIndex)) + i-- + dAtA[i] = 0x38 + } + if len(m.Label) > 0 { + for iNdEx := len(m.Label) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Label[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size 
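+ // Length-delimited field: the embedded message body is already in place; prepend its byte length, then the key.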
+ i = encodeVarintPprofextended(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Value) > 0 { + dAtA11 := make([]byte, len(m.Value)*10) + var j10 int + for _, num1 := range m.Value { + num := uint64(num1) + for num >= 1<<7 { + dAtA11[j10] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j10++ + } + dAtA11[j10] = uint8(num) + j10++ + } + i -= j10 + copy(dAtA[i:], dAtA11[:j10]) + i = encodeVarintPprofextended(dAtA, i, uint64(j10)) + i-- + dAtA[i] = 0x12 + } + if len(m.LocationIndex) > 0 { + dAtA13 := make([]byte, len(m.LocationIndex)*10) + var j12 int + for _, num := range m.LocationIndex { + for num >= 1<<7 { + dAtA13[j12] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j12++ + } + dAtA13[j12] = uint8(num) + j12++ + } + i -= j12 + copy(dAtA[i:], dAtA13[:j12]) + i = encodeVarintPprofextended(dAtA, i, uint64(j12)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Label) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Label) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Label) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NumUnit != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.NumUnit)) + i-- + dAtA[i] = 0x20 + } + if m.Num != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.Num)) + i-- + dAtA[i] = 0x18 + } + if m.Str != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.Str)) + i-- + dAtA[i] = 0x10 + } + if m.Key != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.Key)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Mapping) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Mapping) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Mapping) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attributes) > 0 { + dAtA15 := make([]byte, len(m.Attributes)*10) + var j14 int + for _, num := range m.Attributes { + for num >= 1<<7 { + dAtA15[j14] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j14++ + } + dAtA15[j14] = uint8(num) + j14++ + } + i -= j14 + copy(dAtA[i:], dAtA15[:j14]) + i = encodeVarintPprofextended(dAtA, i, uint64(j14)) + i-- + dAtA[i] = 0x62 + } + if m.BuildIdKind != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.BuildIdKind)) + i-- + dAtA[i] = 0x58 + } + if m.HasInlineFrames { + i-- + if m.HasInlineFrames { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.HasLineNumbers { + i-- + if m.HasLineNumbers { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } + if m.HasFilenames { + i-- + if m.HasFilenames { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.HasFunctions { + i-- + if m.HasFunctions { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.BuildId != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.BuildId)) + i-- + dAtA[i] = 0x30 + } + if m.Filename != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.Filename)) + i-- + dAtA[i] = 0x28 + } + if m.FileOffset != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.FileOffset)) + i-- 
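+ // 0x20 is the precomputed key for field 4 (FileOffset): (4 << 3) | wire type 0 (varint).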
+ dAtA[i] = 0x20 + } + if m.MemoryLimit != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.MemoryLimit)) + i-- + dAtA[i] = 0x18 + } + if m.MemoryStart != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.MemoryStart)) + i-- + dAtA[i] = 0x10 + } + if m.Id != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Location) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Location) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Location) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attributes) > 0 { + dAtA17 := make([]byte, len(m.Attributes)*10) + var j16 int + for _, num := range m.Attributes { + for num >= 1<<7 { + dAtA17[j16] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j16++ + } + dAtA17[j16] = uint8(num) + j16++ + } + i -= j16 + copy(dAtA[i:], dAtA17[:j16]) + i = encodeVarintPprofextended(dAtA, i, uint64(j16)) + i-- + dAtA[i] = 0x3a + } + if m.TypeIndex != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.TypeIndex)) + i-- + dAtA[i] = 0x30 + } + if m.IsFolded { + i-- + if m.IsFolded { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.Line) > 0 { + for iNdEx := len(m.Line) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Line[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPprofextended(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.Address != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.Address)) + i-- + dAtA[i] = 0x18 + } + if m.MappingIndex != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.MappingIndex)) + i-- + dAtA[i] = 0x10 + } + if m.Id != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Line) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Line) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Line) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Column != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.Column)) + i-- + dAtA[i] = 0x18 + } + if m.Line != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.Line)) + i-- + dAtA[i] = 0x10 + } + if m.FunctionIndex != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.FunctionIndex)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Function) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Function) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Function) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.StartLine != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.StartLine)) + i-- + dAtA[i] = 0x28 + } + if m.Filename != 0 { + i = encodeVarintPprofextended(dAtA, i, 
uint64(m.Filename)) + i-- + dAtA[i] = 0x20 + } + if m.SystemName != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.SystemName)) + i-- + dAtA[i] = 0x18 + } + if m.Name != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.Name)) + i-- + dAtA[i] = 0x10 + } + if m.Id != 0 { + i = encodeVarintPprofextended(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintPprofextended(dAtA []byte, offset int, v uint64) int { + offset -= sovPprofextended(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Profile) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.SampleType) > 0 { + for _, e := range m.SampleType { + l = e.Size() + n += 1 + l + sovPprofextended(uint64(l)) + } + } + if len(m.Sample) > 0 { + for _, e := range m.Sample { + l = e.Size() + n += 1 + l + sovPprofextended(uint64(l)) + } + } + if len(m.Mapping) > 0 { + for _, e := range m.Mapping { + l = e.Size() + n += 1 + l + sovPprofextended(uint64(l)) + } + } + if len(m.Location) > 0 { + for _, e := range m.Location { + l = e.Size() + n += 1 + l + sovPprofextended(uint64(l)) + } + } + if len(m.Function) > 0 { + for _, e := range m.Function { + l = e.Size() + n += 1 + l + sovPprofextended(uint64(l)) + } + } + if len(m.StringTable) > 0 { + for _, s := range m.StringTable { + l = len(s) + n += 1 + l + sovPprofextended(uint64(l)) + } + } + if m.DropFrames != 0 { + n += 1 + sovPprofextended(uint64(m.DropFrames)) + } + if m.KeepFrames != 0 { + n += 1 + sovPprofextended(uint64(m.KeepFrames)) + } + if m.TimeNanos != 0 { + n += 1 + sovPprofextended(uint64(m.TimeNanos)) + } + if m.DurationNanos != 0 { + n += 1 + sovPprofextended(uint64(m.DurationNanos)) + } + l = m.PeriodType.Size() + n += 1 + l + sovPprofextended(uint64(l)) + if m.Period != 0 { + n += 1 + sovPprofextended(uint64(m.Period)) + } + if len(m.Comment) > 0 { + l = 0 + for _, e := range m.Comment { + l += sovPprofextended(uint64(e)) + } + n += 1 + sovPprofextended(uint64(l)) + l + } + if m.DefaultSampleType != 0 { + n += 1 + sovPprofextended(uint64(m.DefaultSampleType)) + } + if len(m.LocationIndices) > 0 { + l = 0 + for _, e := range m.LocationIndices { + l += sovPprofextended(uint64(e)) + } + n += 1 + sovPprofextended(uint64(l)) + l + } + if len(m.AttributeTable) > 0 { + for _, e := range m.AttributeTable { + l = e.Size() + n += 2 + l + sovPprofextended(uint64(l)) + } + } + if len(m.AttributeUnits) > 0 { + for _, e := range m.AttributeUnits { + l = e.Size() + n += 2 + l + sovPprofextended(uint64(l)) + } + } + if len(m.LinkTable) > 0 { + for _, e := range m.LinkTable { + l = e.Size() + n += 2 + l + sovPprofextended(uint64(l)) + } + } + return n +} + +func (m *AttributeUnit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AttributeKey != 0 { + n += 1 + sovPprofextended(uint64(m.AttributeKey)) + } + if m.Unit != 0 { + n += 1 + sovPprofextended(uint64(m.Unit)) + } + return n +} + +func (m *Link) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.TraceId.Size() + n += 1 + l + sovPprofextended(uint64(l)) + l = m.SpanId.Size() + n += 1 + l + sovPprofextended(uint64(l)) + return n +} + +func (m *ValueType) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovPprofextended(uint64(m.Type)) + } + if m.Unit != 0 { + n += 1 + sovPprofextended(uint64(m.Unit)) + } + if m.AggregationTemporality != 0 { + n += 1 + 
sovPprofextended(uint64(m.AggregationTemporality)) + } + return n +} + +func (m *Sample) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.LocationIndex) > 0 { + l = 0 + for _, e := range m.LocationIndex { + l += sovPprofextended(uint64(e)) + } + n += 1 + sovPprofextended(uint64(l)) + l + } + if len(m.Value) > 0 { + l = 0 + for _, e := range m.Value { + l += sovPprofextended(uint64(e)) + } + n += 1 + sovPprofextended(uint64(l)) + l + } + if len(m.Label) > 0 { + for _, e := range m.Label { + l = e.Size() + n += 1 + l + sovPprofextended(uint64(l)) + } + } + if m.LocationsStartIndex != 0 { + n += 1 + sovPprofextended(uint64(m.LocationsStartIndex)) + } + if m.LocationsLength != 0 { + n += 1 + sovPprofextended(uint64(m.LocationsLength)) + } + if m.StacktraceIdIndex != 0 { + n += 1 + sovPprofextended(uint64(m.StacktraceIdIndex)) + } + if len(m.Attributes) > 0 { + l = 0 + for _, e := range m.Attributes { + l += sovPprofextended(uint64(e)) + } + n += 1 + sovPprofextended(uint64(l)) + l + } + if m.Link != 0 { + n += 1 + sovPprofextended(uint64(m.Link)) + } + if len(m.TimestampsUnixNano) > 0 { + l = 0 + for _, e := range m.TimestampsUnixNano { + l += sovPprofextended(uint64(e)) + } + n += 1 + sovPprofextended(uint64(l)) + l + } + return n +} + +func (m *Label) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Key != 0 { + n += 1 + sovPprofextended(uint64(m.Key)) + } + if m.Str != 0 { + n += 1 + sovPprofextended(uint64(m.Str)) + } + if m.Num != 0 { + n += 1 + sovPprofextended(uint64(m.Num)) + } + if m.NumUnit != 0 { + n += 1 + sovPprofextended(uint64(m.NumUnit)) + } + return n +} + +func (m *Mapping) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovPprofextended(uint64(m.Id)) + } + if m.MemoryStart != 0 { + n += 1 + sovPprofextended(uint64(m.MemoryStart)) + } + if m.MemoryLimit != 0 { + n += 1 + sovPprofextended(uint64(m.MemoryLimit)) + } + if m.FileOffset != 0 { + n += 1 + sovPprofextended(uint64(m.FileOffset)) + } + if m.Filename != 0 { + n += 1 + sovPprofextended(uint64(m.Filename)) + } + if m.BuildId != 0 { + n += 1 + sovPprofextended(uint64(m.BuildId)) + } + if m.HasFunctions { + n += 2 + } + if m.HasFilenames { + n += 2 + } + if m.HasLineNumbers { + n += 2 + } + if m.HasInlineFrames { + n += 2 + } + if m.BuildIdKind != 0 { + n += 1 + sovPprofextended(uint64(m.BuildIdKind)) + } + if len(m.Attributes) > 0 { + l = 0 + for _, e := range m.Attributes { + l += sovPprofextended(uint64(e)) + } + n += 1 + sovPprofextended(uint64(l)) + l + } + return n +} + +func (m *Location) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovPprofextended(uint64(m.Id)) + } + if m.MappingIndex != 0 { + n += 1 + sovPprofextended(uint64(m.MappingIndex)) + } + if m.Address != 0 { + n += 1 + sovPprofextended(uint64(m.Address)) + } + if len(m.Line) > 0 { + for _, e := range m.Line { + l = e.Size() + n += 1 + l + sovPprofextended(uint64(l)) + } + } + if m.IsFolded { + n += 2 + } + if m.TypeIndex != 0 { + n += 1 + sovPprofextended(uint64(m.TypeIndex)) + } + if len(m.Attributes) > 0 { + l = 0 + for _, e := range m.Attributes { + l += sovPprofextended(uint64(e)) + } + n += 1 + sovPprofextended(uint64(l)) + l + } + return n +} + +func (m *Line) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FunctionIndex != 0 { + n += 1 + sovPprofextended(uint64(m.FunctionIndex)) + } + if m.Line != 0 { + n += 1 + sovPprofextended(uint64(m.Line)) + } + if m.Column != 0 { 
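+ // One byte for the field key plus the varint-encoded value.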
+ n += 1 + sovPprofextended(uint64(m.Column)) + } + return n +} + +func (m *Function) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovPprofextended(uint64(m.Id)) + } + if m.Name != 0 { + n += 1 + sovPprofextended(uint64(m.Name)) + } + if m.SystemName != 0 { + n += 1 + sovPprofextended(uint64(m.SystemName)) + } + if m.Filename != 0 { + n += 1 + sovPprofextended(uint64(m.Filename)) + } + if m.StartLine != 0 { + n += 1 + sovPprofextended(uint64(m.StartLine)) + } + return n +} + +func sovPprofextended(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozPprofextended(x uint64) (n int) { + return sovPprofextended(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Profile) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Profile: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Profile: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SampleType", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPprofextended + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPprofextended + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SampleType = append(m.SampleType, ValueType{}) + if err := m.SampleType[len(m.SampleType)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sample", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPprofextended + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPprofextended + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sample = append(m.Sample, Sample{}) + if err := m.Sample[len(m.Sample)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mapping", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPprofextended + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPprofextended + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mapping = append(m.Mapping, Mapping{}) + if err := 
m.Mapping[len(m.Mapping)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Location", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPprofextended + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPprofextended + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Location = append(m.Location, Location{}) + if err := m.Location[len(m.Location)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Function", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPprofextended + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPprofextended + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Function = append(m.Function, Function{}) + if err := m.Function[len(m.Function)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringTable", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPprofextended + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPprofextended + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StringTable = append(m.StringTable, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DropFrames", wireType) + } + m.DropFrames = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DropFrames |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeepFrames", wireType) + } + m.KeepFrames = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.KeepFrames |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeNanos", wireType) + } + m.TimeNanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeNanos |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + 
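+ // Scalar int64 fields are only valid as wire type 0 (varint).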
return fmt.Errorf("proto: wrong wireType = %d for field DurationNanos", wireType) + } + m.DurationNanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationNanos |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeriodType", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPprofextended + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPprofextended + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PeriodType.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType) + } + m.Period = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Period |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType == 0 { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Comment = append(m.Comment, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthPprofextended + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthPprofextended + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Comment) == 0 { + m.Comment = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Comment = append(m.Comment, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Comment", wireType) + } + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultSampleType", wireType) + } + m.DefaultSampleType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DefaultSampleType |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 15: + if wireType == 0 { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
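+ // Accumulate 7 payload bits per byte; a clear high bit (b < 0x80) terminates the varint.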
iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LocationIndices = append(m.LocationIndices, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthPprofextended + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthPprofextended + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.LocationIndices) == 0 { + m.LocationIndices = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LocationIndices = append(m.LocationIndices, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field LocationIndices", wireType) + } + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttributeTable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPprofextended + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPprofextended + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AttributeTable = append(m.AttributeTable, v1.KeyValue{}) + if err := m.AttributeTable[len(m.AttributeTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttributeUnits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPprofextended + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPprofextended + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AttributeUnits = append(m.AttributeUnits, AttributeUnit{}) + if err := m.AttributeUnits[len(m.AttributeUnits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LinkTable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPprofextended + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPprofextended + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LinkTable = append(m.LinkTable, Link{}) + if err 
:= m.LinkTable[len(m.LinkTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPprofextended(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPprofextended + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AttributeUnit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttributeUnit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttributeUnit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AttributeKey", wireType) + } + m.AttributeKey = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AttributeKey |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) + } + m.Unit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Unit |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipPprofextended(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPprofextended + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Link) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Link: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Link: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthPprofextended + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthPprofextended + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthPprofextended + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthPprofextended + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPprofextended(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPprofextended + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValueType) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValueType: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValueType: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) + } + m.Unit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Unit |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType) + } + m.AggregationTemporality = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipPprofextended(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPprofextended + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Sample) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sample: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LocationIndex = append(m.LocationIndex, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthPprofextended + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthPprofextended + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.LocationIndex) == 0 { + m.LocationIndex = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LocationIndex = append(m.LocationIndex, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field LocationIndex", wireType) + } + case 2: + if wireType == 0 { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = append(m.Value, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthPprofextended + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthPprofextended + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Value) == 0 { + m.Value = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = append(m.Value, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType 
= %d for field Label", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPprofextended + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPprofextended + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Label = append(m.Label, Label{}) + if err := m.Label[len(m.Label)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LocationsStartIndex", wireType) + } + m.LocationsStartIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LocationsStartIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LocationsLength", wireType) + } + m.LocationsLength = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LocationsLength |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StacktraceIdIndex", wireType) + } + m.StacktraceIdIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StacktraceIdIndex |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Attributes = append(m.Attributes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthPprofextended + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthPprofextended + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Attributes) == 0 { + m.Attributes = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Attributes = append(m.Attributes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Link", wireType) + } + m.Link = 0 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Link |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimestampsUnixNano = append(m.TimestampsUnixNano, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthPprofextended + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthPprofextended + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.TimestampsUnixNano) == 0 { + m.TimestampsUnixNano = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimestampsUnixNano = append(m.TimestampsUnixNano, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field TimestampsUnixNano", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipPprofextended(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPprofextended + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Label) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Label: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Label: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + m.Key = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Key |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Str", wireType) + } + m.Str = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Str |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + 
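+ // A varint longer than 10 bytes (shift >= 64) would overflow int64 and is rejected above.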
} + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Num", wireType) + } + m.Num = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Num |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumUnit", wireType) + } + m.NumUnit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumUnit |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipPprofextended(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPprofextended + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mapping) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Mapping: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mapping: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryStart", wireType) + } + m.MemoryStart = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemoryStart |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryLimit", wireType) + } + m.MemoryLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemoryLimit |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FileOffset", wireType) + } + m.FileOffset = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FileOffset |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Filename", wireType) + } + m.Filename = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + 
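+ // The varint continues past the end of the buffer, so the input is truncated.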
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Filename |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BuildId", wireType) + } + m.BuildId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BuildId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HasFunctions", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HasFunctions = bool(v != 0) + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HasFilenames", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HasFilenames = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HasLineNumbers", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HasLineNumbers = bool(v != 0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HasInlineFrames", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HasInlineFrames = bool(v != 0) + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BuildIdKind", wireType) + } + m.BuildIdKind = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BuildIdKind |= BuildIdKind(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Attributes = append(m.Attributes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthPprofextended + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthPprofextended + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && 
len(m.Attributes) == 0 { + m.Attributes = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Attributes = append(m.Attributes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipPprofextended(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPprofextended + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Location) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Location: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Location: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MappingIndex", wireType) + } + m.MappingIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MappingIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + m.Address = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Address |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPprofextended + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPprofextended + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Line = append(m.Line, Line{}) + if err := m.Line[len(m.Line)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsFolded", wireType) + } + var v int + for shift := uint(0); ; shift 
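The Attributes branch just above accepts both encodings of a repeated varint field: wire type 0 for one element at a time, and wire type 2 for a packed run. For the packed form the generated code makes a counting pass first (one element per byte with the high bit clear) so the slice can be allocated once. The same two-pass idea in isolation, shown without the bounds and overflow checks of the generated code:

package main

import "fmt"

func decodePackedVarints(buf []byte) []uint64 {
	count := 0
	for _, b := range buf {
		if b < 0x80 {
			count++ // last byte of one varint
		}
	}
	out := make([]uint64, 0, count)
	var v uint64
	var shift uint
	for _, b := range buf {
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			out = append(out, v)
			v, shift = 0, 0
		} else {
			shift += 7
		}
	}
	return out
}

func main() {
	// Three packed values: 1, 300 (0xAC 0x02), 2.
	fmt.Println(decodePackedVarints([]byte{0x01, 0xAC, 0x02, 0x02})) // [1 300 2]
}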
+= 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsFolded = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeIndex", wireType) + } + m.TypeIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TypeIndex |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Attributes = append(m.Attributes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthPprofextended + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthPprofextended + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Attributes) == 0 { + m.Attributes = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Attributes = append(m.Attributes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipPprofextended(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPprofextended + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Line) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Line: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Line: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FunctionIndex", wireType) + } + m.FunctionIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FunctionIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
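The fieldNum/wireType split at the top of each Unmarshal recovers the two halves of a field key: the key is itself a varint holding (field_number << 3) | wire_type. A sketch:

package main

import "fmt"

func splitTag(wire uint64) (fieldNum int32, wireType int) {
	return int32(wire >> 3), int(wire & 0x7)
}

func main() {
	// Tag 0x22 = field 4, wire type 2 (length-delimited).
	f, w := splitTag(0x22)
	fmt.Println(f, w) // 4 2
}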
case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType) + } + m.Line = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Line |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Column", wireType) + } + m.Column = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Column |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipPprofextended(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPprofextended + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Function) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Function: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Function: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + m.Name = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Name |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SystemName", wireType) + } + m.SystemName = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SystemName |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Filename", wireType) + } + m.Filename = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Filename |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartLine", wireType) + } + m.StartLine = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPprofextended + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
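Note that Function.Name, SystemName and Filename above decode as int64 varints rather than strings: as in classic pprof, the extended format stores indices into a profile-wide string table. A lookup sketch with hypothetical types (only the indexing convention is taken from the format):

package main

import "fmt"

type function struct {
	Name, Filename int64 // indices into the profile's string table
}

func lookup(stringTable []string, idx int64) string {
	if idx < 0 || int(idx) >= len(stringTable) {
		return "" // index 0 is conventionally the empty string
	}
	return stringTable[idx]
}

func main() {
	table := []string{"", "main.work", "main.go"}
	fn := function{Name: 1, Filename: 2}
	fmt.Println(lookup(table, fn.Name), lookup(table, fn.Filename)) // main.work main.go
}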
b := dAtA[iNdEx] + iNdEx++ + m.StartLine |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipPprofextended(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPprofextended + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPprofextended(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPprofextended + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPprofextended + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPprofextended + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthPprofextended + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupPprofextended + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthPprofextended + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthPprofextended = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPprofextended = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupPprofextended = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental/profiles.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental/profiles.pb.go new file mode 100644 index 00000000000..6e4662c248b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental/profiles.pb.go @@ -0,0 +1,1482 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/profiles/v1experimental/profiles.proto + +package v1experimental + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + + v11 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" + v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
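skipPprofextended above is what keeps the decoder forward-compatible: an unknown field number falls into the default branch, and the field is skipped using its wire type alone. A condensed sketch of the per-wire-type skipping (group wire types 3 and 4 omitted for brevity):

package main

import (
	"errors"
	"fmt"
)

func skipField(buf []byte, wireType int) (int, error) {
	switch wireType {
	case 0: // varint: scan to the first byte with the high bit clear
		for i, b := range buf {
			if b < 0x80 {
				return i + 1, nil
			}
		}
	case 1: // fixed64
		if len(buf) >= 8 {
			return 8, nil
		}
	case 2: // length-delimited: a varint length, then that many bytes
		length, i, ok := 0, 0, false
		for shift := uint(0); i < len(buf); shift += 7 {
			b := buf[i]
			i++
			length |= int(b&0x7F) << shift
			if b < 0x80 {
				ok = true
				break
			}
		}
		if ok && i+length <= len(buf) {
			return i + length, nil
		}
	case 5: // fixed32
		if len(buf) >= 4 {
			return 4, nil
		}
	}
	return 0, errors.New("truncated input or unsupported wire type")
}

func main() {
	// Skip a length-delimited field whose payload is "abc".
	n, err := skipField([]byte{0x03, 'a', 'b', 'c'}, 2)
	fmt.Println(n, err) // 4 <nil>
}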
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// ProfilesData represents the profiles data that can be stored in persistent storage, +// OR can be embedded by other protocols that transfer OTLP profiles data but do not +// implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type ProfilesData struct { + // An array of ResourceProfiles. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + ResourceProfiles []*ResourceProfiles `protobuf:"bytes,1,rep,name=resource_profiles,json=resourceProfiles,proto3" json:"resource_profiles,omitempty"` +} + +func (m *ProfilesData) Reset() { *m = ProfilesData{} } +func (m *ProfilesData) String() string { return proto.CompactTextString(m) } +func (*ProfilesData) ProtoMessage() {} +func (*ProfilesData) Descriptor() ([]byte, []int) { + return fileDescriptor_394731f2296acea3, []int{0} +} +func (m *ProfilesData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProfilesData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ProfilesData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ProfilesData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProfilesData.Merge(m, src) +} +func (m *ProfilesData) XXX_Size() int { + return m.Size() +} +func (m *ProfilesData) XXX_DiscardUnknown() { + xxx_messageInfo_ProfilesData.DiscardUnknown(m) +} + +var xxx_messageInfo_ProfilesData proto.InternalMessageInfo + +func (m *ProfilesData) GetResourceProfiles() []*ResourceProfiles { + if m != nil { + return m.ResourceProfiles + } + return nil +} + +// A collection of ScopeProfiles from a Resource. +type ResourceProfiles struct { + // The resource for the profiles in this message. + // If this field is not set then no resource info is known. + Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"` + // A list of ScopeProfiles that originate from a resource. + ScopeProfiles []*ScopeProfiles `protobuf:"bytes,2,rep,name=scope_profiles,json=scopeProfiles,proto3" json:"scope_profiles,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the resource data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_profiles" field which have their own schema_url field. 
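The Marshal/Unmarshal pair generated for ProfilesData supports a plain binary round trip. A usage sketch, assuming the generated package is imported under a local alias (the internal import path is shown only for illustration; it is not intended for use outside the collector module, and the URL is made up):

package main

import (
	"fmt"

	otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental"
)

func main() {
	in := otlpprofiles.ProfilesData{
		ResourceProfiles: []*otlpprofiles.ResourceProfiles{
			{SchemaUrl: "https://example.test/schema"}, // hypothetical URL
		},
	}
	raw, err := in.Marshal()
	if err != nil {
		panic(err)
	}
	var out otlpprofiles.ProfilesData
	if err := out.Unmarshal(raw); err != nil {
		panic(err)
	}
	fmt.Println(out.ResourceProfiles[0].SchemaUrl)
}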
+ SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` +} + +func (m *ResourceProfiles) Reset() { *m = ResourceProfiles{} } +func (m *ResourceProfiles) String() string { return proto.CompactTextString(m) } +func (*ResourceProfiles) ProtoMessage() {} +func (*ResourceProfiles) Descriptor() ([]byte, []int) { + return fileDescriptor_394731f2296acea3, []int{1} +} +func (m *ResourceProfiles) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceProfiles) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceProfiles.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceProfiles) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceProfiles.Merge(m, src) +} +func (m *ResourceProfiles) XXX_Size() int { + return m.Size() +} +func (m *ResourceProfiles) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceProfiles.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceProfiles proto.InternalMessageInfo + +func (m *ResourceProfiles) GetResource() v1.Resource { + if m != nil { + return m.Resource + } + return v1.Resource{} +} + +func (m *ResourceProfiles) GetScopeProfiles() []*ScopeProfiles { + if m != nil { + return m.ScopeProfiles + } + return nil +} + +func (m *ResourceProfiles) GetSchemaUrl() string { + if m != nil { + return m.SchemaUrl + } + return "" +} + +// A collection of ProfileContainers produced by an InstrumentationScope. +type ScopeProfiles struct { + // The instrumentation scope information for the profiles in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope"` + // A list of ProfileContainers that originate from an instrumentation scope. + Profiles []*ProfileContainer `protobuf:"bytes,2,rep,name=profiles,proto3" json:"profiles,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the metric data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all profiles in the "profiles" field. 
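The Get* accessors above follow gogoproto's nil-safe convention: every getter checks the receiver and falls back to the field's zero value, so chained access over optional submessages cannot panic. The pattern in miniature, with stand-in types:

package main

import "fmt"

type scopeProfiles struct {
	schemaURL string
}

// GetSchemaURL is nil-safe: a nil receiver yields the zero value, so callers
// can chain getters without checking each link for nil first.
func (m *scopeProfiles) GetSchemaURL() string {
	if m != nil {
		return m.schemaURL
	}
	return ""
}

func main() {
	var m *scopeProfiles // nil on purpose
	fmt.Printf("%q\n", m.GetSchemaURL()) // "" rather than a panic
}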
+ SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` +} + +func (m *ScopeProfiles) Reset() { *m = ScopeProfiles{} } +func (m *ScopeProfiles) String() string { return proto.CompactTextString(m) } +func (*ScopeProfiles) ProtoMessage() {} +func (*ScopeProfiles) Descriptor() ([]byte, []int) { + return fileDescriptor_394731f2296acea3, []int{2} +} +func (m *ScopeProfiles) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScopeProfiles) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ScopeProfiles.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ScopeProfiles) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScopeProfiles.Merge(m, src) +} +func (m *ScopeProfiles) XXX_Size() int { + return m.Size() +} +func (m *ScopeProfiles) XXX_DiscardUnknown() { + xxx_messageInfo_ScopeProfiles.DiscardUnknown(m) +} + +var xxx_messageInfo_ScopeProfiles proto.InternalMessageInfo + +func (m *ScopeProfiles) GetScope() v11.InstrumentationScope { + if m != nil { + return m.Scope + } + return v11.InstrumentationScope{} +} + +func (m *ScopeProfiles) GetProfiles() []*ProfileContainer { + if m != nil { + return m.Profiles + } + return nil +} + +func (m *ScopeProfiles) GetSchemaUrl() string { + if m != nil { + return m.SchemaUrl + } + return "" +} + +// A ProfileContainer represents a single profile. It wraps pprof profile with OpenTelemetry specific metadata. +type ProfileContainer struct { + // A globally unique identifier for a profile. The ID is a 16-byte array. An ID with + // all zeroes is considered invalid. + // + // This field is required. + ProfileId []byte `protobuf:"bytes,1,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` + // start_time_unix_nano is the start time of the profile. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // end_time_unix_nano is the end time of the profile. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + EndTimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=end_time_unix_nano,json=endTimeUnixNano,proto3" json:"end_time_unix_nano,omitempty"` + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "abc.com/myattribute": true + // "abc.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). 
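The comments above state several invariants in prose: profile_id is a required 16-byte value that must not be all zeroes, and end_time_unix_nano is expected to be >= start_time_unix_nano. A receiver-side validation sketch of exactly those rules (hypothetical helper, not part of the generated API):

package main

import (
	"bytes"
	"errors"
	"fmt"
)

func validateContainer(profileID []byte, startNano, endNano uint64) error {
	if len(profileID) != 16 {
		return errors.New("profile_id must be a 16-byte array")
	}
	if bytes.Equal(profileID, make([]byte, 16)) {
		return errors.New("all-zero profile_id is invalid")
	}
	if endNano < startNano {
		return errors.New("end_time_unix_nano must be >= start_time_unix_nano")
	}
	return nil
}

func main() {
	id := make([]byte, 16)
	fmt.Println(validateContainer(id, 1, 2)) // all-zero profile_id is invalid
	id[0] = 1
	fmt.Println(validateContainer(id, 1, 2)) // <nil>
}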
+ Attributes []v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes"` + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // Specifies format of the original payload. Common values are defined in semantic conventions. [required if original_payload is present] + OriginalPayloadFormat string `protobuf:"bytes,6,opt,name=original_payload_format,json=originalPayloadFormat,proto3" json:"original_payload_format,omitempty"` + // Original payload can be stored in this field. This can be useful for users who want to get the original payload. + // Formats such as JFR are highly extensible and can contain more information than what is defined in this spec. + // Inclusion of original payload should be configurable by the user. Default behavior should be to not include the original payload. + // If the original payload is in pprof format, it SHOULD not be included in this field. + // The field is optional, however if it is present `profile` MUST be present and contain the same profiling information. + OriginalPayload []byte `protobuf:"bytes,7,opt,name=original_payload,json=originalPayload,proto3" json:"original_payload,omitempty"` + // This is a reference to a pprof profile. Required, even when original_payload is present. + Profile Profile `protobuf:"bytes,8,opt,name=profile,proto3" json:"profile"` +} + +func (m *ProfileContainer) Reset() { *m = ProfileContainer{} } +func (m *ProfileContainer) String() string { return proto.CompactTextString(m) } +func (*ProfileContainer) ProtoMessage() {} +func (*ProfileContainer) Descriptor() ([]byte, []int) { + return fileDescriptor_394731f2296acea3, []int{3} +} +func (m *ProfileContainer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProfileContainer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ProfileContainer.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ProfileContainer) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProfileContainer.Merge(m, src) +} +func (m *ProfileContainer) XXX_Size() int { + return m.Size() +} +func (m *ProfileContainer) XXX_DiscardUnknown() { + xxx_messageInfo_ProfileContainer.DiscardUnknown(m) +} + +var xxx_messageInfo_ProfileContainer proto.InternalMessageInfo + +func (m *ProfileContainer) GetProfileId() []byte { + if m != nil { + return m.ProfileId + } + return nil +} + +func (m *ProfileContainer) GetStartTimeUnixNano() uint64 { + if m != nil { + return m.StartTimeUnixNano + } + return 0 +} + +func (m *ProfileContainer) GetEndTimeUnixNano() uint64 { + if m != nil { + return m.EndTimeUnixNano + } + return 0 +} + +func (m *ProfileContainer) GetAttributes() []v11.KeyValue { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *ProfileContainer) GetDroppedAttributesCount() uint32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +func (m *ProfileContainer) GetOriginalPayloadFormat() string { + if m != nil { + return m.OriginalPayloadFormat + } + return "" +} + +func (m *ProfileContainer) GetOriginalPayload() []byte { + 
if m != nil { + return m.OriginalPayload + } + return nil +} + +func (m *ProfileContainer) GetProfile() Profile { + if m != nil { + return m.Profile + } + return Profile{} +} + +func init() { + proto.RegisterType((*ProfilesData)(nil), "opentelemetry.proto.profiles.v1experimental.ProfilesData") + proto.RegisterType((*ResourceProfiles)(nil), "opentelemetry.proto.profiles.v1experimental.ResourceProfiles") + proto.RegisterType((*ScopeProfiles)(nil), "opentelemetry.proto.profiles.v1experimental.ScopeProfiles") + proto.RegisterType((*ProfileContainer)(nil), "opentelemetry.proto.profiles.v1experimental.ProfileContainer") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/profiles/v1experimental/profiles.proto", fileDescriptor_394731f2296acea3) +} + +var fileDescriptor_394731f2296acea3 = []byte{ + // 652 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x51, 0x6b, 0xdb, 0x3a, + 0x14, 0xc7, 0xe3, 0xa6, 0x4d, 0x52, 0xb5, 0xb9, 0x4d, 0x45, 0xef, 0xbd, 0xa6, 0x70, 0x73, 0x43, + 0x5e, 0x96, 0xae, 0x60, 0x93, 0x76, 0x8c, 0x51, 0x18, 0x63, 0xed, 0x36, 0xe8, 0xca, 0xd6, 0xe0, + 0xb5, 0x85, 0xed, 0xc5, 0xa8, 0xf1, 0x69, 0xa6, 0x61, 0x4b, 0x46, 0x96, 0x43, 0xba, 0x4f, 0xb1, + 0xcf, 0xb1, 0x4f, 0xd2, 0xc7, 0xee, 0x6d, 0x6c, 0x30, 0x46, 0xfb, 0xb2, 0x7e, 0x8b, 0x61, 0x59, + 0xf6, 0x12, 0x93, 0x51, 0xb2, 0x17, 0x23, 0x9f, 0xf3, 0x3f, 0xbf, 0xa3, 0xff, 0x91, 0x10, 0xda, + 0xe1, 0x21, 0x30, 0x09, 0x3e, 0x04, 0x20, 0xc5, 0xb9, 0x1d, 0x0a, 0x2e, 0x79, 0xf2, 0x3d, 0xa3, + 0x3e, 0x44, 0xf6, 0xb0, 0x0b, 0xa3, 0x10, 0x04, 0x0d, 0x80, 0x49, 0xe2, 0xe7, 0x71, 0x4b, 0xc9, + 0xf0, 0xe6, 0x44, 0x6d, 0x1a, 0xb4, 0x72, 0xcd, 0x64, 0xed, 0xfa, 0xda, 0x80, 0x0f, 0x78, 0x8a, + 0x4f, 0x56, 0xa9, 0x7a, 0xfd, 0xee, 0xb4, 0xf6, 0x7d, 0x1e, 0x04, 0x9c, 0xd9, 0xc3, 0xae, 0x5e, + 0x69, 0xad, 0x35, 0x4d, 0x2b, 0x20, 0xe2, 0xb1, 0xe8, 0x43, 0xa2, 0xce, 0xd6, 0x5a, 0xff, 0x68, + 0x26, 0x6b, 0x49, 0x02, 0x46, 0x12, 0x98, 0x07, 0x5e, 0x0a, 0x68, 0xbf, 0x47, 0xcb, 0x3d, 0x2d, + 0x7f, 0x42, 0x24, 0xc1, 0xef, 0xd0, 0x6a, 0xd6, 0xc2, 0xcd, 0x38, 0xa6, 0xd1, 0x2a, 0x77, 0x96, + 0xb6, 0x1e, 0x5a, 0x33, 0xcc, 0xc2, 0x72, 0x34, 0x25, 0xa3, 0x3b, 0x0d, 0x51, 0x88, 0xb4, 0x6f, + 0x0c, 0xd4, 0x28, 0xca, 0xf0, 0x01, 0xaa, 0x65, 0x42, 0xd3, 0x68, 0x19, 0x9d, 0xa5, 0xad, 0x8d, + 0xa9, 0x7d, 0xf3, 0x41, 0x0c, 0xbb, 0x79, 0xaf, 0xdd, 0xf9, 0x8b, 0x6f, 0xff, 0x97, 0x9c, 0x1c, + 0x80, 0x09, 0xfa, 0x2b, 0xea, 0xf3, 0x70, 0xcc, 0xca, 0x9c, 0xb2, 0xb2, 0x33, 0x93, 0x95, 0x57, + 0x09, 0x22, 0xf7, 0x51, 0x8f, 0xc6, 0x7f, 0xf1, 0x7f, 0x08, 0x45, 0xfd, 0xb7, 0x10, 0x10, 0x37, + 0x16, 0xbe, 0x59, 0x6e, 0x19, 0x9d, 0x45, 0x67, 0x31, 0x8d, 0x1c, 0x0b, 0xff, 0x79, 0xa5, 0xf6, + 0xa3, 0xda, 0xb8, 0xa9, 0xb6, 0xbf, 0x18, 0xa8, 0x3e, 0xc1, 0xc1, 0x87, 0x68, 0x41, 0x91, 0xb4, + 0xcb, 0xed, 0xa9, 0x5b, 0xd2, 0x97, 0x63, 0xd8, 0xb5, 0xf6, 0x59, 0x24, 0x45, 0xac, 0x76, 0x24, + 0x29, 0x67, 0x8a, 0xa5, 0xfd, 0xa6, 0x1c, 0xfc, 0x1a, 0xd5, 0x0a, 0x36, 0x67, 0x3b, 0x31, 0xbd, + 0xb3, 0x3d, 0xce, 0x24, 0xa1, 0x0c, 0x84, 0x93, 0xe3, 0x6e, 0x31, 0xd9, 0xfe, 0x54, 0x46, 0x8d, + 0x62, 0x75, 0x52, 0xa3, 0xeb, 0x5d, 0xea, 0x29, 0x93, 0xcb, 0xce, 0xa2, 0x8e, 0xec, 0x7b, 0xd8, + 0x46, 0x6b, 0x91, 0x24, 0x42, 0xba, 0x92, 0x06, 0xe0, 0xc6, 0x8c, 0x8e, 0x5c, 0x46, 0x18, 0x37, + 0xe7, 0x5a, 0x46, 0xa7, 0xe2, 0xac, 0xaa, 0xdc, 0x11, 0x0d, 0xe0, 0x98, 0xd1, 0xd1, 0x4b, 0xc2, + 0x38, 0xde, 0x44, 0x18, 0x98, 0x57, 0x94, 0x97, 0x95, 0x7c, 0x05, 0x98, 0x37, 0x21, 0x7e, 0x81, + 0x10, 0x91, 0x52, 0xd0, 0xd3, 0x58, 0x42, 0x64, 
0xce, 0xab, 0x69, 0xdc, 0xb9, 0x65, 0xc2, 0x07, + 0x70, 0x7e, 0x42, 0xfc, 0x38, 0x9b, 0xea, 0x18, 0x00, 0x3f, 0x40, 0xa6, 0x27, 0x78, 0x18, 0x82, + 0xe7, 0xfe, 0x8a, 0xba, 0x7d, 0x1e, 0x33, 0x69, 0x2e, 0xb4, 0x8c, 0x4e, 0xdd, 0xf9, 0x47, 0xe7, + 0x1f, 0xe7, 0xe9, 0xbd, 0x24, 0x8b, 0xef, 0xa3, 0x7f, 0xb9, 0xa0, 0x03, 0xca, 0x88, 0xef, 0x86, + 0xe4, 0xdc, 0xe7, 0xc4, 0x73, 0xcf, 0xb8, 0x08, 0x88, 0x34, 0x2b, 0x6a, 0x8c, 0x7f, 0x67, 0xe9, + 0x5e, 0x9a, 0x7d, 0xa6, 0x92, 0x78, 0x03, 0x35, 0x8a, 0x75, 0x66, 0x55, 0xcd, 0x70, 0xa5, 0x50, + 0x80, 0x8f, 0x50, 0x55, 0x8f, 0xd5, 0xac, 0xa9, 0xab, 0x74, 0xef, 0x4f, 0x8e, 0x5d, 0xbb, 0xce, + 0x50, 0xbb, 0x5f, 0x8d, 0x8b, 0xab, 0xa6, 0x71, 0x79, 0xd5, 0x34, 0xbe, 0x5f, 0x35, 0x8d, 0x0f, + 0xd7, 0xcd, 0xd2, 0xe5, 0x75, 0xb3, 0xf4, 0xf9, 0xba, 0x59, 0x42, 0x16, 0xe5, 0xb3, 0x74, 0xd8, + 0xad, 0x67, 0x77, 0xbe, 0x97, 0xc8, 0x7a, 0xc6, 0x1b, 0x67, 0x50, 0x04, 0xd0, 0xe4, 0x45, 0xf4, + 0x7d, 0xe8, 0x4b, 0x2e, 0xec, 0xd0, 0x23, 0x92, 0xd8, 0x94, 0x49, 0x10, 0x8c, 0xf8, 0xb6, 0xfa, + 0x53, 0x1d, 0x06, 0xc0, 0x7e, 0xf7, 0xb8, 0x7d, 0x9c, 0xdb, 0x3c, 0x0c, 0x81, 0x1d, 0xe5, 0x44, + 0xd5, 0x2b, 0x33, 0x17, 0x59, 0x27, 0xdd, 0xa7, 0x63, 0xea, 0xd3, 0x8a, 0xe2, 0x6d, 0xff, 0x0c, + 0x00, 0x00, 0xff, 0xff, 0xe1, 0x89, 0x49, 0xa4, 0x1b, 0x06, 0x00, 0x00, +} + +func (m *ProfilesData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProfilesData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProfilesData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourceProfiles) > 0 { + for iNdEx := len(m.ResourceProfiles) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceProfiles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResourceProfiles) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceProfiles) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceProfiles) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.SchemaUrl) > 0 { + i -= len(m.SchemaUrl) + copy(dAtA[i:], m.SchemaUrl) + i = encodeVarintProfiles(dAtA, i, uint64(len(m.SchemaUrl))) + i-- + dAtA[i] = 0x1a + } + if len(m.ScopeProfiles) > 0 { + for iNdEx := len(m.ScopeProfiles) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ScopeProfiles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ScopeProfiles) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopeProfiles) 
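MarshalToSizedBuffer above fills the buffer back to front: a submessage's bytes go in first, then its varint length, then its one-byte tag, so no payload ever needs shifting to make room for a length prefix. A sketch of the two helpers this relies on, sized the same way as sovProfiles and encodeVarintProfiles, plus a tiny end-to-end use (the field number and payload are made up):

package main

import (
	"fmt"
	"math/bits"
)

// sov reports the encoded width of v as a varint: at least one significant
// bit, seven payload bits per byte.
func sov(v uint64) int { return (bits.Len64(v|1) + 6) / 7 }

// putVarintBackwards steps back by the encoded width, then writes the bytes
// forwards again; it returns the new front of the used region.
func putVarintBackwards(buf []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		buf[offset] = byte(v&0x7F | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = byte(v)
	return base
}

func main() {
	payload := []byte("example") // made-up field payload
	buf := make([]byte, 16)
	i := len(buf)

	i -= len(payload) // 1. payload goes in last-to-first
	copy(buf[i:], payload)
	i = putVarintBackwards(buf, i, uint64(len(payload))) // 2. then its length
	i--
	buf[i] = 0x1a // 3. then the tag: field 3, wire type 2

	fmt.Printf("%x\n", buf[i:]) // 1a076578616d706c65
}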
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScopeProfiles) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.SchemaUrl) > 0 { + i -= len(m.SchemaUrl) + copy(dAtA[i:], m.SchemaUrl) + i = encodeVarintProfiles(dAtA, i, uint64(len(m.SchemaUrl))) + i-- + dAtA[i] = 0x1a + } + if len(m.Profiles) > 0 { + for iNdEx := len(m.Profiles) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Profiles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ProfileContainer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProfileContainer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProfileContainer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Profile.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + if len(m.OriginalPayload) > 0 { + i -= len(m.OriginalPayload) + copy(dAtA[i:], m.OriginalPayload) + i = encodeVarintProfiles(dAtA, i, uint64(len(m.OriginalPayload))) + i-- + dAtA[i] = 0x3a + } + if len(m.OriginalPayloadFormat) > 0 { + i -= len(m.OriginalPayloadFormat) + copy(dAtA[i:], m.OriginalPayloadFormat) + i = encodeVarintProfiles(dAtA, i, uint64(len(m.OriginalPayloadFormat))) + i-- + dAtA[i] = 0x32 + } + if m.DroppedAttributesCount != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.DroppedAttributesCount)) + i-- + dAtA[i] = 0x28 + } + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.EndTimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.EndTimeUnixNano)) + i-- + dAtA[i] = 0x19 + } + if m.StartTimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) + i-- + dAtA[i] = 0x11 + } + if len(m.ProfileId) > 0 { + i -= len(m.ProfileId) + copy(dAtA[i:], m.ProfileId) + i = encodeVarintProfiles(dAtA, i, uint64(len(m.ProfileId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintProfiles(dAtA []byte, offset int, v uint64) int { + offset -= sovProfiles(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ProfilesData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResourceProfiles) > 0 { + for _, e := range m.ResourceProfiles { + l = e.Size() + n += 1 + l + sovProfiles(uint64(l)) + } + } + return n +} + +func (m *ResourceProfiles) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Resource.Size() + n += 1 + l + sovProfiles(uint64(l)) + if 
len(m.ScopeProfiles) > 0 { + for _, e := range m.ScopeProfiles { + l = e.Size() + n += 1 + l + sovProfiles(uint64(l)) + } + } + l = len(m.SchemaUrl) + if l > 0 { + n += 1 + l + sovProfiles(uint64(l)) + } + return n +} + +func (m *ScopeProfiles) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Scope.Size() + n += 1 + l + sovProfiles(uint64(l)) + if len(m.Profiles) > 0 { + for _, e := range m.Profiles { + l = e.Size() + n += 1 + l + sovProfiles(uint64(l)) + } + } + l = len(m.SchemaUrl) + if l > 0 { + n += 1 + l + sovProfiles(uint64(l)) + } + return n +} + +func (m *ProfileContainer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ProfileId) + if l > 0 { + n += 1 + l + sovProfiles(uint64(l)) + } + if m.StartTimeUnixNano != 0 { + n += 9 + } + if m.EndTimeUnixNano != 0 { + n += 9 + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovProfiles(uint64(l)) + } + } + if m.DroppedAttributesCount != 0 { + n += 1 + sovProfiles(uint64(m.DroppedAttributesCount)) + } + l = len(m.OriginalPayloadFormat) + if l > 0 { + n += 1 + l + sovProfiles(uint64(l)) + } + l = len(m.OriginalPayload) + if l > 0 { + n += 1 + l + sovProfiles(uint64(l)) + } + l = m.Profile.Size() + n += 1 + l + sovProfiles(uint64(l)) + return n +} + +func sovProfiles(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozProfiles(x uint64) (n int) { + return sovProfiles(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ProfilesData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProfilesData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProfilesData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceProfiles", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceProfiles = append(m.ResourceProfiles, &ResourceProfiles{}) + if err := m.ResourceProfiles[len(m.ResourceProfiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProfiles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProfiles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceProfiles) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
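sozProfiles above sizes sint-encoded fields through the zigzag mapping (x << 1) ^ (x >> 63), which folds small negative numbers onto small unsigned ones so they stay short on the wire. The mapping and its inverse:

package main

import "fmt"

func zigzag(x int64) uint64   { return uint64((x << 1) ^ (x >> 63)) }
func unzigzag(u uint64) int64 { return int64(u>>1) ^ -int64(u&1) }

func main() {
	for _, x := range []int64{0, -1, 1, -2, 2} {
		fmt.Println(x, "->", zigzag(x)) // 0 1 2 3 4, in that order
	}
	fmt.Println(unzigzag(zigzag(-42))) // -42
}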
ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceProfiles: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceProfiles: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScopeProfiles", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ScopeProfiles = append(m.ScopeProfiles, &ScopeProfiles{}) + if err := m.ScopeProfiles[len(m.ScopeProfiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SchemaUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProfiles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProfiles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScopeProfiles) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScopeProfiles: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: ScopeProfiles: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Profiles", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Profiles = append(m.Profiles, &ProfileContainer{}) + if err := m.Profiles[len(m.Profiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SchemaUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProfiles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProfiles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProfileContainer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProfileContainer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProfileContainer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProfileId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProfileId = append(m.ProfileId[:0], dAtA[iNdEx:postIndex]...) + if m.ProfileId == nil { + m.ProfileId = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType) + } + m.StartTimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field EndTimeUnixNano", wireType) + } + m.EndTimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.EndTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v11.KeyValue{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) + } + m.DroppedAttributesCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DroppedAttributesCount |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayloadFormat", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OriginalPayloadFormat = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayload", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OriginalPayload = append(m.OriginalPayload[:0], 
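StartTimeUnixNano and EndTimeUnixNano above use wire type 1 (fixed64): always eight little-endian bytes, read directly with encoding/binary instead of a varint loop, which keeps timestamps constant-width on the wire. A decoding sketch (the date is arbitrary):

package main

import (
	"encoding/binary"
	"fmt"
	"time"
)

func main() {
	ns := uint64(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC).UnixNano())

	var wire [8]byte
	binary.LittleEndian.PutUint64(wire[:], ns) // what the marshaler emits

	decoded := binary.LittleEndian.Uint64(wire[:]) // what Unmarshal reads back
	fmt.Println(time.Unix(0, int64(decoded)).UTC()) // 2024-01-01 00:00:00 +0000 UTC
}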
dAtA[iNdEx:postIndex]...) + if m.OriginalPayload == nil { + m.OriginalPayload = []byte{} + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Profile", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Profile.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProfiles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProfiles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipProfiles(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProfiles + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProfiles + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProfiles + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthProfiles + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupProfiles + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthProfiles + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthProfiles = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowProfiles = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupProfiles = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_profiles.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_profiles.go new file mode 100644 index 00000000000..564c8945862 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_profiles.go @@ -0,0 +1,46 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/pdata/internal" + +import ( + otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1experimental" + otlpprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental" +) + +type Profiles struct { + orig *otlpcollectorprofile.ExportProfilesServiceRequest + state 
*State +} + +func GetOrigProfiles(ms Profiles) *otlpcollectorprofile.ExportProfilesServiceRequest { + return ms.orig +} + +func GetProfilesState(ms Profiles) *State { + return ms.state +} + +func SetProfilesState(ms Profiles, state State) { + *ms.state = state +} + +func NewProfiles(orig *otlpcollectorprofile.ExportProfilesServiceRequest, state *State) Profiles { + return Profiles{orig: orig, state: state} +} + +// ProfilesToProto internal helper to convert Profiles to protobuf representation. +func ProfilesToProto(l Profiles) otlpprofile.ProfilesData { + return otlpprofile.ProfilesData{ + ResourceProfiles: l.orig.ResourceProfiles, + } +} + +// ProfilesFromProto internal helper to convert protobuf representation to Profiles. +// This function set exclusive state assuming that it's called only once per Profiles. +func ProfilesFromProto(orig otlpprofile.ProfilesData) Profiles { + state := StateMutable + return NewProfiles(&otlpcollectorprofile.ExportProfilesServiceRequest{ + ResourceProfiles: orig.ResourceProfiles, + }, &state) +} diff --git a/vendor/go.opentelemetry.io/collector/processor/processor.go b/vendor/go.opentelemetry.io/collector/processor/processor.go index 98feba69a57..4618fa006fe 100644 --- a/vendor/go.opentelemetry.io/collector/processor/processor.go +++ b/vendor/go.opentelemetry.io/collector/processor/processor.go @@ -37,7 +37,12 @@ type Logs interface { } // CreateSettings is passed to Create* functions in Factory. -type CreateSettings struct { +// +// Deprecated: [v0.103.0] Use processor.Settings instead. +type CreateSettings = Settings + +// Settings is passed to Create* functions in Factory. +type Settings struct { // ID returns the ID of the component that will be created. ID component.ID @@ -57,7 +62,7 @@ type Factory interface { // CreateTracesProcessor creates a TracesProcessor based on this config. // If the processor type does not support tracing or if the config is not valid, // an error will be returned instead. - CreateTracesProcessor(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Traces) (Traces, error) + CreateTracesProcessor(ctx context.Context, set Settings, cfg component.Config, nextConsumer consumer.Traces) (Traces, error) // TracesProcessorStability gets the stability level of the TracesProcessor. TracesProcessorStability() component.StabilityLevel @@ -65,7 +70,7 @@ type Factory interface { // CreateMetricsProcessor creates a MetricsProcessor based on this config. // If the processor type does not support metrics or if the config is not valid, // an error will be returned instead. - CreateMetricsProcessor(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Metrics) (Metrics, error) + CreateMetricsProcessor(ctx context.Context, set Settings, cfg component.Config, nextConsumer consumer.Metrics) (Metrics, error) // MetricsProcessorStability gets the stability level of the MetricsProcessor. MetricsProcessorStability() component.StabilityLevel @@ -73,7 +78,7 @@ type Factory interface { // CreateLogsProcessor creates a LogsProcessor based on the config. // If the processor type does not support logs or if the config is not valid, // an error will be returned instead. - CreateLogsProcessor(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Logs) (Logs, error) + CreateLogsProcessor(ctx context.Context, set Settings, cfg component.Config, nextConsumer consumer.Logs) (Logs, error) // LogsProcessorStability gets the stability level of the LogsProcessor. 
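The CreateSettings change above relies on a Go type alias, not a new type: both names denote the identical type, so existing factories keep compiling while the old name carries the deprecation notice. The pattern in isolation, with stand-in fields:

package main

import "fmt"

type settings struct { // stand-in for processor.Settings
	ID string
}

// Deprecated: use settings instead. An alias, so both names are one type.
type createSettings = settings

func create(set settings) string { return set.ID }

func main() {
	old := createSettings{ID: "batch"} // the old name still compiles...
	fmt.Println(create(old))           // ...and passes where settings is wanted
}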
LogsProcessorStability() component.StabilityLevel @@ -97,12 +102,12 @@ func (f factoryOptionFunc) applyProcessorFactoryOption(o *factory) { } // CreateTracesFunc is the equivalent of Factory.CreateTraces(). -type CreateTracesFunc func(context.Context, CreateSettings, component.Config, consumer.Traces) (Traces, error) +type CreateTracesFunc func(context.Context, Settings, component.Config, consumer.Traces) (Traces, error) // CreateTracesProcessor implements Factory.CreateTracesProcessor(). func (f CreateTracesFunc) CreateTracesProcessor( ctx context.Context, - set CreateSettings, + set Settings, cfg component.Config, nextConsumer consumer.Traces) (Traces, error) { if f == nil { @@ -112,12 +117,12 @@ func (f CreateTracesFunc) CreateTracesProcessor( } // CreateMetricsFunc is the equivalent of Factory.CreateMetrics(). -type CreateMetricsFunc func(context.Context, CreateSettings, component.Config, consumer.Metrics) (Metrics, error) +type CreateMetricsFunc func(context.Context, Settings, component.Config, consumer.Metrics) (Metrics, error) // CreateMetricsProcessor implements Factory.CreateMetricsProcessor(). func (f CreateMetricsFunc) CreateMetricsProcessor( ctx context.Context, - set CreateSettings, + set Settings, cfg component.Config, nextConsumer consumer.Metrics, ) (Metrics, error) { @@ -128,12 +133,12 @@ func (f CreateMetricsFunc) CreateMetricsProcessor( } // CreateLogsFunc is the equivalent of Factory.CreateLogs(). -type CreateLogsFunc func(context.Context, CreateSettings, component.Config, consumer.Logs) (Logs, error) +type CreateLogsFunc func(context.Context, Settings, component.Config, consumer.Logs) (Logs, error) // CreateLogsProcessor implements Factory.CreateLogsProcessor(). func (f CreateLogsFunc) CreateLogsProcessor( ctx context.Context, - set CreateSettings, + set Settings, cfg component.Config, nextConsumer consumer.Logs, ) (Logs, error) { @@ -233,7 +238,7 @@ func NewBuilder(cfgs map[component.ID]component.Config, factories map[component. } // CreateTraces creates a Traces processor based on the settings and config. -func (b *Builder) CreateTraces(ctx context.Context, set CreateSettings, next consumer.Traces) (Traces, error) { +func (b *Builder) CreateTraces(ctx context.Context, set Settings, next consumer.Traces) (Traces, error) { if next == nil { return nil, errNilNextConsumer } @@ -252,7 +257,7 @@ func (b *Builder) CreateTraces(ctx context.Context, set CreateSettings, next con } // CreateMetrics creates a Metrics processor based on the settings and config. -func (b *Builder) CreateMetrics(ctx context.Context, set CreateSettings, next consumer.Metrics) (Metrics, error) { +func (b *Builder) CreateMetrics(ctx context.Context, set Settings, next consumer.Metrics) (Metrics, error) { if next == nil { return nil, errNilNextConsumer } @@ -271,7 +276,7 @@ func (b *Builder) CreateMetrics(ctx context.Context, set CreateSettings, next co } // CreateLogs creates a Logs processor based on the settings and config. 
-func (b *Builder) CreateLogs(ctx context.Context, set CreateSettings, next consumer.Logs) (Logs, error) { +func (b *Builder) CreateLogs(ctx context.Context, set Settings, next consumer.Logs) (Logs, error) { if next == nil { return nil, errNilNextConsumer } diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/documentation.md b/vendor/go.opentelemetry.io/collector/processor/processorhelper/documentation.md index 073fc9a7219..bdd45b6ff18 100644 --- a/vendor/go.opentelemetry.io/collector/processor/processorhelper/documentation.md +++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/documentation.md @@ -54,6 +54,30 @@ Number of spans that were dropped. | ---- | ----------- | ---------- | --------- | | 1 | Sum | Int | true | +### processor_inserted_log_records + +Number of log records that were inserted. + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | + +### processor_inserted_metric_points + +Number of metric points that were inserted. + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | + +### processor_inserted_spans + +Number of spans that were inserted. + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | + ### processor_refused_log_records Number of log records that were rejected by the next component in the pipeline. diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/internal/metadata/generated_telemetry.go b/vendor/go.opentelemetry.io/collector/processor/processorhelper/internal/metadata/generated_telemetry.go index 568b689edd5..df28bde4d58 100644 --- a/vendor/go.opentelemetry.io/collector/processor/processorhelper/internal/metadata/generated_telemetry.go +++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/internal/metadata/generated_telemetry.go @@ -24,12 +24,16 @@ func Tracer(settings component.TelemetrySettings) trace.Tracer { // TelemetryBuilder provides an interface for components to report telemetry // as defined in metadata and user config. 
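Because CreateSettings is now a type alias for Settings, downstream factories keep compiling unchanged; only the spelling in signatures needs to move. A hedged migration sketch (the pass-through processor and package name are hypothetical, not part of this patch):

```go
package exampleprocessor // hypothetical package, shown only to illustrate the rename

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/pdata/ptrace"
	"go.opentelemetry.io/collector/processor"
	"go.opentelemetry.io/collector/processor/processorhelper"
)

// createTraces uses the new name; `set processor.CreateSettings` would still
// compile, since the alias makes both spellings the same type.
func createTraces(
	ctx context.Context,
	set processor.Settings, // was processor.CreateSettings before v0.103.0
	cfg component.Config,
	next consumer.Traces,
) (processor.Traces, error) {
	return processorhelper.NewTracesProcessor(ctx, set, cfg, next,
		func(_ context.Context, td ptrace.Traces) (ptrace.Traces, error) {
			return td, nil // pass-through; real processing would go here
		})
}
```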
type TelemetryBuilder struct { + meter metric.Meter ProcessorAcceptedLogRecords metric.Int64Counter ProcessorAcceptedMetricPoints metric.Int64Counter ProcessorAcceptedSpans metric.Int64Counter ProcessorDroppedLogRecords metric.Int64Counter ProcessorDroppedMetricPoints metric.Int64Counter ProcessorDroppedSpans metric.Int64Counter + ProcessorInsertedLogRecords metric.Int64Counter + ProcessorInsertedMetricPoints metric.Int64Counter + ProcessorInsertedSpans metric.Int64Counter ProcessorRefusedLogRecords metric.Int64Counter ProcessorRefusedMetricPoints metric.Int64Counter ProcessorRefusedSpans metric.Int64Counter @@ -53,64 +57,79 @@ func NewTelemetryBuilder(settings component.TelemetrySettings, options ...teleme for _, op := range options { op(&builder) } - var ( - err, errs error - meter metric.Meter - ) + var err, errs error if builder.level >= configtelemetry.LevelBasic { - meter = Meter(settings) + builder.meter = Meter(settings) } else { - meter = noop.Meter{} + builder.meter = noop.Meter{} } - builder.ProcessorAcceptedLogRecords, err = meter.Int64Counter( + builder.ProcessorAcceptedLogRecords, err = builder.meter.Int64Counter( "processor_accepted_log_records", metric.WithDescription("Number of log records successfully pushed into the next component in the pipeline."), metric.WithUnit("1"), ) errs = errors.Join(errs, err) - builder.ProcessorAcceptedMetricPoints, err = meter.Int64Counter( + builder.ProcessorAcceptedMetricPoints, err = builder.meter.Int64Counter( "processor_accepted_metric_points", metric.WithDescription("Number of metric points successfully pushed into the next component in the pipeline."), metric.WithUnit("1"), ) errs = errors.Join(errs, err) - builder.ProcessorAcceptedSpans, err = meter.Int64Counter( + builder.ProcessorAcceptedSpans, err = builder.meter.Int64Counter( "processor_accepted_spans", metric.WithDescription("Number of spans successfully pushed into the next component in the pipeline."), metric.WithUnit("1"), ) errs = errors.Join(errs, err) - builder.ProcessorDroppedLogRecords, err = meter.Int64Counter( + builder.ProcessorDroppedLogRecords, err = builder.meter.Int64Counter( "processor_dropped_log_records", metric.WithDescription("Number of log records that were dropped."), metric.WithUnit("1"), ) errs = errors.Join(errs, err) - builder.ProcessorDroppedMetricPoints, err = meter.Int64Counter( + builder.ProcessorDroppedMetricPoints, err = builder.meter.Int64Counter( "processor_dropped_metric_points", metric.WithDescription("Number of metric points that were dropped."), metric.WithUnit("1"), ) errs = errors.Join(errs, err) - builder.ProcessorDroppedSpans, err = meter.Int64Counter( + builder.ProcessorDroppedSpans, err = builder.meter.Int64Counter( "processor_dropped_spans", metric.WithDescription("Number of spans that were dropped."), metric.WithUnit("1"), ) errs = errors.Join(errs, err) - builder.ProcessorRefusedLogRecords, err = meter.Int64Counter( + builder.ProcessorInsertedLogRecords, err = builder.meter.Int64Counter( + "processor_inserted_log_records", + metric.WithDescription("Number of log records that were inserted."), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.ProcessorInsertedMetricPoints, err = builder.meter.Int64Counter( + "processor_inserted_metric_points", + metric.WithDescription("Number of metric points that were inserted."), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.ProcessorInsertedSpans, err = builder.meter.Int64Counter( + "processor_inserted_spans", + metric.WithDescription("Number of spans 
that were inserted."), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.ProcessorRefusedLogRecords, err = builder.meter.Int64Counter( "processor_refused_log_records", metric.WithDescription("Number of log records that were rejected by the next component in the pipeline."), metric.WithUnit("1"), ) errs = errors.Join(errs, err) - builder.ProcessorRefusedMetricPoints, err = meter.Int64Counter( + builder.ProcessorRefusedMetricPoints, err = builder.meter.Int64Counter( "processor_refused_metric_points", metric.WithDescription("Number of metric points that were rejected by the next component in the pipeline."), metric.WithUnit("1"), ) errs = errors.Join(errs, err) - builder.ProcessorRefusedSpans, err = meter.Int64Counter( + builder.ProcessorRefusedSpans, err = builder.meter.Int64Counter( "processor_refused_spans", metric.WithDescription("Number of spans that were rejected by the next component in the pipeline."), metric.WithUnit("1"), diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/logs.go b/vendor/go.opentelemetry.io/collector/processor/processorhelper/logs.go index ade2f45a385..d593a5f74a5 100644 --- a/vendor/go.opentelemetry.io/collector/processor/processorhelper/logs.go +++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/logs.go @@ -28,7 +28,7 @@ type logProcessor struct { // NewLogsProcessor creates a processor.Logs that ensure context propagation and the right tags are set. func NewLogsProcessor( _ context.Context, - set processor.CreateSettings, + set processor.Settings, _ component.Config, nextConsumer consumer.Logs, logsFunc ProcessLogsFunc, diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/metadata.yaml b/vendor/go.opentelemetry.io/collector/processor/processorhelper/metadata.yaml index 7e0c72f5c9c..2b6ec764d1e 100644 --- a/vendor/go.opentelemetry.io/collector/processor/processorhelper/metadata.yaml +++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/metadata.yaml @@ -33,6 +33,14 @@ telemetry: value_type: int monotonic: true + processor_inserted_spans: + enabled: true + description: Number of spans that were inserted. + unit: 1 + sum: + value_type: int + monotonic: true + processor_accepted_metric_points: enabled: true description: Number of metric points successfully pushed into the next component in the pipeline. @@ -57,6 +65,14 @@ telemetry: value_type: int monotonic: true + processor_inserted_metric_points: + enabled: true + description: Number of metric points that were inserted. + unit: 1 + sum: + value_type: int + monotonic: true + processor_accepted_log_records: enabled: true description: Number of log records successfully pushed into the next component in the pipeline. @@ -79,4 +95,12 @@ telemetry: unit: 1 sum: value_type: int - monotonic: true \ No newline at end of file + monotonic: true + + processor_inserted_log_records: + enabled: true + description: Number of log records that were inserted. + unit: 1 + sum: + value_type: int + monotonic: true diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/metrics.go b/vendor/go.opentelemetry.io/collector/processor/processorhelper/metrics.go index ac3802722d0..dc3388444c6 100644 --- a/vendor/go.opentelemetry.io/collector/processor/processorhelper/metrics.go +++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/metrics.go @@ -28,7 +28,7 @@ type metricsProcessor struct { // NewMetricsProcessor creates a processor.Metrics that ensure context propagation and the right tags are set. 
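The new inserted counters follow the same shape as the existing accepted/refused/dropped ones. A hedged sketch of recording against the generated builder (metadata is internal to processorhelper, so this is illustrative only; set, ctx, and the attribute value are assumed to be in scope):

```go
// Illustrative only: the metadata package is internal to processorhelper.
tb, err := metadata.NewTelemetryBuilder(set.TelemetrySettings)
if err != nil {
	return err
}
// Count three log records synthesized ("inserted") by the processor; this
// feeds the processor_inserted_log_records sum declared in metadata.yaml.
tb.ProcessorInsertedLogRecords.Add(ctx, 3,
	metric.WithAttributes(attribute.String("processor", "exampleprocessor")))
```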
func NewMetricsProcessor( _ context.Context, - set processor.CreateSettings, + set processor.Settings, _ component.Config, nextConsumer consumer.Metrics, metricsFunc ProcessMetricsFunc, diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/obsreport.go b/vendor/go.opentelemetry.io/collector/processor/processorhelper/obsreport.go index 51aa26a9a88..2a3bd0dc754 100644 --- a/vendor/go.opentelemetry.io/collector/processor/processorhelper/obsreport.go +++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/obsreport.go @@ -9,7 +9,6 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" - "go.uber.org/zap" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" @@ -33,8 +32,6 @@ func BuildCustomMetricName(configType, metric string) string { // ObsReport is a helper to add observability to a processor. type ObsReport struct { - logger *zap.Logger - otelAttrs []attribute.KeyValue telemetryBuilder *metadata.TelemetryBuilder } @@ -42,7 +39,7 @@ type ObsReport struct { // ObsReportSettings are settings for creating an ObsReport. type ObsReportSettings struct { ProcessorID component.ID - ProcessorCreateSettings processor.CreateSettings + ProcessorCreateSettings processor.Settings } // NewObsReport creates a new Processor. @@ -56,7 +53,6 @@ func newObsReport(cfg ObsReportSettings) (*ObsReport, error) { return nil, err } return &ObsReport{ - logger: cfg.ProcessorCreateSettings.Logger, otelAttrs: []attribute.KeyValue{ attribute.String(obsmetrics.ProcessorKey, cfg.ProcessorID.String()), }, @@ -64,69 +60,88 @@ func newObsReport(cfg ObsReportSettings) (*ObsReport, error) { }, nil } -func (or *ObsReport) recordData(ctx context.Context, dataType component.DataType, accepted, refused, dropped int64) { - var acceptedCount, refusedCount, droppedCount metric.Int64Counter +func (or *ObsReport) recordData(ctx context.Context, dataType component.DataType, accepted, refused, dropped, inserted int64) { + var acceptedCount, refusedCount, droppedCount, insertedCount metric.Int64Counter switch dataType { case component.DataTypeTraces: acceptedCount = or.telemetryBuilder.ProcessorAcceptedSpans refusedCount = or.telemetryBuilder.ProcessorRefusedSpans droppedCount = or.telemetryBuilder.ProcessorDroppedSpans + insertedCount = or.telemetryBuilder.ProcessorInsertedSpans case component.DataTypeMetrics: acceptedCount = or.telemetryBuilder.ProcessorAcceptedMetricPoints refusedCount = or.telemetryBuilder.ProcessorRefusedMetricPoints droppedCount = or.telemetryBuilder.ProcessorDroppedMetricPoints + insertedCount = or.telemetryBuilder.ProcessorInsertedMetricPoints case component.DataTypeLogs: acceptedCount = or.telemetryBuilder.ProcessorAcceptedLogRecords refusedCount = or.telemetryBuilder.ProcessorRefusedLogRecords droppedCount = or.telemetryBuilder.ProcessorDroppedLogRecords + insertedCount = or.telemetryBuilder.ProcessorInsertedLogRecords } acceptedCount.Add(ctx, accepted, metric.WithAttributes(or.otelAttrs...)) refusedCount.Add(ctx, refused, metric.WithAttributes(or.otelAttrs...)) droppedCount.Add(ctx, dropped, metric.WithAttributes(or.otelAttrs...)) + insertedCount.Add(ctx, inserted, metric.WithAttributes(or.otelAttrs...)) } // TracesAccepted reports that the trace data was accepted. 
func (or *ObsReport) TracesAccepted(ctx context.Context, numSpans int) { - or.recordData(ctx, component.DataTypeTraces, int64(numSpans), int64(0), int64(0)) + or.recordData(ctx, component.DataTypeTraces, int64(numSpans), int64(0), int64(0), int64(0)) } // TracesRefused reports that the trace data was refused. func (or *ObsReport) TracesRefused(ctx context.Context, numSpans int) { - or.recordData(ctx, component.DataTypeTraces, int64(0), int64(numSpans), int64(0)) + or.recordData(ctx, component.DataTypeTraces, int64(0), int64(numSpans), int64(0), int64(0)) } // TracesDropped reports that the trace data was dropped. func (or *ObsReport) TracesDropped(ctx context.Context, numSpans int) { - or.recordData(ctx, component.DataTypeTraces, int64(0), int64(0), int64(numSpans)) + or.recordData(ctx, component.DataTypeTraces, int64(0), int64(0), int64(numSpans), int64(0)) +} + +// TracesInserted reports that the trace data was inserted. +func (or *ObsReport) TracesInserted(ctx context.Context, numSpans int) { + or.recordData(ctx, component.DataTypeTraces, int64(0), int64(0), int64(0), int64(numSpans)) } // MetricsAccepted reports that the metrics were accepted. func (or *ObsReport) MetricsAccepted(ctx context.Context, numPoints int) { - or.recordData(ctx, component.DataTypeMetrics, int64(numPoints), int64(0), int64(0)) + or.recordData(ctx, component.DataTypeMetrics, int64(numPoints), int64(0), int64(0), int64(0)) } // MetricsRefused reports that the metrics were refused. func (or *ObsReport) MetricsRefused(ctx context.Context, numPoints int) { - or.recordData(ctx, component.DataTypeMetrics, int64(0), int64(numPoints), int64(0)) + or.recordData(ctx, component.DataTypeMetrics, int64(0), int64(numPoints), int64(0), int64(0)) } // MetricsDropped reports that the metrics were dropped. func (or *ObsReport) MetricsDropped(ctx context.Context, numPoints int) { - or.recordData(ctx, component.DataTypeMetrics, int64(0), int64(0), int64(numPoints)) + or.recordData(ctx, component.DataTypeMetrics, int64(0), int64(0), int64(numPoints), int64(0)) +} + +// MetricsInserted reports that the metrics were inserted. +func (or *ObsReport) MetricsInserted(ctx context.Context, numPoints int) { + or.recordData(ctx, component.DataTypeMetrics, int64(0), int64(0), int64(0), int64(numPoints)) } // LogsAccepted reports that the logs were accepted. func (or *ObsReport) LogsAccepted(ctx context.Context, numRecords int) { - or.recordData(ctx, component.DataTypeLogs, int64(numRecords), int64(0), int64(0)) + or.recordData(ctx, component.DataTypeLogs, int64(numRecords), int64(0), int64(0), int64(0)) } // LogsRefused reports that the logs were refused. func (or *ObsReport) LogsRefused(ctx context.Context, numRecords int) { - or.recordData(ctx, component.DataTypeLogs, int64(0), int64(numRecords), int64(0)) + or.recordData(ctx, component.DataTypeLogs, int64(0), int64(numRecords), int64(0), int64(0)) } // LogsDropped reports that the logs were dropped. func (or *ObsReport) LogsDropped(ctx context.Context, numRecords int) { - or.recordData(ctx, component.DataTypeLogs, int64(0), int64(0), int64(numRecords)) + or.recordData(ctx, component.DataTypeLogs, int64(0), int64(0), int64(numRecords), int64(0)) +} + +// LogsInserted reports that the logs were inserted. 
+func (or *ObsReport) LogsInserted(ctx context.Context, numRecords int) { + or.recordData(ctx, component.DataTypeLogs, int64(0), int64(0), int64(0), int64(numRecords)) } diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/traces.go b/vendor/go.opentelemetry.io/collector/processor/processorhelper/traces.go index 578f65c7efa..9f24c2a8b0f 100644 --- a/vendor/go.opentelemetry.io/collector/processor/processorhelper/traces.go +++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/traces.go @@ -28,7 +28,7 @@ type tracesProcessor struct { // NewTracesProcessor creates a processor.Traces that ensure context propagation and the right tags are set. func NewTracesProcessor( _ context.Context, - set processor.CreateSettings, + set processor.Settings, _ component.Config, nextConsumer consumer.Traces, tracesFunc ProcessTracesFunc, diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/README.md b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/README.md index a5572071629..255395fb5d6 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/README.md +++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/README.md @@ -5,13 +5,14 @@ | ------------- |-----------| | Stability | [beta]: logs | | | [stable]: traces, metrics | -| Distributions | [core], [contrib] | +| Distributions | [core], [contrib], [k8s] | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fotlp%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fotlp) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fotlp%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fotlp) | [beta]: https://github.com/open-telemetry/opentelemetry-collector#beta [stable]: https://github.com/open-telemetry/opentelemetry-collector#stable [core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib +[k8s]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s Receives data via gRPC or HTTP using [OTLP]( diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/factory.go b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/factory.go index 4b148c40be3..22181929879 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/factory.go +++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/factory.go @@ -64,7 +64,7 @@ func createDefaultConfig() component.Config { // createTraces creates a trace receiver based on provided config. func createTraces( _ context.Context, - set receiver.CreateSettings, + set receiver.Settings, cfg component.Config, nextConsumer consumer.Traces, ) (receiver.Traces, error) { @@ -87,7 +87,7 @@ func createTraces( // createMetrics creates a metrics receiver based on provided config. 
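At the obsreport level the new counters surface as TracesInserted, MetricsInserted, and LogsInserted, symmetric with the existing Accepted/Refused/Dropped helpers shown above. A hedged usage sketch (the component ID is illustrative; set is a processor.Settings value assumed in scope):

```go
// Hypothetical processor reporting spans it synthesized itself.
obsrep, err := processorhelper.NewObsReport(processorhelper.ObsReportSettings{
	ProcessorID:             component.MustNewID("exampleprocessor"), // illustrative ID
	ProcessorCreateSettings: set,
})
if err != nil {
	return err
}
// Increments processor_inserted_spans by 2 for this component.
obsrep.TracesInserted(ctx, 2)
```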
func createMetrics( _ context.Context, - set receiver.CreateSettings, + set receiver.Settings, cfg component.Config, consumer consumer.Metrics, ) (receiver.Metrics, error) { @@ -110,7 +110,7 @@ func createMetrics( // createLog creates a log receiver based on provided config. func createLog( _ context.Context, - set receiver.CreateSettings, + set receiver.Settings, cfg component.Config, consumer consumer.Logs, ) (receiver.Logs, error) { diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/metadata.yaml b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/metadata.yaml index c3036a3341d..db490cfe6fe 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/metadata.yaml +++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/metadata.yaml @@ -5,4 +5,4 @@ status: stability: stable: [traces, metrics] beta: [logs] - distributions: [core, contrib] + distributions: [core, contrib, k8s] diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/otlp.go b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/otlp.go index 43eed9b821a..95d782b880e 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/otlp.go +++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/otlp.go @@ -40,13 +40,13 @@ type otlpReceiver struct { obsrepGRPC *receiverhelper.ObsReport obsrepHTTP *receiverhelper.ObsReport - settings *receiver.CreateSettings + settings *receiver.Settings } // newOtlpReceiver just creates the OpenTelemetry receiver services. It is the caller's // responsibility to invoke the respective Start*Reception methods as well // as the various Stop*Reception methods to end it. -func newOtlpReceiver(cfg *Config, set *receiver.CreateSettings) (*otlpReceiver, error) { +func newOtlpReceiver(cfg *Config, set *receiver.Settings) (*otlpReceiver, error) { r := &otlpReceiver{ cfg: cfg, nextTraces: nil, diff --git a/vendor/go.opentelemetry.io/collector/receiver/receiver.go b/vendor/go.opentelemetry.io/collector/receiver/receiver.go index 3b0f0ac8371..3122f6bcd6d 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/receiver.go +++ b/vendor/go.opentelemetry.io/collector/receiver/receiver.go @@ -46,7 +46,12 @@ type Logs interface { } // CreateSettings configures Receiver creators. -type CreateSettings struct { +// +// Deprecated: [v0.103.0] Use receiver.Settings instead. +type CreateSettings = Settings + +// Settings configures Receiver creators. +type Settings struct { // ID returns the ID of the component that will be created. ID component.ID @@ -66,7 +71,7 @@ type Factory interface { // CreateTracesReceiver creates a TracesReceiver based on this config. // If the receiver type does not support tracing or if the config is not valid // an error will be returned instead. `nextConsumer` is never nil. - CreateTracesReceiver(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Traces) (Traces, error) + CreateTracesReceiver(ctx context.Context, set Settings, cfg component.Config, nextConsumer consumer.Traces) (Traces, error) // TracesReceiverStability gets the stability level of the TracesReceiver. TracesReceiverStability() component.StabilityLevel @@ -74,7 +79,7 @@ type Factory interface { // CreateMetricsReceiver creates a MetricsReceiver based on this config. // If the receiver type does not support metrics or if the config is not valid // an error will be returned instead. `nextConsumer` is never nil. 
- CreateMetricsReceiver(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Metrics) (Metrics, error) + CreateMetricsReceiver(ctx context.Context, set Settings, cfg component.Config, nextConsumer consumer.Metrics) (Metrics, error) // MetricsReceiverStability gets the stability level of the MetricsReceiver. MetricsReceiverStability() component.StabilityLevel @@ -82,7 +87,7 @@ type Factory interface { // CreateLogsReceiver creates a LogsReceiver based on this config. // If the receiver type does not support the data type or if the config is not valid // an error will be returned instead. `nextConsumer` is never nil. - CreateLogsReceiver(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Logs) (Logs, error) + CreateLogsReceiver(ctx context.Context, set Settings, cfg component.Config, nextConsumer consumer.Logs) (Logs, error) // LogsReceiverStability gets the stability level of the LogsReceiver. LogsReceiverStability() component.StabilityLevel @@ -104,12 +109,12 @@ func (f factoryOptionFunc) applyOption(o *factory) { } // CreateTracesFunc is the equivalent of Factory.CreateTraces. -type CreateTracesFunc func(context.Context, CreateSettings, component.Config, consumer.Traces) (Traces, error) +type CreateTracesFunc func(context.Context, Settings, component.Config, consumer.Traces) (Traces, error) // CreateTracesReceiver implements Factory.CreateTracesReceiver(). func (f CreateTracesFunc) CreateTracesReceiver( ctx context.Context, - set CreateSettings, + set Settings, cfg component.Config, nextConsumer consumer.Traces) (Traces, error) { if f == nil { @@ -119,12 +124,12 @@ func (f CreateTracesFunc) CreateTracesReceiver( } // CreateMetricsFunc is the equivalent of Factory.CreateMetrics. -type CreateMetricsFunc func(context.Context, CreateSettings, component.Config, consumer.Metrics) (Metrics, error) +type CreateMetricsFunc func(context.Context, Settings, component.Config, consumer.Metrics) (Metrics, error) // CreateMetricsReceiver implements Factory.CreateMetricsReceiver(). func (f CreateMetricsFunc) CreateMetricsReceiver( ctx context.Context, - set CreateSettings, + set Settings, cfg component.Config, nextConsumer consumer.Metrics, ) (Metrics, error) { @@ -135,12 +140,12 @@ func (f CreateMetricsFunc) CreateMetricsReceiver( } // CreateLogsFunc is the equivalent of ReceiverFactory.CreateLogsReceiver(). -type CreateLogsFunc func(context.Context, CreateSettings, component.Config, consumer.Logs) (Logs, error) +type CreateLogsFunc func(context.Context, Settings, component.Config, consumer.Logs) (Logs, error) // CreateLogsReceiver implements Factory.CreateLogsReceiver(). func (f CreateLogsFunc) CreateLogsReceiver( ctx context.Context, - set CreateSettings, + set Settings, cfg component.Config, nextConsumer consumer.Logs, ) (Logs, error) { @@ -240,7 +245,7 @@ func NewBuilder(cfgs map[component.ID]component.Config, factories map[component. } // CreateTraces creates a Traces receiver based on the settings and config. -func (b *Builder) CreateTraces(ctx context.Context, set CreateSettings, next consumer.Traces) (Traces, error) { +func (b *Builder) CreateTraces(ctx context.Context, set Settings, next consumer.Traces) (Traces, error) { if next == nil { return nil, errNilNextConsumer } @@ -259,7 +264,7 @@ func (b *Builder) CreateTraces(ctx context.Context, set CreateSettings, next con } // CreateMetrics creates a Metrics receiver based on the settings and config. 
-func (b *Builder) CreateMetrics(ctx context.Context, set CreateSettings, next consumer.Metrics) (Metrics, error) { +func (b *Builder) CreateMetrics(ctx context.Context, set Settings, next consumer.Metrics) (Metrics, error) { if next == nil { return nil, errNilNextConsumer } @@ -278,7 +283,7 @@ func (b *Builder) CreateMetrics(ctx context.Context, set CreateSettings, next co } // CreateLogs creates a Logs receiver based on the settings and config. -func (b *Builder) CreateLogs(ctx context.Context, set CreateSettings, next consumer.Logs) (Logs, error) { +func (b *Builder) CreateLogs(ctx context.Context, set Settings, next consumer.Logs) (Logs, error) { if next == nil { return nil, errNilNextConsumer } diff --git a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/internal/metadata/generated_telemetry.go b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/internal/metadata/generated_telemetry.go index ee70b2e498f..0300e413abd 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/internal/metadata/generated_telemetry.go +++ b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/internal/metadata/generated_telemetry.go @@ -24,6 +24,7 @@ func Tracer(settings component.TelemetrySettings) trace.Tracer { // TelemetryBuilder provides an interface for components to report telemetry // as defined in metadata and user config. type TelemetryBuilder struct { + meter metric.Meter ReceiverAcceptedLogRecords metric.Int64Counter ReceiverAcceptedMetricPoints metric.Int64Counter ReceiverAcceptedSpans metric.Int64Counter @@ -50,46 +51,43 @@ func NewTelemetryBuilder(settings component.TelemetrySettings, options ...teleme for _, op := range options { op(&builder) } - var ( - err, errs error - meter metric.Meter - ) + var err, errs error if builder.level >= configtelemetry.LevelBasic { - meter = Meter(settings) + builder.meter = Meter(settings) } else { - meter = noop.Meter{} + builder.meter = noop.Meter{} } - builder.ReceiverAcceptedLogRecords, err = meter.Int64Counter( + builder.ReceiverAcceptedLogRecords, err = builder.meter.Int64Counter( "receiver_accepted_log_records", metric.WithDescription("Number of log records successfully pushed into the pipeline."), metric.WithUnit("1"), ) errs = errors.Join(errs, err) - builder.ReceiverAcceptedMetricPoints, err = meter.Int64Counter( + builder.ReceiverAcceptedMetricPoints, err = builder.meter.Int64Counter( "receiver_accepted_metric_points", metric.WithDescription("Number of metric points successfully pushed into the pipeline."), metric.WithUnit("1"), ) errs = errors.Join(errs, err) - builder.ReceiverAcceptedSpans, err = meter.Int64Counter( + builder.ReceiverAcceptedSpans, err = builder.meter.Int64Counter( "receiver_accepted_spans", metric.WithDescription("Number of spans successfully pushed into the pipeline."), metric.WithUnit("1"), ) errs = errors.Join(errs, err) - builder.ReceiverRefusedLogRecords, err = meter.Int64Counter( + builder.ReceiverRefusedLogRecords, err = builder.meter.Int64Counter( "receiver_refused_log_records", metric.WithDescription("Number of log records that could not be pushed into the pipeline."), metric.WithUnit("1"), ) errs = errors.Join(errs, err) - builder.ReceiverRefusedMetricPoints, err = meter.Int64Counter( + builder.ReceiverRefusedMetricPoints, err = builder.meter.Int64Counter( "receiver_refused_metric_points", metric.WithDescription("Number of metric points that could not be pushed into the pipeline."), metric.WithUnit("1"), ) errs = errors.Join(errs, err) - builder.ReceiverRefusedSpans, 
err = meter.Int64Counter( + builder.ReceiverRefusedSpans, err = builder.meter.Int64Counter( "receiver_refused_spans", metric.WithDescription("Number of spans that could not be pushed into the pipeline."), metric.WithUnit("1"), diff --git a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/obsreport.go b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/obsreport.go index 18ff7556454..f3904061329 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/obsreport.go +++ b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/obsreport.go @@ -12,7 +12,6 @@ import ( "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configtelemetry" @@ -28,7 +27,6 @@ type ObsReport struct { transport string longLivedCtx bool tracer trace.Tracer - logger *zap.Logger otelAttrs []attribute.KeyValue telemetryBuilder *metadata.TelemetryBuilder @@ -44,7 +42,7 @@ type ObsReportSettings struct { // eg.: a gRPC stream, for which many batches of data are received in individual // operations without a corresponding new context per operation. LongLivedCtx bool - ReceiverCreateSettings receiver.CreateSettings + ReceiverCreateSettings receiver.Settings } // NewObsReport creates a new ObsReport. @@ -63,7 +61,6 @@ func newReceiver(cfg ObsReportSettings) (*ObsReport, error) { transport: cfg.Transport, longLivedCtx: cfg.LongLivedCtx, tracer: cfg.ReceiverCreateSettings.TracerProvider.Tracer(cfg.ReceiverID.String()), - logger: cfg.ReceiverCreateSettings.Logger, otelAttrs: []attribute.KeyValue{ attribute.String(obsmetrics.ReceiverKey, cfg.ReceiverID.String()), diff --git a/vendor/go.opentelemetry.io/collector/receiver/receivertest/contract_checker.go b/vendor/go.opentelemetry.io/collector/receiver/receivertest/contract_checker.go index 6951b5f41f4..d52e1d3a60f 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/receivertest/contract_checker.go +++ b/vendor/go.opentelemetry.io/collector/receiver/receivertest/contract_checker.go @@ -113,11 +113,11 @@ func checkConsumeContractScenario(params CheckConsumeContractParams, decisionFun var err error switch params.DataType { case component.DataTypeLogs: - receiver, err = params.Factory.CreateLogsReceiver(ctx, NewNopCreateSettings(), params.Config, consumer) + receiver, err = params.Factory.CreateLogsReceiver(ctx, NewNopSettings(), params.Config, consumer) case component.DataTypeTraces: - receiver, err = params.Factory.CreateTracesReceiver(ctx, NewNopCreateSettings(), params.Config, consumer) + receiver, err = params.Factory.CreateTracesReceiver(ctx, NewNopSettings(), params.Config, consumer) case component.DataTypeMetrics: - receiver, err = params.Factory.CreateMetricsReceiver(ctx, NewNopCreateSettings(), params.Config, consumer) + receiver, err = params.Factory.CreateMetricsReceiver(ctx, NewNopSettings(), params.Config, consumer) default: require.FailNow(params.T, "must specify a valid DataType to test for") } diff --git a/vendor/go.opentelemetry.io/collector/receiver/receivertest/nop_receiver.go b/vendor/go.opentelemetry.io/collector/receiver/receivertest/nop_receiver.go index e9cec06ca1b..0b07398a9ee 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/receivertest/nop_receiver.go +++ b/vendor/go.opentelemetry.io/collector/receiver/receivertest/nop_receiver.go @@ -17,8 +17,15 @@ import ( var defaultComponentType = component.MustNewType("nop") // NewNopCreateSettings returns a new 
nop settings for Create*Receiver functions. -func NewNopCreateSettings() receiver.CreateSettings { - return receiver.CreateSettings{ +// +// Deprecated: [v0.103.0] Use receivertest.NewNopSettings instead. +func NewNopCreateSettings() receiver.Settings { + return NewNopSettings() +} + +// NewNopSettings returns a new nop settings for Create*Receiver functions. +func NewNopSettings() receiver.Settings { + return receiver.Settings{ ID: component.NewIDWithName(defaultComponentType, uuid.NewString()), TelemetrySettings: componenttest.NewNopTelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo(), @@ -56,15 +63,15 @@ func NewNopFactoryForType(dataType component.DataType) receiver.Factory { type nopConfig struct{} -func createTraces(context.Context, receiver.CreateSettings, component.Config, consumer.Traces) (receiver.Traces, error) { +func createTraces(context.Context, receiver.Settings, component.Config, consumer.Traces) (receiver.Traces, error) { return nopInstance, nil } -func createMetrics(context.Context, receiver.CreateSettings, component.Config, consumer.Metrics) (receiver.Metrics, error) { +func createMetrics(context.Context, receiver.Settings, component.Config, consumer.Metrics) (receiver.Metrics, error) { return nopInstance, nil } -func createLogs(context.Context, receiver.CreateSettings, component.Config, consumer.Logs) (receiver.Logs, error) { +func createLogs(context.Context, receiver.Settings, component.Config, consumer.Logs) (receiver.Logs, error) { return nopInstance, nil } diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/doc.go b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/doc.go new file mode 100644 index 00000000000..ad9e94a49f8 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.25.0 +// version of the OpenTelemetry semantic conventions. +package semconv // import "go.opentelemetry.io/collector/semconv/v1.25.0" diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_attribute_group.go b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_attribute_group.go new file mode 100644 index 00000000000..0d8fc75a410 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_attribute_group.go @@ -0,0 +1,4796 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +// Attributes for Events represented using Log Records. +const ( + // Identifies the class / type of event. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'browser.mouse.click', 'device.app.lifecycle' + // Note: Event names are subject to the same rules as attribute names. Notably, + // event names are namespaced to avoid collisions and provide a clean separation + // of semantics for events in separate domains like browser, mobile, and + // kubernetes. + AttributeEventName = "event.name" +) + +// The attributes described in this section are rather generic. They may be +// used in any Log Record they apply to. +const ( + // A unique identifier for the Log Record. 
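receivertest mirrors the same deprecation cycle: NewNopCreateSettings now forwards to NewNopSettings, as shown above. A hedged test sketch wiring it to the OTLP receiver factory (the test name and package are hypothetical):

```go
package otlpreceiver_test // hypothetical test file, for illustration only

import (
	"context"
	"testing"

	"go.opentelemetry.io/collector/consumer/consumertest"
	"go.opentelemetry.io/collector/receiver/otlpreceiver"
	"go.opentelemetry.io/collector/receiver/receivertest"
)

func TestCreateTracesWithNopSettings(t *testing.T) {
	f := otlpreceiver.NewFactory()
	// NewNopSettings replaces the deprecated NewNopCreateSettings.
	_, err := f.CreateTracesReceiver(context.Background(),
		receivertest.NewNopSettings(), f.CreateDefaultConfig(), consumertest.NewNop())
	if err != nil {
		t.Fatal(err)
	}
}
```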
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means, that two + // distinguishable log records MUST have different values. + // The id MAY be an Universally Unique Lexicographically Sortable Identifier + // (ULID), but other identifiers (e.g. UUID) may be used as needed. + AttributeLogRecordUID = "log.record.uid" +) + +// Describes Log attributes +const ( + // The stream associated with the log. See below for a list of well-known values. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeLogIostream = "log.iostream" +) + +const ( + // Logs from stdout stream + AttributeLogIostreamStdout = "stdout" + // Events from stderr stream + AttributeLogIostreamStderr = "stderr" +) + +// A file to which log was emitted. +const ( + // The basename of the file. + // + // Type: string + // Requirement Level: Recommended + // Stability: experimental + // Examples: 'audit.log' + AttributeLogFileName = "log.file.name" + // The basename of the file, with symlinks resolved. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'uuid.log' + AttributeLogFileNameResolved = "log.file.name_resolved" + // The full path to the file. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/var/log/mysql/audit.log' + AttributeLogFilePath = "log.file.path" + // The full path to the file, with symlinks resolved. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/var/lib/docker/uuid.log' + AttributeLogFilePathResolved = "log.file.path_resolved" +) + +// Describes Database attributes +const ( + // The name of the connection pool; unique within the instrumented application. In + // case the connection pool implementation doesn't provide a name, instrumentation + // should use a combination of server.address and server.port attributes formatted + // as server.address:server.port. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'myDataSource' + AttributePoolName = "pool.name" + // The state of a connection in the pool + // + // Type: Enum + // Requirement Level: Required + // Stability: experimental + // Examples: 'idle' + AttributeState = "state" +) + +const ( + // idle + AttributeStateIdle = "idle" + // used + AttributeStateUsed = "used" +) + +// ASP.NET Core attributes +const ( + // Rate-limiting result, shows whether the lease was acquired or contains a + // rejection reason + // + // Type: Enum + // Requirement Level: Required + // Stability: stable + // Examples: 'acquired', 'request_canceled' + AttributeAspnetcoreRateLimitingResult = "aspnetcore.rate_limiting.result" + // Full type name of the IExceptionHandler implementation that handled the + // exception. + // + // Type: string + // Requirement Level: Conditionally Required - if and only if the exception was + // handled by this handler. + // Stability: stable + // Examples: 'Contoso.MyHandler' + AttributeAspnetcoreDiagnosticsHandlerType = "aspnetcore.diagnostics.handler.type" + // Rate limiting policy name. + // + // Type: string + // Requirement Level: Conditionally Required - if the matched endpoint for the + // request had a rate-limiting policy. 
+ // Stability: stable + // Examples: 'fixed', 'sliding', 'token' + AttributeAspnetcoreRateLimitingPolicy = "aspnetcore.rate_limiting.policy" + // Flag indicating if request was handled by the application pipeline. + // + // Type: boolean + // Requirement Level: Conditionally Required - if and only if the request was not + // handled. + // Stability: stable + // Examples: True + AttributeAspnetcoreRequestIsUnhandled = "aspnetcore.request.is_unhandled" + // A value that indicates whether the matched route is a fallback route. + // + // Type: boolean + // Requirement Level: Conditionally Required - If and only if a route was + // successfully matched. + // Stability: stable + // Examples: True + AttributeAspnetcoreRoutingIsFallback = "aspnetcore.routing.is_fallback" +) + +const ( + // Lease was acquired + AttributeAspnetcoreRateLimitingResultAcquired = "acquired" + // Lease request was rejected by the endpoint limiter + AttributeAspnetcoreRateLimitingResultEndpointLimiter = "endpoint_limiter" + // Lease request was rejected by the global limiter + AttributeAspnetcoreRateLimitingResultGlobalLimiter = "global_limiter" + // Lease request was canceled + AttributeAspnetcoreRateLimitingResultRequestCanceled = "request_canceled" +) + +// SignalR attributes +const ( + // SignalR HTTP connection closure status. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'app_shutdown', 'timeout' + AttributeSignalrConnectionStatus = "signalr.connection.status" + // SignalR transport type + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'web_sockets', 'long_polling' + AttributeSignalrTransport = "signalr.transport" +) + +const ( + // The connection was closed normally + AttributeSignalrConnectionStatusNormalClosure = "normal_closure" + // The connection was closed due to a timeout + AttributeSignalrConnectionStatusTimeout = "timeout" + // The connection was closed because the app is shutting down + AttributeSignalrConnectionStatusAppShutdown = "app_shutdown" +) + +const ( + // ServerSentEvents protocol + AttributeSignalrTransportServerSentEvents = "server_sent_events" + // LongPolling protocol + AttributeSignalrTransportLongPolling = "long_polling" + // WebSockets protocol + AttributeSignalrTransportWebSockets = "web_sockets" +) + +// Describes JVM buffer metric attributes. +const ( + // Name of the buffer pool. + // + // Type: string + // Requirement Level: Recommended + // Stability: experimental + // Examples: 'mapped', 'direct' + // Note: Pool names are generally obtained via BufferPoolMXBean#getName(). + AttributeJvmBufferPoolName = "jvm.buffer.pool.name" +) + +// Describes JVM memory metric attributes. +const ( + // Name of the memory pool. + // + // Type: string + // Requirement Level: Recommended + // Stability: stable + // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' + // Note: Pool names are generally obtained via MemoryPoolMXBean#getName(). + AttributeJvmMemoryPoolName = "jvm.memory.pool.name" + // The type of memory. + // + // Type: Enum + // Requirement Level: Recommended + // Stability: stable + // Examples: 'heap', 'non_heap' + AttributeJvmMemoryType = "jvm.memory.type" +) + +const ( + // Heap memory + AttributeJvmMemoryTypeHeap = "heap" + // Non-heap memory + AttributeJvmMemoryTypeNonHeap = "non_heap" +) + +// Attributes for process CPU metrics. +const ( + // The CPU state for this data point. 
A process SHOULD be characterized either by + // data points with no state labels, or only data points with state labels. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeProcessCPUState = "process.cpu.state" +) + +const ( + // system + AttributeProcessCPUStateSystem = "system" + // user + AttributeProcessCPUStateUser = "user" + // wait + AttributeProcessCPUStateWait = "wait" +) + +// Describes System metric attributes +const ( + // The device identifier + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '(identifier)' + AttributeSystemDevice = "system.device" +) + +// Describes System CPU metric attributes +const ( + // The logical CPU number [0..n-1] + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1 + AttributeSystemCPULogicalNumber = "system.cpu.logical_number" + // The CPU state for this data point. A system's CPU SHOULD be characterized + // either by data points with no state labels, or only data points with state + // labels. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'idle', 'interrupt' + AttributeSystemCPUState = "system.cpu.state" +) + +const ( + // user + AttributeSystemCPUStateUser = "user" + // system + AttributeSystemCPUStateSystem = "system" + // nice + AttributeSystemCPUStateNice = "nice" + // idle + AttributeSystemCPUStateIdle = "idle" + // iowait + AttributeSystemCPUStateIowait = "iowait" + // interrupt + AttributeSystemCPUStateInterrupt = "interrupt" + // steal + AttributeSystemCPUStateSteal = "steal" +) + +// Describes System Memory metric attributes +const ( + // The memory state + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'free', 'cached' + AttributeSystemMemoryState = "system.memory.state" +) + +const ( + // used + AttributeSystemMemoryStateUsed = "used" + // free + AttributeSystemMemoryStateFree = "free" + // shared + AttributeSystemMemoryStateShared = "shared" + // buffers + AttributeSystemMemoryStateBuffers = "buffers" + // cached + AttributeSystemMemoryStateCached = "cached" +) + +// Describes System Memory Paging metric attributes +const ( + // The paging access direction + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'in' + AttributeSystemPagingDirection = "system.paging.direction" + // The memory paging state + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'free' + AttributeSystemPagingState = "system.paging.state" + // The memory paging type + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'minor' + AttributeSystemPagingType = "system.paging.type" +) + +const ( + // in + AttributeSystemPagingDirectionIn = "in" + // out + AttributeSystemPagingDirectionOut = "out" +) + +const ( + // used + AttributeSystemPagingStateUsed = "used" + // free + AttributeSystemPagingStateFree = "free" +) + +const ( + // major + AttributeSystemPagingTypeMajor = "major" + // minor + AttributeSystemPagingTypeMinor = "minor" +) + +// Describes Filesystem metric attributes +const ( + // The filesystem mode + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'rw, ro' + AttributeSystemFilesystemMode = "system.filesystem.mode" + // The filesystem mount path + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // 
Examples: '/mnt/data' + AttributeSystemFilesystemMountpoint = "system.filesystem.mountpoint" + // The filesystem state + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'used' + AttributeSystemFilesystemState = "system.filesystem.state" + // The filesystem type + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ext4' + AttributeSystemFilesystemType = "system.filesystem.type" +) + +const ( + // used + AttributeSystemFilesystemStateUsed = "used" + // free + AttributeSystemFilesystemStateFree = "free" + // reserved + AttributeSystemFilesystemStateReserved = "reserved" +) + +const ( + // fat32 + AttributeSystemFilesystemTypeFat32 = "fat32" + // exfat + AttributeSystemFilesystemTypeExfat = "exfat" + // ntfs + AttributeSystemFilesystemTypeNtfs = "ntfs" + // refs + AttributeSystemFilesystemTypeRefs = "refs" + // hfsplus + AttributeSystemFilesystemTypeHfsplus = "hfsplus" + // ext4 + AttributeSystemFilesystemTypeExt4 = "ext4" +) + +// Describes Network metric attributes +const ( + // A stateless protocol MUST NOT set this attribute + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'close_wait' + AttributeSystemNetworkState = "system.network.state" +) + +const ( + // close + AttributeSystemNetworkStateClose = "close" + // close_wait + AttributeSystemNetworkStateCloseWait = "close_wait" + // closing + AttributeSystemNetworkStateClosing = "closing" + // delete + AttributeSystemNetworkStateDelete = "delete" + // established + AttributeSystemNetworkStateEstablished = "established" + // fin_wait_1 + AttributeSystemNetworkStateFinWait1 = "fin_wait_1" + // fin_wait_2 + AttributeSystemNetworkStateFinWait2 = "fin_wait_2" + // last_ack + AttributeSystemNetworkStateLastAck = "last_ack" + // listen + AttributeSystemNetworkStateListen = "listen" + // syn_recv + AttributeSystemNetworkStateSynRecv = "syn_recv" + // syn_sent + AttributeSystemNetworkStateSynSent = "syn_sent" + // time_wait + AttributeSystemNetworkStateTimeWait = "time_wait" +) + +// Describes System Process metric attributes +const ( + // The process state, e.g., Linux Process State Codes + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'running' + AttributeSystemProcessStatus = "system.process.status" +) + +const ( + // running + AttributeSystemProcessStatusRunning = "running" + // sleeping + AttributeSystemProcessStatusSleeping = "sleeping" + // stopped + AttributeSystemProcessStatusStopped = "stopped" + // defunct + AttributeSystemProcessStatusDefunct = "defunct" +) + +// The Android platform on which the Android application is running. +const ( + // Uniquely identifies the framework API revision offered by a version + // (os.version) of the android operating system. More information can be found + // here. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '33', '32' + AttributeAndroidOSAPILevel = "android.os.api_level" +) + +// Attributes for AWS DynamoDB. +const ( + // The JSON-serialized value of each item in the AttributeDefinitions request + // field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AttributeAWSDynamoDBAttributeDefinitions = "aws.dynamodb.attribute_definitions" + // The value of the AttributesToGet request parameter. 
+ // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AttributeAWSDynamoDBAttributesToGet = "aws.dynamodb.attributes_to_get" + // The value of the ConsistentRead request parameter. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeAWSDynamoDBConsistentRead = "aws.dynamodb.consistent_read" + // The JSON-serialized value of each item in the ConsumedCapacity response field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { + // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }' + AttributeAWSDynamoDBConsumedCapacity = "aws.dynamodb.consumed_capacity" + // The value of the Count response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBCount = "aws.dynamodb.count" + // The value of the ExclusiveStartTableName request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Users', 'CatsTable' + AttributeAWSDynamoDBExclusiveStartTable = "aws.dynamodb.exclusive_start_table" + // The JSON-serialized value of each item in the GlobalSecondaryIndexUpdates + // request field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }' + AttributeAWSDynamoDBGlobalSecondaryIndexUpdates = "aws.dynamodb.global_secondary_index_updates" + // The JSON-serialized value of each item of the GlobalSecondaryIndexes request + // field + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": + // number, "WriteCapacityUnits": number } }' + AttributeAWSDynamoDBGlobalSecondaryIndexes = "aws.dynamodb.global_secondary_indexes" + // The value of the IndexName request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'name_to_group' + AttributeAWSDynamoDBIndexName = "aws.dynamodb.index_name" + // The JSON-serialized value of the ItemCollectionMetrics response field. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' + AttributeAWSDynamoDBItemCollectionMetrics = "aws.dynamodb.item_collection_metrics" + // The value of the Limit request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBLimit = "aws.dynamodb.limit" + // The JSON-serialized value of each item of the LocalSecondaryIndexes request + // field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }' + AttributeAWSDynamoDBLocalSecondaryIndexes = "aws.dynamodb.local_secondary_indexes" + // The value of the ProjectionExpression request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, + // ProductReviews' + AttributeAWSDynamoDBProjection = "aws.dynamodb.projection" + // The value of the ProvisionedThroughput.ReadCapacityUnits request parameter. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedReadCapacity = "aws.dynamodb.provisioned_read_capacity" + // The value of the ProvisionedThroughput.WriteCapacityUnits request parameter. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedWriteCapacity = "aws.dynamodb.provisioned_write_capacity" + // The value of the ScanIndexForward request parameter. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeAWSDynamoDBScanForward = "aws.dynamodb.scan_forward" + // The value of the ScannedCount response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 50 + AttributeAWSDynamoDBScannedCount = "aws.dynamodb.scanned_count" + // The value of the Segment request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBSegment = "aws.dynamodb.segment" + // The value of the Select request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AttributeAWSDynamoDBSelect = "aws.dynamodb.select" + // The number of items in the TableNames response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 20 + AttributeAWSDynamoDBTableCount = "aws.dynamodb.table_count" + // The keys in the RequestItems object field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AttributeAWSDynamoDBTableNames = "aws.dynamodb.table_names" + // The value of the TotalSegments request parameter. 
+ // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 100 + AttributeAWSDynamoDBTotalSegments = "aws.dynamodb.total_segments" +) + +// The web browser attributes +const ( + // Array of brand name and version separated by a space + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.brands). + AttributeBrowserBrands = "browser.brands" + // Preferred language of the user using the browser + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // navigator.language. + AttributeBrowserLanguage = "browser.language" + // A boolean that is true if the browser is running on a mobile device + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.mobile). If unavailable, this attribute SHOULD be left + // unset. + AttributeBrowserMobile = "browser.mobile" + // The platform on which the browser is running + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.platform). If unavailable, the legacy + // navigator.platform API SHOULD NOT be used instead and this attribute SHOULD be + // left unset in order for the values to be consistent. + // The list of possible values is defined in the W3C User-Agent Client Hints + // specification. Note that some (but not all) of these values can overlap with + // values in the os.type and os.name attributes. However, for consistency, the + // values in the browser.platform attribute should capture the exact value that + // the user agent provides. + AttributeBrowserPlatform = "browser.platform" +) + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // Client address - domain name if available without reverse DNS lookup; + // otherwise, IP address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the server side, and when communicating through an + // intermediary, client.address SHOULD represent the client address behind any + // intermediaries, for example proxies, if it's available. + AttributeClientAddress = "client.address" + // Client port number. 
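For orientation only (this snippet is not part of the vendored file): a minimal sketch of how an instrumentation might attach the DynamoDB attribute keys defined above to a span via the go.opentelemetry.io/otel API. The semconv import path/version, the tracer name, and the helper function are assumptions for illustration.

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"

	// Assumed import path for the vendored semconv package in this patch.
	semconv "go.opentelemetry.io/collector/semconv/v1.25.0"
)

// queryUsers is a hypothetical helper showing the attribute keys in use.
func queryUsers(ctx context.Context) {
	_, span := otel.Tracer("example/dynamodb").Start(ctx, "DynamoDB.Query")
	defer span.End()

	span.SetAttributes(
		attribute.StringSlice(semconv.AttributeAWSDynamoDBTableNames, []string{"Users"}),
		attribute.String(semconv.AttributeAWSDynamoDBSelect, "ALL_ATTRIBUTES"),
		attribute.Int(semconv.AttributeAWSDynamoDBLimit, 10),
		attribute.Bool(semconv.AttributeAWSDynamoDBConsistentRead, true),
	)
}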
+ // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 65123 + // Note: When observed from the server side, and when communicating through an + // intermediary, client.port SHOULD represent the client port behind any + // intermediaries, for example proxies, if it's available. + AttributeClientPort = "client.port" +) + +// A cloud environment (e.g. GCP, Azure, AWS). +const ( + // The cloud account ID the resource is assigned to. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '111111111111', 'opentelemetry' + AttributeCloudAccountID = "cloud.account.id" + // Cloud regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the resource + // is running. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and + // Google Cloud. + AttributeCloudAvailabilityZone = "cloud.availability_zone" + // The cloud platform in use. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The prefix of the service SHOULD match the one specified in + // cloud.provider. + AttributeCloudPlatform = "cloud.platform" + // Name of the cloud provider. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeCloudProvider = "cloud.provider" + // The geographical region the resource is running. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for example + // Alibaba Cloud regions, AWS regions, Azure regions, Google Cloud regions, or + // Tencent Cloud regions. + AttributeCloudRegion = "cloud.region" + // Cloud provider-specific native identifier of the monitored cloud resource (e.g. + // an ARN on AWS, a fully qualified resource ID on Azure, a full resource name on + // GCP) + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', '//run.googl + // eapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', '/sub + // scriptions//resourceGroups//providers/Microsoft.Web/sites + // //functions/' + // Note: On some cloud providers, it may not be possible to determine the full ID + // at startup, + // so it may be necessary to set cloud.resource_id as a span attribute instead.The + // exact value to use for cloud.resource_id depends on the cloud provider. + // The following well-known definitions MUST be used if you set this attribute and + // they apply:
+ //
+ //   - AWS Lambda: The function ARN.
+ //     Take care not to use the "invoked ARN" directly but replace any
+ //     alias suffix with the resolved function version, as the same runtime
+ //     instance may be invokable with multiple different aliases.
+ //   - GCP: The URI of the resource
+ //   - Azure: The Fully Qualified Resource ID of the invoked function,
+ //     not the function app, having the form
+ //     /subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>.
+ //     This means that a span attribute MUST be used, as an Azure function app can
+ //     host multiple functions that would usually share a TracerProvider.
+ AttributeCloudResourceID = "cloud.resource_id" +) + +const ( + // Alibaba Cloud Elastic Compute Service + AttributeCloudPlatformAlibabaCloudECS = "alibaba_cloud_ecs" + // Alibaba Cloud Function Compute + AttributeCloudPlatformAlibabaCloudFc = "alibaba_cloud_fc" + // Red Hat OpenShift on Alibaba Cloud + AttributeCloudPlatformAlibabaCloudOpenshift = "alibaba_cloud_openshift" + // AWS Elastic Compute Cloud + AttributeCloudPlatformAWSEC2 = "aws_ec2" + // AWS Elastic Container Service + AttributeCloudPlatformAWSECS = "aws_ecs" + // AWS Elastic Kubernetes Service + AttributeCloudPlatformAWSEKS = "aws_eks" + // AWS Lambda + AttributeCloudPlatformAWSLambda = "aws_lambda" + // AWS Elastic Beanstalk + AttributeCloudPlatformAWSElasticBeanstalk = "aws_elastic_beanstalk" + // AWS App Runner + AttributeCloudPlatformAWSAppRunner = "aws_app_runner" + // Red Hat OpenShift on AWS (ROSA) + AttributeCloudPlatformAWSOpenshift = "aws_openshift" + // Azure Virtual Machines + AttributeCloudPlatformAzureVM = "azure_vm" + // Azure Container Apps + AttributeCloudPlatformAzureContainerApps = "azure_container_apps" + // Azure Container Instances + AttributeCloudPlatformAzureContainerInstances = "azure_container_instances" + // Azure Kubernetes Service + AttributeCloudPlatformAzureAKS = "azure_aks" + // Azure Functions + AttributeCloudPlatformAzureFunctions = "azure_functions" + // Azure App Service + AttributeCloudPlatformAzureAppService = "azure_app_service" + // Azure Red Hat OpenShift + AttributeCloudPlatformAzureOpenshift = "azure_openshift" + // Google Bare Metal Solution (BMS) + AttributeCloudPlatformGCPBareMetalSolution = "gcp_bare_metal_solution" + // Google Cloud Compute Engine (GCE) + AttributeCloudPlatformGCPComputeEngine = "gcp_compute_engine" + // Google Cloud Run + AttributeCloudPlatformGCPCloudRun = "gcp_cloud_run" + // Google Cloud Kubernetes Engine (GKE) + AttributeCloudPlatformGCPKubernetesEngine = "gcp_kubernetes_engine" + // Google Cloud Functions (GCF) + AttributeCloudPlatformGCPCloudFunctions = "gcp_cloud_functions" + // Google Cloud App Engine (GAE) + AttributeCloudPlatformGCPAppEngine = "gcp_app_engine" + // Red Hat OpenShift on Google Cloud + AttributeCloudPlatformGCPOpenshift = "gcp_openshift" + // Red Hat OpenShift on IBM Cloud + AttributeCloudPlatformIbmCloudOpenshift = "ibm_cloud_openshift" + // Tencent Cloud Cloud Virtual Machine (CVM) + AttributeCloudPlatformTencentCloudCvm = "tencent_cloud_cvm" + // Tencent Cloud Elastic Kubernetes Service (EKS) + AttributeCloudPlatformTencentCloudEKS = "tencent_cloud_eks" + // Tencent Cloud Serverless Cloud Function (SCF) + AttributeCloudPlatformTencentCloudScf = "tencent_cloud_scf" +) + +const ( + // Alibaba Cloud + AttributeCloudProviderAlibabaCloud = "alibaba_cloud" + // Amazon Web Services + AttributeCloudProviderAWS = "aws" + // Microsoft Azure + AttributeCloudProviderAzure = "azure" + // Google Cloud Platform + AttributeCloudProviderGCP = "gcp" + // Heroku Platform as a Service + AttributeCloudProviderHeroku = "heroku" + // IBM Cloud + AttributeCloudProviderIbmCloud = "ibm_cloud" + // Tencent Cloud + AttributeCloudProviderTencentCloud = "tencent_cloud" +) + +// Attributes for CloudEvents. +const ( + // The event_id uniquely identifies the event. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + AttributeCloudeventsEventID = "cloudevents.event_id" + // The source identifies the context in which an event happened. 
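As a hedged illustration (not part of this patch): the cloud.* keys and the enum values above are typically combined into an SDK resource. The import path/version and the concrete values are assumptions.

package main

import (
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"

	// Assumed import path for the vendored semconv package in this patch.
	semconv "go.opentelemetry.io/collector/semconv/v1.25.0"
)

// newCloudResource sketches describing an AWS Lambda deployment.
func newCloudResource() *resource.Resource {
	return resource.NewWithAttributes(
		"", // schema URL omitted in this sketch
		attribute.String(semconv.AttributeCloudProvider, semconv.AttributeCloudProviderAWS),
		attribute.String(semconv.AttributeCloudPlatform, semconv.AttributeCloudPlatformAWSLambda),
		attribute.String(semconv.AttributeCloudRegion, "eu-central-1"),
	)
}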
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- + // service' + AttributeCloudeventsEventSource = "cloudevents.event_source" + // The version of the CloudEvents specification which the event uses. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1.0' + AttributeCloudeventsEventSpecVersion = "cloudevents.event_spec_version" + // The subject of the event in the context of the event producer (identified by + // source). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'mynewfile.jpg' + AttributeCloudeventsEventSubject = "cloudevents.event_subject" + // The event_type contains a value describing the type of event related to the + // originating occurrence. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' + AttributeCloudeventsEventType = "cloudevents.event_type" +) + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // The column number in code.filepath best representing the operation. It SHOULD + // point within the code unit named in code.function. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 16 + AttributeCodeColumn = "code.column" + // The source code file name that identifies the code unit as uniquely as possible + // (preferably an absolute file path). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + AttributeCodeFilepath = "code.filepath" + // The method or function name, or equivalent (usually rightmost part of the code + // unit's name). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'serveRequest' + AttributeCodeFunction = "code.function" + // The line number in code.filepath best representing the operation. It SHOULD + // point within the code unit named in code.function. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 42 + AttributeCodeLineNumber = "code.lineno" + // The "namespace" within which code.function is defined. Usually the + // qualified class or module name, such that code.namespace + some separator + + // code.function form a unique identifier for the code unit. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + AttributeCodeNamespace = "code.namespace" + // A stacktrace as a string in the natural representation for the language + // runtime. The representation is to be determined and documented by each language + // SIG. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + AttributeCodeStacktrace = "code.stacktrace" +) + +// A container instance. +const ( + // The command used to run the container (i.e. the command name). 
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol'
+ // Note: If using embedded credentials or sensitive data, it is recommended to
+ // remove them to prevent potential leakage.
+ AttributeContainerCommand = "container.command"
+ // All the command arguments (including the command/executable itself) run by the
+ // container. [2]
+ //
+ // Type: string[]
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol, --config, config.yaml'
+ AttributeContainerCommandArgs = "container.command_args"
+ // The full command run by the container as a single string representing the full
+ // command. [2]
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol --config config.yaml'
+ AttributeContainerCommandLine = "container.command_line"
+ // The CPU state for this data point.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'user', 'kernel'
+ AttributeContainerCPUState = "container.cpu.state"
+ // Container ID. Usually a UUID, as for example used to identify Docker
+ // containers. The UUID might be abbreviated.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'a3bf90e006b2'
+ AttributeContainerID = "container.id"
+ // Runtime specific image identifier. Usually a hash algorithm followed by a UUID.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples:
+ // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
+ // Note: Docker defines a sha256 of the image id; container.image.id corresponds
+ // to the Image field from the Docker container inspect API endpoint.
+ // K8S defines a link to the container registry repository with digest "imageID":
+ // "registry.azurecr.io /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e
+ // 8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625".
+ // The ID is assigned by the container runtime and can vary in different
+ // environments. Consider using oci.manifest.digest if it is important to identify
+ // the same image in different environments/runtimes.
+ AttributeContainerImageID = "container.image.id"
+ // Name of the image the container was built on.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'gcr.io/opentelemetry/operator'
+ AttributeContainerImageName = "container.image.name"
+ // Repo digests of the container image as provided by the container runtime.
+ //
+ // Type: string[]
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d7
+ // 02d249a0ccb', 'internal.registry.example.com:5000/example@sha256:b69959407d21e8
+ // a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
+ // Note: Docker and CRI report those under the RepoDigests field.
+ AttributeContainerImageRepoDigests = "container.image.repo_digests"
+ // Container image tags. An example can be found in Docker Image Inspect. Should
+ // be only the <tag> section of the full name for example from
+ // registry.example.com/my-org/my-image:<tag>.
+ //
+ // Type: string[]
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'v1.27.1', '3.5.7-0'
+ AttributeContainerImageTags = "container.image.tags"
+ // Container name used by container runtime.
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-autoconf' + AttributeContainerName = "container.name" + // The container runtime managing this container. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'docker', 'containerd', 'rkt' + AttributeContainerRuntime = "container.runtime" +) + +const ( + // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows) + AttributeContainerCPUStateUser = "user" + // When CPU is used by the system (host OS) + AttributeContainerCPUStateSystem = "system" + // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows) + AttributeContainerCPUStateKernel = "kernel" +) + +// The attributes used to describe telemetry in the context of databases. +const ( + // The consistency level of the query. Based on consistency values from CQL. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDBCassandraConsistencyLevel = "db.cassandra.consistency_level" + // The data center of the coordinating node for a query. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-west-2' + AttributeDBCassandraCoordinatorDC = "db.cassandra.coordinator.dc" + // The ID of the coordinating node for a query. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + AttributeDBCassandraCoordinatorID = "db.cassandra.coordinator.id" + // Whether or not the query is idempotent. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeDBCassandraIdempotence = "db.cassandra.idempotence" + // The fetch size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 5000 + AttributeDBCassandraPageSize = "db.cassandra.page_size" + // The number of times a query was speculatively executed. Not set or 0 if the + // query was not executed speculatively. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 2 + AttributeDBCassandraSpeculativeExecutionCount = "db.cassandra.speculative_execution_count" + // The name of the primary Cassandra table that the operation is acting upon, + // including the keyspace name (if applicable). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra rather + // than sql. It is not recommended to attempt any client-side parsing of + // db.statement just to get this property, but it should be set if it is provided + // by the library being instrumented. If the operation is acting upon an anonymous + // table, or more than one table, this value MUST NOT be set. + AttributeDBCassandraTable = "db.cassandra.table" + // Unique Cosmos client instance id. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + AttributeDBCosmosDBClientID = "db.cosmosdb.client_id" + // Cosmos client connection mode. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDBCosmosDBConnectionMode = "db.cosmosdb.connection_mode" + // Cosmos DB container name. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'anystring' + AttributeDBCosmosDBContainer = "db.cosmosdb.container" + // CosmosDB Operation Type. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDBCosmosDBOperationType = "db.cosmosdb.operation_type" + // RU consumed for that operation + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 46.18, 1.0 + AttributeDBCosmosDBRequestCharge = "db.cosmosdb.request_charge" + // Request payload size in bytes + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeDBCosmosDBRequestContentLength = "db.cosmosdb.request_content_length" + // Cosmos DB status code. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 200, 201 + AttributeDBCosmosDBStatusCode = "db.cosmosdb.status_code" + // Cosmos DB sub status code. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1000, 1002 + AttributeDBCosmosDBSubStatusCode = "db.cosmosdb.sub_status_code" + // Represents the identifier of an Elasticsearch cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' + AttributeDBElasticsearchClusterName = "db.elasticsearch.cluster.name" + // An identifier (address, unique name, or any other identifier) of the database + // instance that is executing queries or mutations on the current connection. This + // is useful in cases where the database is running in a clustered environment and + // the instrumentation is able to record the node executing the query. The client + // may obtain this value in databases like MySQL using queries like select + // @@hostname. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'mysql-e26b99z.example.com' + AttributeDBInstanceID = "db.instance.id" + // The MongoDB collection being accessed within the database stated in db.name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'customers', 'products' + AttributeDBMongoDBCollection = "db.mongodb.collection" + // The Microsoft SQL Server instance name connecting to. This name is used to + // determine the port of a named instance. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MSSQLSERVER' + // Note: If setting a db.mssql.instance_name, server.port is no longer required + // (but still recommended if non-standard). + AttributeDBMSSQLInstanceName = "db.mssql.instance_name" + // This attribute is used to report the name of the database being accessed. For + // commands that switch the database, this should be set to the target database + // (even if the command fails). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema name), the + // database name to be used is the more specific layer (e.g. Oracle schema name). + AttributeDBName = "db.name" + // The name of the operation being executed, e.g. the MongoDB command name such as + // findAndModify, or the SQL keyword. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to attempt any + // client-side parsing of db.statement just to get this property, but it should be + // set if the operation name is provided by the library being instrumented. If the + // SQL statement has an ambiguous operation, or performs more than one operation, + // this value may be omitted. + AttributeDBOperation = "db.operation" + // The index of the database being accessed as used in the SELECT command, + // provided as an integer. To be used instead of the generic db.name attribute. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 1, 15 + AttributeDBRedisDBIndex = "db.redis.database_index" + // The name of the primary table that the operation is acting upon, including the + // database name (if applicable). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of db.statement + // just to get this property, but it should be set if it is provided by the + // library being instrumented. If the operation is acting upon an anonymous table, + // or more than one table, this value MUST NOT be set. + AttributeDBSQLTable = "db.sql.table" + // The database statement being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + AttributeDBStatement = "db.statement" + // An identifier for the database management system (DBMS) product being used. See + // below for a list of well-known identifiers. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDBSystem = "db.system" + // Username for accessing the database. 
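A minimal sketch (not part of the vendored file) of a database client span using the db.* keys above; the db.system enum value referenced here is defined a little further below. The import path and all other names are assumptions.

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"

	// Assumed import path for the vendored semconv package in this patch.
	semconv "go.opentelemetry.io/collector/semconv/v1.25.0"
)

// listUsers is a hypothetical helper annotating a SELECT span.
func listUsers(ctx context.Context) {
	_, span := otel.Tracer("example/db").Start(ctx, "SELECT customers.public.users")
	defer span.End()

	span.SetAttributes(
		attribute.String(semconv.AttributeDBSystem, semconv.AttributeDBSystemPostgreSQL),
		attribute.String(semconv.AttributeDBName, "customers"),
		attribute.String(semconv.AttributeDBOperation, "SELECT"),
		attribute.String(semconv.AttributeDBSQLTable, "public.users"),
		attribute.String(semconv.AttributeDBStatement, "SELECT * FROM public.users"),
	)
}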
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'readonly_user', 'reporting_user' + AttributeDBUser = "db.user" +) + +const ( + // all + AttributeDBCassandraConsistencyLevelAll = "all" + // each_quorum + AttributeDBCassandraConsistencyLevelEachQuorum = "each_quorum" + // quorum + AttributeDBCassandraConsistencyLevelQuorum = "quorum" + // local_quorum + AttributeDBCassandraConsistencyLevelLocalQuorum = "local_quorum" + // one + AttributeDBCassandraConsistencyLevelOne = "one" + // two + AttributeDBCassandraConsistencyLevelTwo = "two" + // three + AttributeDBCassandraConsistencyLevelThree = "three" + // local_one + AttributeDBCassandraConsistencyLevelLocalOne = "local_one" + // any + AttributeDBCassandraConsistencyLevelAny = "any" + // serial + AttributeDBCassandraConsistencyLevelSerial = "serial" + // local_serial + AttributeDBCassandraConsistencyLevelLocalSerial = "local_serial" +) + +const ( + // Gateway (HTTP) connections mode + AttributeDBCosmosDBConnectionModeGateway = "gateway" + // Direct connection + AttributeDBCosmosDBConnectionModeDirect = "direct" +) + +const ( + // invalid + AttributeDBCosmosDBOperationTypeInvalid = "Invalid" + // create + AttributeDBCosmosDBOperationTypeCreate = "Create" + // patch + AttributeDBCosmosDBOperationTypePatch = "Patch" + // read + AttributeDBCosmosDBOperationTypeRead = "Read" + // read_feed + AttributeDBCosmosDBOperationTypeReadFeed = "ReadFeed" + // delete + AttributeDBCosmosDBOperationTypeDelete = "Delete" + // replace + AttributeDBCosmosDBOperationTypeReplace = "Replace" + // execute + AttributeDBCosmosDBOperationTypeExecute = "Execute" + // query + AttributeDBCosmosDBOperationTypeQuery = "Query" + // head + AttributeDBCosmosDBOperationTypeHead = "Head" + // head_feed + AttributeDBCosmosDBOperationTypeHeadFeed = "HeadFeed" + // upsert + AttributeDBCosmosDBOperationTypeUpsert = "Upsert" + // batch + AttributeDBCosmosDBOperationTypeBatch = "Batch" + // query_plan + AttributeDBCosmosDBOperationTypeQueryPlan = "QueryPlan" + // execute_javascript + AttributeDBCosmosDBOperationTypeExecuteJavascript = "ExecuteJavaScript" +) + +const ( + // Some other SQL database. Fallback only. 
See notes + AttributeDBSystemOtherSQL = "other_sql" + // Microsoft SQL Server + AttributeDBSystemMSSQL = "mssql" + // Microsoft SQL Server Compact + AttributeDBSystemMssqlcompact = "mssqlcompact" + // MySQL + AttributeDBSystemMySQL = "mysql" + // Oracle Database + AttributeDBSystemOracle = "oracle" + // IBM DB2 + AttributeDBSystemDB2 = "db2" + // PostgreSQL + AttributeDBSystemPostgreSQL = "postgresql" + // Amazon Redshift + AttributeDBSystemRedshift = "redshift" + // Apache Hive + AttributeDBSystemHive = "hive" + // Cloudscape + AttributeDBSystemCloudscape = "cloudscape" + // HyperSQL DataBase + AttributeDBSystemHSQLDB = "hsqldb" + // Progress Database + AttributeDBSystemProgress = "progress" + // SAP MaxDB + AttributeDBSystemMaxDB = "maxdb" + // SAP HANA + AttributeDBSystemHanaDB = "hanadb" + // Ingres + AttributeDBSystemIngres = "ingres" + // FirstSQL + AttributeDBSystemFirstSQL = "firstsql" + // EnterpriseDB + AttributeDBSystemEDB = "edb" + // InterSystems Caché + AttributeDBSystemCache = "cache" + // Adabas (Adaptable Database System) + AttributeDBSystemAdabas = "adabas" + // Firebird + AttributeDBSystemFirebird = "firebird" + // Apache Derby + AttributeDBSystemDerby = "derby" + // FileMaker + AttributeDBSystemFilemaker = "filemaker" + // Informix + AttributeDBSystemInformix = "informix" + // InstantDB + AttributeDBSystemInstantDB = "instantdb" + // InterBase + AttributeDBSystemInterbase = "interbase" + // MariaDB + AttributeDBSystemMariaDB = "mariadb" + // Netezza + AttributeDBSystemNetezza = "netezza" + // Pervasive PSQL + AttributeDBSystemPervasive = "pervasive" + // PointBase + AttributeDBSystemPointbase = "pointbase" + // SQLite + AttributeDBSystemSqlite = "sqlite" + // Sybase + AttributeDBSystemSybase = "sybase" + // Teradata + AttributeDBSystemTeradata = "teradata" + // Vertica + AttributeDBSystemVertica = "vertica" + // H2 + AttributeDBSystemH2 = "h2" + // ColdFusion IMQ + AttributeDBSystemColdfusion = "coldfusion" + // Apache Cassandra + AttributeDBSystemCassandra = "cassandra" + // Apache HBase + AttributeDBSystemHBase = "hbase" + // MongoDB + AttributeDBSystemMongoDB = "mongodb" + // Redis + AttributeDBSystemRedis = "redis" + // Couchbase + AttributeDBSystemCouchbase = "couchbase" + // CouchDB + AttributeDBSystemCouchDB = "couchdb" + // Microsoft Azure Cosmos DB + AttributeDBSystemCosmosDB = "cosmosdb" + // Amazon DynamoDB + AttributeDBSystemDynamoDB = "dynamodb" + // Neo4j + AttributeDBSystemNeo4j = "neo4j" + // Apache Geode + AttributeDBSystemGeode = "geode" + // Elasticsearch + AttributeDBSystemElasticsearch = "elasticsearch" + // Memcached + AttributeDBSystemMemcached = "memcached" + // CockroachDB + AttributeDBSystemCockroachdb = "cockroachdb" + // OpenSearch + AttributeDBSystemOpensearch = "opensearch" + // ClickHouse + AttributeDBSystemClickhouse = "clickhouse" + // Cloud Spanner + AttributeDBSystemSpanner = "spanner" + // Trino + AttributeDBSystemTrino = "trino" +) + +// Attributes for software deployments. +const ( + // Name of the deployment environment (aka deployment tier). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'staging', 'production' + // Note: deployment.environment does not affect the uniqueness constraints defined + // through + // the service.namespace, service.name and service.instance.id resource + // attributes. + // This implies that resources carrying the following attribute combinations MUST + // be + // considered to be identifying the same service:
+ //
+ //   - service.name=frontend, deployment.environment=production
+ //   - service.name=frontend, deployment.environment=staging.
+ AttributeDeploymentEnvironment = "deployment.environment" +) + +// "Describes deprecated db attributes." +const ( + // Deprecated, use server.address, server.port attributes instead. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: "Replaced by `server.address` and `server.port`." + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + AttributeDBConnectionString = "db.connection_string" + // Deprecated, use db.instance.id instead. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `db.instance.id`. + // Examples: 'instance-0000000001' + AttributeDBElasticsearchNodeName = "db.elasticsearch.node.name" + // Removed, no replacement at this time. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Removed as not used. + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + AttributeDBJDBCDriverClassname = "db.jdbc.driver_classname" +) + +// Describes deprecated HTTP attributes. +const ( + // Deprecated, use network.protocol.name instead. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `network.protocol.name`. + AttributeHTTPFlavor = "http.flavor" + // Deprecated, use http.request.method instead. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `http.request.method`. + // Examples: 'GET', 'POST', 'HEAD' + AttributeHTTPMethod = "http.method" + // Deprecated, use http.request.header.content-length instead. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `http.request.header.content-length`. + // Examples: 3495 + AttributeHTTPRequestContentLength = "http.request_content_length" + // Deprecated, use http.response.header.content-length instead. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `http.response.header.content-length`. + // Examples: 3495 + AttributeHTTPResponseContentLength = "http.response_content_length" + // Deprecated, use url.scheme instead. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `url.scheme` instead. + // Examples: 'http', 'https' + AttributeHTTPScheme = "http.scheme" + // Deprecated, use http.response.status_code instead. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `http.response.status_code`. + // Examples: 200 + AttributeHTTPStatusCode = "http.status_code" + // Deprecated, use url.path and url.query instead. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Split to `url.path` and `url.query. + // Examples: '/search?q=OpenTelemetry#SemConv' + AttributeHTTPTarget = "http.target" + // Deprecated, use url.full instead. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `url.full`. + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + AttributeHTTPURL = "http.url" + // Deprecated, use user_agent.original instead. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `user_agent.original`. 
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU iPhone + // OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1' + AttributeHTTPUserAgent = "http.user_agent" +) + +const ( + // HTTP/1.0 + AttributeHTTPFlavorHTTP10 = "1.0" + // HTTP/1.1 + AttributeHTTPFlavorHTTP11 = "1.1" + // HTTP/2 + AttributeHTTPFlavorHTTP20 = "2.0" + // HTTP/3 + AttributeHTTPFlavorHTTP30 = "3.0" + // SPDY protocol + AttributeHTTPFlavorSPDY = "SPDY" + // QUIC protocol + AttributeHTTPFlavorQUIC = "QUIC" +) + +// Describes deprecated messaging attributes. +const ( + // "Deprecated, use messaging.destination.partition.id instead." + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `messaging.destination.partition.id`. + // Examples: 2 + AttributeMessagingKafkaDestinationPartition = "messaging.kafka.destination.partition" +) + +// These attributes may be used for any network related operation. +const ( + // Deprecated, use server.address. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `server.address`. + // Examples: 'example.com' + AttributeNetHostName = "net.host.name" + // Deprecated, use server.port. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `server.port`. + // Examples: 8080 + AttributeNetHostPort = "net.host.port" + // Deprecated, use server.address on client spans and client.address on server + // spans. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `server.address` on client spans and `client.address` + // on server spans. + // Examples: 'example.com' + AttributeNetPeerName = "net.peer.name" + // Deprecated, use server.port on client spans and client.port on server spans. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `server.port` on client spans and `client.port` on + // server spans. + // Examples: 8080 + AttributeNetPeerPort = "net.peer.port" + // Deprecated, use network.protocol.name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `network.protocol.name`. + // Examples: 'amqp', 'http', 'mqtt' + AttributeNetProtocolName = "net.protocol.name" + // Deprecated, use network.protocol.version. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `network.protocol.version`. + // Examples: '3.1.1' + AttributeNetProtocolVersion = "net.protocol.version" + // Deprecated, use network.transport and network.type. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Split to `network.transport` and `network.type`. + AttributeNetSockFamily = "net.sock.family" + // Deprecated, use network.local.address. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `network.local.address`. + // Examples: '/var/my.sock' + AttributeNetSockHostAddr = "net.sock.host.addr" + // Deprecated, use network.local.port. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `network.local.port`. + // Examples: 8080 + AttributeNetSockHostPort = "net.sock.host.port" + // Deprecated, use network.peer.address. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `network.peer.address`. + // Examples: '192.168.0.1' + AttributeNetSockPeerAddr = "net.sock.peer.addr" + // Deprecated, no replacement at this time. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Removed. + // Examples: '/var/my.sock' + AttributeNetSockPeerName = "net.sock.peer.name" + // Deprecated, use network.peer.port. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `network.peer.port`. + // Examples: 65531 + AttributeNetSockPeerPort = "net.sock.peer.port" + // Deprecated, use network.transport. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `network.transport`. + AttributeNetTransport = "net.transport" +) + +const ( + // IPv4 address + AttributeNetSockFamilyInet = "inet" + // IPv6 address + AttributeNetSockFamilyInet6 = "inet6" + // Unix domain socket path + AttributeNetSockFamilyUnix = "unix" +) + +const ( + // ip_tcp + AttributeNetTransportTCP = "ip_tcp" + // ip_udp + AttributeNetTransportUDP = "ip_udp" + // Named or anonymous pipe + AttributeNetTransportPipe = "pipe" + // In-process communication + AttributeNetTransportInProc = "inproc" + // Something else (non IP-based) + AttributeNetTransportOther = "other" +) + +// Deprecated system attributes. +const ( + // Deprecated, use system.process.status instead. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `system.process.status`. + // Examples: 'running' + AttributeSystemProcessesStatus = "system.processes.status" +) + +const ( + // running + AttributeSystemProcessesStatusRunning = "running" + // sleeping + AttributeSystemProcessesStatusSleeping = "sleeping" + // stopped + AttributeSystemProcessesStatusStopped = "stopped" + // defunct + AttributeSystemProcessesStatusDefunct = "defunct" +) + +// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // Destination address - domain name if available without reverse DNS lookup; + // otherwise, IP address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the source side, and when communicating through an + // intermediary, destination.address SHOULD represent the destination address + // behind any intermediaries, for example proxies, if it's available. + AttributeDestinationAddress = "destination.address" + // Destination port number + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3389, 2888 + AttributeDestinationPort = "destination.port" +) + +// Describes device attributes. 
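Since every deprecated net.* key above names its replacement, a consumer migrating old telemetry can rewrite keys mechanically. A hypothetical sketch (none of these helpers exist in the package; the mapping mirrors the Deprecated: notes above):

package main

// Assumed import path for the vendored semconv package in this patch.
import semconv "go.opentelemetry.io/collector/semconv/v1.25.0"

// deprecatedNetAttrs maps deprecated keys to the replacements named in
// their Deprecated: notes.
var deprecatedNetAttrs = map[string]string{
	semconv.AttributeNetHostName:        "server.address",
	semconv.AttributeNetHostPort:        "server.port",
	semconv.AttributeNetProtocolName:    "network.protocol.name",
	semconv.AttributeNetProtocolVersion: "network.protocol.version",
	semconv.AttributeNetSockPeerAddr:    "network.peer.address",
	semconv.AttributeNetSockPeerPort:    "network.peer.port",
}

// renameDeprecatedNetAttrs rewrites deprecated keys in place.
func renameDeprecatedNetAttrs(attrs map[string]string) {
	for old, replacement := range deprecatedNetAttrs {
		if v, ok := attrs[old]; ok {
			attrs[replacement] = v
			delete(attrs, old)
		}
	}
}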
+const ( + // A unique identifier representing the device + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values outlined + // below. This value is not an advertising identifier and MUST NOT be used as + // such. On iOS (Swift or Objective-C), this value MUST be equal to the vendor + // identifier. On Android (Java or Kotlin), this value MUST be equal to the + // Firebase Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found here on best + // practices and exact implementation details. Caution should be taken when + // storing personal data or anything which can identify a user. GDPR and data + // protection laws may apply, ensure you do your own due diligence. + AttributeDeviceID = "device.id" + // The name of the device manufacturer + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via Build. iOS apps SHOULD hardcode + // the value Apple. + AttributeDeviceManufacturer = "device.manufacturer" + // The model identifier for the device + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine-readable version of the + // model identifier rather than the market or consumer-friendly name of the + // device. + AttributeDeviceModelIdentifier = "device.model.identifier" + // The marketing name for the device model + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human-readable version of the + // device model rather than a machine-readable alternative. + AttributeDeviceModelName = "device.model.name" +) + +// These attributes may be used for any disk related operation. +const ( + // The disk IO operation direction. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'read' + AttributeDiskIoDirection = "disk.io.direction" +) + +const ( + // read + AttributeDiskIoDirectionRead = "read" + // write + AttributeDiskIoDirectionWrite = "write" +) + +// The shared attributes used to report a DNS query. +const ( + // The name being queried. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'www.example.com', 'opentelemetry.io' + // Note: If the name field contains non-printable characters (below 32 or above + // 126), those characters should be represented as escaped base 10 integers + // (\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns, and + // line feeds should be converted to \t, \r, and \n respectively. + AttributeDNSQuestionName = "dns.question.name" +) + +// Attributes for operations with an authenticated and/or authorized enduser. +const ( + // Username or client_id extracted from the access token or Authorization header + // in the inbound request from outside the system. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'username' + AttributeEnduserID = "enduser.id" + // Actual/assumed role the client is making the request under extracted from token + // or application security context. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'admin' + AttributeEnduserRole = "enduser.role" + // Scopes or granted authorities the client currently possesses extracted from + // token or application security context. The value would come from the scope + // associated with an OAuth 2.0 Access Token or an attribute value in a SAML 2.0 + // Assertion. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'read:message, write:files' + AttributeEnduserScope = "enduser.scope" +) + +// The shared attributes used to report an error. +const ( + // Describes a class of error the operation ended with. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'timeout', 'java.net.UnknownHostException', + // 'server_certificate_invalid', '500' + // Note: The error.type SHOULD be predictable and SHOULD have low cardinality. + // Instrumentations SHOULD document the list of errors they report.The cardinality + // of error.type within one instrumentation library SHOULD be low. + // Telemetry consumers that aggregate data from multiple instrumentation libraries + // and applications + // should be prepared for error.type to have high cardinality at query time when + // no + // additional filters are applied.If the operation has completed successfully, + // instrumentations SHOULD NOT set error.type.If a specific domain defines its own + // set of error identifiers (such as HTTP or gRPC status codes), + // it's RECOMMENDED to:
+ //
+ //   - Use a domain-specific attribute
+ //   - Set error.type to capture all errors, regardless of whether they are
+ //     defined within the domain-specific set or not.
+ AttributeErrorType = "error.type" +) + +const ( + // A fallback error value to be used when the instrumentation doesn't define a custom value + AttributeErrorTypeOther = "_OTHER" +) + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // SHOULD be set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // Requirement Level: Optional + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's __exit__ method in Python) but will + // usually be caught at the point of recording the exception in most languages.It + // is usually not possible to determine at the point where an exception is thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending the span, + // as done in the example for recording span exceptions.It follows that an + // exception may still escape the scope of the span + // even if the exception.escaped attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + AttributeExceptionEscaped = "exception.escaped" + // The exception message. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" + AttributeExceptionMessage = "exception.message" + // A stacktrace as a string in the natural representation for the language + // runtime. The representation is to be determined and documented by each language + // SIG. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + AttributeExceptionStacktrace = "exception.stacktrace" + // The type of the exception (its fully-qualified class name, if applicable). The + // dynamic type of the exception should be preferred over the static type in + // languages that support it. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + AttributeExceptionType = "exception.type" +) + +// FaaS attributes +const ( + // A boolean that is true if the serverless function is executed for the first + // time (aka cold-start). + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeFaaSColdstart = "faas.coldstart" + // A string containing the schedule period as Cron Expression. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0/5 * * * ? *' + AttributeFaaSCron = "faas.cron" + // The name of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos + // DB to the database name. 
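A hedged sketch (not part of this patch) of how error.type and the exception.* keys above typically appear together when a span fails; all names besides the semconv constants are illustrative.

package main

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/codes"

	// Assumed import path for the vendored semconv package in this patch.
	semconv "go.opentelemetry.io/collector/semconv/v1.25.0"
)

// work is a placeholder operation that always fails.
func work(ctx context.Context) error { return errors.New("boom") }

func handle(ctx context.Context) {
	ctx, span := otel.Tracer("example/app").Start(ctx, "operation")
	defer span.End()

	if err := work(ctx); err != nil {
		// RecordError emits an exception event carrying exception.type and
		// exception.message; error.type falls back to _OTHER here.
		span.RecordError(err)
		span.SetStatus(codes.Error, err.Error())
		span.SetAttributes(attribute.String(semconv.AttributeErrorType, semconv.AttributeErrorTypeOther))
	}
}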
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myBucketName', 'myDBName' + AttributeFaaSDocumentCollection = "faas.document.collection" + // The document name/table subjected to the operation. For example, in Cloud + // Storage or S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myFile.txt', 'myTableName' + AttributeFaaSDocumentName = "faas.document.name" + // Describes the type of the operation that was performed on the data. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeFaaSDocumentOperation = "faas.document.operation" + // A string containing the time when the data was accessed in the ISO 8601 format + // expressed in UTC. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + AttributeFaaSDocumentTime = "faas.document.time" + // The execution environment ID as a string, that will be potentially reused for + // other invocations to the same function/function version. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note:
+ //
+ //   - AWS Lambda: Use the (full) log stream name.
+ AttributeFaaSInstance = "faas.instance" + // The invocation ID of the current function invocation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + AttributeFaaSInvocationID = "faas.invocation_id" + // The name of the invoked function. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-function' + // Note: SHOULD be equal to the faas.name resource attribute of the invoked + // function. + AttributeFaaSInvokedName = "faas.invoked_name" + // The cloud provider of the invoked function. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: SHOULD be equal to the cloud.provider resource attribute of the invoked + // function. + AttributeFaaSInvokedProvider = "faas.invoked_provider" + // The cloud region of the invoked function. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the cloud.region resource attribute of the invoked + // function. + AttributeFaaSInvokedRegion = "faas.invoked_region" + // The amount of memory available to the serverless function converted to Bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, + // the environment variable AWS_LAMBDA_FUNCTION_MEMORY_SIZE provides this + // information (which must be multiplied by 1,048,576). + AttributeFaaSMaxMemory = "faas.max_memory" + // The name of the single function that this runtime instance executes. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // code.namespace/code.function + // span attributes).For some cloud providers, the above definition is ambiguous. + // The following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud providers/products:
+ //
+ //   - Azure: The full name <FUNCAPP>/<FUNC>, i.e., function app name
+ //     followed by a forward slash followed by the function name (this form
+ //     can also be seen in the resource JSON for the function).
+ //     This means that a span attribute MUST be used, as an Azure function
+ //     app can host multiple functions that would usually share
+ //     a TracerProvider (see also the cloud.resource_id attribute).
+ AttributeFaaSName = "faas.name" + // A string containing the function invocation time in the ISO 8601 format + // expressed in UTC. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + AttributeFaaSTime = "faas.time" + // Type of the trigger which caused this function invocation. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeFaaSTrigger = "faas.trigger" + // The immutable version of the function being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use:
+ //
+ //   - AWS Lambda: The function version
+ //     (an integer represented as a decimal string).
+ //   - Google Cloud Run (Services): The revision
+ //     (i.e., the function name plus the revision suffix).
+ //   - Google Cloud Functions: The value of the
+ //     K_REVISION environment variable.
+ //   - Azure Functions: Not applicable. Do not set this attribute.
+ AttributeFaaSVersion = "faas.version"
+)
+
+const (
+ // When a new object is created
+ AttributeFaaSDocumentOperationInsert = "insert"
+ // When an object is modified
+ AttributeFaaSDocumentOperationEdit = "edit"
+ // When an object is deleted
+ AttributeFaaSDocumentOperationDelete = "delete"
+)
+
+const (
+ // Alibaba Cloud
+ AttributeFaaSInvokedProviderAlibabaCloud = "alibaba_cloud"
+ // Amazon Web Services
+ AttributeFaaSInvokedProviderAWS = "aws"
+ // Microsoft Azure
+ AttributeFaaSInvokedProviderAzure = "azure"
+ // Google Cloud Platform
+ AttributeFaaSInvokedProviderGCP = "gcp"
+ // Tencent Cloud
+ AttributeFaaSInvokedProviderTencentCloud = "tencent_cloud"
+)
+
+const (
+ // A response to some data source operation such as a database or filesystem read/write
+ AttributeFaaSTriggerDatasource = "datasource"
+ // To provide an answer to an inbound HTTP request
+ AttributeFaaSTriggerHTTP = "http"
+ // A function is set to be executed when messages are sent to a messaging system
+ AttributeFaaSTriggerPubsub = "pubsub"
+ // A function is scheduled to be executed regularly
+ AttributeFaaSTriggerTimer = "timer"
+ // If none of the others apply
+ AttributeFaaSTriggerOther = "other"
+)
+
+// Attributes for Feature Flags.
+const (
+ // The unique identifier of the feature flag.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'logo-color'
+ AttributeFeatureFlagKey = "feature_flag.key"
+ // The name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'Flag Manager'
+ AttributeFeatureFlagProviderName = "feature_flag.provider_name"
+ // SHOULD be a semantic identifier for a value. If one is unavailable, a
+ // stringified version of the value can be used.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides a
+ // means for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant red may be used for the value #c05543. A stringified
+ // version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ AttributeFeatureFlagVariant = "feature_flag.variant"
+)
+
+// Describes file attributes.
+const (
+ // Directory where the file is located. It should include the drive letter, when
+ // appropriate.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '/home/user', 'C:\\Program Files\\MyApp'
+ AttributeFileDirectory = "file.directory"
+ // File extension, excluding the leading dot.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'png', 'gz'
+ // Note: When the file name has multiple extensions (example.tar.gz), only the
+ // last one should be captured ("gz", not "tar.gz").
+ AttributeFileExtension = "file.extension"
+ // Name of the file including the extension, without the directory.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'example.png'
+ AttributeFileName = "file.name"
+ // Full path to the file, including the file name. It should include the drive
+ // letter, when appropriate.
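A small sketch (not part of the vendored file) of recording a feature flag evaluation as a span event with the feature_flag.* keys above; the event name and values are illustrative only.

package main

import (
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"

	// Assumed import path for the vendored semconv package in this patch.
	semconv "go.opentelemetry.io/collector/semconv/v1.25.0"
)

// recordFlagEvaluation attaches a feature_flag event to an existing span.
func recordFlagEvaluation(span trace.Span) {
	span.AddEvent("feature_flag", trace.WithAttributes(
		attribute.String(semconv.AttributeFeatureFlagKey, "logo-color"),
		attribute.String(semconv.AttributeFeatureFlagProviderName, "Flag Manager"),
		attribute.String(semconv.AttributeFeatureFlagVariant, "red"),
	))
}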
+
+// Describes file attributes.
+const (
+ // Directory where the file is located. It should include the drive letter, when
+ // appropriate.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '/home/user', 'C:\\Program Files\\MyApp'
+ AttributeFileDirectory = "file.directory"
+ // File extension, excluding the leading dot.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'png', 'gz'
+ // Note: When the file name has multiple extensions (example.tar.gz), only the
+ // last one should be captured ("gz", not "tar.gz").
+ AttributeFileExtension = "file.extension"
+ // Name of the file including the extension, without the directory.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'example.png'
+ AttributeFileName = "file.name"
+ // Full path to the file, including the file name. It should include the drive
+ // letter, when appropriate.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '/home/alice/example.png', 'C:\\Program Files\\MyApp\\myapp.exe'
+ AttributeFilePath = "file.path"
+ // File size in bytes.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeFileSize = "file.size"
+)
+
+// Attributes for Google Cloud Run.
+const (
+ // The name of the Cloud Run execution being run for the Job, as set by the
+ // CLOUD_RUN_EXECUTION environment variable.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'job-name-xxxx', 'sample-job-mdw84'
+ AttributeGCPCloudRunJobExecution = "gcp.cloud_run.job.execution"
+ // The index for a task within an execution as provided by the
+ // CLOUD_RUN_TASK_INDEX environment variable.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 0, 1
+ AttributeGCPCloudRunJobTaskIndex = "gcp.cloud_run.job.task_index"
+)
+
+// Attributes for Google Compute Engine (GCE).
+const (
+ // The hostname of a GCE instance. This is the full value of the default or custom
+ // hostname.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'my-host1234.example.com', 'sample-vm.us-west1-b.c.my-project.internal'
+ AttributeGCPGceInstanceHostname = "gcp.gce.instance.hostname"
+ // The instance name of a GCE instance. This is the value provided by host.name,
+ // the visible name of the instance in the Cloud Console UI, and the prefix for
+ // the default hostname of the instance as defined by the default internal DNS
+ // name.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'instance-1', 'my-vm-name'
+ AttributeGCPGceInstanceName = "gcp.gce.instance.name"
+)
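As a sketch of the file.* attributes just defined, the standard library alone is enough to derive every value from a path; the path below is illustrative only. Note that filepath.Ext on example.tar.gz yields ".gz", matching the "last extension only" rule in the file.extension note.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// fileAttrs derives the file.* attribute values for an existing file.
func fileAttrs(path string) (map[string]any, error) {
	info, err := os.Stat(path)
	if err != nil {
		return nil, err
	}
	return map[string]any{
		"file.directory": filepath.Dir(path),                           // AttributeFileDirectory
		"file.name":      filepath.Base(path),                          // AttributeFileName
		"file.extension": strings.TrimPrefix(filepath.Ext(path), "."),  // "gz", not ".gz"
		"file.path":      path,                                         // AttributeFilePath
		"file.size":      info.Size(),                                  // AttributeFileSize
	}, nil
}

func main() {
	attrs, err := fileAttrs("/home/alice/example.png") // illustrative path
	if err != nil {
		fmt.Println("stat failed:", err)
		return
	}
	fmt.Println(attrs)
}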
+
+// A host is defined as a computing instance. For example, physical servers,
+// virtual machines, switches or disk arrays.
+const (
+ // The CPU architecture the host system is running on.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeHostArch = "host.arch"
+ // The amount of level 2 memory cache available to the processor (in Bytes).
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 12288000
+ AttributeHostCPUCacheL2Size = "host.cpu.cache.l2.size"
+ // Family or generation of the CPU.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '6', 'PA-RISC 1.1e'
+ AttributeHostCPUFamily = "host.cpu.family"
+ // Model identifier. It provides more granular information about the CPU,
+ // distinguishing it from other CPUs within the same family.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '6', '9000/778/B180L'
+ AttributeHostCPUModelID = "host.cpu.model.id"
+ // Model designation of the processor.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
+ AttributeHostCPUModelName = "host.cpu.model.name"
+ // Stepping or core revisions.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '1', 'r1p1'
+ AttributeHostCPUStepping = "host.cpu.stepping"
+ // Processor manufacturer identifier. A maximum 12-character string.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'GenuineIntel'
+ // Note: CPUID command returns the vendor ID string in EBX, EDX and ECX registers.
+ // Writing these to memory in this order results in a 12-character string.
+ AttributeHostCPUVendorID = "host.cpu.vendor.id"
+ // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud
+ // provider. For non-containerized systems, this should be the machine-id. See the
+ // table below for the sources to use to determine the machine-id based on
+ // operating system.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ AttributeHostID = "host.id"
+ // VM image ID or host OS image ID. For Cloud, this value is from the provider.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'ami-07b06b442921831e5'
+ AttributeHostImageID = "host.image.id"
+ // Name of the VM image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ AttributeHostImageName = "host.image.name"
+ // The version string of the VM image or host OS as defined in Version Attributes.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '0.1'
+ AttributeHostImageVersion = "host.image.version"
+ // Available IP addresses of the host, excluding loopback interfaces.
+ //
+ // Type: string[]
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e'
+ // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 addresses
+ // MUST be specified in the RFC 5952 format.
+ AttributeHostIP = "host.ip"
+ // Available MAC addresses of the host, excluding loopback interfaces.
+ //
+ // Type: string[]
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F'
+ // Note: MAC Addresses MUST be represented in IEEE RA hexadecimal form: as hyphen-
+ // separated octets in uppercase hexadecimal form from most to least significant.
+ AttributeHostMac = "host.mac"
+ // Name of the host. On Unix systems, it may contain what the hostname command
+ // returns, or the fully qualified hostname, or another name specified by the
+ // user.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-test'
+ AttributeHostName = "host.name"
+ // Type of host. For Cloud, this must be the machine type.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'n1-standard-1'
+ AttributeHostType = "host.type"
+)
+
+const (
+ // AMD64
+ AttributeHostArchAMD64 = "amd64"
+ // ARM32
+ AttributeHostArchARM32 = "arm32"
+ // ARM64
+ AttributeHostArchARM64 = "arm64"
+ // Itanium
+ AttributeHostArchIA64 = "ia64"
+ // 32-bit PowerPC
+ AttributeHostArchPPC32 = "ppc32"
+ // 64-bit PowerPC
+ AttributeHostArchPPC64 = "ppc64"
+ // IBM z/Architecture
+ AttributeHostArchS390x = "s390x"
+ // 32-bit x86
+ AttributeHostArchX86 = "x86"
+)
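One plausible way to fill host.arch for the local machine is to map Go's runtime.GOARCH onto the enum values above; a stand-alone sketch follows. The mapping only covers GOARCH values that line up directly with the enum, and leaves anything else unset.

package main

import (
	"fmt"
	"runtime"
)

// hostArch returns the host.arch value for the current process, if known.
func hostArch() (string, bool) {
	switch runtime.GOARCH {
	case "amd64":
		return "amd64", true // AttributeHostArchAMD64
	case "arm":
		return "arm32", true // AttributeHostArchARM32
	case "arm64":
		return "arm64", true // AttributeHostArchARM64
	case "ppc64", "ppc64le":
		return "ppc64", true // AttributeHostArchPPC64
	case "s390x":
		return "s390x", true // AttributeHostArchS390x
	case "386":
		return "x86", true // AttributeHostArchX86
	default:
		return "", false
	}
}

func main() {
	if arch, ok := hostArch(); ok {
		fmt.Println("host.arch =", arch)
	}
}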
+
+// Semantic convention attributes in the HTTP namespace.
+const (
+ // State of the HTTP connection in the HTTP connection pool.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'active', 'idle'
+ AttributeHTTPConnectionState = "http.connection.state"
+ // The size of the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as the
+ // Content-Length header. For requests using transport encoding, this should be
+ // the compressed size.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 3495
+ AttributeHTTPRequestBodySize = "http.request.body.size"
+ // HTTP request method.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ // Note: HTTP request method value SHOULD be "known" to the instrumentation.
+ // By default, this convention defines "known" methods as the ones listed in
+ // RFC9110 and the PATCH method defined in RFC5789. If the HTTP request method
+ // is not known to instrumentation, it MUST set the http.request.method
+ // attribute to _OTHER. If the HTTP instrumentation could end up converting
+ // valid HTTP request methods to _OTHER, then it MUST provide a way to override
+ // the list of known HTTP methods. If this override is done via environment
+ // variable, then the environment variable MUST be named
+ // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of
+ // case-sensitive known HTTP methods (this list MUST be a full override of the
+ // default known methods, not a list of known methods in addition to the
+ // defaults). HTTP method names are case-sensitive and the http.request.method
+ // attribute value MUST match a known HTTP method name exactly.
+ // Instrumentations for specific web frameworks that consider HTTP methods to be
+ // case insensitive SHOULD populate a canonical equivalent.
+ // Tracing instrumentations that do so MUST also set http.request.method_original
+ // to the original value.
+ AttributeHTTPRequestMethod = "http.request.method"
+ // Original HTTP method sent by the client in the request line.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'GeT', 'ACL', 'foo'
+ AttributeHTTPRequestMethodOriginal = "http.request.method_original"
+ // The ordinal number of request resending attempt (for any reason, including
+ // redirects).
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets resent
+ // by the client, regardless of what was the cause of the resending (e.g.
+ // redirection, authorization failure, 503 Server Unavailable, network issues, or
+ // any other).
+ AttributeHTTPRequestResendCount = "http.request.resend_count"
+ // The total size of the request in bytes. This should be the total number of
+ // bytes sent over the wire, including the request line (HTTP/1.1), framing
+ // (HTTP/2 and HTTP/3), headers, and request body if any.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 1437
+ AttributeHTTPRequestSize = "http.request.size"
+ // The size of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as the
+ // Content-Length header. For requests using transport encoding, this should be
+ // the compressed size.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 3495
+ AttributeHTTPResponseBodySize = "http.response.body.size"
+ // The total size of the response in bytes. This should be the total number of
+ // bytes sent over the wire, including the status line (HTTP/1.1), framing (HTTP/2
+ // and HTTP/3), headers, and response body and trailers if any.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 1437
+ AttributeHTTPResponseSize = "http.response.size"
+ // HTTP response status code.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 200
+ AttributeHTTPResponseStatusCode = "http.response.status_code"
+ // The matched route, that is, the path template in the format used by the
+ // respective server framework.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+ // Note: MUST NOT be populated when this is not supported by the HTTP server
+ // framework, as the route attribute should have low cardinality and the URI path
+ // can NOT substitute it.
+ // SHOULD include the application root if there is one.
+ AttributeHTTPRoute = "http.route"
+)
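The http.request.method note above amounts to a small normalization algorithm. The following stand-alone sketch (not the collector's actual logic) implements it: unknown methods map to "_OTHER", and OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS, when set, fully replaces the default known-method list rather than extending it.

package main

import (
	"fmt"
	"os"
	"strings"
)

var defaultKnownMethods = []string{
	"CONNECT", "DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT", "TRACE",
}

// knownMethods returns the case-sensitive list of known HTTP methods,
// honoring the full-override environment variable.
func knownMethods() []string {
	if v := os.Getenv("OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS"); v != "" {
		return strings.Split(v, ",") // full override, not additive
	}
	return defaultKnownMethods
}

// normalizeMethod returns the http.request.method value and, when the raw
// method was not known, the http.request.method_original value.
func normalizeMethod(raw string) (method, original string) {
	for _, m := range knownMethods() {
		if raw == m { // matching is case-sensitive
			return raw, ""
		}
	}
	return "_OTHER", raw // AttributeHTTPRequestMethodOther
}

func main() {
	fmt.Println(normalizeMethod("GeT")) // "_OTHER GeT": case-sensitive match failed
}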
+
+const (
+ // active state
+ AttributeHTTPConnectionStateActive = "active"
+ // idle state
+ AttributeHTTPConnectionStateIdle = "idle"
+)
+
+const (
+ // CONNECT method
+ AttributeHTTPRequestMethodConnect = "CONNECT"
+ // DELETE method
+ AttributeHTTPRequestMethodDelete = "DELETE"
+ // GET method
+ AttributeHTTPRequestMethodGet = "GET"
+ // HEAD method
+ AttributeHTTPRequestMethodHead = "HEAD"
+ // OPTIONS method
+ AttributeHTTPRequestMethodOptions = "OPTIONS"
+ // PATCH method
+ AttributeHTTPRequestMethodPatch = "PATCH"
+ // POST method
+ AttributeHTTPRequestMethodPost = "POST"
+ // PUT method
+ AttributeHTTPRequestMethodPut = "PUT"
+ // TRACE method
+ AttributeHTTPRequestMethodTrace = "TRACE"
+ // Any HTTP method that the instrumentation has no prior knowledge of
+ AttributeHTTPRequestMethodOther = "_OTHER"
+)
+
+// Kubernetes resource attributes.
+const (
+ // The name of the cluster.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-cluster'
+ AttributeK8SClusterName = "k8s.cluster.name"
+ // A pseudo-ID for the cluster, set to the UID of the kube-system namespace.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
+ // Note: K8S doesn't have support for obtaining a cluster ID. If this is ever
+ // added, we will recommend collecting the k8s.cluster.uid through the
+ // official APIs. In the meantime, we are able to use the uid of the
+ // kube-system namespace as a proxy for cluster ID. Read on for the
+ // rationale. Every object created in a K8S cluster is assigned a distinct UID.
+ // The kube-system namespace is used by Kubernetes itself and will exist
+ // for the lifetime of the cluster. Using the uid of the kube-system
+ // namespace is a reasonable proxy for the K8S ClusterID as it will only
+ // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+ // UUIDs as standardized by ISO/IEC 9834-8 and ITU-T X.667, which state:
+ // "If generated according to one of the mechanisms defined in Rec.
+ // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+ // different from all other UUIDs generated before 3603 A.D., or is
+ // extremely likely to be different (depending on the mechanism chosen)."
+ // Therefore, UIDs between clusters should be extremely unlikely to conflict.
+ AttributeK8SClusterUID = "k8s.cluster.uid"
+ // The name of the Container from Pod specification, must be unique within a Pod.
+ // Container runtime usually uses different globally unique name (container.name).
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'redis'
+ AttributeK8SContainerName = "k8s.container.name"
+ // Number of times the container was restarted. This attribute can be used to
+ // identify a particular container (running or stopped) within a container spec.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 0, 2
+ AttributeK8SContainerRestartCount = "k8s.container.restart_count"
+ // The name of the CronJob.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ AttributeK8SCronJobName = "k8s.cronjob.name"
+ // The UID of the CronJob.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ AttributeK8SCronJobUID = "k8s.cronjob.uid"
+ // The name of the DaemonSet.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ AttributeK8SDaemonSetName = "k8s.daemonset.name"
+ // The UID of the DaemonSet.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ AttributeK8SDaemonSetUID = "k8s.daemonset.uid"
+ // The name of the Deployment.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ AttributeK8SDeploymentName = "k8s.deployment.name"
+ // The UID of the Deployment.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ AttributeK8SDeploymentUID = "k8s.deployment.uid"
+ // The name of the Job.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ AttributeK8SJobName = "k8s.job.name"
+ // The UID of the Job.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ AttributeK8SJobUID = "k8s.job.uid"
+ // The name of the namespace that the pod is running in.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'default'
+ AttributeK8SNamespaceName = "k8s.namespace.name"
+ // The name of the Node.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'node-1'
+ AttributeK8SNodeName = "k8s.node.name"
+ // The UID of the Node.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+ AttributeK8SNodeUID = "k8s.node.uid"
+ // The name of the Pod.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-pod-autoconf'
+ AttributeK8SPodName = "k8s.pod.name"
+ // The UID of the Pod.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ AttributeK8SPodUID = "k8s.pod.uid"
+ // The name of the ReplicaSet.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ AttributeK8SReplicaSetName = "k8s.replicaset.name"
+ // The UID of the ReplicaSet.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ AttributeK8SReplicaSetUID = "k8s.replicaset.uid"
+ // The name of the StatefulSet.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ AttributeK8SStatefulSetName = "k8s.statefulset.name"
+ // The UID of the StatefulSet.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ AttributeK8SStatefulSetUID = "k8s.statefulset.uid"
+)
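A common way to populate a few of these k8s.* attributes from inside a pod is via environment variables injected through the Kubernetes Downward API. The variable names below (K8S_POD_NAME and friends) are a convention you would have to define in the pod spec yourself, not something Kubernetes sets on its own; this is only a sketch under that assumption.

package main

import (
	"fmt"
	"os"
)

// k8sAttrs collects k8s.* resource attributes from Downward API env vars.
func k8sAttrs() map[string]string {
	attrs := map[string]string{}
	for key, env := range map[string]string{
		"k8s.namespace.name": "K8S_NAMESPACE_NAME", // AttributeK8SNamespaceName
		"k8s.pod.name":       "K8S_POD_NAME",       // AttributeK8SPodName
		"k8s.pod.uid":        "K8S_POD_UID",        // AttributeK8SPodUID
		"k8s.node.name":      "K8S_NODE_NAME",      // AttributeK8SNodeName
	} {
		if v := os.Getenv(env); v != "" {
			attrs[key] = v
		}
	}
	return attrs
}

func main() { fmt.Println(k8sAttrs()) }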
+
+// Attributes describing telemetry around messaging systems and messaging
+// activities.
+const (
+ // The number of messages sent, received, or processed in the scope of the
+ // batching operation.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set messaging.batch.message_count on spans
+ // that operate with a single message. When a messaging client library supports
+ // both batch and single-message API for the same operation, instrumentations
+ // SHOULD use messaging.batch.message_count for batching APIs and SHOULD NOT use
+ // it for single-message APIs.
+ AttributeMessagingBatchMessageCount = "messaging.batch.message_count"
+ // A unique identifier for the client that consumes or produces a message.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'client-5', 'myhost@8742@s8083jm'
+ AttributeMessagingClientID = "messaging.client_id"
+ // A boolean that is true if the message destination is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeMessagingDestinationAnonymous = "messaging.destination.anonymous"
+ // The message destination name
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic or
+ // other entity within the broker. If the broker doesn't have such notion, the
+ // destination name SHOULD uniquely identify the broker.
+ AttributeMessagingDestinationName = "messaging.destination.name"
+ // The identifier of the partition messages are sent to or received from, unique
+ // within the messaging.destination.name.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '1'
+ AttributeMessagingDestinationPartitionID = "messaging.destination.partition.id"
+ // Low cardinality representation of the messaging destination name
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '/customers/{customerID}'
+ // Note: Destination names could be constructed from templates. An example would
+ // be a destination name involving a user name or product id. Although the
+ // destination name in this case is of high cardinality, the underlying template
+ // is of low cardinality and can be effectively used for grouping and aggregation.
+ AttributeMessagingDestinationTemplate = "messaging.destination.template"
+ // A boolean that is true if the message destination is temporary and might not
+ // exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeMessagingDestinationTemporary = "messaging.destination.temporary"
+ // A boolean that is true if the publish message destination is anonymous (could
+ // be unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeMessagingDestinationPublishAnonymous = "messaging.destination_publish.anonymous"
+ // The name of the original destination the message was published to
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: The name SHOULD uniquely identify a specific queue, topic, or other
+ // entity within the broker. If the broker doesn't have such notion, the original
+ // destination name SHOULD uniquely identify the broker.
+ AttributeMessagingDestinationPublishName = "messaging.destination_publish.name"
+ // The name of the consumer group the event consumer is associated with.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'indexer'
+ AttributeMessagingEventhubsConsumerGroup = "messaging.eventhubs.consumer.group"
+ // The UTC epoch seconds at which the message has been accepted and stored in the
+ // entity.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 1701393730
+ AttributeMessagingEventhubsMessageEnqueuedTime = "messaging.eventhubs.message.enqueued_time"
+ // The ordering key for a given message. If the attribute is not present, the
+ // message does not have an ordering key.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'ordering_key'
+ AttributeMessagingGCPPubsubMessageOrderingKey = "messaging.gcp_pubsub.message.ordering_key"
+ // Name of the Kafka Consumer Group that is handling the message. Only applies to
+ // consumers, not producers.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'my-group'
+ AttributeMessagingKafkaConsumerGroup = "messaging.kafka.consumer.group"
+ // Message keys in Kafka are used for grouping alike messages to ensure they're
+ // processed on the same partition. They differ from messaging.message.id in that
+ // they're not unique. If the key is null, the attribute MUST NOT be set.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to be
+ // supplied for the attribute. If the key has no unambiguous, canonical string
+ // form, don't include its value.
+ AttributeMessagingKafkaMessageKey = "messaging.kafka.message.key"
+ // The offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 42
+ AttributeMessagingKafkaMessageOffset = "messaging.kafka.message.offset"
+ // A boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeMessagingKafkaMessageTombstone = "messaging.kafka.message.tombstone"
+ // The size of the message body in bytes.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 1439
+ // Note: This can refer to both the compressed or uncompressed body size. If both
+ // sizes are known, the uncompressed body size should be used.
+ AttributeMessagingMessageBodySize = "messaging.message.body.size"
+ // The conversation ID identifying the conversation to which the message belongs,
+ // represented as a string. Sometimes called "Correlation ID".
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'MyConversationID'
+ AttributeMessagingMessageConversationID = "messaging.message.conversation_id"
+ // The size of the message body and metadata in bytes.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 2738
+ // Note: This can refer to both the compressed or uncompressed size. If both sizes
+ // are known, the uncompressed size should be used.
+ AttributeMessagingMessageEnvelopeSize = "messaging.message.envelope.size"
+ // A value used by the messaging system as an identifier for the message,
+ // represented as a string.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+ AttributeMessagingMessageID = "messaging.message.id"
+ // A string identifying the kind of messaging operation.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ AttributeMessagingOperation = "messaging.operation"
+ // RabbitMQ message routing key.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'myKey'
+ AttributeMessagingRabbitmqDestinationRoutingKey = "messaging.rabbitmq.destination.routing_key"
+ // RabbitMQ message delivery tag
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 123
+ AttributeMessagingRabbitmqMessageDeliveryTag = "messaging.rabbitmq.message.delivery_tag"
+ // Name of the RocketMQ producer/consumer group that is handling the message. The
+ // client type is identified by the SpanKind.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'myConsumerGroup'
+ AttributeMessagingRocketmqClientGroup = "messaging.rocketmq.client_group"
+ // Model of message consumption. This only applies to consumer spans.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeMessagingRocketmqConsumptionModel = "messaging.rocketmq.consumption_model"
+ // The delay time level for delay message, which determines the message delay
+ // time.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 3
+ AttributeMessagingRocketmqMessageDelayTimeLevel = "messaging.rocketmq.message.delay_time_level"
+ // The timestamp in milliseconds that the delay message is expected to be
+ // delivered to consumer.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 1665987217045
+ AttributeMessagingRocketmqMessageDeliveryTimestamp = "messaging.rocketmq.message.delivery_timestamp"
+ // It is essential for FIFO messages. Messages that belong to the same message
+ // group are always processed one by one within the same consumer group.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'myMessageGroup'
+ AttributeMessagingRocketmqMessageGroup = "messaging.rocketmq.message.group"
+ // Key(s) of message, another way to mark a message besides the message id.
+ //
+ // Type: string[]
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'keyA', 'keyB'
+ AttributeMessagingRocketmqMessageKeys = "messaging.rocketmq.message.keys"
+ // The secondary classifier of message besides topic.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'tagA'
+ AttributeMessagingRocketmqMessageTag = "messaging.rocketmq.message.tag"
+ // Type of message.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeMessagingRocketmqMessageType = "messaging.rocketmq.message.type"
+ // Namespace of RocketMQ resources; resources in different namespaces are
+ // individual.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'myNamespace'
+ AttributeMessagingRocketmqNamespace = "messaging.rocketmq.namespace"
+ // The name of the subscription in the topic messages are received from.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'mySubscription'
+ AttributeMessagingServicebusDestinationSubscriptionName = "messaging.servicebus.destination.subscription_name"
+ // Describes the settlement type.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeMessagingServicebusDispositionStatus = "messaging.servicebus.disposition_status"
+ // Number of deliveries that have been attempted for this message.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 2
+ AttributeMessagingServicebusMessageDeliveryCount = "messaging.servicebus.message.delivery_count"
+ // The UTC epoch seconds at which the message has been accepted and stored in the
+ // entity.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 1701393730
+ AttributeMessagingServicebusMessageEnqueuedTime = "messaging.servicebus.message.enqueued_time"
+ // An identifier for the messaging system being used. See below for a list of
+ // well-known identifiers.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeMessagingSystem = "messaging.system"
+)
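To make the interplay of these keys concrete, here is a stand-alone sketch of the attributes a Kafka producer instrumentation might record for a single publish. The topic, key, and client id are illustrative, and the keys are repeated as string literals so the snippet compiles on its own.

package main

import "fmt"

// publishAttrs returns the messaging.* attributes for one publish operation.
func publishAttrs(topic, key, clientID string) map[string]string {
	return map[string]string{
		"messaging.system":            "kafka",   // AttributeMessagingSystemKafka
		"messaging.operation":         "publish", // AttributeMessagingOperationPublish
		"messaging.destination.name":  topic,     // AttributeMessagingDestinationName
		"messaging.kafka.message.key": key,       // omit entirely if the key is null
		"messaging.client_id":         clientID,  // AttributeMessagingClientID
	}
}

func main() {
	fmt.Println(publishAttrs("MyTopic", "myKey", "client-5"))
}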
+
+const (
+ // One or more messages are provided for publishing to an intermediary. If a
+ // single message is published, the context of the "Publish" span can be used
+ // as the creation context and no "Create" span needs to be created
+ AttributeMessagingOperationPublish = "publish"
+ // A message is created. "Create" spans always refer to a single message and are
+ // used to provide a unique creation context for messages in batch publishing
+ // scenarios
+ AttributeMessagingOperationCreate = "create"
+ // One or more messages are requested by a consumer. This operation refers to
+ // pull-based scenarios, where consumers explicitly call methods of messaging
+ // SDKs to receive messages
+ AttributeMessagingOperationReceive = "receive"
+ // One or more messages are delivered to or processed by a consumer
+ AttributeMessagingOperationDeliver = "process"
+ // One or more messages are settled
+ AttributeMessagingOperationSettle = "settle"
+)
+
+const (
+ // Clustering consumption model
+ AttributeMessagingRocketmqConsumptionModelClustering = "clustering"
+ // Broadcasting consumption model
+ AttributeMessagingRocketmqConsumptionModelBroadcasting = "broadcasting"
+)
+
+const (
+ // Normal message
+ AttributeMessagingRocketmqMessageTypeNormal = "normal"
+ // FIFO message
+ AttributeMessagingRocketmqMessageTypeFifo = "fifo"
+ // Delay message
+ AttributeMessagingRocketmqMessageTypeDelay = "delay"
+ // Transaction message
+ AttributeMessagingRocketmqMessageTypeTransaction = "transaction"
+)
+
+const (
+ // Message is completed
+ AttributeMessagingServicebusDispositionStatusComplete = "complete"
+ // Message is abandoned
+ AttributeMessagingServicebusDispositionStatusAbandon = "abandon"
+ // Message is sent to dead letter queue
+ AttributeMessagingServicebusDispositionStatusDeadLetter = "dead_letter"
+ // Message is deferred
+ AttributeMessagingServicebusDispositionStatusDefer = "defer"
+)
+
+const (
+ // Apache ActiveMQ
+ AttributeMessagingSystemActivemq = "activemq"
+ // Amazon Simple Queue Service (SQS)
+ AttributeMessagingSystemAWSSqs = "aws_sqs"
+ // Azure Event Grid
+ AttributeMessagingSystemEventgrid = "eventgrid"
+ // Azure Event Hubs
+ AttributeMessagingSystemEventhubs = "eventhubs"
+ // Azure Service Bus
+ AttributeMessagingSystemServicebus = "servicebus"
+ // Google Cloud Pub/Sub
+ AttributeMessagingSystemGCPPubsub = "gcp_pubsub"
+ // Java Message Service
+ AttributeMessagingSystemJms = "jms"
+ // Apache Kafka
+ AttributeMessagingSystemKafka = "kafka"
+ // RabbitMQ
+ AttributeMessagingSystemRabbitmq = "rabbitmq"
+ // Apache RocketMQ
+ AttributeMessagingSystemRocketmq = "rocketmq"
+)
+
+// These attributes may be used for any network related operation.
+const (
+ // The ISO 3166-1 alpha-2 2-character country code associated with the mobile
+ // carrier network.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'DE'
+ AttributeNetworkCarrierIcc = "network.carrier.icc"
+ // The mobile carrier country code.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '310'
+ AttributeNetworkCarrierMcc = "network.carrier.mcc"
+ // The mobile carrier network code.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '001'
+ AttributeNetworkCarrierMnc = "network.carrier.mnc"
+ // The name of the mobile carrier.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'sprint'
+ AttributeNetworkCarrierName = "network.carrier.name"
+ // This describes more details regarding the network.connection.type. It may be
+ // the type of cell technology connection, but it could be used for describing
+ // details about a wifi connection.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'LTE'
+ AttributeNetworkConnectionSubtype = "network.connection.subtype"
+ // The internet connection type.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'wifi'
+ AttributeNetworkConnectionType = "network.connection.type"
+ // The network IO operation direction.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'transmit'
+ AttributeNetworkIoDirection = "network.io.direction"
+ // Local address of the network connection - IP address or Unix domain socket
+ // name.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: '10.1.2.80', '/tmp/my.sock'
+ AttributeNetworkLocalAddress = "network.local.address"
+ // Local port number of the network connection.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 65123
+ AttributeNetworkLocalPort = "network.local.port"
+ // Peer address of the network connection - IP address or Unix domain socket name.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: '10.1.2.80', '/tmp/my.sock'
+ AttributeNetworkPeerAddress = "network.peer.address"
+ // Peer port number of the network connection.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 65123
+ AttributeNetworkPeerPort = "network.peer.port"
+ // OSI application layer or non-OSI equivalent.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'amqp', 'http', 'mqtt'
+ // Note: The value SHOULD be normalized to lowercase.
+ AttributeNetworkProtocolName = "network.protocol.name"
+ // The actual version of the protocol used for network communication.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: '1.1', '2'
+ // Note: If protocol version is subject to negotiation (for example using ALPN),
+ // this attribute SHOULD be set to the negotiated version. If the actual protocol
+ // version is not known, this attribute SHOULD NOT be set.
+ AttributeNetworkProtocolVersion = "network.protocol.version"
+ // OSI transport layer or inter-process communication method.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'tcp', 'udp'
+ // Note: The value SHOULD be normalized to lowercase. Consider always setting the
+ // transport when setting a port number, since a port number is ambiguous without
+ // knowing the transport. For example, different processes could be listening on
+ // TCP port 12345 and UDP port 12345.
+ AttributeNetworkTransport = "network.transport"
+ // OSI network layer or non-OSI equivalent.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'ipv4', 'ipv6'
+ // Note: The value SHOULD be normalized to lowercase.
+ AttributeNetworkType = "network.type"
+)
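A stand-alone sketch of deriving the connection-level attributes above from a live net.Conn with the standard library; the local listener exists only to make the example runnable. LocalAddr().Network() already returns lowercase values ("tcp", "udp", "unix") that match the network.transport enum.

package main

import (
	"fmt"
	"net"
)

// connAttrs derives network.* attributes from an established connection.
func connAttrs(c net.Conn) map[string]any {
	attrs := map[string]any{
		"network.transport": c.LocalAddr().Network(), // AttributeNetworkTransport
	}
	if tcp, ok := c.LocalAddr().(*net.TCPAddr); ok {
		attrs["network.local.address"] = tcp.IP.String()
		attrs["network.local.port"] = tcp.Port
	}
	if tcp, ok := c.RemoteAddr().(*net.TCPAddr); ok {
		attrs["network.peer.address"] = tcp.IP.String()
		attrs["network.peer.port"] = tcp.Port
	}
	return attrs
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return
	}
	defer ln.Close()
	go ln.Accept() // accept the one illustrative connection
	c, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		return
	}
	defer c.Close()
	fmt.Println(connAttrs(c))
}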
+
+const (
+ // GPRS
+ AttributeNetworkConnectionSubtypeGprs = "gprs"
+ // EDGE
+ AttributeNetworkConnectionSubtypeEdge = "edge"
+ // UMTS
+ AttributeNetworkConnectionSubtypeUmts = "umts"
+ // CDMA
+ AttributeNetworkConnectionSubtypeCdma = "cdma"
+ // EVDO Rel. 0
+ AttributeNetworkConnectionSubtypeEvdo0 = "evdo_0"
+ // EVDO Rev. A
+ AttributeNetworkConnectionSubtypeEvdoA = "evdo_a"
+ // CDMA2000 1XRTT
+ AttributeNetworkConnectionSubtypeCdma20001xrtt = "cdma2000_1xrtt"
+ // HSDPA
+ AttributeNetworkConnectionSubtypeHsdpa = "hsdpa"
+ // HSUPA
+ AttributeNetworkConnectionSubtypeHsupa = "hsupa"
+ // HSPA
+ AttributeNetworkConnectionSubtypeHspa = "hspa"
+ // IDEN
+ AttributeNetworkConnectionSubtypeIden = "iden"
+ // EVDO Rev. B
+ AttributeNetworkConnectionSubtypeEvdoB = "evdo_b"
+ // LTE
+ AttributeNetworkConnectionSubtypeLte = "lte"
+ // EHRPD
+ AttributeNetworkConnectionSubtypeEhrpd = "ehrpd"
+ // HSPAP
+ AttributeNetworkConnectionSubtypeHspap = "hspap"
+ // GSM
+ AttributeNetworkConnectionSubtypeGsm = "gsm"
+ // TD-SCDMA
+ AttributeNetworkConnectionSubtypeTdScdma = "td_scdma"
+ // IWLAN
+ AttributeNetworkConnectionSubtypeIwlan = "iwlan"
+ // 5G NR (New Radio)
+ AttributeNetworkConnectionSubtypeNr = "nr"
+ // 5G NRNSA (New Radio Non-Standalone)
+ AttributeNetworkConnectionSubtypeNrnsa = "nrnsa"
+ // LTE CA
+ AttributeNetworkConnectionSubtypeLteCa = "lte_ca"
+)
+
+const (
+ // wifi
+ AttributeNetworkConnectionTypeWifi = "wifi"
+ // wired
+ AttributeNetworkConnectionTypeWired = "wired"
+ // cell
+ AttributeNetworkConnectionTypeCell = "cell"
+ // unavailable
+ AttributeNetworkConnectionTypeUnavailable = "unavailable"
+ // unknown
+ AttributeNetworkConnectionTypeUnknown = "unknown"
+)
+
+const (
+ // transmit
+ AttributeNetworkIoDirectionTransmit = "transmit"
+ // receive
+ AttributeNetworkIoDirectionReceive = "receive"
+)
+
+const (
+ // TCP
+ AttributeNetworkTransportTCP = "tcp"
+ // UDP
+ AttributeNetworkTransportUDP = "udp"
+ // Named or anonymous pipe
+ AttributeNetworkTransportPipe = "pipe"
+ // Unix domain socket
+ AttributeNetworkTransportUnix = "unix"
+)
+
+const (
+ // IPv4
+ AttributeNetworkTypeIpv4 = "ipv4"
+ // IPv6
+ AttributeNetworkTypeIpv6 = "ipv6"
+)
+
+// An OCI image manifest.
+const (
+ // The digest of the OCI image manifest. For container images specifically, this
+ // is the digest by which the container image is known.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples:
+ // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
+ // Note: Follows OCI Image Manifest Specification, and specifically the Digest
+ // property.
+ // An example can be found in Example Image Manifest.
+ AttributeOciManifestDigest = "oci.manifest.digest"
+)
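The oci.manifest.digest format is "<algorithm>:<lowercase hex>". A stand-alone sketch of producing such a string over an in-memory manifest body with the standard library:

package main

import (
	"crypto/sha256"
	"fmt"
)

// manifestDigest renders a digest in the "sha256:<hex>" form used by
// oci.manifest.digest.
func manifestDigest(manifest []byte) string {
	sum := sha256.Sum256(manifest)
	return fmt.Sprintf("sha256:%x", sum)
}

func main() {
	fmt.Println(manifestDigest([]byte(`{"schemaVersion":2}`))) // illustrative body
}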
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+ // Unique identifier for a particular build or compilation of the operating
+ // system.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
+ AttributeOSBuildID = "os.build_id"
+ // Human readable (not intended to be parsed) OS version information, as e.g.
+ // reported by the ver or lsb_release -a commands.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS'
+ AttributeOSDescription = "os.description"
+ // Human readable operating system name.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'iOS', 'Android', 'Ubuntu'
+ AttributeOSName = "os.name"
+ // The operating system type.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeOSType = "os.type"
+ // The version string of the operating system as defined in Version Attributes.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '14.2.1', '18.04.1'
+ AttributeOSVersion = "os.version"
+)
+
+const (
+ // Microsoft Windows
+ AttributeOSTypeWindows = "windows"
+ // Linux
+ AttributeOSTypeLinux = "linux"
+ // Apple Darwin
+ AttributeOSTypeDarwin = "darwin"
+ // FreeBSD
+ AttributeOSTypeFreeBSD = "freebsd"
+ // NetBSD
+ AttributeOSTypeNetBSD = "netbsd"
+ // OpenBSD
+ AttributeOSTypeOpenBSD = "openbsd"
+ // DragonFly BSD
+ AttributeOSTypeDragonflyBSD = "dragonflybsd"
+ // HP-UX (Hewlett Packard Unix)
+ AttributeOSTypeHPUX = "hpux"
+ // AIX (Advanced Interactive eXecutive)
+ AttributeOSTypeAIX = "aix"
+ // SunOS, Oracle Solaris
+ AttributeOSTypeSolaris = "solaris"
+ // IBM z/OS
+ AttributeOSTypeZOS = "z_os"
+)
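For a Go process, os.type can be filled directly from runtime.GOOS, since most GOOS values ("linux", "windows", "darwin", the BSDs, "solaris", "aix") already match the enum spelling above; only dragonfly and zos need renaming. A stand-alone sketch:

package main

import (
	"fmt"
	"runtime"
)

// osType maps runtime.GOOS onto the os.type enum values.
func osType() string {
	switch runtime.GOOS {
	case "dragonfly":
		return "dragonflybsd" // AttributeOSTypeDragonflyBSD
	case "zos":
		return "z_os" // AttributeOSTypeZOS
	default:
		return runtime.GOOS
	}
}

func main() { fmt.Println("os.type =", osType()) }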
+
+// An operating system process.
+const (
+ // The command used to launch the process (i.e. the command name). On Linux based
+ // systems, can be set to the zeroth string in proc/[pid]/cmdline. On Windows, can
+ // be set to the first parameter extracted from GetCommandLineW.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'cmd/otelcol'
+ AttributeProcessCommand = "process.command"
+ // All the command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited strings
+ // extracted from proc/[pid]/cmdline. For libc-based executables, this would be
+ // the full argv vector passed to main.
+ //
+ // Type: string[]
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'cmd/otecol', '--config=config.yaml'
+ AttributeProcessCommandArgs = "process.command_args"
+ // The full command used to launch the process as a single string representing the
+ // full command. On Windows, can be set to the result of GetCommandLineW. Do not
+ // set this if you have to assemble it just for monitoring; use
+ // process.command_args instead.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
+ AttributeProcessCommandLine = "process.command_line"
+ // The name of the process executable. On Linux based systems, can be set to the
+ // Name in proc/[pid]/status. On Windows, can be set to the base name of
+ // GetProcessImageFileNameW.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'otelcol'
+ AttributeProcessExecutableName = "process.executable.name"
+ // The full path to the process executable. On Linux based systems, can be set to
+ // the target of proc/[pid]/exe. On Windows, can be set to the result of
+ // GetProcessImageFileNameW.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '/usr/bin/cmd/otelcol'
+ AttributeProcessExecutablePath = "process.executable.path"
+ // The username of the user that owns the process.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'root'
+ AttributeProcessOwner = "process.owner"
+ // Parent Process identifier (PPID).
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 111
+ AttributeProcessParentPID = "process.parent_pid"
+ // Process identifier (PID).
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 1234
+ AttributeProcessPID = "process.pid"
+ // An additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+ AttributeProcessRuntimeDescription = "process.runtime.description"
+ // The name of the runtime of this process. For compiled native binaries, this
+ // SHOULD be the name of the compiler.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'OpenJDK Runtime Environment'
+ AttributeProcessRuntimeName = "process.runtime.name"
+ // The version of the runtime of this process, as returned by the runtime without
+ // modification.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '14.0.2'
+ AttributeProcessRuntimeVersion = "process.runtime.version"
+)
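Every process.* value above for the current process can be collected with the standard library alone; a stand-alone sketch:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
)

// processAttrs collects process.* attributes for the running process.
func processAttrs() map[string]any {
	attrs := map[string]any{
		"process.pid":             os.Getpid(),       // AttributeProcessPID
		"process.parent_pid":      os.Getppid(),      // AttributeProcessParentPID
		"process.command_args":    os.Args,           // AttributeProcessCommandArgs
		"process.runtime.name":    runtime.Compiler,  // "gc" for the standard Go compiler
		"process.runtime.version": runtime.Version(), // as returned, without modification
	}
	if exe, err := os.Executable(); err == nil {
		attrs["process.executable.path"] = exe
		attrs["process.executable.name"] = filepath.Base(exe)
	}
	return attrs
}

func main() { fmt.Println(processAttrs()) }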
+
+// Attributes for remote procedure calls.
+const (
+ // The error codes of the Connect request. Error codes are always string values.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeRPCConnectRPCErrorCode = "rpc.connect_rpc.error_code"
+ // The numeric status code of the gRPC request.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeRPCGRPCStatusCode = "rpc.grpc.status_code"
+ // error.code property of response if it is an error response.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: -32700, 100
+ AttributeRPCJsonrpcErrorCode = "rpc.jsonrpc.error_code"
+ // error.message property of response if it is an error response.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'Parse error', 'User already exists'
+ AttributeRPCJsonrpcErrorMessage = "rpc.jsonrpc.error_message"
+ // id property of request or response. Since protocol allows id to be int, string,
+ // null or missing (for notifications), value is expected to be cast to string for
+ // simplicity. Use empty string in case of null value. Omit entirely if this is a
+ // notification.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '10', 'request-7', ''
+ AttributeRPCJsonrpcRequestID = "rpc.jsonrpc.request_id"
+ // Protocol version as in jsonrpc property of request/response. Since JSON-RPC 1.0
+ // doesn't specify this, the value can be omitted.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '2.0', '1.0'
+ AttributeRPCJsonrpcVersion = "rpc.jsonrpc.version"
+ // The name of the (logical) method being called, must be equal to the $method
+ // part in the span name.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The code.function attribute may be used to store the latter
+ // (e.g., method actually executing the call on the server side, RPC client stub
+ // method on the client side).
+ AttributeRPCMethod = "rpc.method"
+ // The full (logical) name of the service being called, including its package
+ // name, if applicable.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing class.
+ // The code.namespace attribute may be used to store the latter (despite the
+ // attribute name, it may include a class name; e.g., class with method actually
+ // executing the call on the server side, RPC client stub class on the client
+ // side).
+ AttributeRPCService = "rpc.service"
+ // A string identifying the remoting system. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeRPCSystem = "rpc.system"
+)
+
+const (
+ // cancelled
+ AttributeRPCConnectRPCErrorCodeCancelled = "cancelled"
+ // unknown
+ AttributeRPCConnectRPCErrorCodeUnknown = "unknown"
+ // invalid_argument
+ AttributeRPCConnectRPCErrorCodeInvalidArgument = "invalid_argument"
+ // deadline_exceeded
+ AttributeRPCConnectRPCErrorCodeDeadlineExceeded = "deadline_exceeded"
+ // not_found
+ AttributeRPCConnectRPCErrorCodeNotFound = "not_found"
+ // already_exists
+ AttributeRPCConnectRPCErrorCodeAlreadyExists = "already_exists"
+ // permission_denied
+ AttributeRPCConnectRPCErrorCodePermissionDenied = "permission_denied"
+ // resource_exhausted
+ AttributeRPCConnectRPCErrorCodeResourceExhausted = "resource_exhausted"
+ // failed_precondition
+ AttributeRPCConnectRPCErrorCodeFailedPrecondition = "failed_precondition"
+ // aborted
+ AttributeRPCConnectRPCErrorCodeAborted = "aborted"
+ // out_of_range
+ AttributeRPCConnectRPCErrorCodeOutOfRange = "out_of_range"
+ // unimplemented
+ AttributeRPCConnectRPCErrorCodeUnimplemented = "unimplemented"
+ // internal
+ AttributeRPCConnectRPCErrorCodeInternal = "internal"
+ // unavailable
+ AttributeRPCConnectRPCErrorCodeUnavailable = "unavailable"
+ // data_loss
+ AttributeRPCConnectRPCErrorCodeDataLoss = "data_loss"
+ // unauthenticated
+ AttributeRPCConnectRPCErrorCodeUnauthenticated = "unauthenticated"
+)
+
+const (
+ // OK
+ AttributeRPCGRPCStatusCodeOk = "0"
+ // CANCELLED
+ AttributeRPCGRPCStatusCodeCancelled = "1"
+ // UNKNOWN
+ AttributeRPCGRPCStatusCodeUnknown = "2"
+ // INVALID_ARGUMENT
+ AttributeRPCGRPCStatusCodeInvalidArgument = "3"
+ // DEADLINE_EXCEEDED
+ AttributeRPCGRPCStatusCodeDeadlineExceeded = "4"
+ // NOT_FOUND
+ AttributeRPCGRPCStatusCodeNotFound = "5"
+ // ALREADY_EXISTS
+ AttributeRPCGRPCStatusCodeAlreadyExists = "6"
+ // PERMISSION_DENIED
+ AttributeRPCGRPCStatusCodePermissionDenied = "7"
+ // RESOURCE_EXHAUSTED
+ AttributeRPCGRPCStatusCodeResourceExhausted = "8"
+ // FAILED_PRECONDITION
+ AttributeRPCGRPCStatusCodeFailedPrecondition = "9"
+ // ABORTED
+ AttributeRPCGRPCStatusCodeAborted = "10"
+ // OUT_OF_RANGE
+ AttributeRPCGRPCStatusCodeOutOfRange = "11"
+ // UNIMPLEMENTED
+ AttributeRPCGRPCStatusCodeUnimplemented = "12"
+ // INTERNAL
+ AttributeRPCGRPCStatusCodeInternal = "13"
+ // UNAVAILABLE
+ AttributeRPCGRPCStatusCodeUnavailable = "14"
+ // DATA_LOSS
+ AttributeRPCGRPCStatusCodeDataLoss = "15"
+ // UNAUTHENTICATED
+ AttributeRPCGRPCStatusCodeUnauthenticated = "16"
+)
+
+const (
+ // gRPC
+ AttributeRPCSystemGRPC = "grpc"
+ // Java RMI
+ AttributeRPCSystemJavaRmi = "java_rmi"
+ // .NET WCF
+ AttributeRPCSystemDotnetWcf = "dotnet_wcf"
+ // Apache Dubbo
+ AttributeRPCSystemApacheDubbo = "apache_dubbo"
+ // Connect RPC
+ AttributeRPCSystemConnectRPC = "connect_rpc"
+)
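Note that the rpc.grpc.status_code enum values are the numeric codes rendered as strings ("0" for OK, "14" for UNAVAILABLE, and so on). A stand-alone sketch of building the rpc.* attributes for a gRPC call from the full method string, which conventionally looks like "/package.Service/Method":

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// grpcAttrs splits a gRPC full method name into rpc.service and
// rpc.method, and renders the numeric status code as a string.
func grpcAttrs(fullMethod string, code int) map[string]string {
	service, method := "", ""
	if parts := strings.SplitN(strings.TrimPrefix(fullMethod, "/"), "/", 2); len(parts) == 2 {
		service, method = parts[0], parts[1]
	}
	return map[string]string{
		"rpc.system":           "grpc", // AttributeRPCSystemGRPC
		"rpc.service":          service,
		"rpc.method":           method,
		"rpc.grpc.status_code": strconv.Itoa(code), // "0" == AttributeRPCGRPCStatusCodeOk
	}
}

func main() {
	fmt.Println(grpcAttrs("/myservice.EchoService/Echo", 0))
}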
+
+// These attributes may be used to describe the server in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+ // Server domain name if available without reverse DNS lookup; otherwise, IP
+ // address or Unix domain socket name.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the client side, and when communicating through an
+ // intermediary, server.address SHOULD represent the server address behind any
+ // intermediaries, for example proxies, if it's available.
+ AttributeServerAddress = "server.address"
+ // Server port number.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 80, 8080, 443
+ // Note: When observed from the client side, and when communicating through an
+ // intermediary, server.port SHOULD represent the server port behind any
+ // intermediaries, for example proxies, if it's available.
+ AttributeServerPort = "server.port"
+)
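For an HTTP client span, server.address and server.port can be derived from the request URL; a stand-alone sketch, defaulting the port from the scheme when the URL carries none:

package main

import (
	"fmt"
	"net/url"
	"strconv"
)

// serverAttrs derives server.address and server.port from a URL.
func serverAttrs(rawURL string) (map[string]any, error) {
	u, err := url.Parse(rawURL)
	if err != nil {
		return nil, err
	}
	port := 80
	if u.Scheme == "https" {
		port = 443
	}
	if p := u.Port(); p != "" {
		port, _ = strconv.Atoi(p)
	}
	return map[string]any{
		"server.address": u.Hostname(), // AttributeServerAddress
		"server.port":    port,         // AttributeServerPort
	}, nil
}

func main() {
	attrs, _ := serverAttrs("https://example.com:8443/path")
	fmt.Println(attrs) // map[server.address:example.com server.port:8443]
}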
+
+// A service instance.
+const (
+ // The string ID of the service instance.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '627cc493-f310-47de-96bd-71410b7dec09'
+ // Note: MUST be unique for each instance of the same
+ // service.namespace,service.name pair (in other words
+ // service.namespace,service.name,service.instance.id triplet MUST be globally
+ // unique). The ID helps to distinguish instances of the same service that exist
+ // at the same time (e.g. instances of a horizontally scaled service).
+ // Implementations, such as SDKs, are recommended to generate a random Version 1
+ // or Version 4 RFC 4122 UUID, but are free to use an inherent unique ID as the
+ // source of this value if stability is desirable. In that case, the ID SHOULD be
+ // used as source of a UUID Version 5 and SHOULD use the following UUID as the
+ // namespace: 4d63009a-8d0f-11ee-aad7-4c796ed8e320. UUIDs are typically
+ // recommended, as only an opaque value for the purposes of identifying a service
+ // instance is needed. Similar to what can be seen in the man page for the
+ // /etc/machine-id file, the underlying data, such as pod name and namespace,
+ // should be treated as confidential, being the user's choice to expose it or not
+ // via another resource attribute. For applications running behind an application
+ // server (like unicorn), we do not recommend using one identifier for all
+ // processes participating in the application. Instead, it's recommended that
+ // each division (e.g. a worker thread in unicorn) have its own instance.id.
+ // It's not recommended for a Collector to set service.instance.id if it can't
+ // unambiguously determine the service instance that is generating that
+ // telemetry. For instance, creating a UUID based on pod.name will likely be
+ // wrong, as the Collector might not know from which container within that pod
+ // the telemetry originated.
+ // However, Collectors can set the service.instance.id if they can unambiguously
+ // determine the service instance for that telemetry. This is typically the case
+ // for scraping receivers, as they know the target address and port.
+ AttributeServiceInstanceID = "service.instance.id"
+ // Logical name of the service.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled services. If
+ // the value was not specified, SDKs MUST fallback to unknown_service:
+ // concatenated with process.executable.name, e.g. unknown_service:bash. If
+ // process.executable.name is not available, the value MUST be set to
+ // unknown_service.
+ AttributeServiceName = "service.name"
+ // A namespace for service.name.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group of
+ // services, for example the team name that owns a group of services. service.name
+ // is expected to be unique within the same namespace. If service.namespace is not
+ // specified in the Resource then service.name is expected to be unique for all
+ // services that have no explicit namespace defined (so the empty/unspecified
+ // namespace is simply one more valid namespace). Zero-length namespace string is
+ // assumed equal to unspecified namespace.
+ AttributeServiceNamespace = "service.namespace"
+ // The version string of the service API or implementation. The format is not
+ // defined by these conventions.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: '2.0.0', 'a01dbef8a'
+ AttributeServiceVersion = "service.version"
+)
+
+// Session is defined as the period of time encompassing all activities
+// performed by the application and the actions executed by the end user.
+// Consequently, a Session is represented as a collection of Logs, Events, and
+// Spans emitted by the Client Application throughout the Session's duration.
+// Each Session is assigned a unique identifier, which is included as an
+// attribute in the Logs, Events, and Spans generated during the Session's
+// lifecycle.
+// When a session reaches end of life, typically due to user inactivity or
+// session timeout, a new session identifier will be assigned. The previous
+// session identifier may be provided by the instrumentation so that telemetry
+// backends can link the two sessions.
+const (
+ // A unique id to identify a session.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '00112233-4455-6677-8899-aabbccddeeff'
+ AttributeSessionID = "session.id"
+ // The previous session.id for this user, when known.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '00112233-4455-6677-8899-aabbccddeeff'
+ AttributeSessionPreviousID = "session.previous_id"
+)
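The service.name note above specifies a concrete fallback rule; a stand-alone sketch of it: when no name is configured, fall back to "unknown_service:<executable name>", or plain "unknown_service" when the executable name is unavailable.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// serviceName applies the SDK fallback rule for service.name.
func serviceName(configured string) string {
	if configured != "" {
		return configured
	}
	if exe, err := os.Executable(); err == nil {
		return "unknown_service:" + filepath.Base(exe) // e.g. unknown_service:bash
	}
	return "unknown_service"
}

func main() {
	fmt.Println(serviceName(""))
}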
This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // Source address - domain name if available without reverse DNS lookup; + // otherwise, IP address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the destination side, and when communicating through + // an intermediary, source.address SHOULD represent the source address behind any + // intermediaries, for example proxies, if it's available. + AttributeSourceAddress = "source.address" + // Source port number + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3389, 2888 + AttributeSourcePort = "source.port" +) + +// Attributes for telemetry SDK. +const ( + // The language of the telemetry SDK. + // + // Type: Enum + // Requirement Level: Required + // Stability: stable + AttributeTelemetrySDKLanguage = "telemetry.sdk.language" + // The name of the telemetry SDK as defined above. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'opentelemetry' + // Note: The OpenTelemetry SDK MUST set the telemetry.sdk.name attribute to + // opentelemetry. + // If another SDK, like a fork or a vendor-provided implementation, is used, this + // SDK MUST set the + // telemetry.sdk.name attribute to the fully-qualified class or module name of + // this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier opentelemetry is reserved and MUST NOT be used in this case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + AttributeTelemetrySDKName = "telemetry.sdk.name" + // The version string of the telemetry SDK. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: '1.2.3' + AttributeTelemetrySDKVersion = "telemetry.sdk.version" + // The name of the auto instrumentation agent or distribution, if used. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'parts-unlimited-java' + // Note: Official auto instrumentation agents and distributions SHOULD set the + // telemetry.distro.name attribute to + // a string starting with opentelemetry-, e.g. opentelemetry-java-instrumentation. + AttributeTelemetryDistroName = "telemetry.distro.name" + // The version string of the auto instrumentation agent or distribution, if used. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1.2.3' + AttributeTelemetryDistroVersion = "telemetry.distro.version" +) + +const ( + // cpp + AttributeTelemetrySDKLanguageCPP = "cpp" + // dotnet + AttributeTelemetrySDKLanguageDotnet = "dotnet" + // erlang + AttributeTelemetrySDKLanguageErlang = "erlang" + // go + AttributeTelemetrySDKLanguageGo = "go" + // java + AttributeTelemetrySDKLanguageJava = "java" + // nodejs + AttributeTelemetrySDKLanguageNodejs = "nodejs" + // php + AttributeTelemetrySDKLanguagePHP = "php" + // python + AttributeTelemetrySDKLanguagePython = "python" + // ruby + AttributeTelemetrySDKLanguageRuby = "ruby" + // rust + AttributeTelemetrySDKLanguageRust = "rust" + // swift + AttributeTelemetrySDKLanguageSwift = "swift" + // webjs + AttributeTelemetrySDKLanguageWebjs = "webjs" +) + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // Current "managed" thread ID (as opposed to OS thread ID). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 42 + AttributeThreadID = "thread.id" + // Current thread name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'main' + AttributeThreadName = "thread.name" +) + +// Semantic convention attributes in the TLS namespace. +const ( + // String indicating the cipher used during the current connection. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', + // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' + // Note: The values allowed for tls.cipher MUST be one of the Descriptions of the + // registered TLS Cipher Suits. + AttributeTLSCipher = "tls.cipher" + // PEM-encoded stand-alone certificate offered by the client. This is usually + // mutually-exclusive of client.certificate_chain since this value also exists in + // that list. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...' + AttributeTLSClientCertificate = "tls.client.certificate" + // Array of PEM-encoded certificates that make up the certificate chain offered by + // the client. This is usually mutually-exclusive of client.certificate since that + // value should be the first certificate in the chain. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + AttributeTLSClientCertificateChain = "tls.client.certificate_chain" + // Certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + AttributeTLSClientHashMd5 = "tls.client.hash.md5" + // Certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + AttributeTLSClientHashSha1 = "tls.client.hash.sha1" + // Certificate fingerprint using the SHA256 digest of DER-encoded version of + // certificate offered by the client. 
For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + AttributeTLSClientHashSha256 = "tls.client.hash.sha256" + // Distinguished name of subject of the issuer of the x.509 certificate presented + // by the client. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com' + AttributeTLSClientIssuer = "tls.client.issuer" + // A hash that identifies clients based on how they perform an SSL/TLS handshake. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + AttributeTLSClientJa3 = "tls.client.ja3" + // Date/Time indicating when client certificate is no longer considered valid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + AttributeTLSClientNotAfter = "tls.client.not_after" + // Date/Time indicating when client certificate is first considered valid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + AttributeTLSClientNotBefore = "tls.client.not_before" + // Also called an SNI, this tells the server which hostname to which the client is + // attempting to connect to. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry.io' + AttributeTLSClientServerName = "tls.client.server_name" + // Distinguished name of subject of the x.509 certificate presented by the client. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com' + AttributeTLSClientSubject = "tls.client.subject" + // Array of ciphers offered by the client during the client hello. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."' + AttributeTLSClientSupportedCiphers = "tls.client.supported_ciphers" + // String indicating the curve used for the given cipher, when applicable + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'secp256r1' + AttributeTLSCurve = "tls.curve" + // Boolean flag indicating if the TLS negotiation was successful and transitioned + // to an encrypted tunnel. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + // Examples: True + AttributeTLSEstablished = "tls.established" + // String indicating the protocol being tunneled. Per the values in the IANA + // registry, this string should be lower case. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'http/1.1' + AttributeTLSNextProtocol = "tls.next_protocol" + // Normalized lowercase protocol name parsed from original string of the + // negotiated SSL/TLS protocol version + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeTLSProtocolName = "tls.protocol.name" + // Numeric part of the version parsed from the original string of the negotiated + // SSL/TLS protocol version + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1.2', '3' + AttributeTLSProtocolVersion = "tls.protocol.version" + // Boolean flag indicating if this TLS connection was resumed from an existing TLS + // negotiation. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + // Examples: True + AttributeTLSResumed = "tls.resumed" + // PEM-encoded stand-alone certificate offered by the server. This is usually + // mutually-exclusive of server.certificate_chain since this value also exists in + // that list. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...' + AttributeTLSServerCertificate = "tls.server.certificate" + // Array of PEM-encoded certificates that make up the certificate chain offered by + // the server. This is usually mutually-exclusive of server.certificate since that + // value should be the first certificate in the chain. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + AttributeTLSServerCertificateChain = "tls.server.certificate_chain" + // Certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + AttributeTLSServerHashMd5 = "tls.server.hash.md5" + // Certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + AttributeTLSServerHashSha1 = "tls.server.hash.sha1" + // Certificate fingerprint using the SHA256 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + AttributeTLSServerHashSha256 = "tls.server.hash.sha256" + // Distinguished name of subject of the issuer of the x.509 certificate presented + // by the client. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com' + AttributeTLSServerIssuer = "tls.server.issuer" + // A hash that identifies servers based on how they perform an SSL/TLS handshake. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + AttributeTLSServerJa3s = "tls.server.ja3s" + // Date/Time indicating when server certificate is no longer considered valid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + AttributeTLSServerNotAfter = "tls.server.not_after" + // Date/Time indicating when server certificate is first considered valid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + AttributeTLSServerNotBefore = "tls.server.not_before" + // Distinguished name of subject of the x.509 certificate presented by the server. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' + AttributeTLSServerSubject = "tls.server.subject" +) + +const ( + // ssl + AttributeTLSProtocolNameSsl = "ssl" + // tls + AttributeTLSProtocolNameTLS = "tls" +) + +// Attributes describing URL. +const ( + // Domain extracted from the url.full, such as "opentelemetry.io". + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2', + // '[1080:0:0:0:8:800:200C:417A]' + // Note: In some cases a URL may refer to an IP and/or port directly, without a + // domain name. In this case, the IP address would go to the domain field. If the + // URL contains a literal IPv6 address enclosed by [ and ], the [ and ] characters + // should also be captured in the domain field. + AttributeURLDomain = "url.domain" + // The file extension extracted from the url.full, excluding the leading dot. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: The file extension is only set if it exists, as not every url has a file + // extension. When the file name has multiple extensions example.tar.gz, only the + // last one should be captured gz, not tar.gz. + AttributeURLExtension = "url.extension" + // The URI fragment component + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'SemConv' + AttributeURLFragment = "url.fragment" + // Absolute URL describing a network resource according to RFC3986 + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', '//localhost' + // Note: For network calls, URL usually has + // scheme://host[:port][path][?query][#fragment] format, where the fragment is not + // transmitted over HTTP, but if it is known, it SHOULD be included nevertheless. + // url.full MUST NOT contain credentials passed via URL in form of + // https://username:password@www.example.com/. In such case username and password + // SHOULD be redacted and attribute's value SHOULD be + // https://REDACTED:REDACTED@www.example.com/. + // url.full SHOULD capture the absolute URL when it is available (or can be + // reconstructed). Sensitive content provided in url.full SHOULD be scrubbed when + // instrumentations can identify it. + AttributeURLFull = "url.full" + // Unmodified original URL as seen in the event source. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // 'search?q=OpenTelemetry' + // Note: In network monitoring, the observed URL may be a full URL, whereas in + // access logs, the URL is often just represented as a path. This field is meant + // to represent the URL as it was observed, complete or not. + // url.original might contain credentials passed via URL in form of + // https://username:password@www.example.com/. In such case password and username + // SHOULD NOT be redacted and attribute's value SHOULD remain the same. + AttributeURLOriginal = "url.original" + // The URI path component + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '/search' + // Note: Sensitive content provided in url.path SHOULD be scrubbed when + // instrumentations can identify it. + AttributeURLPath = "url.path" + // Port extracted from the url.full + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 443 + AttributeURLPort = "url.port" + // The URI query component + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'q=OpenTelemetry' + // Note: Sensitive content provided in url.query SHOULD be scrubbed when + // instrumentations can identify it. + AttributeURLQuery = "url.query" + // The highest registered url domain, stripped of the subdomain. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'example.com', 'foo.co.uk' + // Note: This value can be determined precisely with the public suffix list. For + // example, the registered domain for foo.example.com is example.com. Trying to + // approximate this by simply taking the last two labels will not work well for + // TLDs such as co.uk. + AttributeURLRegisteredDomain = "url.registered_domain" + // The URI scheme component identifying the used protocol. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'https', 'ftp', 'telnet' + AttributeURLScheme = "url.scheme" + // The subdomain portion of a fully qualified domain name includes all of the + // names except the host name under the registered_domain. In a partially + // qualified domain, or if the qualification level of the full name cannot be + // determined, subdomain contains all of the names below the registered domain. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'east', 'sub2.sub1' + // Note: The subdomain portion of www.east.mydomain.co.uk is east. If the domain + // has multiple levels of subdomain, such as sub2.sub1.example.com, the subdomain + // field should contain sub2.sub1, with no trailing period. + AttributeURLSubdomain = "url.subdomain" + // The effective top level domain (eTLD), also known as the domain suffix, is the + // last part of the domain name. For example, the top level domain for example.com + // is com. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'com', 'co.uk' + // Note: This value can be determined precisely with the public suffix list. + AttributeURLTopLevelDomain = "url.top_level_domain" +) + +// Describes user-agent attributes. +const ( + // Name of the user-agent extracted from original. Usually refers to the browser's + // name. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Safari', 'YourApp' + // Note: Example of extracting browser's name from original string. In the case of + // using a user-agent for non-browser products, such as microservices with + // multiple names/versions inside the user_agent.original, the most significant + // name SHOULD be selected. In such a scenario it should align with + // user_agent.version + AttributeUserAgentName = "user_agent.name" + // Value of the HTTP User-Agent header sent by the client. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU iPhone + // OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0 grpc-java- + // okhttp/1.27.2' + AttributeUserAgentOriginal = "user_agent.original" + // Version of the user-agent extracted from original. Usually refers to the + // browser's version + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '14.1.2', '1.0.0' + // Note: Example of extracting browser's version from original string. In the case + // of using a user-agent for non-browser products, such as microservices with + // multiple names/versions inside the user_agent.original, the most significant + // version SHOULD be selected. In such a scenario it should align with + // user_agent.name + AttributeUserAgentVersion = "user_agent.version" +) + +func GetAttribute_groupSemanticConventionAttributeNames() []string { + return []string{ + AttributeEventName, + AttributeLogRecordUID, + AttributeLogIostream, + AttributeLogFileName, + AttributeLogFileNameResolved, + AttributeLogFilePath, + AttributeLogFilePathResolved, + AttributePoolName, + AttributeState, + AttributeAspnetcoreRateLimitingResult, + AttributeAspnetcoreDiagnosticsHandlerType, + AttributeAspnetcoreRateLimitingPolicy, + AttributeAspnetcoreRequestIsUnhandled, + AttributeAspnetcoreRoutingIsFallback, + AttributeSignalrConnectionStatus, + AttributeSignalrTransport, + AttributeJvmBufferPoolName, + AttributeJvmMemoryPoolName, + AttributeJvmMemoryType, + AttributeProcessCPUState, + AttributeSystemDevice, + AttributeSystemCPULogicalNumber, + AttributeSystemCPUState, + AttributeSystemMemoryState, + AttributeSystemPagingDirection, + AttributeSystemPagingState, + AttributeSystemPagingType, + AttributeSystemFilesystemMode, + AttributeSystemFilesystemMountpoint, + AttributeSystemFilesystemState, + AttributeSystemFilesystemType, + AttributeSystemNetworkState, + AttributeSystemProcessStatus, + AttributeAndroidOSAPILevel, + AttributeAWSDynamoDBAttributeDefinitions, + AttributeAWSDynamoDBAttributesToGet, + AttributeAWSDynamoDBConsistentRead, + AttributeAWSDynamoDBConsumedCapacity, + AttributeAWSDynamoDBCount, + AttributeAWSDynamoDBExclusiveStartTable, + AttributeAWSDynamoDBGlobalSecondaryIndexUpdates, + AttributeAWSDynamoDBGlobalSecondaryIndexes, + AttributeAWSDynamoDBIndexName, + AttributeAWSDynamoDBItemCollectionMetrics, + AttributeAWSDynamoDBLimit, + AttributeAWSDynamoDBLocalSecondaryIndexes, + AttributeAWSDynamoDBProjection, + AttributeAWSDynamoDBProvisionedReadCapacity, + AttributeAWSDynamoDBProvisionedWriteCapacity, + AttributeAWSDynamoDBScanForward, + AttributeAWSDynamoDBScannedCount, + AttributeAWSDynamoDBSegment, + AttributeAWSDynamoDBSelect, + AttributeAWSDynamoDBTableCount, + AttributeAWSDynamoDBTableNames, + AttributeAWSDynamoDBTotalSegments, + 
AttributeBrowserBrands, + AttributeBrowserLanguage, + AttributeBrowserMobile, + AttributeBrowserPlatform, + AttributeClientAddress, + AttributeClientPort, + AttributeCloudAccountID, + AttributeCloudAvailabilityZone, + AttributeCloudPlatform, + AttributeCloudProvider, + AttributeCloudRegion, + AttributeCloudResourceID, + AttributeCloudeventsEventID, + AttributeCloudeventsEventSource, + AttributeCloudeventsEventSpecVersion, + AttributeCloudeventsEventSubject, + AttributeCloudeventsEventType, + AttributeCodeColumn, + AttributeCodeFilepath, + AttributeCodeFunction, + AttributeCodeLineNumber, + AttributeCodeNamespace, + AttributeCodeStacktrace, + AttributeContainerCommand, + AttributeContainerCommandArgs, + AttributeContainerCommandLine, + AttributeContainerCPUState, + AttributeContainerID, + AttributeContainerImageID, + AttributeContainerImageName, + AttributeContainerImageRepoDigests, + AttributeContainerImageTags, + AttributeContainerName, + AttributeContainerRuntime, + AttributeDBCassandraConsistencyLevel, + AttributeDBCassandraCoordinatorDC, + AttributeDBCassandraCoordinatorID, + AttributeDBCassandraIdempotence, + AttributeDBCassandraPageSize, + AttributeDBCassandraSpeculativeExecutionCount, + AttributeDBCassandraTable, + AttributeDBCosmosDBClientID, + AttributeDBCosmosDBConnectionMode, + AttributeDBCosmosDBContainer, + AttributeDBCosmosDBOperationType, + AttributeDBCosmosDBRequestCharge, + AttributeDBCosmosDBRequestContentLength, + AttributeDBCosmosDBStatusCode, + AttributeDBCosmosDBSubStatusCode, + AttributeDBElasticsearchClusterName, + AttributeDBInstanceID, + AttributeDBMongoDBCollection, + AttributeDBMSSQLInstanceName, + AttributeDBName, + AttributeDBOperation, + AttributeDBRedisDBIndex, + AttributeDBSQLTable, + AttributeDBStatement, + AttributeDBSystem, + AttributeDBUser, + AttributeDeploymentEnvironment, + AttributeDBConnectionString, + AttributeDBElasticsearchNodeName, + AttributeDBJDBCDriverClassname, + AttributeHTTPFlavor, + AttributeHTTPMethod, + AttributeHTTPRequestContentLength, + AttributeHTTPResponseContentLength, + AttributeHTTPScheme, + AttributeHTTPStatusCode, + AttributeHTTPTarget, + AttributeHTTPURL, + AttributeHTTPUserAgent, + AttributeMessagingKafkaDestinationPartition, + AttributeNetHostName, + AttributeNetHostPort, + AttributeNetPeerName, + AttributeNetPeerPort, + AttributeNetProtocolName, + AttributeNetProtocolVersion, + AttributeNetSockFamily, + AttributeNetSockHostAddr, + AttributeNetSockHostPort, + AttributeNetSockPeerAddr, + AttributeNetSockPeerName, + AttributeNetSockPeerPort, + AttributeNetTransport, + AttributeSystemProcessesStatus, + AttributeDestinationAddress, + AttributeDestinationPort, + AttributeDeviceID, + AttributeDeviceManufacturer, + AttributeDeviceModelIdentifier, + AttributeDeviceModelName, + AttributeDiskIoDirection, + AttributeDNSQuestionName, + AttributeEnduserID, + AttributeEnduserRole, + AttributeEnduserScope, + AttributeErrorType, + AttributeExceptionEscaped, + AttributeExceptionMessage, + AttributeExceptionStacktrace, + AttributeExceptionType, + AttributeFaaSColdstart, + AttributeFaaSCron, + AttributeFaaSDocumentCollection, + AttributeFaaSDocumentName, + AttributeFaaSDocumentOperation, + AttributeFaaSDocumentTime, + AttributeFaaSInstance, + AttributeFaaSInvocationID, + AttributeFaaSInvokedName, + AttributeFaaSInvokedProvider, + AttributeFaaSInvokedRegion, + AttributeFaaSMaxMemory, + AttributeFaaSName, + AttributeFaaSTime, + AttributeFaaSTrigger, + AttributeFaaSVersion, + AttributeFeatureFlagKey, + AttributeFeatureFlagProviderName, + 
AttributeFeatureFlagVariant, + AttributeFileDirectory, + AttributeFileExtension, + AttributeFileName, + AttributeFilePath, + AttributeFileSize, + AttributeGCPCloudRunJobExecution, + AttributeGCPCloudRunJobTaskIndex, + AttributeGCPGceInstanceHostname, + AttributeGCPGceInstanceName, + AttributeHostArch, + AttributeHostCPUCacheL2Size, + AttributeHostCPUFamily, + AttributeHostCPUModelID, + AttributeHostCPUModelName, + AttributeHostCPUStepping, + AttributeHostCPUVendorID, + AttributeHostID, + AttributeHostImageID, + AttributeHostImageName, + AttributeHostImageVersion, + AttributeHostIP, + AttributeHostMac, + AttributeHostName, + AttributeHostType, + AttributeHTTPConnectionState, + AttributeHTTPRequestBodySize, + AttributeHTTPRequestMethod, + AttributeHTTPRequestMethodOriginal, + AttributeHTTPRequestResendCount, + AttributeHTTPRequestSize, + AttributeHTTPResponseBodySize, + AttributeHTTPResponseSize, + AttributeHTTPResponseStatusCode, + AttributeHTTPRoute, + AttributeK8SClusterName, + AttributeK8SClusterUID, + AttributeK8SContainerName, + AttributeK8SContainerRestartCount, + AttributeK8SCronJobName, + AttributeK8SCronJobUID, + AttributeK8SDaemonSetName, + AttributeK8SDaemonSetUID, + AttributeK8SDeploymentName, + AttributeK8SDeploymentUID, + AttributeK8SJobName, + AttributeK8SJobUID, + AttributeK8SNamespaceName, + AttributeK8SNodeName, + AttributeK8SNodeUID, + AttributeK8SPodName, + AttributeK8SPodUID, + AttributeK8SReplicaSetName, + AttributeK8SReplicaSetUID, + AttributeK8SStatefulSetName, + AttributeK8SStatefulSetUID, + AttributeMessagingBatchMessageCount, + AttributeMessagingClientID, + AttributeMessagingDestinationAnonymous, + AttributeMessagingDestinationName, + AttributeMessagingDestinationPartitionID, + AttributeMessagingDestinationTemplate, + AttributeMessagingDestinationTemporary, + AttributeMessagingDestinationPublishAnonymous, + AttributeMessagingDestinationPublishName, + AttributeMessagingEventhubsConsumerGroup, + AttributeMessagingEventhubsMessageEnqueuedTime, + AttributeMessagingGCPPubsubMessageOrderingKey, + AttributeMessagingKafkaConsumerGroup, + AttributeMessagingKafkaMessageKey, + AttributeMessagingKafkaMessageOffset, + AttributeMessagingKafkaMessageTombstone, + AttributeMessagingMessageBodySize, + AttributeMessagingMessageConversationID, + AttributeMessagingMessageEnvelopeSize, + AttributeMessagingMessageID, + AttributeMessagingOperation, + AttributeMessagingRabbitmqDestinationRoutingKey, + AttributeMessagingRabbitmqMessageDeliveryTag, + AttributeMessagingRocketmqClientGroup, + AttributeMessagingRocketmqConsumptionModel, + AttributeMessagingRocketmqMessageDelayTimeLevel, + AttributeMessagingRocketmqMessageDeliveryTimestamp, + AttributeMessagingRocketmqMessageGroup, + AttributeMessagingRocketmqMessageKeys, + AttributeMessagingRocketmqMessageTag, + AttributeMessagingRocketmqMessageType, + AttributeMessagingRocketmqNamespace, + AttributeMessagingServicebusDestinationSubscriptionName, + AttributeMessagingServicebusDispositionStatus, + AttributeMessagingServicebusMessageDeliveryCount, + AttributeMessagingServicebusMessageEnqueuedTime, + AttributeMessagingSystem, + AttributeNetworkCarrierIcc, + AttributeNetworkCarrierMcc, + AttributeNetworkCarrierMnc, + AttributeNetworkCarrierName, + AttributeNetworkConnectionSubtype, + AttributeNetworkConnectionType, + AttributeNetworkIoDirection, + AttributeNetworkLocalAddress, + AttributeNetworkLocalPort, + AttributeNetworkPeerAddress, + AttributeNetworkPeerPort, + AttributeNetworkProtocolName, + AttributeNetworkProtocolVersion, + 
AttributeNetworkTransport, + AttributeNetworkType, + AttributeOciManifestDigest, + AttributeOSBuildID, + AttributeOSDescription, + AttributeOSName, + AttributeOSType, + AttributeOSVersion, + AttributeProcessCommand, + AttributeProcessCommandArgs, + AttributeProcessCommandLine, + AttributeProcessExecutableName, + AttributeProcessExecutablePath, + AttributeProcessOwner, + AttributeProcessParentPID, + AttributeProcessPID, + AttributeProcessRuntimeDescription, + AttributeProcessRuntimeName, + AttributeProcessRuntimeVersion, + AttributeRPCConnectRPCErrorCode, + AttributeRPCGRPCStatusCode, + AttributeRPCJsonrpcErrorCode, + AttributeRPCJsonrpcErrorMessage, + AttributeRPCJsonrpcRequestID, + AttributeRPCJsonrpcVersion, + AttributeRPCMethod, + AttributeRPCService, + AttributeRPCSystem, + AttributeServerAddress, + AttributeServerPort, + AttributeServiceInstanceID, + AttributeServiceName, + AttributeServiceNamespace, + AttributeServiceVersion, + AttributeSessionID, + AttributeSessionPreviousID, + AttributeSourceAddress, + AttributeSourcePort, + AttributeTelemetrySDKLanguage, + AttributeTelemetrySDKName, + AttributeTelemetrySDKVersion, + AttributeTelemetryDistroName, + AttributeTelemetryDistroVersion, + AttributeThreadID, + AttributeThreadName, + AttributeTLSCipher, + AttributeTLSClientCertificate, + AttributeTLSClientCertificateChain, + AttributeTLSClientHashMd5, + AttributeTLSClientHashSha1, + AttributeTLSClientHashSha256, + AttributeTLSClientIssuer, + AttributeTLSClientJa3, + AttributeTLSClientNotAfter, + AttributeTLSClientNotBefore, + AttributeTLSClientServerName, + AttributeTLSClientSubject, + AttributeTLSClientSupportedCiphers, + AttributeTLSCurve, + AttributeTLSEstablished, + AttributeTLSNextProtocol, + AttributeTLSProtocolName, + AttributeTLSProtocolVersion, + AttributeTLSResumed, + AttributeTLSServerCertificate, + AttributeTLSServerCertificateChain, + AttributeTLSServerHashMd5, + AttributeTLSServerHashSha1, + AttributeTLSServerHashSha256, + AttributeTLSServerIssuer, + AttributeTLSServerJa3s, + AttributeTLSServerNotAfter, + AttributeTLSServerNotBefore, + AttributeTLSServerSubject, + AttributeURLDomain, + AttributeURLExtension, + AttributeURLFragment, + AttributeURLFull, + AttributeURLOriginal, + AttributeURLPath, + AttributeURLPort, + AttributeURLQuery, + AttributeURLRegisteredDomain, + AttributeURLScheme, + AttributeURLSubdomain, + AttributeURLTopLevelDomain, + AttributeUserAgentName, + AttributeUserAgentOriginal, + AttributeUserAgentVersion, + } +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_event.go b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_event.go new file mode 100644 index 00000000000..9ce1307afe1 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_event.go @@ -0,0 +1,105 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +// This event represents an occurrence of a lifecycle transition on the iOS +// platform. +const ( + // This attribute represents the state the application has transitioned into at + // the occurrence of the event. + // + // Type: Enum + // Requirement Level: Required + // Stability: experimental + // Note: The iOS lifecycle states are defined in the UIApplicationDelegate + // documentation, and from which the OS terminology column values are derived. + AttributeIosState = "ios.state" +) + +const ( + // The app has become `active`. 
Associated with UIKit notification `applicationDidBecomeActive` + AttributeIosStateActive = "active" + // The app is now `inactive`. Associated with UIKit notification `applicationWillResignActive` + AttributeIosStateInactive = "inactive" + // The app is now in the background. This value is associated with UIKit notification `applicationDidEnterBackground` + AttributeIosStateBackground = "background" + // The app is now in the foreground. This value is associated with UIKit notification `applicationWillEnterForeground` + AttributeIosStateForeground = "foreground" + // The app is about to terminate. Associated with UIKit notification `applicationWillTerminate` + AttributeIosStateTerminate = "terminate" +) + +// This event represents an occurrence of a lifecycle transition on the Android +// platform. +const ( + // This attribute represents the state the application has transitioned into at + // the occurrence of the event. + // + // Type: Enum + // Requirement Level: Required + // Stability: experimental + // Note: The Android lifecycle states are defined in Activity lifecycle callbacks, + // and from which the OS identifiers are derived. + AttributeAndroidState = "android.state" +) + +const ( + // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time + AttributeAndroidStateCreated = "created" + // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state + AttributeAndroidStateBackground = "background" + // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states + AttributeAndroidStateForeground = "foreground" +) + +// RPC received/sent message. +const ( + // Compressed size of the message in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeMessageCompressedSize = "message.compressed_size" + // MUST be calculated as two different counters starting from 1 one for sent + // messages and one for received message. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Note: This way we guarantee that the values will be consistent between + // different implementations. + AttributeMessageID = "message.id" + // Whether this is a received or sent message. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeMessageType = "message.type" + // Uncompressed size of the message in bytes. 
+ // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeMessageUncompressedSize = "message.uncompressed_size" +) + +const ( + // sent + AttributeMessageTypeSent = "SENT" + // received + AttributeMessageTypeReceived = "RECEIVED" +) + +func GetEventSemanticConventionAttributeNames() []string { + return []string{ + AttributeIosState, + AttributeAndroidState, + AttributeMessageCompressedSize, + AttributeMessageID, + AttributeMessageType, + AttributeMessageUncompressedSize, + } +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_resource.go b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_resource.go new file mode 100644 index 00000000000..7b355e40661 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_resource.go @@ -0,0 +1,242 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // The ID of a running ECS task. The ID MUST be extracted from task.arn. + // + // Type: string + // Requirement Level: Conditionally Required - If and only if `task.arn` is + // populated. + // Stability: experimental + // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', + // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AttributeAWSECSTaskID = "aws.ecs.task.id" + // The ARN of an ECS cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSECSClusterARN = "aws.ecs.cluster.arn" + // The Amazon Resource Name (ARN) of an ECS container instance. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AttributeAWSECSContainerARN = "aws.ecs.container.arn" + // The launch type for an ECS task. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeAWSECSLaunchtype = "aws.ecs.launchtype" + // The ARN of a running ECS task. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', + // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task- + // id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AttributeAWSECSTaskARN = "aws.ecs.task.arn" + // The family name of the ECS task definition used to create the ECS task. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AttributeAWSECSTaskFamily = "aws.ecs.task.family" + // The revision for the task definition used to create the ECS task. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '8', '26' + AttributeAWSECSTaskRevision = "aws.ecs.task.revision" +) + +const ( + // ec2 + AttributeAWSECSLaunchtypeEC2 = "ec2" + // fargate + AttributeAWSECSLaunchtypeFargate = "fargate" +) + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // The ARN of an EKS cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSEKSClusterARN = "aws.eks.cluster.arn" +) + +// Resources specific to Amazon Web Services. 
+const ( + // The Amazon Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the log group ARN format documentation. + AttributeAWSLogGroupARNs = "aws.log.group.arns" + // The name(s) of the AWS log group(s) an application is writing to. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each write + // to their own log group. + AttributeAWSLogGroupNames = "aws.log.group.names" + // The ARN(s) of the AWS log stream(s). + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- + // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the log stream ARN format documentation. One log group can contain + // several log streams, so these ARNs necessarily identify both a log group and a + // log stream. + AttributeAWSLogStreamARNs = "aws.log.stream.arns" + // The name(s) of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AttributeAWSLogStreamNames = "aws.log.stream.names" +) + +// Heroku dyno metadata +const ( + // Unique identifier for the application + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + AttributeHerokuAppID = "heroku.app.id" + // Commit hash for the current release + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + AttributeHerokuReleaseCommit = "heroku.release.commit" + // Time and date the release was created + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2022-10-23T18:00:42Z' + AttributeHerokuReleaseCreationTimestamp = "heroku.release.creation_timestamp" +) + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // The name of the web engine. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'WildFly' + AttributeWebEngineName = "webengine.name" + // Additional description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' + AttributeWebEngineDescription = "webengine.description" + // The version of the web engine. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '21.0.0' + AttributeWebEngineVersion = "webengine.version" +) + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // The name of the instrumentation scope - (InstrumentationScope.Name in OTLP). 
+ // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + AttributeOTelScopeName = "otel.scope.name" + // The version of the instrumentation scope - (InstrumentationScope.Version in + // OTLP). + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '1.0.0' + AttributeOTelScopeVersion = "otel.scope.version" +) + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry +// Scope's concepts. +const ( + // None + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: use the `otel.scope.name` attribute. + // Examples: 'io.opentelemetry.contrib.mongodb' + AttributeOTelLibraryName = "otel.library.name" + // None + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: use the `otel.scope.version` attribute. + // Examples: '1.0.0' + AttributeOTelLibraryVersion = "otel.library.version" +) + +func GetResourceSemanticConventionAttributeNames() []string { + return []string{ + AttributeAWSECSTaskID, + AttributeAWSECSClusterARN, + AttributeAWSECSContainerARN, + AttributeAWSECSLaunchtype, + AttributeAWSECSTaskARN, + AttributeAWSECSTaskFamily, + AttributeAWSECSTaskRevision, + AttributeAWSEKSClusterARN, + AttributeAWSLogGroupARNs, + AttributeAWSLogGroupNames, + AttributeAWSLogStreamARNs, + AttributeAWSLogStreamNames, + AttributeHerokuAppID, + AttributeHerokuReleaseCommit, + AttributeHerokuReleaseCreationTimestamp, + AttributeWebEngineName, + AttributeWebEngineDescription, + AttributeWebEngineVersion, + AttributeOTelScopeName, + AttributeOTelScopeVersion, + AttributeOTelLibraryName, + AttributeOTelLibraryVersion, + } +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_trace.go b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_trace.go new file mode 100644 index 00000000000..b214a9f4fb0 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_trace.go @@ -0,0 +1,245 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +// Operations that access some remote service. +const ( + // The service.name of the remote service. SHOULD be equal to the actual + // service.name resource attribute of the remote service if any. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'AuthTokenCache' + AttributePeerService = "peer.service" +) + +// Span attributes used by AWS Lambda (in addition to general `faas` +// attributes). +const ( + // The full invoked ARN as provided on the Context passed to the function (Lambda- + // Runtime-Invoked-Function-ARN header on the /runtime/invocation/next + // applicable). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from cloud.resource_id if an alias is involved. + AttributeAWSLambdaInvokedARN = "aws.lambda.invoked_arn" +) + +// Semantic conventions for the OpenTracing Shim +const ( + // Parent-child Reference type + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The causal relationship between a child Span and a parent Span. 
+ AttributeOpentracingRefType = "opentracing.ref_type" +) + +const ( + // The parent Span depends on the child Span in some capacity + AttributeOpentracingRefTypeChildOf = "child_of" + // The parent Span doesn't depend in any way on the result of the child Span + AttributeOpentracingRefTypeFollowsFrom = "follows_from" +) + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // Name of the code, either "OK" or "ERROR". MUST NOT be set + // if the status code is UNSET. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + AttributeOTelStatusCode = "otel.status_code" + // Description of the Status if it has a value, otherwise not set. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'resource not found' + AttributeOTelStatusDescription = "otel.status_description" +) + +const ( + // The operation has been validated by an Application developer or Operator to have completed successfully + AttributeOTelStatusCodeOk = "OK" + // The operation contains an error + AttributeOTelStatusCodeError = "ERROR" +) + +// The `aws` conventions apply to operations using the AWS SDK. They map +// request or response parameters in AWS SDK API calls to attributes on a Span. +// The conventions have been collected over time based on feedback from AWS +// users of tracing and will continue to evolve as new interesting conventions +// are found. +// Some descriptions are also provided for populating general OpenTelemetry +// semantic conventions based on these APIs. +const ( + // The AWS request ID as returned in the response headers x-amz-request-id or + // x-amz-requestid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AttributeAWSRequestID = "aws.request_id" +) + +// Attributes that exist for S3 request types. +const ( + // The S3 bucket name the request refers to. Corresponds to the --bucket parameter + // of the S3 API operations. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'some-bucket-name' + // Note: The bucket attribute is applicable to all S3 operations that reference a + // bucket, i.e. that require the bucket name as a mandatory parameter. + // This applies to almost all S3 operations except list-buckets. + AttributeAWSS3Bucket = "aws.s3.bucket" + // The source object (in the form bucket/key) for the copy operation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The copy_source attribute applies to S3 copy operations and corresponds + // to the --copy-source parameter + // of the copy-object operation within the S3 API. + // This applies in particular to the following operations: + //   - copy-object + //   - upload-part-copy
+ AttributeAWSS3CopySource = "aws.s3.copy_source" + // The delete request container that specifies the objects to be deleted. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string} + // ],Quiet=boolean' + // Note: The delete attribute is only applicable to the delete-object operation. + // The delete attribute corresponds to the --delete parameter of the + // delete-objects operation within the S3 API. + AttributeAWSS3Delete = "aws.s3.delete" + // The S3 object key the request refers to. Corresponds to the --key parameter of + // the S3 API operations. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The key attribute is applicable to all object-related S3 operations, i.e. + // that require the object key as a mandatory parameter. + // This applies in particular to the following operations: + //   - copy-object + //   - delete-object + //   - get-object + //   - head-object + //   - put-object + //   - restore-object + //   - select-object-content + //   - abort-multipart-upload + //   - complete-multipart-upload + //   - create-multipart-upload + //   - list-parts + //   - upload-part + //   - upload-part-copy
+ AttributeAWSS3Key = "aws.s3.key" + // The part number of the part being uploaded in a multipart-upload operation. + // This is a positive integer between 1 and 10,000. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3456 + // Note: The part_number attribute is only applicable to the upload-part + // and upload-part-copy operations. + // The part_number attribute corresponds to the --part-number parameter of the + // upload-part operation within the S3 API. + AttributeAWSS3PartNumber = "aws.s3.part_number" + // Upload ID that identifies the multipart upload. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The upload_id attribute applies to S3 multipart-upload operations and + // corresponds to the --upload-id parameter + // of the S3 API multipart operations. + // This applies in particular to the following operations: + //   - abort-multipart-upload + //   - complete-multipart-upload + //   - list-parts + //   - upload-part + //   - upload-part-copy
+ AttributeAWSS3UploadID = "aws.s3.upload_id" +) + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. +const ( + // The GraphQL document being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + AttributeGraphqlDocument = "graphql.document" + // The name of the operation being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'findBookByID' + AttributeGraphqlOperationName = "graphql.operation.name" + // The type of the operation being executed. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'query', 'mutation', 'subscription' + AttributeGraphqlOperationType = "graphql.operation.type" +) + +const ( + // GraphQL query + AttributeGraphqlOperationTypeQuery = "query" + // GraphQL mutation + AttributeGraphqlOperationTypeMutation = "mutation" + // GraphQL subscription + AttributeGraphqlOperationTypeSubscription = "subscription" +) + +func GetTraceSemanticConventionAttributeNames() []string { + return []string{ + AttributePeerService, + AttributeAWSLambdaInvokedARN, + AttributeOpentracingRefType, + AttributeOTelStatusCode, + AttributeOTelStatusDescription, + AttributeAWSRequestID, + AttributeAWSS3Bucket, + AttributeAWSS3CopySource, + AttributeAWSS3Delete, + AttributeAWSS3Key, + AttributeAWSS3PartNumber, + AttributeAWSS3UploadID, + AttributeGraphqlDocument, + AttributeGraphqlOperationName, + AttributeGraphqlOperationType, + } +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/schema.go b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/schema.go new file mode 100644 index 00000000000..3739fe9ef81 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/collector/semconv/v1.25.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.25.0" diff --git a/vendor/go.opentelemetry.io/collector/service/documentation.md b/vendor/go.opentelemetry.io/collector/service/documentation.md new file mode 100644 index 00000000000..2cbd48a0708 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/documentation.md @@ -0,0 +1,55 @@ +[comment]: <> (Code generated by mdatagen. DO NOT EDIT.) + +# service + +## Internal Telemetry + +The following telemetry is emitted by this component. 
+ +### process_cpu_seconds + +Total CPU user and system time in seconds + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| s | Sum | Double | true | + +### process_memory_rss + +Total physical memory (resident set size) + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +### process_runtime_heap_alloc_bytes + +Bytes of allocated heap objects (see 'go doc runtime.MemStats.HeapAlloc') + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +### process_runtime_total_alloc_bytes + +Cumulative bytes allocated for heap objects (see 'go doc runtime.MemStats.TotalAlloc') + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| By | Sum | Int | true | + +### process_runtime_total_sys_memory_bytes + +Total bytes of memory obtained from the OS (see 'go doc runtime.MemStats.Sys') + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +### process_uptime + +Uptime of the process + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| s | Sum | Double | true | diff --git a/vendor/go.opentelemetry.io/collector/service/extensions/extensions.go b/vendor/go.opentelemetry.io/collector/service/extensions/extensions.go index e05c5aa01f0..c8f632965ca 100644 --- a/vendor/go.opentelemetry.io/collector/service/extensions/extensions.go +++ b/vendor/go.opentelemetry.io/collector/service/extensions/extensions.go @@ -183,7 +183,7 @@ func New(ctx context.Context, set Settings, cfg Config) (*Extensions, error) { ID: extID, Kind: component.KindExtension, } - extSet := extension.CreateSettings{ + extSet := extension.Settings{ ID: extID, TelemetrySettings: set.Telemetry.ToComponentTelemetrySettings(instanceID), BuildInfo: set.BuildInfo, diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/graph.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/graph.go index c95d1f54b7e..cbf7e8538c5 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/graph/graph.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/graph.go @@ -32,6 +32,7 @@ import ( "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/service/internal/capabilityconsumer" "go.opentelemetry.io/collector/service/internal/servicetelemetry" + "go.opentelemetry.io/collector/service/internal/status" "go.opentelemetry.io/collector/service/pipelines" ) @@ -69,7 +70,6 @@ func Build(ctx context.Context, set Settings) (*Graph, error) { componentGraph: simple.NewDirectedGraph(), pipelines: make(map[component.ID]*pipelineNodes, len(set.PipelineConfigs)), instanceIDs: make(map[int64]*component.InstanceID), - telemetry: set.Telemetry, } for pipelineID := range set.PipelineConfigs { pipelines.pipelines[pipelineID] = &pipelineNodes{ @@ -394,7 +394,7 @@ type pipelineNodes struct { exporters map[int64]graph.Node } -func (g *Graph) StartAll(ctx context.Context, host component.Host) error { +func (g *Graph) StartAll(ctx context.Context, host component.Host, reporter status.Reporter) error { nodes, err := topo.Sort(g.componentGraph) if err != nil { return err @@ -413,25 +413,25 @@ func (g *Graph) StartAll(ctx context.Context, host component.Host) error { } instanceID := g.instanceIDs[node.ID()] - g.telemetry.Status.ReportStatus( + reporter.ReportStatus( instanceID, component.NewStatusEvent(component.StatusStarting), ) if compErr := 
comp.Start(ctx, host); compErr != nil { - g.telemetry.Status.ReportStatus( + reporter.ReportStatus( instanceID, component.NewPermanentErrorEvent(compErr), ) return compErr } - g.telemetry.Status.ReportOKIfStarting(instanceID) + reporter.ReportOKIfStarting(instanceID) } return nil } -func (g *Graph) ShutdownAll(ctx context.Context) error { +func (g *Graph) ShutdownAll(ctx context.Context, reporter status.Reporter) error { nodes, err := topo.Sort(g.componentGraph) if err != nil { return err @@ -452,21 +452,21 @@ func (g *Graph) ShutdownAll(ctx context.Context) error { } instanceID := g.instanceIDs[node.ID()] - g.telemetry.Status.ReportStatus( + reporter.ReportStatus( instanceID, component.NewStatusEvent(component.StatusStopping), ) if compErr := comp.Shutdown(ctx); compErr != nil { errs = multierr.Append(errs, compErr) - g.telemetry.Status.ReportStatus( + reporter.ReportStatus( instanceID, component.NewPermanentErrorEvent(compErr), ) continue } - g.telemetry.Status.ReportStatus( + reporter.ReportStatus( instanceID, component.NewStatusEvent(component.StatusStopped), ) diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/nodes.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/nodes.go index 238bbf3b287..9edb700b242 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/graph/nodes.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/nodes.go @@ -73,7 +73,7 @@ func (n *receiverNode) buildComponent(ctx context.Context, builder *receiver.Builder, nexts []baseConsumer, ) error { - set := receiver.CreateSettings{ID: n.componentID, TelemetrySettings: tel, BuildInfo: info} + set := receiver.Settings{ID: n.componentID, TelemetrySettings: tel, BuildInfo: info} set.TelemetrySettings.Logger = components.ReceiverLogger(tel.Logger, n.componentID, n.pipelineType) var err error switch n.pipelineType { @@ -133,7 +133,7 @@ func (n *processorNode) buildComponent(ctx context.Context, builder *processor.Builder, next baseConsumer, ) error { - set := processor.CreateSettings{ID: n.componentID, TelemetrySettings: tel, BuildInfo: info} + set := processor.Settings{ID: n.componentID, TelemetrySettings: tel, BuildInfo: info} set.TelemetrySettings.Logger = components.ProcessorLogger(set.TelemetrySettings.Logger, n.componentID, n.pipelineID) var err error switch n.pipelineID.Type() { @@ -181,7 +181,7 @@ func (n *exporterNode) buildComponent( info component.BuildInfo, builder *exporter.Builder, ) error { - set := exporter.CreateSettings{ID: n.componentID, TelemetrySettings: tel, BuildInfo: info} + set := exporter.Settings{ID: n.componentID, TelemetrySettings: tel, BuildInfo: info} set.TelemetrySettings.Logger = components.ExporterLogger(set.TelemetrySettings.Logger, n.componentID, n.pipelineType) var err error switch n.pipelineType { @@ -233,7 +233,7 @@ func (n *connectorNode) buildComponent( builder *connector.Builder, nexts []baseConsumer, ) error { - set := connector.CreateSettings{ID: n.componentID, TelemetrySettings: tel, BuildInfo: info} + set := connector.Settings{ID: n.componentID, TelemetrySettings: tel, BuildInfo: info} set.TelemetrySettings.Logger = components.ConnectorLogger(set.TelemetrySettings.Logger, n.componentID, n.exprPipelineType, n.rcvrPipelineType) switch n.rcvrPipelineType { diff --git a/vendor/go.opentelemetry.io/collector/service/internal/metadata/generated_telemetry.go b/vendor/go.opentelemetry.io/collector/service/internal/metadata/generated_telemetry.go new file mode 100644 index 00000000000..dac0e287f35 --- /dev/null +++ 
b/vendor/go.opentelemetry.io/collector/service/internal/metadata/generated_telemetry.go @@ -0,0 +1,179 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "context" + "errors" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configtelemetry" +) + +func Meter(settings component.TelemetrySettings) metric.Meter { + return settings.MeterProvider.Meter("go.opentelemetry.io/collector/service") +} + +func Tracer(settings component.TelemetrySettings) trace.Tracer { + return settings.TracerProvider.Tracer("go.opentelemetry.io/collector/service") +} + +// TelemetryBuilder provides an interface for components to report telemetry +// as defined in metadata and user config. +type TelemetryBuilder struct { + meter metric.Meter + ProcessCPUSeconds metric.Float64ObservableCounter + observeProcessCPUSeconds func() float64 + ProcessMemoryRss metric.Int64ObservableGauge + observeProcessMemoryRss func() int64 + ProcessRuntimeHeapAllocBytes metric.Int64ObservableGauge + observeProcessRuntimeHeapAllocBytes func() int64 + ProcessRuntimeTotalAllocBytes metric.Int64ObservableCounter + observeProcessRuntimeTotalAllocBytes func() int64 + ProcessRuntimeTotalSysMemoryBytes metric.Int64ObservableGauge + observeProcessRuntimeTotalSysMemoryBytes func() int64 + ProcessUptime metric.Float64ObservableCounter + observeProcessUptime func() float64 + level configtelemetry.Level + attributeSet attribute.Set +} + +// telemetryBuilderOption applies changes to default builder. +type telemetryBuilderOption func(*TelemetryBuilder) + +// WithLevel sets the current telemetry level for the component. +func WithLevel(lvl configtelemetry.Level) telemetryBuilderOption { + return func(builder *TelemetryBuilder) { + builder.level = lvl + } +} + +// WithAttributeSet applies a set of attributes for asynchronous instruments. +func WithAttributeSet(set attribute.Set) telemetryBuilderOption { + return func(builder *TelemetryBuilder) { + builder.attributeSet = set + } +} + +// WithProcessCPUSecondsCallback sets callback for observable ProcessCPUSeconds metric. +func WithProcessCPUSecondsCallback(cb func() float64) telemetryBuilderOption { + return func(builder *TelemetryBuilder) { + builder.observeProcessCPUSeconds = cb + } +} + +// WithProcessMemoryRssCallback sets callback for observable ProcessMemoryRss metric. +func WithProcessMemoryRssCallback(cb func() int64) telemetryBuilderOption { + return func(builder *TelemetryBuilder) { + builder.observeProcessMemoryRss = cb + } +} + +// WithProcessRuntimeHeapAllocBytesCallback sets callback for observable ProcessRuntimeHeapAllocBytes metric. +func WithProcessRuntimeHeapAllocBytesCallback(cb func() int64) telemetryBuilderOption { + return func(builder *TelemetryBuilder) { + builder.observeProcessRuntimeHeapAllocBytes = cb + } +} + +// WithProcessRuntimeTotalAllocBytesCallback sets callback for observable ProcessRuntimeTotalAllocBytes metric. +func WithProcessRuntimeTotalAllocBytesCallback(cb func() int64) telemetryBuilderOption { + return func(builder *TelemetryBuilder) { + builder.observeProcessRuntimeTotalAllocBytes = cb + } +} + +// WithProcessRuntimeTotalSysMemoryBytesCallback sets callback for observable ProcessRuntimeTotalSysMemoryBytes metric. 
+func WithProcessRuntimeTotalSysMemoryBytesCallback(cb func() int64) telemetryBuilderOption { + return func(builder *TelemetryBuilder) { + builder.observeProcessRuntimeTotalSysMemoryBytes = cb + } +} + +// WithProcessUptimeCallback sets callback for observable ProcessUptime metric. +func WithProcessUptimeCallback(cb func() float64) telemetryBuilderOption { + return func(builder *TelemetryBuilder) { + builder.observeProcessUptime = cb + } +} + +// NewTelemetryBuilder provides a struct with methods to update all internal telemetry +// for a component +func NewTelemetryBuilder(settings component.TelemetrySettings, options ...telemetryBuilderOption) (*TelemetryBuilder, error) { + builder := TelemetryBuilder{level: configtelemetry.LevelBasic} + for _, op := range options { + op(&builder) + } + var err, errs error + if builder.level >= configtelemetry.LevelBasic { + builder.meter = Meter(settings) + } else { + builder.meter = noop.Meter{} + } + builder.ProcessCPUSeconds, err = builder.meter.Float64ObservableCounter( + "process_cpu_seconds", + metric.WithDescription("Total CPU user and system time in seconds"), + metric.WithUnit("s"), + metric.WithFloat64Callback(func(_ context.Context, o metric.Float64Observer) error { + o.Observe(builder.observeProcessCPUSeconds(), metric.WithAttributeSet(builder.attributeSet)) + return nil + }), + ) + errs = errors.Join(errs, err) + builder.ProcessMemoryRss, err = builder.meter.Int64ObservableGauge( + "process_memory_rss", + metric.WithDescription("Total physical memory (resident set size)"), + metric.WithUnit("By"), + metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error { + o.Observe(builder.observeProcessMemoryRss(), metric.WithAttributeSet(builder.attributeSet)) + return nil + }), + ) + errs = errors.Join(errs, err) + builder.ProcessRuntimeHeapAllocBytes, err = builder.meter.Int64ObservableGauge( + "process_runtime_heap_alloc_bytes", + metric.WithDescription("Bytes of allocated heap objects (see 'go doc runtime.MemStats.HeapAlloc')"), + metric.WithUnit("By"), + metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error { + o.Observe(builder.observeProcessRuntimeHeapAllocBytes(), metric.WithAttributeSet(builder.attributeSet)) + return nil + }), + ) + errs = errors.Join(errs, err) + builder.ProcessRuntimeTotalAllocBytes, err = builder.meter.Int64ObservableCounter( + "process_runtime_total_alloc_bytes", + metric.WithDescription("Cumulative bytes allocated for heap objects (see 'go doc runtime.MemStats.TotalAlloc')"), + metric.WithUnit("By"), + metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error { + o.Observe(builder.observeProcessRuntimeTotalAllocBytes(), metric.WithAttributeSet(builder.attributeSet)) + return nil + }), + ) + errs = errors.Join(errs, err) + builder.ProcessRuntimeTotalSysMemoryBytes, err = builder.meter.Int64ObservableGauge( + "process_runtime_total_sys_memory_bytes", + metric.WithDescription("Total bytes of memory obtained from the OS (see 'go doc runtime.MemStats.Sys')"), + metric.WithUnit("By"), + metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error { + o.Observe(builder.observeProcessRuntimeTotalSysMemoryBytes(), metric.WithAttributeSet(builder.attributeSet)) + return nil + }), + ) + errs = errors.Join(errs, err) + builder.ProcessUptime, err = builder.meter.Float64ObservableCounter( + "process_uptime", + metric.WithDescription("Uptime of the process"), + metric.WithUnit("s"), + metric.WithFloat64Callback(func(_ context.Context, o 
metric.Float64Observer) error { + o.Observe(builder.observeProcessUptime(), metric.WithAttributeSet(builder.attributeSet)) + return nil + }), + ) + errs = errors.Join(errs, err) + return &builder, errs +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/proctelemetry/config.go b/vendor/go.opentelemetry.io/collector/service/internal/proctelemetry/config.go index 0a9a8b08d82..1c9ba0b63be 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/proctelemetry/config.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/proctelemetry/config.go @@ -8,6 +8,7 @@ import ( "encoding/json" "errors" "fmt" + "net" "net/http" "net/url" "os" @@ -175,7 +176,7 @@ func initPrometheusExporter(prometheusConfig *config.Prometheus, asyncErrorChann return nil, nil, fmt.Errorf("error creating otel prometheus exporter: %w", err) } - return exporter, InitPrometheusServer(promRegistry, fmt.Sprintf("%s:%d", *prometheusConfig.Host, *prometheusConfig.Port), asyncErrorChannel), nil + return exporter, InitPrometheusServer(promRegistry, net.JoinHostPort(*prometheusConfig.Host, fmt.Sprintf("%d", *prometheusConfig.Port)), asyncErrorChannel), nil } func initPullExporter(exporter config.MetricExporter, asyncErrorChannel chan error) (sdkmetric.Reader, *http.Server, error) { diff --git a/vendor/go.opentelemetry.io/collector/service/internal/proctelemetry/process_telemetry.go b/vendor/go.opentelemetry.io/collector/service/internal/proctelemetry/process_telemetry.go index 991897f8b1b..0b978758104 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/proctelemetry/process_telemetry.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/proctelemetry/process_telemetry.go @@ -10,15 +10,12 @@ import ( "sync" "time" - "github.com/shirou/gopsutil/v3/common" - "github.com/shirou/gopsutil/v3/process" - otelmetric "go.opentelemetry.io/otel/metric" - "go.uber.org/multierr" -) + "github.com/shirou/gopsutil/v4/common" + "github.com/shirou/gopsutil/v4/process" -const ( - scopeName = "go.opentelemetry.io/collector/service/process_telemetry" - processNameKey = "process_name" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/service/internal/metadata" + "go.opentelemetry.io/collector/service/internal/servicetelemetry" ) // processMetrics is a struct that contains views related to process metrics (cpu, mem, etc) @@ -28,13 +25,6 @@ type processMetrics struct { proc *process.Process context context.Context - otelProcessUptime otelmetric.Float64ObservableCounter - otelAllocMem otelmetric.Int64ObservableGauge - otelTotalAllocMem otelmetric.Int64ObservableCounter - otelSysMem otelmetric.Int64ObservableGauge - otelCPUSeconds otelmetric.Float64ObservableCounter - otelRSSMemory otelmetric.Int64ObservableGauge - // mu protects everything bellow. mu sync.Mutex lastMsRead time.Time @@ -64,7 +54,7 @@ func WithHostProc(hostProc string) RegisterOption { // RegisterProcessMetrics creates a new set of processMetrics (mem, cpu) that can be used to measure // basic information about this process. 
-func RegisterProcessMetrics(mp otelmetric.MeterProvider, ballastSizeBytes uint64, opts ...RegisterOption) error { +func RegisterProcessMetrics(cfg servicetelemetry.TelemetrySettings, ballastSizeBytes uint64, opts ...RegisterOption) error { set := registerOption{} for _, opt := range opts { opt.apply(&set) @@ -86,73 +76,15 @@ func RegisterProcessMetrics(mp otelmetric.MeterProvider, ballastSizeBytes uint64 return err } - return pm.record(mp.Meter(scopeName)) -} - -func (pm *processMetrics) record(meter otelmetric.Meter) error { - var errs, err error - - pm.otelProcessUptime, err = meter.Float64ObservableCounter( - "process_uptime", - otelmetric.WithDescription("Uptime of the process"), - otelmetric.WithUnit("s"), - otelmetric.WithFloat64Callback(func(_ context.Context, o otelmetric.Float64Observer) error { - o.Observe(pm.updateProcessUptime()) - return nil - })) - errs = multierr.Append(errs, err) - - pm.otelAllocMem, err = meter.Int64ObservableGauge( - "process_runtime_heap_alloc_bytes", - otelmetric.WithDescription("Bytes of allocated heap objects (see 'go doc runtime.MemStats.HeapAlloc')"), - otelmetric.WithUnit("By"), - otelmetric.WithInt64Callback(func(_ context.Context, o otelmetric.Int64Observer) error { - o.Observe(pm.updateAllocMem()) - return nil - })) - errs = multierr.Append(errs, err) - - pm.otelTotalAllocMem, err = meter.Int64ObservableCounter( - "process_runtime_total_alloc_bytes", - otelmetric.WithDescription("Cumulative bytes allocated for heap objects (see 'go doc runtime.MemStats.TotalAlloc')"), - otelmetric.WithUnit("By"), - otelmetric.WithInt64Callback(func(_ context.Context, o otelmetric.Int64Observer) error { - o.Observe(pm.updateTotalAllocMem()) - return nil - })) - errs = multierr.Append(errs, err) - - pm.otelSysMem, err = meter.Int64ObservableGauge( - "process_runtime_total_sys_memory_bytes", - otelmetric.WithDescription("Total bytes of memory obtained from the OS (see 'go doc runtime.MemStats.Sys')"), - otelmetric.WithUnit("By"), - otelmetric.WithInt64Callback(func(_ context.Context, o otelmetric.Int64Observer) error { - o.Observe(pm.updateSysMem()) - return nil - })) - errs = multierr.Append(errs, err) - - pm.otelCPUSeconds, err = meter.Float64ObservableCounter( - "process_cpu_seconds", - otelmetric.WithDescription("Total CPU user and system time in seconds"), - otelmetric.WithUnit("s"), - otelmetric.WithFloat64Callback(func(_ context.Context, o otelmetric.Float64Observer) error { - o.Observe(pm.updateCPUSeconds()) - return nil - })) - errs = multierr.Append(errs, err) - - pm.otelRSSMemory, err = meter.Int64ObservableGauge( - "process_memory_rss", - otelmetric.WithDescription("Total physical memory (resident set size)"), - otelmetric.WithUnit("By"), - otelmetric.WithInt64Callback(func(_ context.Context, o otelmetric.Int64Observer) error { - o.Observe(pm.updateRSSMemory()) - return nil - })) - errs = multierr.Append(errs, err) - - return errs + _, err = metadata.NewTelemetryBuilder(cfg.ToComponentTelemetrySettings(&component.InstanceID{}), + metadata.WithProcessUptimeCallback(pm.updateProcessUptime), + metadata.WithProcessRuntimeHeapAllocBytesCallback(pm.updateAllocMem), + metadata.WithProcessRuntimeTotalAllocBytesCallback(pm.updateTotalAllocMem), + metadata.WithProcessRuntimeTotalSysMemoryBytesCallback(pm.updateSysMem), + metadata.WithProcessCPUSecondsCallback(pm.updateCPUSeconds), + metadata.WithProcessMemoryRssCallback(pm.updateRSSMemory), + ) + return err } func (pm *processMetrics) updateProcessUptime() float64 { diff --git 
a/vendor/go.opentelemetry.io/collector/service/internal/servicetelemetry/telemetry_settings.go b/vendor/go.opentelemetry.io/collector/service/internal/servicetelemetry/telemetry_settings.go index 55c6b9f3962..55da9bcf0c2 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/servicetelemetry/telemetry_settings.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/servicetelemetry/telemetry_settings.go @@ -37,7 +37,7 @@ type TelemetrySettings struct { // Status contains a Reporter that allows the service to report status on behalf of a // component. - Status *status.Reporter + Status status.Reporter } // ToComponentTelemetrySettings returns a TelemetrySettings for a specific component derived from diff --git a/vendor/go.opentelemetry.io/collector/service/internal/status/status.go b/vendor/go.opentelemetry.io/collector/service/internal/status/status.go index e1d524906e5..fecae051d13 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/status/status.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/status/status.go @@ -96,7 +96,13 @@ type ServiceStatusFunc func(*component.InstanceID, *component.StatusEvent) var ErrStatusNotReady = errors.New("report component status is not ready until service start") // Reporter handles component status reporting -type Reporter struct { +type Reporter interface { + Ready() + ReportStatus(id *component.InstanceID, ev *component.StatusEvent) + ReportOKIfStarting(id *component.InstanceID) +} + +type reporter struct { mu sync.Mutex ready bool fsmMap map[*component.InstanceID]*fsm @@ -106,8 +112,8 @@ type Reporter struct { // NewReporter returns a reporter that will invoke the NotifyStatusFunc when a component's status // has changed. -func NewReporter(onStatusChange NotifyStatusFunc, onInvalidTransition InvalidTransitionFunc) *Reporter { - return &Reporter{ +func NewReporter(onStatusChange NotifyStatusFunc, onInvalidTransition InvalidTransitionFunc) Reporter { + return &reporter{ fsmMap: make(map[*component.InstanceID]*fsm), onStatusChange: onStatusChange, onInvalidTransition: onInvalidTransition, @@ -115,14 +121,14 @@ func NewReporter(onStatusChange NotifyStatusFunc, onInvalidTransition InvalidTra } // Ready enables status reporting -func (r *Reporter) Ready() { +func (r *reporter) Ready() { r.mu.Lock() defer r.mu.Unlock() r.ready = true } // ReportStatus reports status for the given InstanceID -func (r *Reporter) ReportStatus( +func (r *reporter) ReportStatus( id *component.InstanceID, ev *component.StatusEvent, ) { @@ -137,7 +143,7 @@ func (r *Reporter) ReportStatus( } } -func (r *Reporter) ReportOKIfStarting(id *component.InstanceID) { +func (r *reporter) ReportOKIfStarting(id *component.InstanceID) { r.mu.Lock() defer r.mu.Unlock() if !r.ready { @@ -152,7 +158,7 @@ func (r *Reporter) ReportOKIfStarting(id *component.InstanceID) { } // Note: a lock must be acquired before calling this method. 
-func (r *Reporter) componentFSM(id *component.InstanceID) *fsm { +func (r *reporter) componentFSM(id *component.InstanceID) *fsm { fsm, ok := r.fsmMap[id] if !ok { fsm = newFSM(func(ev *component.StatusEvent) { r.onStatusChange(id, ev) }) diff --git a/vendor/go.opentelemetry.io/collector/service/metadata.yaml b/vendor/go.opentelemetry.io/collector/service/metadata.yaml new file mode 100644 index 00000000000..d5417740d57 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/metadata.yaml @@ -0,0 +1,68 @@ +type: service + +status: + class: pkg + stability: + development: [traces, metrics, logs] + distributions: [core, contrib] + +tests: + goleak: + ignore: + top: + # See https://github.com/census-instrumentation/opencensus-go/issues/1191 for more information. + - "go.opencensus.io/stats/view.(*worker).start" + - "go.opentelemetry.io/collector/service/internal/proctelemetry.InitPrometheusServer.func1" + +telemetry: + metrics: + process_uptime: + enabled: true + description: Uptime of the process + unit: s + sum: + async: true + value_type: double + monotonic: true + + process_runtime_heap_alloc_bytes: + enabled: true + description: Bytes of allocated heap objects (see 'go doc runtime.MemStats.HeapAlloc') + unit: By + gauge: + async: true + value_type: int + + process_runtime_total_alloc_bytes: + enabled: true + description: Cumulative bytes allocated for heap objects (see 'go doc runtime.MemStats.TotalAlloc') + unit: By + sum: + async: true + value_type: int + monotonic: true + + process_runtime_total_sys_memory_bytes: + enabled: true + description: Total bytes of memory obtained from the OS (see 'go doc runtime.MemStats.Sys') + unit: By + gauge: + async: true + value_type: int + + process_cpu_seconds: + enabled: true + description: Total CPU user and system time in seconds + unit: s + sum: + async: true + value_type: double + monotonic: true + + process_memory_rss: + enabled: true + description: Total physical memory (resident set size) + unit: By + gauge: + async: true + value_type: int diff --git a/vendor/go.opentelemetry.io/collector/service/service.go b/vendor/go.opentelemetry.io/collector/service/service.go index 53ea3eebe65..775aa7d7238 100644 --- a/vendor/go.opentelemetry.io/collector/service/service.go +++ b/vendor/go.opentelemetry.io/collector/service/service.go @@ -1,6 +1,8 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +//go:generate mdatagen metadata.yaml + package service // import "go.opentelemetry.io/collector/service" import ( @@ -139,13 +141,24 @@ func New(ctx context.Context, set Settings, cfg Config) (*Service, error) { }), } + if err = srv.initGraph(ctx, set, cfg); err != nil { + err = multierr.Append(err, srv.shutdownTelemetry(ctx)) + return nil, err + } + // process the configuration and initialize the pipeline - if err = srv.initExtensionsAndPipeline(ctx, set, cfg); err != nil { - // If pipeline initialization fails then shut down telemetry + if err = srv.initExtensions(ctx, cfg.Extensions); err != nil { err = multierr.Append(err, srv.shutdownTelemetry(ctx)) return nil, err } + if cfg.Telemetry.Metrics.Level != configtelemetry.LevelNone && cfg.Telemetry.Metrics.Address != "" { + // The process telemetry initialization requires the ballast size, which is available after the extensions are initialized. 
+ if err = proctelemetry.RegisterProcessMetrics(srv.telemetrySettings, getBallastSize(srv.host)); err != nil { + return nil, fmt.Errorf("failed to register process metrics: %w", err) + } + } + return srv, nil } @@ -195,7 +208,7 @@ func (srv *Service) Start(ctx context.Context) error { } } - if err := srv.host.pipelines.StartAll(ctx, srv.host); err != nil { + if err := srv.host.pipelines.StartAll(ctx, srv.host, srv.telemetrySettings.Status); err != nil { return fmt.Errorf("cannot start pipelines: %w", err) } @@ -246,7 +259,7 @@ func (srv *Service) Shutdown(ctx context.Context) error { errs = multierr.Append(errs, fmt.Errorf("failed to notify that pipeline is not ready: %w", err)) } - if err := srv.host.pipelines.ShutdownAll(ctx); err != nil { + if err := srv.host.pipelines.ShutdownAll(ctx, srv.telemetrySettings.Status); err != nil { errs = multierr.Append(errs, fmt.Errorf("failed to shutdown pipelines: %w", err)) } @@ -261,19 +274,24 @@ func (srv *Service) Shutdown(ctx context.Context) error { return errs } -// Creates extensions and then builds the pipeline graph. -func (srv *Service) initExtensionsAndPipeline(ctx context.Context, set Settings, cfg Config) error { +// Creates extensions. +func (srv *Service) initExtensions(ctx context.Context, cfg extensions.Config) error { var err error extensionsSettings := extensions.Settings{ Telemetry: srv.telemetrySettings, BuildInfo: srv.buildInfo, Extensions: srv.host.extensions, } - if srv.host.serviceExtensions, err = extensions.New(ctx, extensionsSettings, cfg.Extensions); err != nil { + if srv.host.serviceExtensions, err = extensions.New(ctx, extensionsSettings, cfg); err != nil { return fmt.Errorf("failed to build extensions: %w", err) } + return nil +} - pSet := graph.Settings{ +// Creates the pipeline graph. +func (srv *Service) initGraph(ctx context.Context, set Settings, cfg Config) error { + var err error + if srv.host.pipelines, err = graph.Build(ctx, graph.Settings{ Telemetry: srv.telemetrySettings, BuildInfo: srv.buildInfo, ReceiverBuilder: set.Receivers, @@ -281,19 +299,9 @@ func (srv *Service) initExtensionsAndPipeline(ctx context.Context, set Settings, ExporterBuilder: set.Exporters, ConnectorBuilder: set.Connectors, PipelineConfigs: cfg.Pipelines, - } - - if srv.host.pipelines, err = graph.Build(ctx, pSet); err != nil { + }); err != nil { return fmt.Errorf("failed to build pipelines: %w", err) } - - if cfg.Telemetry.Metrics.Level != configtelemetry.LevelNone && cfg.Telemetry.Metrics.Address != "" { - // The process telemetry initialization requires the ballast size, which is available after the extensions are initialized. - if err = proctelemetry.RegisterProcessMetrics(srv.telemetrySettings.MeterProvider, getBallastSize(srv.host)); err != nil { - return fmt.Errorf("failed to register process metrics: %w", err) - } - } - return nil } diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/internal/factory.go b/vendor/go.opentelemetry.io/collector/service/telemetry/internal/factory.go index f1368c9704d..9afd6bd06c3 100644 --- a/vendor/go.opentelemetry.io/collector/service/telemetry/internal/factory.go +++ b/vendor/go.opentelemetry.io/collector/service/telemetry/internal/factory.go @@ -13,8 +13,8 @@ import ( "go.opentelemetry.io/collector/component" ) -// CreateSettings holds configuration for building Telemetry. -type CreateSettings struct { +// Settings holds configuration for building Telemetry. 
+type Settings struct { BuildInfo component.BuildInfo AsyncErrorChannel chan error ZapOptions []zap.Option @@ -29,10 +29,10 @@ type Factory interface { CreateDefaultConfig() component.Config // CreateLogger creates a logger. - CreateLogger(ctx context.Context, set CreateSettings, cfg component.Config) (*zap.Logger, error) + CreateLogger(ctx context.Context, set Settings, cfg component.Config) (*zap.Logger, error) // CreateTracerProvider creates a TracerProvider. - CreateTracerProvider(ctx context.Context, set CreateSettings, cfg component.Config) (trace.TracerProvider, error) + CreateTracerProvider(ctx context.Context, set Settings, cfg component.Config) (trace.TracerProvider, error) // TODO: Add CreateMeterProvider. @@ -69,7 +69,7 @@ func (f *factory) CreateDefaultConfig() component.Config { } // CreateLoggerFunc is the equivalent of Factory.CreateLogger. -type CreateLoggerFunc func(context.Context, CreateSettings, component.Config) (*zap.Logger, error) +type CreateLoggerFunc func(context.Context, Settings, component.Config) (*zap.Logger, error) // WithLogger overrides the default no-op logger. func WithLogger(createLogger CreateLoggerFunc) FactoryOption { @@ -78,7 +78,7 @@ func WithLogger(createLogger CreateLoggerFunc) FactoryOption { }) } -func (f *factory) CreateLogger(ctx context.Context, set CreateSettings, cfg component.Config) (*zap.Logger, error) { +func (f *factory) CreateLogger(ctx context.Context, set Settings, cfg component.Config) (*zap.Logger, error) { if f.CreateLoggerFunc == nil { return zap.NewNop(), nil } @@ -86,7 +86,7 @@ func (f *factory) CreateLogger(ctx context.Context, set CreateSettings, cfg comp } // CreateTracerProviderFunc is the equivalent of Factory.CreateTracerProvider. -type CreateTracerProviderFunc func(context.Context, CreateSettings, component.Config) (trace.TracerProvider, error) +type CreateTracerProviderFunc func(context.Context, Settings, component.Config) (trace.TracerProvider, error) // WithTracerProvider overrides the default no-op tracer provider. func WithTracerProvider(createTracerProvider CreateTracerProviderFunc) FactoryOption { @@ -95,7 +95,7 @@ func WithTracerProvider(createTracerProvider CreateTracerProviderFunc) FactoryOp }) } -func (f *factory) CreateTracerProvider(ctx context.Context, set CreateSettings, cfg component.Config) (trace.TracerProvider, error) { +func (f *factory) CreateTracerProvider(ctx context.Context, set Settings, cfg component.Config) (trace.TracerProvider, error) { if f.CreateTracerProviderFunc == nil { return tracenoop.NewTracerProvider(), nil } diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/telemetry.go b/vendor/go.opentelemetry.io/collector/service/telemetry/telemetry.go index c9346c26d59..8ca29332924 100644 --- a/vendor/go.opentelemetry.io/collector/service/telemetry/telemetry.go +++ b/vendor/go.opentelemetry.io/collector/service/telemetry/telemetry.go @@ -8,4 +8,4 @@ import ( ) // Settings holds configuration for building Telemetry. 
-type Settings = internal.CreateSettings +type Settings = internal.Settings diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go index a199b36b4fa..ab091cf6ade 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go @@ -30,7 +30,7 @@ const ( type InterceptorFilter func(*InterceptorInfo) bool // Filter is a predicate used to determine whether a given request in -// should be instrumented by the attatched RPC tag info. +// should be instrumented by the attached RPC tag info. // A Filter must return true if the request should be instrumented. type Filter func(*stats.RPCTagInfo) bool diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go index 7f19058e4c4..7d5ed058082 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go @@ -7,6 +7,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md import ( "context" + "errors" "io" "net" "strconv" @@ -136,7 +137,7 @@ func (w *clientStream) RecvMsg(m interface{}) error { if err == nil && !w.desc.ServerStreams { w.endSpan(nil) - } else if err == io.EOF { + } else if errors.Is(err, io.EOF) { w.endSpan(nil) } else if err != nil { w.endSpan(err) @@ -333,7 +334,7 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { elapsedTime := float64(time.Since(before)) / float64(time.Millisecond) metricAttrs = append(metricAttrs, grpcStatusCodeAttr) - cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...)) + cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) return resp, err } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go index fad58733fec..201867a8694 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go @@ -151,7 +151,7 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool case *stats.InPayload: if gctx != nil { messageId = atomic.AddInt64(&gctx.messagesReceived, 1) - c.rpcRequestSize.Record(ctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) + c.rpcRequestSize.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) } if c.ReceivedEvent { @@ -167,7 +167,7 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool case *stats.OutPayload: if gctx != nil { messageId = atomic.AddInt64(&gctx.messagesSent, 1) - c.rpcResponseSize.Record(ctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) + c.rpcResponseSize.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) } 
if c.SentEvent { @@ -204,14 +204,17 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool span.End() metricAttrs = append(metricAttrs, rpcStatusAttr) + // Allocate vararg slice once. + recordOpts := []metric.RecordOption{metric.WithAttributeSet(attribute.NewSet(metricAttrs...))} // Use floating point division here for higher precision (instead of Millisecond method). + // Measure right before calling Record() to capture as much elapsed time as possible. elapsedTime := float64(rs.EndTime.Sub(rs.BeginTime)) / float64(time.Millisecond) - c.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...)) + c.rpcDuration.Record(ctx, elapsedTime, recordOpts...) if gctx != nil { - c.rpcRequestsPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesReceived), metric.WithAttributes(metricAttrs...)) - c.rpcResponsesPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesSent), metric.WithAttributes(metricAttrs...)) + c.rpcRequestsPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesReceived), recordOpts...) + c.rpcResponsesPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesSent), recordOpts...) } default: return diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go index 3f9cfda5413..a15d06cb0c4 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -5,7 +5,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // Version is the current release version of the gRPC instrumentation. func Version() string { - return "0.52.0" + return "0.53.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index deea149645c..6aae83bfd20 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -12,7 +12,7 @@ import ( ) // DefaultClient is the default Client and is used by Get, Head, Post and PostForm. -// Please be careful of intitialization order - for example, if you change +// Please be careful of initialization order - for example, if you change // the global propagator, the DefaultClient might still be using the old one. var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index c1015a9eccf..f0a9bb9efeb 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -100,7 +100,7 @@ func WithPublicEndpoint() Option { }) } -// WithPublicEndpointFn runs with every request, and allows conditionnally +// WithPublicEndpointFn runs with every request, and allows conditionally // configuring the Handler to link the span with an incoming span context. If // this option is not provided or returns false, then the association is a // child association instead of a link. 
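
[Editor's note: the otelgrpc hunks above replace per-call `metric.WithAttributes(...)` with a pre-built `attribute.Set` plus a single reused `[]metric.RecordOption` slice, trading repeated sorting/de-duplication and vararg allocations for one up-front set construction. A minimal sketch of the same pattern outside the instrumentation — instrument and attribute names are illustrative, and real code would create the instruments once rather than per call:]

```go
package rpcmetrics

import (
	"context"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

// recordRPC shares one attribute.Set and one options slice across every
// Record call made for the same RPC.
func recordRPC(ctx context.Context, elapsed time.Duration, attrs []attribute.KeyValue) {
	meter := otel.GetMeterProvider().Meter("example/rpc")
	// Created here only for brevity; hoist instrument creation in real code.
	duration, _ := meter.Float64Histogram("rpc.server.duration", metric.WithUnit("ms"))
	msgs, _ := meter.Int64Histogram("rpc.server.requests_per_rpc")

	// attribute.NewSet sorts and de-duplicates once; WithAttributeSet then
	// skips the per-call set construction that WithAttributes implies, and
	// the shared slice avoids one vararg allocation per Record call.
	opts := []metric.RecordOption{metric.WithAttributeSet(attribute.NewSet(attrs...))}

	duration.Record(ctx, float64(elapsed)/float64(time.Millisecond), opts...)
	msgs.Record(ctx, 1, opts...)
}
```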
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index c64f8beca71..d01bdccf40d 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -12,6 +12,7 @@ import ( "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" @@ -204,10 +205,15 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { return rww.WriteHeader }, + Flush: func(httpsnoop.FlushFunc) httpsnoop.FlushFunc { + return rww.Flush + }, }) - labeler := &Labeler{} - ctx = injectLabeler(ctx, labeler) + labeler, found := LabelerFromContext(ctx) + if !found { + ctx = ContextWithLabeler(ctx, labeler) + } next.ServeHTTP(w, r.WithContext(ctx)) @@ -225,9 +231,10 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http if rww.statusCode > 0 { attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) } - o := metric.WithAttributes(attributes...) - h.requestBytesCounter.Add(ctx, bw.read.Load(), o) - h.responseBytesCounter.Add(ctx, rww.written, o) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + addOpts := []metric.AddOption{o} // Allocate vararg slice once. + h.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...) + h.responseBytesCounter.Add(ctx, rww.written, addOpts...) // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go index 9be3feef29e..3ec0ad00c81 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -6,6 +6,8 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/ import ( "fmt" "net/http" + "os" + "strings" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" @@ -19,40 +21,51 @@ type ResponseTelemetry struct { WriteError error } -type HTTPServer interface { - // RequestTraceAttrs returns trace attributes for an HTTP request received by a - // server. - // - // The server must be the primary server name if it is known. For example this - // would be the ServerName directive - // (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache - // server, and the server_name directive - // (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an - // nginx server. More generically, the primary server name would be the host - // header value that matches the default virtual host of an HTTP server. It - // should include the host identifier and if a port is used to route to the - // server that port identifier should be included as an appropriate port - // suffix. 
- // - // If the primary server name is not known, server should be an empty string. - // The req Host will be used to determine the server instead. - RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue +type HTTPServer struct { + duplicate bool +} - // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. - // - // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. - ResponseTraceAttrs(ResponseTelemetry) []attribute.KeyValue +// RequestTraceAttrs returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + if s.duplicate { + return append(oldHTTPServer{}.RequestTraceAttrs(server, req), newHTTPServer{}.RequestTraceAttrs(server, req)...) + } + return oldHTTPServer{}.RequestTraceAttrs(server, req) +} - // Route returns the attribute for the route. - Route(string) attribute.KeyValue +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. +func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + if s.duplicate { + return append(oldHTTPServer{}.ResponseTraceAttrs(resp), newHTTPServer{}.ResponseTraceAttrs(resp)...) + } + return oldHTTPServer{}.ResponseTraceAttrs(resp) } -// var warnOnce = sync.Once{} +// Route returns the attribute for the route. +func (s HTTPServer) Route(route string) attribute.KeyValue { + return oldHTTPServer{}.Route(route) +} func NewHTTPServer() HTTPServer { - // TODO (#5331): Detect version based on environment variable OTEL_HTTP_CLIENT_COMPATIBILITY_MODE. - // TODO (#5331): Add warning of use of a deprecated version of Semantic Versions. 
- return oldHTTPServer{} + env := strings.ToLower(os.Getenv("OTEL_HTTP_CLIENT_COMPATIBILITY_MODE")) + return HTTPServer{duplicate: env == "http/dup"} } // ServerStatus returns a span status code and message for an HTTP status code diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go index c92076bc3d9..e7f293761bd 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -5,8 +5,12 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/ import ( "net" + "net/http" "strconv" "strings" + + "go.opentelemetry.io/otel/attribute" + semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0" ) // splitHostPort splits a network address hostport of the form "host", @@ -47,3 +51,41 @@ func splitHostPort(hostport string) (host string, port int) { } return host, int(p) } + +func requiredHTTPPort(https bool, port int) int { // nolint:revive + if https { + if port > 0 && port != 443 { + return port + } + } else { + if port > 0 && port != 80 { + return port + } + } + return -1 +} + +func serverClientIP(xForwardedFor string) string { + if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + xForwardedFor = xForwardedFor[:idx] + } + return xForwardedFor +} + +func netProtocol(proto string) (name string, version string) { + name, version, _ = strings.Cut(proto, "/") + name = strings.ToLower(name) + return name, version +} + +var methodLookup = map[string]attribute.KeyValue{ + http.MethodConnect: semconvNew.HTTPRequestMethodConnect, + http.MethodDelete: semconvNew.HTTPRequestMethodDelete, + http.MethodGet: semconvNew.HTTPRequestMethodGet, + http.MethodHead: semconvNew.HTTPRequestMethodHead, + http.MethodOptions: semconvNew.HTTPRequestMethodOptions, + http.MethodPatch: semconvNew.HTTPRequestMethodPatch, + http.MethodPost: semconvNew.HTTPRequestMethodPost, + http.MethodPut: semconvNew.HTTPRequestMethodPut, + http.MethodTrace: semconvNew.HTTPRequestMethodTrace, +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go index d753083b7b4..c3e838aaa54 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -4,6 +4,7 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" import ( + "errors" "io" "net/http" @@ -14,8 +15,6 @@ import ( type oldHTTPServer struct{} -var _ HTTPServer = oldHTTPServer{} - // RequestTraceAttrs returns trace attributes for an HTTP request received by a // server. 
// @@ -45,7 +44,7 @@ func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke if resp.ReadBytes > 0 { attributes = append(attributes, semconv.HTTPRequestContentLength(int(resp.ReadBytes))) } - if resp.ReadError != nil && resp.ReadError != io.EOF { + if resp.ReadError != nil && !errors.Is(resp.ReadError, io.EOF) { // This is not in the semantic conventions, but is historically provided attributes = append(attributes, attribute.String("http.read_error", resp.ReadError.Error())) } @@ -55,7 +54,7 @@ func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke if resp.StatusCode > 0 { attributes = append(attributes, semconv.HTTPStatusCode(resp.StatusCode)) } - if resp.WriteError != nil && resp.WriteError != io.EOF { + if resp.WriteError != nil && !errors.Is(resp.WriteError, io.EOF) { // This is not in the semantic conventions, but is historically provided attributes = append(attributes, attribute.String("http.write_error", resp.WriteError.Error())) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go new file mode 100644 index 00000000000..0c5d4c4608a --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go @@ -0,0 +1,197 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "net/http" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0" +) + +type newHTTPServer struct{} + +// TraceRequest returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + count := 3 // ServerAddress, Method, Scheme + + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + count++ + } + + method, methodOriginal := n.method(req.Method) + if methodOriginal != (attribute.KeyValue{}) { + count++ + } + + scheme := n.scheme(req.TLS != nil) + + if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. 
+ count++ + if peerPort > 0 { + count++ + } + } + + useragent := req.UserAgent() + if useragent != "" { + count++ + } + + clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) + if clientIP != "" { + count++ + } + + if req.URL != nil && req.URL.Path != "" { + count++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + count++ + } + if protoVersion != "" { + count++ + } + + attrs := make([]attribute.KeyValue, 0, count) + attrs = append(attrs, + semconvNew.ServerAddress(host), + method, + scheme, + ) + + if hostPort > 0 { + attrs = append(attrs, semconvNew.ServerPort(hostPort)) + } + if methodOriginal != (attribute.KeyValue{}) { + attrs = append(attrs, methodOriginal) + } + + if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. + attrs = append(attrs, semconvNew.NetworkPeerAddress(peer)) + if peerPort > 0 { + attrs = append(attrs, semconvNew.NetworkPeerPort(peerPort)) + } + } + + if useragent := req.UserAgent(); useragent != "" { + attrs = append(attrs, semconvNew.UserAgentOriginal(useragent)) + } + + if clientIP != "" { + attrs = append(attrs, semconvNew.ClientAddress(clientIP)) + } + + if req.URL != nil && req.URL.Path != "" { + attrs = append(attrs, semconvNew.URLPath(req.URL.Path)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + return attrs +} + +func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { + if method == "" { + return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} + } + if attr, ok := methodLookup[method]; ok { + return attr, attribute.KeyValue{} + } + + orig := semconvNew.HTTPRequestMethodOriginal(method) + if attr, ok := methodLookup[strings.ToUpper(method)]; ok { + return attr, orig + } + return semconvNew.HTTPRequestMethodGet, orig +} + +func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconvNew.URLScheme("https") + } + return semconvNew.URLScheme("http") +} + +// TraceResponse returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. +func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + var count int + + if resp.ReadBytes > 0 { + count++ + } + if resp.WriteBytes > 0 { + count++ + } + if resp.StatusCode > 0 { + count++ + } + + attributes := make([]attribute.KeyValue, 0, count) + + if resp.ReadBytes > 0 { + attributes = append(attributes, + semconvNew.HTTPRequestBodySize(int(resp.ReadBytes)), + ) + } + if resp.WriteBytes > 0 { + attributes = append(attributes, + semconvNew.HTTPResponseBodySize(int(resp.WriteBytes)), + ) + } + if resp.StatusCode > 0 { + attributes = append(attributes, + semconvNew.HTTPResponseStatusCode(resp.StatusCode), + ) + } + + return attributes +} + +// Route returns the attribute for the route. 
+func (n newHTTPServer) Route(route string) attribute.KeyValue { + return semconvNew.HTTPRoute(route) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go index d5c0093fc47..a9a9226b39a 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -92,7 +92,7 @@ func (c *netConv) Host(address string) []attribute.KeyValue { attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.HostName(h)) if p > 0 { - attrs = append(attrs, c.HostPort(int(p))) + attrs = append(attrs, c.HostPort(p)) } return attrs } @@ -138,7 +138,7 @@ func (c *netConv) Peer(address string) []attribute.KeyValue { attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.PeerName(h)) if p > 0 { - attrs = append(attrs, c.PeerPort(int(p))) + attrs = append(attrs, c.PeerPort(p)) } return attrs } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go index 1548b2db636..ea504e396f1 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go @@ -37,8 +37,12 @@ type labelerContextKeyType int const lablelerContextKey labelerContextKeyType = 0 -func injectLabeler(ctx context.Context, l *Labeler) context.Context { - return context.WithValue(ctx, lablelerContextKey, l) +// ContextWithLabeler returns a new context with the provided Labeler instance. +// Attributes added to the specified labeler will be injected into metrics +// emitted by the instrumentation. Only one labeller can be injected into the +// context. Injecting it multiple times will override the previous calls. 
+func ContextWithLabeler(parent context.Context, l *Labeler) context.Context { + return context.WithValue(parent, lablelerContextKey, l) } // LabelerFromContext retrieves a Labeler instance from the provided context if diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index 8a25e586574..0d3cb2e4aa4 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -11,15 +11,14 @@ import ( "sync/atomic" "time" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" + "go.opentelemetry.io/otel/trace" ) // Transport implements the http.RoundTripper interface and wraps @@ -137,8 +136,10 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx)) } - labeler := &Labeler{} - ctx = injectLabeler(ctx, labeler) + labeler, found := LabelerFromContext(ctx) + if !found { + ctx = ContextWithLabeler(ctx, labeler) + } r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request. @@ -170,11 +171,12 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { if res.StatusCode > 0 { metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) } - o := metric.WithAttributes(metricAttrs...) - t.requestBytesCounter.Add(ctx, bw.read.Load(), o) + o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...)) + addOpts := []metric.AddOption{o} // Allocate vararg slice once. + t.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...) // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { - t.responseBytesCounter.Add(ctx, n, o) + t.responseBytesCounter.Add(ctx, n, addOpts...) } // traces diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 22e485dd7d3..b0957f28cea 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. 
func Version() string { - return "0.52.0" + return "0.53.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go index 2f4cc124dc6..948f8406c09 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go @@ -87,3 +87,13 @@ func (w *respWriterWrapper) WriteHeader(statusCode int) { } w.ResponseWriter.WriteHeader(statusCode) } + +func (w *respWriterWrapper) Flush() { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + + if f, ok := w.ResponseWriter.(http.Flusher); ok { + f.Flush() + } +} diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore index 120b63a9c7d..6bf3abc41e7 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellignore +++ b/vendor/go.opentelemetry.io/otel/.codespellignore @@ -5,3 +5,5 @@ collison consequentially ans nam +valu +thirdparty diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc index 4afbb1fb3bd..e2cb3ea944b 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellrc +++ b/vendor/go.opentelemetry.io/otel/.codespellrc @@ -5,6 +5,6 @@ check-filenames = check-hidden = ignore-words = .codespellignore interactive = 1 -skip = .git,go.mod,go.sum,semconv,venv,.tools +skip = .git,go.mod,go.sum,go.work,go.work.sum,semconv,venv,.tools uri-ignore-words-list = * write = diff --git a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules deleted file mode 100644 index 38a1f56982b..00000000000 --- a/vendor/go.opentelemetry.io/otel/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "opentelemetry-proto"] - path = exporters/otlp/internal/opentelemetry-proto - url = https://github.com/open-telemetry/opentelemetry-proto diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index a62511f382e..6d9c8b64958 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -11,6 +11,7 @@ linters: enable: - depguard - errcheck + - errorlint - godot - gofumpt - goimports @@ -21,8 +22,11 @@ linters: - misspell - revive - staticcheck + - tenv - typecheck + - unconvert - unused + - unparam issues: # Maximum issues count per one linter. @@ -124,6 +128,8 @@ linters-settings: - "**/example/**/*.go" - "**/trace/*.go" - "**/trace/**/*.go" + - "**/log/*.go" + - "**/log/**/*.go" deny: - pkg: "go.opentelemetry.io/otel/internal$" desc: Do not use cross-module internal packages. diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index e5946bfb25c..c01e6998e0b 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,48 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] +## [1.28.0/0.50.0/0.4.0] 2024-07-02 + +### Added + +- The `IsEmpty` method is added to the `Instrument` type in `go.opentelemetry.io/otel/sdk/metric`. + This method is used to check if an `Instrument` instance is a zero-value. (#5431) +- Store and provide the emitted `context.Context` in `ScopeRecords` of `go.opentelemetry.io/otel/sdk/log/logtest`. (#5468) +- The `go.opentelemetry.io/otel/semconv/v1.26.0` package. 
+  The package contains semantic conventions from the `v1.26.0` version of the OpenTelemetry Semantic Conventions. (#5476)
+- The `AssertRecordEqual` method to `go.opentelemetry.io/otel/log/logtest` to allow comparison of two log records in tests. (#5499)
+- The `WithHeaders` option to `go.opentelemetry.io/otel/exporters/zipkin` to allow configuring custom http headers while exporting spans. (#5530)
+
+### Changed
+
+- `Tracer.Start` in `go.opentelemetry.io/otel/trace/noop` no longer allocates a span for empty span context. (#5457)
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/otel-collector`. (#5490)
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/zipkin`. (#5490)
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#5490)
+  - The exporter no longer exports the deprecated "otel.library.name" or "otel.library.version" attributes.
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/resource`. (#5490)
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/trace`. (#5490)
+- `SimpleProcessor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` no longer allocates a slice, which makes zero-allocation log processing possible using `SimpleProcessor`. (#5493)
+- Use non-generic functions in the `Start` method of `"go.opentelemetry.io/otel/sdk/trace".Trace` to reduce memory allocation. (#5497)
+- `service.instance.id` is populated for a `Resource` created with `"go.opentelemetry.io/otel/sdk/resource".Default` with a default value when `OTEL_GO_X_RESOURCE` is set. (#5520)
+- Improve performance of metric instruments in `go.opentelemetry.io/otel/sdk/metric` by removing unnecessary calls to `time.Now`. (#5545)
+
+### Fixed
+
+- Log a warning to the OpenTelemetry internal logger when a `Record` in `go.opentelemetry.io/otel/sdk/log` drops an attribute due to a limit being reached. (#5376)
+- Identify the `Tracer` returned from the global `TracerProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426)
+- Identify the `Meter` returned from the global `MeterProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426)
+- Log a warning to the OpenTelemetry internal logger when a `Span` in `go.opentelemetry.io/otel/sdk/trace` drops an attribute, event, or link due to a limit being reached. (#5434)
+- Document instrument name requirements in `go.opentelemetry.io/otel/metric`. (#5435)
+- Prevent random number generation data-race for experimental rand exemplars in `go.opentelemetry.io/otel/sdk/metric`. (#5456)
+- Fix counting the number of dropped attributes of `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5464)
+- Fix panic in baggage creation when a member contains `0x80` char in key or value. (#5494)
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5508)
+- Retry trace and span ID generation if it generated an invalid one in `go.opentelemetry.io/otel/sdk/trace`. (#5514)
+- Fix stale timestamps reported by the last-value aggregation. (#5517)
+- Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521)
+- Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/otel/attribute` by reducing the number of allocations. (#5549)
+
 ## [1.27.0/0.49.0/0.3.0] 2024-05-21
 
 ### Added
@@ -250,7 +292,7 @@ See our [versioning policy](VERSIONING.md) for more information about these stab
 
 ## [1.20.0/0.43.0] 2023-11-10
 
-This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this.
+This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementers need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this.
 
 ### Added
 
@@ -282,15 +324,15 @@ This release brings a breaking change for custom trace API implementations. Some
 
 - `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583)
 - The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type.
   This extends the `TracerProvider` interface and is a breaking change for any existing implementation.
-  Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+  Implementers need to update their implementations based on what they want the default behavior of the interface to be.
   See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
 - The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type.
   This extends the `Tracer` interface and is a breaking change for any existing implementation.
-  Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+  Implementers need to update their implementations based on what they want the default behavior of the interface to be.
   See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
 - The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type.
   This extends the `Span` interface and is a breaking change for any existing implementation.
-  Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+  Implementers need to update their implementations based on what they want the default behavior of the interface to be.
   See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
 - `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
 - `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
@@ -926,7 +968,7 @@ The next release will require at least [Go 1.19].
 - Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340)
 - `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436)
 - Re-enabled Attribute Filters in the Metric SDK. (#3396)
-- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggragation. (#3408)
+- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408)
 - Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432)
 - Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440)
 - Prevent duplicate Prometheus description, unit, and type. (#3469)
@@ -1971,7 +2013,7 @@ with major version 0.
 - `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369)
 - Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369)
 - Unify the endpoint API related to the OTel exporter. (#1401)
-- Optimize metric histogram aggregator to re-use its slice of buckets. (#1435)
+- Optimize metric histogram aggregator to reuse its slice of buckets. (#1435)
 - Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (#1430)
 - Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434)
 - `SamplingResult` is now passed a `Tracestate` from the parent `SpanContext`. (#1432)
@@ -2961,7 +3003,8 @@ It contains api and sdk for trace and meter.
 - CircleCI build CI manifest files.
 - CODEOWNERS file to track owners of this project.
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.27.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.28.0...HEAD +[1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 [1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0 [1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0 [1.25.0/0.47.0/0.0.8/0.1.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.25.0 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 88f4c7d0e09..20255493323 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -12,6 +12,6 @@ # https://help.github.com/en/articles/about-code-owners # -* @MrAlias @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu +* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu -CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole +CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 2176ce5261b..b86572f58ea 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -628,16 +628,15 @@ should be canceled. ### Approvers -- [Evan Torrie](https://github.com/evantorrie), Verizon Media -- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics - [Chester Cheung](https://github.com/hanyuancheung), Tencent -- [Damien Mathieu](https://github.com/dmathieu), Elastic ### Maintainers -- [David Ashpole](https://github.com/dashpole), Google - [Aaron Clawson](https://github.com/MadVikingGod), LightStep +- [Damien Mathieu](https://github.com/dmathieu), Elastic +- [David Ashpole](https://github.com/dashpole), Google - [Robert Pająk](https://github.com/pellared), Splunk +- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics - [Tyler Yahn](https://github.com/MrAlias), Splunk ### Emeritus @@ -646,6 +645,7 @@ should be canceled. 
- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep - [Josh MacDonald](https://github.com/jmacd), LightStep - [Anthony Mirabella](https://github.com/Aneurysm9), AWS +- [Evan Torrie](https://github.com/evantorrie), Yahoo ### Become an Approver or a Maintainer diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index a9845a88f63..f33619f76a2 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -14,8 +14,8 @@ TIMEOUT = 60 .DEFAULT_GOAL := precommit .PHONY: precommit ci -precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes test-default -ci: generate license-check lint vanity-import-check verify-readmes build test-default check-clean-work-tree test-coverage +precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default +ci: generate license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage # Tools @@ -264,10 +264,7 @@ SEMCONVPKG ?= "semconv/" semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" @@ -280,16 +277,20 @@ gorelease/%:| $(GORELEASE) && $(GORELEASE) \ || echo "" +.PHONY: verify-mods +verify-mods: $(MULTIMOD) + $(MULTIMOD) verify + .PHONY: prerelease -prerelease: $(MULTIMOD) +prerelease: verify-mods @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET} + $(MULTIMOD) prerelease -m ${MODSET} COMMIT ?= "HEAD" .PHONY: add-tags -add-tags: $(MULTIMOD) +add-tags: verify-mods @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} + $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} .PHONY: lint-markdown lint-markdown: diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index f98c54a3cba..c40c896cc66 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -302,7 +302,7 @@ func parseMember(member string) (Member, error) { // Decode a percent-encoded value. 
value, err := url.PathUnescape(val) if err != nil { - return newInvalidMember(), fmt.Errorf("%w: %v", errInvalidValue, err) + return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err) } return Member{key: key, value: value, properties: props, hasData: true}, nil } @@ -735,7 +735,7 @@ func validateKey(s string) bool { } func validateKeyChar(c int32) bool { - return c >= 0 && c <= int32(utf8.RuneSelf) && safeKeyCharset[c] + return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c] } func validateValue(s string) bool { @@ -850,7 +850,7 @@ var safeValueCharset = [utf8.RuneSelf]bool{ } func validateValueChar(c int32) bool { - return c >= 0 && c <= int32(utf8.RuneSelf) && safeValueCharset[c] + return c >= 0 && c < int32(utf8.RuneSelf) && safeValueCharset[c] } // valueEscape escapes the string so it can be safely placed inside a baggage value, diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go index 4f2113ae2cf..1c5450ab62d 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go @@ -112,7 +112,7 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { } if ctxErr := waitFunc(ctx, delay); ctxErr != nil { - return fmt.Errorf("%w: %s", ctxErr, err) + return fmt.Errorf("%w: %w", ctxErr, err) } } } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go index bbad0e6d01e..00ab1f20c6d 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go @@ -59,8 +59,9 @@ func WithInsecure() Option { // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT // environment variable is set, and this option is not passed, that variable -// value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT -// will take precedence. +// value will be used. If both environment variables are set, +// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. // // If both this option and WithEndpointURL are used, the last used option will // take precedence. @@ -79,8 +80,9 @@ func WithEndpoint(endpoint string) Option { // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT // environment variable is set, and this option is not passed, that variable -// value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT -// will take precedence. +// value will be used. If both environment variables are set, +// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. // // If both this option and WithEndpoint are used, the last used option will // take precedence. 
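The corrected doc comments above encode a three-level precedence: an explicitly passed option beats the environment variables, `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` beats `OTEL_EXPORTER_OTLP_ENDPOINT`, and between `WithEndpoint` and `WithEndpointURL` the last option passed wins. A minimal sketch of how a caller relies on this behavior; the collector host name is a hypothetical placeholder, not taken from this patch:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	ctx := context.Background()

	// Even if OTEL_EXPORTER_OTLP_ENDPOINT or
	// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT is set in the environment, the
	// explicitly passed option below takes precedence. Had WithEndpoint and
	// WithEndpointURL both been given, the one passed last would win.
	exp, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpointURL("http://collector.example.internal:4317"),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
}
```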
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go index 1c487a09630..1e59ff23932 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go @@ -316,7 +316,10 @@ func evaluate(err error) (bool, time.Duration) { return false, 0 } - rErr, ok := err.(retryableError) + // Do not use errors.As here, this should only be flattened one layer. If + // there are several chained errors, all the errors above it will be + // discarded if errors.As is used instead. + rErr, ok := err.(retryableError) //nolint:errorlint if !ok { return false, 0 } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go index e3e6477832e..86c4819f449 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go @@ -112,7 +112,7 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { } if ctxErr := waitFunc(ctx, delay); ctxErr != nil { - return fmt.Errorf("%w: %s", ctxErr, err) + return fmt.Errorf("%w: %w", ctxErr, err) } } } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index fc7190d940a..14ad8c33b48 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.27.0" + return "1.28.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go index 1a8e28542e7..d2e387e607c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go @@ -192,7 +192,7 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { if !c.disableScopeInfo { scopeInfo, err := c.scopeInfo(scopeMetrics.Scope) - if err == errScopeInvalid { + if errors.Is(err, errScopeInvalid) { // Do not report the same error multiple times. continue } diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/config.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/config.go index 3f60e3ef089..0ba3424e295 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/config.go +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/config.go @@ -29,7 +29,7 @@ type config struct { } // newConfig creates a validated Config configured with options. -func newConfig(options ...Option) (config, error) { +func newConfig(options ...Option) config { cfg := config{ Writer: defaultWriter, PrettyPrint: defaultPrettyPrint, @@ -38,7 +38,7 @@ func newConfig(options ...Option) (config, error) { for _, opt := range options { cfg = opt.apply(cfg) } - return cfg, nil + return cfg } // Option sets the value of an option for a Config. 
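Several hunks in this patch move from `%s` wrapping and direct comparisons to `%w: %w` wrapping and `errors.Is`, matching the `errorlint` linter enabled earlier in the patch. A standalone sketch, not taken from the patch, of why this matters once errors are wrapped; `errRetryable` is an invented stand-in for a package-level sentinel such as the Prometheus exporter's `errScopeInvalid`:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// errRetryable is a hypothetical sentinel error for this demonstration.
var errRetryable = errors.New("retryable error")

func main() {
	// With two %w verbs (supported since Go 1.20) both errors stay in the
	// chain; the old "%w: %s" form reduced the second error to plain text.
	err := fmt.Errorf("%w: %w", context.DeadlineExceeded, errRetryable)

	fmt.Println(err == errRetryable)                      // false: wrapping breaks ==
	fmt.Println(errors.Is(err, errRetryable))             // true: errors.Is unwraps the chain
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true: both wrapped errors match
}
```

Note the `client.go` hunk above is the deliberate exception: `evaluate` only wants to flatten one layer, so it keeps a plain type assertion and silences the linter instead.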
diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/trace.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/trace.go
index b19eb0dfbed..bdb915ba803 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/trace.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/trace.go
@@ -19,10 +19,7 @@ var _ trace.SpanExporter = &Exporter{}
 
 // New creates an Exporter with the passed options.
 func New(options ...Option) (*Exporter, error) {
-	cfg, err := newConfig(options...)
-	if err != nil {
-		return nil, err
-	}
+	cfg := newConfig(options...)
 
 	enc := json.NewEncoder(cfg.Writer)
 	if cfg.PrettyPrint {
diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
index f32766e57f6..822d8479474 100644
--- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
+++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
@@ -14,33 +14,33 @@ import (
 // BoolSliceValue converts a bool slice into an array with same elements as slice.
 func BoolSliceValue(v []bool) interface{} {
 	var zero bool
-	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
-	copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v)
-	return cp.Elem().Interface()
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+	reflect.Copy(cp, reflect.ValueOf(v))
+	return cp.Interface()
 }
 
 // Int64SliceValue converts an int64 slice into an array with same elements as slice.
 func Int64SliceValue(v []int64) interface{} {
 	var zero int64
-	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
-	copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v)
-	return cp.Elem().Interface()
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+	reflect.Copy(cp, reflect.ValueOf(v))
+	return cp.Interface()
 }
 
 // Float64SliceValue converts a float64 slice into an array with same elements as slice.
 func Float64SliceValue(v []float64) interface{} {
 	var zero float64
-	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
-	copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v)
-	return cp.Elem().Interface()
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+	reflect.Copy(cp, reflect.ValueOf(v))
+	return cp.Interface()
 }
 
 // StringSliceValue converts a string slice into an array with same elements as slice.
 func StringSliceValue(v []string) interface{} {
 	var zero string
-	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
-	copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v)
-	return cp.Elem().Interface()
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+	reflect.Copy(cp, reflect.ValueOf(v))
+	return cp.Interface()
 }
 
 // AsBoolSlice converts a bool array into a slice with the same elements as the array.
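The `attribute.go` hunk above replaces a `copy` through `Slice(...).Interface()` with `reflect.Copy` straight into the new array value, avoiding the intermediate slice header and interface allocation (the #5549 changelog entry). A minimal standalone illustration of the pattern; the function name is invented for this sketch, not taken from the package:

```go
package main

import (
	"fmt"
	"reflect"
)

// int64SliceToArray mirrors the pattern above: build a fixed-size array
// value with the same elements as v. Arrays, unlike slices, are comparable,
// which is why the attribute package stores them.
func int64SliceToArray(v []int64) interface{} {
	var zero int64
	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
	reflect.Copy(cp, reflect.ValueOf(v)) // dst is an addressable array value
	return cp.Interface()
}

func main() {
	a := int64SliceToArray([]int64{1, 2, 3})
	fmt.Printf("%T %v\n", a, a) // prints: [3]int64 [1 2 3]
}
```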
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
index 590fa7385f3..cfd1df9bfa2 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
@@ -65,6 +65,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me
 	key := il{
 		name:    name,
 		version: c.InstrumentationVersion(),
+		schema:  c.SchemaURL(),
 	}
 
 	if p.meters == nil {
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
index 596f716f40c..e31f442b48f 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
@@ -86,6 +86,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
 	key := il{
 		name:    name,
 		version: c.InstrumentationVersion(),
+		schema:  c.SchemaURL(),
 	}
 
 	if p.tracers == nil {
@@ -101,10 +102,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
 	return t
 }
 
-type il struct {
-	name    string
-	version string
-}
+type il struct{ name, version, schema string }
 
 // tracer is a placeholder for a trace.Tracer.
 //
diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go
index 075234b3380..f153745b005 100644
--- a/vendor/go.opentelemetry.io/otel/metric/doc.go
+++ b/vendor/go.opentelemetry.io/otel/metric/doc.go
@@ -57,6 +57,23 @@ asynchronous measurement, a Gauge ([Int64ObservableGauge] and
 See the [OpenTelemetry documentation] for more information about instruments
 and their intended use.
 
+# Instrument Name
+
+OpenTelemetry defines an [instrument name syntax] that restricts what
+instrument names are allowed.
+
+Instrument names should ...
+
+  - Not be empty.
+  - Have an alphabetic character as their first letter.
+  - Have any letter after the first be an alphanumeric character, ‘_’, ‘.’,
+    ‘-’, or ‘/’.
+  - Have a maximum length of 255 letters.
+
+To ensure compatibility with observability platforms, all instruments created
+need to conform to this syntax. Not all implementations of the API will validate
+these names; it is the caller's responsibility to ensure compliance.
+
 # Measurements
 
 Measurements are made by recording values and information about the values with
@@ -153,6 +170,7 @@ It is strongly recommended that authors only embed
 That implementation is the only one OpenTelemetry authors can guarantee will
 fully implement all the API interfaces when a user updates their API.
 
+[instrument name syntax]: https://opentelemetry.io/docs/specs/otel/metrics/api/#instrument-name-syntax
 [OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/
 [GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider
 */
diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go
index 460b3f9b08b..6a7991e0151 100644
--- a/vendor/go.opentelemetry.io/otel/metric/meter.go
+++ b/vendor/go.opentelemetry.io/otel/metric/meter.go
@@ -47,20 +47,36 @@ type Meter interface {
 	// Int64Counter returns a new Int64Counter instrument identified by name
 	// and configured with options. The instrument is used to synchronously
 	// record increasing int64 measurements during a computational operation.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more + // information. Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) // Int64UpDownCounter returns a new Int64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record int64 measurements during a computational // operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) // Int64Histogram returns a new Int64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of int64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) // Int64Gauge returns a new Int64Gauge instrument identified by name and // configured with options. The instrument is used to synchronously record // instantaneous int64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error) // Int64ObservableCounter returns a new Int64ObservableCounter identified // by name and configured with options. The instrument is used to @@ -71,6 +87,10 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter // instrument identified by name and configured with options. The @@ -81,6 +101,10 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) // Int64ObservableGauge returns a new Int64ObservableGauge instrument // identified by name and configured with options. The instrument is used @@ -91,26 +115,46 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. 
+ // See the Instrument Name section of the package documentation for more + // information. Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error) // Float64Counter returns a new Float64Counter instrument identified by // name and configured with options. The instrument is used to // synchronously record increasing float64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) // Float64UpDownCounter returns a new Float64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record float64 measurements during a computational // operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) // Float64Histogram returns a new Float64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of float64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) // Float64Gauge returns a new Float64Gauge instrument identified by name and // configured with options. The instrument is used to synchronously record // instantaneous float64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error) // Float64ObservableCounter returns a new Float64ObservableCounter // instrument identified by name and configured with options. The @@ -121,6 +165,10 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) // Float64ObservableUpDownCounter returns a new // Float64ObservableUpDownCounter instrument identified by name and @@ -131,6 +179,10 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. 
Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) // Float64ObservableGauge returns a new Float64ObservableGauge instrument // identified by name and configured with options. The instrument is used @@ -141,6 +193,10 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error) // RegisterCallback registers f to be called during the collection of a diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt index e0a43e13840..ab09daf9d53 100644 --- a/vendor/go.opentelemetry.io/otel/requirements.txt +++ b/vendor/go.opentelemetry.io/otel/requirements.txt @@ -1 +1 @@ -codespell==2.2.6 +codespell==2.3.0 diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go b/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go deleted file mode 100644 index 1fc19d3fe3f..00000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package internal // import "go.opentelemetry.io/otel/sdk/internal" - -//go:generate gotmpl --body=../../internal/shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go -//go:generate gotmpl --body=../../internal/shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go -//go:generate gotmpl --body=../../internal/shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go - -//go:generate gotmpl --body=../../internal/shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go -//go:generate gotmpl --body=../../internal/shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go -//go:generate gotmpl --body=../../internal/shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go -//go:generate gotmpl --body=../../internal/shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go -//go:generate gotmpl --body=../../internal/shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/sdk/internal/matchers\"}" --out=internaltest/harness.go -//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go -//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go -//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go -//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go b/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go deleted file mode 100644 index a990092f9d1..00000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go +++ /dev/null @@ -1,17 +0,0 @@ -// 
Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package internal // import "go.opentelemetry.io/otel/sdk/internal"
-
-import "time"
-
-// MonotonicEndTime returns the end time at present
-// but offset from start, monotonically.
-//
-// The monotonic clock is used in subtractions hence
-// the duration since start added back to start gives
-// end as a monotonic time.
-// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks
-func MonotonicEndTime(start time.Time) time.Time {
-	return start.Add(time.Since(start))
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md
new file mode 100644
index 00000000000..fab61647c2d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md
@@ -0,0 +1,46 @@
+# Experimental Features
+
+The SDK contains features that have not yet stabilized in the OpenTelemetry specification.
+These features are added to the OpenTelemetry Go SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
+
+These features may change in backwards incompatible ways as feedback is applied.
+See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
+
+## Features
+
+- [Resource](#resource)
+
+### Resource
+
+[OpenTelemetry resource semantic conventions] include many attribute definitions that are defined as experimental.
+To have experimental semantic conventions be added by [resource detectors], set the `OTEL_GO_X_RESOURCE` environment variable.
+The value set must be the case-insensitive string of `"true"` to enable the feature.
+All other values are ignored.
+
+
+
+[OpenTelemetry resource semantic conventions]: https://opentelemetry.io/docs/specs/semconv/resource/
+[resource detectors]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource#Detector
+
+#### Examples
+
+Enable experimental resource semantic conventions.
+
+```console
+export OTEL_GO_X_RESOURCE=true
+```
+
+Disable experimental resource semantic conventions.
+
+```console
+unset OTEL_GO_X_RESOURCE
+```
+
+## Compatibility and Stability
+
+Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../VERSIONING.md).
+These features may be removed or modified in successive version releases, including patch versions.
+
+When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release.
+There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version.
+If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
new file mode 100644
index 00000000000..68d296cbed3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package x contains support for OTel SDK experimental features.
+//
+// This package should only be used for features defined in the specification.
+// It should not be used for experiments or new project ideas.
+package x // import "go.opentelemetry.io/otel/sdk/internal/x"
+
+import (
+	"os"
+	"strings"
+)
+
+// Resource is an experimental feature flag that defines whether resource
+// detectors should include experimental semantic conventions.
+//
+// To enable this feature set the OTEL_GO_X_RESOURCE environment variable
+// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
+// will also enable this).
+var Resource = newFeature("RESOURCE", func(v string) (string, bool) {
+	if strings.ToLower(v) == "true" {
+		return v, true
+	}
+	return "", false
+})
+
+// Feature is an experimental feature control flag. It provides a uniform way
+// to interact with these feature flags and parse their values.
+type Feature[T any] struct {
+	key   string
+	parse func(v string) (T, bool)
+}
+
+func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] {
+	const envKeyRoot = "OTEL_GO_X_"
+	return Feature[T]{
+		key:   envKeyRoot + suffix,
+		parse: parse,
+	}
+}
+
+// Key returns the environment variable key that needs to be set to enable the
+// feature.
+func (f Feature[T]) Key() string { return f.key }
+
+// Lookup returns the user configured value for the feature and true if the
+// user has enabled the feature. Otherwise, if the feature is not enabled, a
+// zero-value and false are returned.
+func (f Feature[T]) Lookup() (v T, ok bool) {
+	// https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value
+	//
+	// > The SDK MUST interpret an empty value of an environment variable the
+	// > same way as when the variable is unset.
+	vRaw := os.Getenv(f.key)
+	if vRaw == "" {
+		return v, ok
+	}
+	return f.parse(vRaw)
+}
+
+// Enabled returns if the feature is enabled.
+func (f Feature[T]) Enabled() bool {
+	_, ok := f.Lookup()
+	return ok
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go
index 9a41f94e979..bbe7bf671fd 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go
@@ -122,7 +122,7 @@ func WithReader(r Reader) Option {
 	})
 }
 
-// WithView associates views a MeterProvider.
+// WithView associates views with a MeterProvider.
 //
 // Views are appended to existing ones in a MeterProvider if this option is
 // used multiple times.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go
index c774a4684f2..82619da78ec 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go
@@ -19,67 +19,63 @@ import (
 // Note: This will only return non-nil values when the experimental exemplar
 // feature is enabled and the OTEL_METRICS_EXEMPLAR_FILTER environment variable
 // is not set to always_off.
-func reservoirFunc(agg Aggregation) func() exemplar.Reservoir {
+func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.FilteredReservoir[N] {
 	if !x.Exemplars.Enabled() {
 		return nil
 	}
-
-	// https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/metrics/sdk.md#exemplar-defaults
-	resF := func() func() exemplar.Reservoir {
-		// Explicit bucket histogram aggregation with more than 1 bucket will
-		// use AlignedHistogramBucketExemplarReservoir.
- a, ok := agg.(AggregationExplicitBucketHistogram) - if ok && len(a.Boundaries) > 0 { - cp := slices.Clone(a.Boundaries) - return func() exemplar.Reservoir { - bounds := cp - return exemplar.Histogram(bounds) - } - } - - var n int - if a, ok := agg.(AggregationBase2ExponentialHistogram); ok { - // Base2 Exponential Histogram Aggregation SHOULD use a - // SimpleFixedSizeExemplarReservoir with a reservoir equal to the - // smaller of the maximum number of buckets configured on the - // aggregation or twenty (e.g. min(20, max_buckets)). - n = int(a.MaxSize) - if n > 20 { - n = 20 - } - } else { - // https://github.com/open-telemetry/opentelemetry-specification/blob/e94af89e3d0c01de30127a0f423e912f6cda7bed/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir - // This Exemplar reservoir MAY take a configuration parameter for - // the size of the reservoir. If no size configuration is - // provided, the default size MAY be the number of possible - // concurrent threads (e.g. number of CPUs) to help reduce - // contention. Otherwise, a default size of 1 SHOULD be used. - n = runtime.NumCPU() - if n < 1 { - // Should never be the case, but be defensive. - n = 1 - } - } - - return func() exemplar.Reservoir { - return exemplar.FixedSize(n) - } - } - // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar const filterEnvKey = "OTEL_METRICS_EXEMPLAR_FILTER" + var filter exemplar.Filter + switch os.Getenv(filterEnvKey) { case "always_on": - return resF() + filter = exemplar.AlwaysOnFilter case "always_off": return exemplar.Drop case "trace_based": fallthrough default: - newR := resF() - return func() exemplar.Reservoir { - return exemplar.SampledFilter(newR()) + filter = exemplar.SampledFilter + } + + // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/metrics/sdk.md#exemplar-defaults + // Explicit bucket histogram aggregation with more than 1 bucket will + // use AlignedHistogramBucketExemplarReservoir. + a, ok := agg.(AggregationExplicitBucketHistogram) + if ok && len(a.Boundaries) > 0 { + cp := slices.Clone(a.Boundaries) + return func() exemplar.FilteredReservoir[N] { + bounds := cp + return exemplar.NewFilteredReservoir[N](filter, exemplar.Histogram(bounds)) } } + + var n int + if a, ok := agg.(AggregationBase2ExponentialHistogram); ok { + // Base2 Exponential Histogram Aggregation SHOULD use a + // SimpleFixedSizeExemplarReservoir with a reservoir equal to the + // smaller of the maximum number of buckets configured on the + // aggregation or twenty (e.g. min(20, max_buckets)). + n = int(a.MaxSize) + if n > 20 { + n = 20 + } + } else { + // https://github.com/open-telemetry/opentelemetry-specification/blob/e94af89e3d0c01de30127a0f423e912f6cda7bed/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir + // This Exemplar reservoir MAY take a configuration parameter for + // the size of the reservoir. If no size configuration is + // provided, the default size MAY be the number of possible + // concurrent threads (e.g. number of CPUs) to help reduce + // contention. Otherwise, a default size of 1 SHOULD be used. + n = runtime.NumCPU() + if n < 1 { + // Should never be the case, but be defensive. 
+			n = 1
+		}
+	}
+
+	return func() exemplar.FilteredReservoir[N] {
+		return exemplar.NewFilteredReservoir[N](filter, exemplar.FixedSize(n))
+	}
 }
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
index f9768fd11cc..b52a330b3bc 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
@@ -18,10 +18,7 @@ import (
 	"go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
 )
 
-var (
-	zeroInstrumentKind InstrumentKind
-	zeroScope          instrumentation.Scope
-)
+var zeroScope instrumentation.Scope
 
 // InstrumentKind is the identifier of a group of instruments that all
 // perform the same function.
@@ -77,11 +74,11 @@ type Instrument struct {
 	nonComparable // nolint: unused
 }
 
-// empty returns if all fields of i are their zero-value.
-func (i Instrument) empty() bool {
+// IsEmpty returns if all Instrument fields are their zero-value.
+func (i Instrument) IsEmpty() bool {
 	return i.Name == "" &&
 		i.Description == "" &&
-		i.Kind == zeroInstrumentKind &&
+		i.Kind == instrumentKindUndefined &&
 		i.Unit == "" &&
 		i.Scope == zeroScope
 }
@@ -112,7 +109,7 @@ func (i Instrument) matchesDescription(other Instrument) bool {
 // matchesKind returns true if the Kind of i is its zero-value or it equals the
 // Kind of other, otherwise false.
 func (i Instrument) matchesKind(other Instrument) bool {
-	return i.Kind == zeroInstrumentKind || i.Kind == other.Kind
+	return i.Kind == instrumentKindUndefined || i.Kind == other.Kind
 }
 
 // matchesUnit returns true if the Unit of i is its zero-value or it equals the
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
index c9976de6c78..b18ee719bd1 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
@@ -39,7 +39,7 @@ type Builder[N int64 | float64] struct {
 	//
 	// If this is not provided a default factory function that returns an
 	// exemplar.Drop reservoir will be used.
-	ReservoirFunc func() exemplar.Reservoir
+	ReservoirFunc func() exemplar.FilteredReservoir[N]
 	// AggregationLimit is the cardinality limit of measurement attributes. Any
 	// measurement for new attributes once the limit has been reached will be
 	// aggregated into a single aggregate for the "otel.metric.overflow"
@@ -50,7 +50,7 @@ type Builder[N int64 | float64] struct {
 	AggregationLimit int
 }
 
-func (b Builder[N]) resFunc() func() exemplar.Reservoir {
+func (b Builder[N]) resFunc() func() exemplar.FilteredReservoir[N] {
 	if b.ReservoirFunc != nil {
 		return b.ReservoirFunc
 	}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
index 902074b5bfd..c9c7e8f62a9 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
@@ -31,7 +31,7 @@ const (
 
 // expoHistogramDataPoint is a single data point in an exponential histogram.
type expoHistogramDataPoint[N int64 | float64] struct { attrs attribute.Set - res exemplar.Reservoir + res exemplar.FilteredReservoir[N] count uint64 min N @@ -282,7 +282,7 @@ func (b *expoBuckets) downscale(delta int) { // newExponentialHistogram returns an Aggregator that summarizes a set of // measurements as an exponential histogram. Each histogram is scoped by attributes // and the aggregation cycle the measurements were made in. -func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() exemplar.Reservoir) *expoHistogram[N] { +func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *expoHistogram[N] { return &expoHistogram[N]{ noSum: noSum, noMinMax: noMinMax, @@ -305,7 +305,7 @@ type expoHistogram[N int64 | float64] struct { maxSize int maxScale int - newRes func() exemplar.Reservoir + newRes func() exemplar.FilteredReservoir[N] limit limiter[*expoHistogramDataPoint[N]] values map[attribute.Distinct]*expoHistogramDataPoint[N] valuesMu sync.Mutex @@ -319,8 +319,6 @@ func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attrib return } - t := now() - e.valuesMu.Lock() defer e.valuesMu.Unlock() @@ -333,7 +331,7 @@ func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attrib e.values[attr.Equivalent()] = v } v.record(value) - v.res.Offer(ctx, t, exemplar.NewValue(value), droppedAttr) + v.res.Offer(ctx, value, droppedAttr) } func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int { diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go index 213baf50f53..ade0941f5f5 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go @@ -17,7 +17,7 @@ import ( type buckets[N int64 | float64] struct { attrs attribute.Set - res exemplar.Reservoir + res exemplar.FilteredReservoir[N] counts []uint64 count uint64 @@ -48,13 +48,13 @@ type histValues[N int64 | float64] struct { noSum bool bounds []float64 - newRes func() exemplar.Reservoir + newRes func() exemplar.FilteredReservoir[N] limit limiter[*buckets[N]] values map[attribute.Distinct]*buckets[N] valuesMu sync.Mutex } -func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() exemplar.Reservoir) *histValues[N] { +func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histValues[N] { // The responsibility of keeping all buckets correctly associated with the // passed boundaries is ultimately this type's responsibility. Make a copy // here so we can always guarantee this. Or, in the case of failure, have @@ -80,8 +80,6 @@ func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute // (s.bounds[len(s.bounds)-1], +∞). idx := sort.SearchFloat64s(s.bounds, float64(value)) - t := now() - s.valuesMu.Lock() defer s.valuesMu.Unlock() @@ -106,12 +104,12 @@ func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute if !s.noSum { b.sum(value) } - b.res.Offer(ctx, t, exemplar.NewValue(value), droppedAttr) + b.res.Offer(ctx, value, droppedAttr) } // newHistogram returns an Aggregator that summarizes a set of measurements as // an histogram. 
-func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() exemplar.Reservoir) *histogram[N] { +func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histogram[N] { return &histogram[N]{ histValues: newHistValues[N](boundaries, noSum, limit, r), noMinMax: noMinMax, diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go index 8f406dd2bcb..c359368403e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go @@ -15,13 +15,12 @@ import ( // datapoint is timestamped measurement data. type datapoint[N int64 | float64] struct { - attrs attribute.Set - timestamp time.Time - value N - res exemplar.Reservoir + attrs attribute.Set + value N + res exemplar.FilteredReservoir[N] } -func newLastValue[N int64 | float64](limit int, r func() exemplar.Reservoir) *lastValue[N] { +func newLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *lastValue[N] { return &lastValue[N]{ newRes: r, limit: newLimiter[datapoint[N]](limit), @@ -34,15 +33,13 @@ func newLastValue[N int64 | float64](limit int, r func() exemplar.Reservoir) *la type lastValue[N int64 | float64] struct { sync.Mutex - newRes func() exemplar.Reservoir + newRes func() exemplar.FilteredReservoir[N] limit limiter[datapoint[N]] values map[attribute.Distinct]datapoint[N] start time.Time } func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { - t := now() - s.Lock() defer s.Unlock() @@ -53,14 +50,14 @@ func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute. } d.attrs = attr - d.timestamp = t d.value = value - d.res.Offer(ctx, t, exemplar.NewValue(value), droppedAttr) + d.res.Offer(ctx, value, droppedAttr) s.values[attr.Equivalent()] = d } func (s *lastValue[N]) delta(dest *metricdata.Aggregation) int { + t := now() // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of // the DataPoints is missed (better luck next time). gData, _ := (*dest).(metricdata.Gauge[N]) @@ -68,11 +65,11 @@ func (s *lastValue[N]) delta(dest *metricdata.Aggregation) int { s.Lock() defer s.Unlock() - n := s.copyDpts(&gData.DataPoints) + n := s.copyDpts(&gData.DataPoints, t) // Do not report stale values. clear(s.values) // Update start time for delta temporality. - s.start = now() + s.start = t *dest = gData @@ -80,6 +77,7 @@ func (s *lastValue[N]) delta(dest *metricdata.Aggregation) int { } func (s *lastValue[N]) cumulative(dest *metricdata.Aggregation) int { + t := now() // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of // the DataPoints is missed (better luck next time). gData, _ := (*dest).(metricdata.Gauge[N]) @@ -87,7 +85,7 @@ func (s *lastValue[N]) cumulative(dest *metricdata.Aggregation) int { s.Lock() defer s.Unlock() - n := s.copyDpts(&gData.DataPoints) + n := s.copyDpts(&gData.DataPoints, t) // TODO (#3006): This will use an unbounded amount of memory if there // are unbounded number of attribute sets being aggregated. Attribute // sets that become "stale" need to be forgotten so this will not @@ -99,7 +97,7 @@ func (s *lastValue[N]) cumulative(dest *metricdata.Aggregation) int { // copyDpts copies the datapoints held by s into dest. 
The number of datapoints // copied is returned. -func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N]) int { +func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N], t time.Time) int { n := len(s.values) *dest = reset(*dest, n, n) @@ -107,7 +105,7 @@ func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N]) int { for _, v := range s.values { (*dest)[i].Attributes = v.attrs (*dest)[i].StartTime = s.start - (*dest)[i].Time = v.timestamp + (*dest)[i].Time = t (*dest)[i].Value = v.value collectExemplars(&(*dest)[i].Exemplars, v.res.Collect) i++ @@ -117,7 +115,7 @@ func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N]) int { // newPrecomputedLastValue returns an aggregator that summarizes a set of // observations as the last one made. -func newPrecomputedLastValue[N int64 | float64](limit int, r func() exemplar.Reservoir) *precomputedLastValue[N] { +func newPrecomputedLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *precomputedLastValue[N] { return &precomputedLastValue[N]{lastValue: newLastValue[N](limit, r)} } @@ -127,6 +125,7 @@ type precomputedLastValue[N int64 | float64] struct { } func (s *precomputedLastValue[N]) delta(dest *metricdata.Aggregation) int { + t := now() // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of // the DataPoints is missed (better luck next time). gData, _ := (*dest).(metricdata.Gauge[N]) @@ -134,11 +133,11 @@ func (s *precomputedLastValue[N]) delta(dest *metricdata.Aggregation) int { s.Lock() defer s.Unlock() - n := s.copyDpts(&gData.DataPoints) + n := s.copyDpts(&gData.DataPoints, t) // Do not report stale values. clear(s.values) // Update start time for delta temporality. - s.start = now() + s.start = t *dest = gData @@ -146,6 +145,7 @@ func (s *precomputedLastValue[N]) delta(dest *metricdata.Aggregation) int { } func (s *precomputedLastValue[N]) cumulative(dest *metricdata.Aggregation) int { + t := now() // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of // the DataPoints is missed (better luck next time). gData, _ := (*dest).(metricdata.Gauge[N]) @@ -153,7 +153,7 @@ func (s *precomputedLastValue[N]) cumulative(dest *metricdata.Aggregation) int { s.Lock() defer s.Unlock() - n := s.copyDpts(&gData.DataPoints) + n := s.copyDpts(&gData.DataPoints, t) // Do not report stale values. clear(s.values) *dest = gData diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go index babe76aba9b..89136692260 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go @@ -15,19 +15,19 @@ import ( type sumValue[N int64 | float64] struct { n N - res exemplar.Reservoir + res exemplar.FilteredReservoir[N] attrs attribute.Set } // valueMap is the storage for sums. 
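// Aside (illustrative, not part of the patch): the lastValue hunks above move
// timestamping from measure time to collect time. now() is read once per
// delta/cumulative call and passed into copyDpts, so every gauge point
// exported in one collection carries the same Time. Condensed:
//
//	t := now()                            // one timestamp per collection
//	n := s.copyDpts(&gData.DataPoints, t) // stamps (*dest)[i].Time = t for every point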
 type valueMap[N int64 | float64] struct {
 	sync.Mutex
-	newRes func() exemplar.Reservoir
+	newRes func() exemplar.FilteredReservoir[N]
 	limit  limiter[sumValue[N]]
 	values map[attribute.Distinct]sumValue[N]
 }
 
-func newValueMap[N int64 | float64](limit int, r func() exemplar.Reservoir) *valueMap[N] {
+func newValueMap[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *valueMap[N] {
 	return &valueMap[N]{
 		newRes: r,
 		limit:  newLimiter[sumValue[N]](limit),
@@ -36,8 +36,6 @@ func newValueMap[N int64 | float64](limit int, r func() exemplar.Reservoir) *val
 }
 
 func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
-	t := now()
-
 	s.Lock()
 	defer s.Unlock()
 
@@ -49,7 +47,7 @@ func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.S
 
 	v.attrs = attr
 	v.n += value
-	v.res.Offer(ctx, t, exemplar.NewValue(value), droppedAttr)
+	v.res.Offer(ctx, value, droppedAttr)
 
 	s.values[attr.Equivalent()] = v
 }
@@ -57,7 +55,7 @@ func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.S
 // newSum returns an aggregator that summarizes a set of measurements as their
 // arithmetic sum. Each sum is scoped by attributes and the aggregation cycle
 // the measurements were made in.
-func newSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.Reservoir) *sum[N] {
+func newSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *sum[N] {
 	return &sum[N]{
 		valueMap:  newValueMap[N](limit, r),
 		monotonic: monotonic,
@@ -146,7 +144,7 @@ func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int {
 // newPrecomputedSum returns an aggregator that summarizes a set of
 // observations as their arithmetic sum. Each sum is scoped by attributes and
 // the aggregation cycle the measurements were made in.
-func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.Reservoir) *precomputedSum[N] {
+func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *precomputedSum[N] {
 	return &precomputedSum[N]{
 		valueMap:  newValueMap[N](limit, r),
 		monotonic: monotonic,
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go
index bf21e45dfaf..5a0f39ae147 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go
@@ -5,20 +5,19 @@ package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exempla
 
 import (
 	"context"
-	"time"
 
 	"go.opentelemetry.io/otel/attribute"
 )
 
-// Drop returns a [Reservoir] that drops all measurements it is offered.
-func Drop() Reservoir { return &dropRes{} }
+// Drop returns a [FilteredReservoir] that drops all measurements it is offered.
+func Drop[N int64 | float64]() FilteredReservoir[N] { return &dropRes[N]{} }
 
-type dropRes struct{}
+type dropRes[N int64 | float64] struct{}
 
 // Offer does nothing, all measurements offered will be dropped.
-func (r *dropRes) Offer(context.Context, time.Time, Value, []attribute.KeyValue) {}
+func (r *dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {}
 
 // Collect resets dest. No exemplars will ever be returned.
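// Aside (illustrative, not part of the patch): Drop is what Builder.resFunc
// above falls back to when no ReservoirFunc is configured, so exemplar
// collection becomes a no-op. A hypothetical explicit opt-out factory:
//
//	func noExemplars[N int64 | float64]() exemplar.FilteredReservoir[N] {
//		return exemplar.Drop[N]() // Offer discards; Collect resets dest
//	}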
-func (r *dropRes) Collect(dest *[]Exemplar) { +func (r *dropRes[N]) Collect(dest *[]Exemplar) { *dest = (*dest)[:0] } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go index d96aacc281a..152a069a09e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go @@ -5,25 +5,25 @@ package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exempla import ( "context" - "time" - "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" ) -// SampledFilter returns a [Reservoir] wrapping r that will only offer measurements -// to r if the passed context associated with the measurement contains a sampled -// [go.opentelemetry.io/otel/trace.SpanContext]. -func SampledFilter(r Reservoir) Reservoir { - return filtered{Reservoir: r} -} +// Filter determines if a measurement should be offered. +// +// The passed ctx needs to contain any baggage or span that were active +// when the measurement was made. This information may be used by the +// Reservoir in making a sampling decision. +type Filter func(context.Context) bool -type filtered struct { - Reservoir +// SampledFilter is a [Filter] that will only offer measurements +// if the passed context associated with the measurement contains a sampled +// [go.opentelemetry.io/otel/trace.SpanContext]. +func SampledFilter(ctx context.Context) bool { + return trace.SpanContextFromContext(ctx).IsSampled() } -func (f filtered) Offer(ctx context.Context, t time.Time, n Value, a []attribute.KeyValue) { - if trace.SpanContextFromContext(ctx).IsSampled() { - f.Reservoir.Offer(ctx, t, n, a) - } +// AlwaysOnFilter is a [Filter] that always offers measurements. +func AlwaysOnFilter(ctx context.Context) bool { + return true } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go new file mode 100644 index 00000000000..9fedfa4be68 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go @@ -0,0 +1,49 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// FilteredReservoir wraps a [Reservoir] with a filter. +type FilteredReservoir[N int64 | float64] interface { + // Offer accepts the parameters associated with a measurement. The + // parameters will be stored as an exemplar if the filter decides to + // sample the measurement. + // + // The passed ctx needs to contain any baggage or span that were active + // when the measurement was made. This information may be used by the + // Reservoir in making a sampling decision. + Offer(ctx context.Context, val N, attr []attribute.KeyValue) + // Collect returns all the held exemplars in the reservoir. + Collect(dest *[]Exemplar) +} + +// filteredReservoir handles the pre-sampled exemplar of measurements made. +type filteredReservoir[N int64 | float64] struct { + filter Filter + reservoir Reservoir +} + +// NewFilteredReservoir creates a [FilteredReservoir] which only offers values +// that are allowed by the filter. 
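// Aside (illustrative, not part of the patch): Filter is a plain
// func(context.Context) bool, so policies beyond SampledFilter and
// AlwaysOnFilter compose naturally. A hypothetical stricter filter
// (the function name is invented):
//
//	func validAndSampled(ctx context.Context) bool {
//		sc := trace.SpanContextFromContext(ctx)
//		return sc.IsValid() && sc.IsSampled()
//	}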
+func NewFilteredReservoir[N int64 | float64](f Filter, r Reservoir) FilteredReservoir[N] {
+	return &filteredReservoir[N]{
+		filter:    f,
+		reservoir: r,
+	}
+}
+
+func (f *filteredReservoir[N]) Offer(ctx context.Context, val N, attr []attribute.KeyValue) {
+	if f.filter(ctx) {
+		// only record the current time if we are sampling this measurement.
+		f.reservoir.Offer(ctx, time.Now(), NewValue(val), attr)
+	}
+}
+
+func (f *filteredReservoir[N]) Collect(dest *[]Exemplar) { f.reservoir.Collect(dest) }
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go
index 6753e116646..199a2608f71 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go
@@ -7,16 +7,21 @@ import (
 	"context"
 	"math"
 	"math/rand"
+	"sync"
 	"time"
 
 	"go.opentelemetry.io/otel/attribute"
 )
 
-// rng is used to make sampling decisions.
-//
-// Do not use crypto/rand. There is no reason for the decrease in performance
-// given this is not a security sensitive decision.
-var rng = rand.New(rand.NewSource(time.Now().UnixNano()))
+var (
+	// rng is used to make sampling decisions.
+	//
+	// Do not use crypto/rand. There is no reason for the decrease in performance
+	// given this is not a security sensitive decision.
+	rng = rand.New(rand.NewSource(time.Now().UnixNano()))
+	// Ensure concurrent safe access to rng and its underlying source.
+	rngMu sync.Mutex
+)
 
 // random returns, as a float64, a uniform pseudo-random number in the open
 // interval (0.0,1.0).
@@ -38,6 +43,9 @@ func random() float64 {
 	//
 	// There are likely many other methods to explore here as well.
 
+	rngMu.Lock()
+	defer rngMu.Unlock()
+
 	f := rng.Float64()
 	for f == 0 {
 		f = rng.Float64()
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go
index 9cdd9384c51..67ee1b11a2e 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go
@@ -334,7 +334,7 @@ func (r *PeriodicReader) Shutdown(ctx context.Context) error {
 		}
 
 		sErr := r.exporter.Shutdown(ctx)
-		if err == nil || err == ErrReaderShutdown {
+		if err == nil || errors.Is(err, ErrReaderShutdown) {
 			err = sErr
 		}
 
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
index c6f9597198c..823bf2fe3d2 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
@@ -349,7 +349,7 @@ func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind Instrum
 	cv := i.aggregators.Lookup(normID, func() aggVal[N] {
 		b := aggregate.Builder[N]{
 			Temporality:   i.pipeline.reader.temporality(kind),
-			ReservoirFunc: reservoirFunc(stream.Aggregation),
+			ReservoirFunc: reservoirFunc[N](stream.Aggregation),
 		}
 		b.Filter = stream.AttributeFilter
 		// A value less than or equal to zero will disable the aggregation
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
index 43f85cfbcfc..dade0a19a2b 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
@@ -5,5 +5,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric"
 
 // version is the current release version of the metric SDK in use.
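// Aside (illustrative, not part of the patch): the rand.go hunk above guards
// the shared rng because sources created with math/rand's NewSource are not
// safe for concurrent use. The guarded draw, condensed into a helper (name
// invented):
//
//	func lockedFloat64() float64 {
//		rngMu.Lock()
//		defer rngMu.Unlock()
//		return rng.Float64()
//	}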
 func version() string {
-	return "1.27.0"
+	return "1.28.0"
 }
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/view.go b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go
index 11e334319d0..cd08c673248 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/view.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go
@@ -43,7 +43,7 @@ type View func(Instrument) (Stream, bool)
 // of the default. If you need to zero out a Stream field returned from a
 // View, create a View directly.
 func NewView(criteria Instrument, mask Stream) View {
-	if criteria.empty() {
+	if criteria.IsEmpty() {
 		global.Error(
 			errEmptyView, "dropping view",
 			"mask", mask,
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
index 50d2df5eb4b..6ac1cdbf7b4 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
@@ -9,9 +9,11 @@ import (
 	"os"
 	"path/filepath"
 
+	"github.com/google/uuid"
+
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/sdk"
-	semconv "go.opentelemetry.io/otel/semconv/v1.25.0"
+	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
 )
 
 type (
@@ -36,6 +38,8 @@ type (
 	}
 
 	defaultServiceNameDetector struct{}
+
+	defaultServiceInstanceIDDetector struct{}
 )
 
 var (
@@ -43,6 +47,7 @@ var (
 	_ Detector = host{}
 	_ Detector = stringDetector{}
 	_ Detector = defaultServiceNameDetector{}
+	_ Detector = defaultServiceInstanceIDDetector{}
 )
 
 // Detect returns a *Resource that describes the OpenTelemetry SDK used.
@@ -95,3 +100,19 @@ func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error)
 		},
 	).Detect(ctx)
 }
+
+// Detect implements Detector.
+func (defaultServiceInstanceIDDetector) Detect(ctx context.Context) (*Resource, error) {
+	return StringDetector(
+		semconv.SchemaURL,
+		semconv.ServiceInstanceIDKey,
+		func() (string, error) {
+			version4Uuid, err := uuid.NewRandom()
+			if err != nil {
+				return "", err
+			}
+
+			return version4Uuid.String(), nil
+		},
+	).Detect(ctx)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
index 7525ee75f07..5ecd859a52d 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
@@ -11,7 +11,7 @@ import (
 	"os"
 	"regexp"
 
-	semconv "go.opentelemetry.io/otel/semconv/v1.25.0"
+	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
 )
 
 type containerIDProvider func() (string, error)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
index 0d5a355ab94..813f0562424 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
@@ -12,7 +12,7 @@ import (
 
 	"go.opentelemetry.io/otel"
 	"go.opentelemetry.io/otel/attribute"
-	semconv "go.opentelemetry.io/otel/semconv/v1.25.0"
+	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
 )
 
 const (
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
index 3c1aa6285b9..2d0f65498a0 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
@@ -8,7 +8,7 @@ import (
 	"errors"
 	"strings"
 
-	semconv "go.opentelemetry.io/otel/semconv/v1.25.0"
+	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
 )
 
 type hostIDProvider func() (string, error)
diff --git
a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go index ff78020fa10..8a48ab4fa32 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -8,7 +8,7 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.25.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" ) type osDescriptionProvider func() (string, error) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go index e4e1df8c984..085fe68fd77 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -11,7 +11,7 @@ import ( "path/filepath" "runtime" - semconv "go.opentelemetry.io/otel/semconv/v1.25.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" ) type ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go index 9f1af3a236d..ad4b50df404 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -11,6 +11,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/internal/x" ) // Resource describes an entity about which identifying information @@ -218,11 +219,17 @@ func Empty() *Resource { func Default() *Resource { defaultResourceOnce.Do(func() { var err error - defaultResource, err = Detect( - context.Background(), + defaultDetectors := []Detector{ defaultServiceNameDetector{}, fromEnv{}, telemetrySDK{}, + } + if x.Resource.Enabled() { + defaultDetectors = append([]Detector{defaultServiceInstanceIDDetector{}}, defaultDetectors...) + } + defaultResource, err = Detect( + context.Background(), + defaultDetectors..., ) if err != nil { otel.Handle(err) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 8a89fffdb4a..1d399a75db2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -381,7 +381,7 @@ func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd R } } -func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool { +func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) bool { if !sd.SpanContext().IsSampled() { return false } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go index 69eb2fdfce3..821c83faa1d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go @@ -3,23 +3,43 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" +import ( + "slices" + "sync" + + "go.opentelemetry.io/otel/internal/global" +) + // evictedQueue is a FIFO queue with a configurable capacity. -type evictedQueue struct { - queue []interface{} +type evictedQueue[T any] struct { + queue []T capacity int droppedCount int + logDropped func() } -func newEvictedQueue(capacity int) evictedQueue { +func newEvictedQueueEvent(capacity int) evictedQueue[Event] { // Do not pre-allocate queue, do this lazily. 
- return evictedQueue{capacity: capacity} + return evictedQueue[Event]{ + capacity: capacity, + logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Event") }), + } +} + +func newEvictedQueueLink(capacity int) evictedQueue[Link] { + // Do not pre-allocate queue, do this lazily. + return evictedQueue[Link]{ + capacity: capacity, + logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Link") }), + } } // add adds value to the evictedQueue eq. If eq is at capacity, the oldest // queued value will be discarded and the drop count incremented. -func (eq *evictedQueue) add(value interface{}) { +func (eq *evictedQueue[T]) add(value T) { if eq.capacity == 0 { eq.droppedCount++ + eq.logDropped() return } @@ -28,6 +48,12 @@ func (eq *evictedQueue) add(value interface{}) { copy(eq.queue[:eq.capacity-1], eq.queue[1:]) eq.queue = eq.queue[:eq.capacity-1] eq.droppedCount++ + eq.logDropped() } eq.queue = append(eq.queue, value) } + +// copy returns a copy of the evictedQueue. +func (eq *evictedQueue[T]) copy() []T { + return slices.Clone(eq.queue) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go index f9633d8c576..925bcf99305 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go @@ -41,7 +41,12 @@ func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.Trace gen.Lock() defer gen.Unlock() sid := trace.SpanID{} - _, _ = gen.randSource.Read(sid[:]) + for { + _, _ = gen.randSource.Read(sid[:]) + if sid.IsValid() { + break + } + } return sid } @@ -51,9 +56,19 @@ func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace. gen.Lock() defer gen.Unlock() tid := trace.TraceID{} - _, _ = gen.randSource.Read(tid[:]) sid := trace.SpanID{} - _, _ = gen.randSource.Read(sid[:]) + for { + _, _ = gen.randSource.Read(tid[:]) + if tid.IsValid() { + break + } + } + for { + _, _ = gen.randSource.Read(sid[:]) + if sid.IsValid() { + break + } + } return tid, sid } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index dec237ca731..14c2e5bebda 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -291,7 +291,7 @@ func (p *TracerProvider) Shutdown(ctx context.Context) error { retErr = err } else { // Poor man's list of errors - retErr = fmt.Errorf("%v; %v", retErr, err) + retErr = fmt.Errorf("%w; %w", retErr, err) } } } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index f0221eaa85b..ac90f1a2600 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -17,10 +17,10 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/internal" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.25.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) @@ -137,12 +137,13 @@ type recordingSpan struct { // ReadOnlySpan exported when the span ends. 
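// Aside (illustrative, not part of the patch): with evictedQueue[T] generic,
// span events and links are stored and read back without interface{} type
// assertions. A usage sketch of the queue semantics (capacity and values
// invented):
//
//	q := newEvictedQueueEvent(2)
//	q.add(Event{Name: "a"})
//	q.add(Event{Name: "b"})
//	q.add(Event{Name: "c"}) // evicts "a", bumps droppedCount, warns once
//	evs := q.copy()         // []Event{{Name: "b"}, {Name: "c"}}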
 	attributes        []attribute.KeyValue
 	droppedAttributes int
+	logDropAttrsOnce  sync.Once
 
 	// events are stored in FIFO queue capped by configured limit.
-	events evictedQueue
+	events evictedQueue[Event]
 
 	// links are stored in FIFO queue capped by configured limit.
-	links evictedQueue
+	links evictedQueue[Link]
 
 	// executionTracerTaskEnd ends the execution tracer span.
 	executionTracerTaskEnd func()
@@ -219,7 +220,7 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
 	limit := s.tracer.provider.spanLimits.AttributeCountLimit
 	if limit == 0 {
 		// No attributes allowed.
-		s.droppedAttributes += len(attributes)
+		s.addDroppedAttr(len(attributes))
 		return
 	}
 
@@ -236,7 +237,7 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
 	for _, a := range attributes {
 		if !a.Valid() {
 			// Drop all invalid attributes.
-			s.droppedAttributes++
+			s.addDroppedAttr(1)
 			continue
 		}
 		a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
@@ -244,6 +245,22 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
 	}
 }
 
+// Declared as a var so tests can override.
+var logDropAttrs = func() {
+	global.Warn("limit reached: dropping trace Span attributes")
+}
+
+// addDroppedAttr adds incr to the count of dropped attributes.
+//
+// The first, and only the first, time this method is called a warning will be
+// logged.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) addDroppedAttr(incr int) {
+	s.droppedAttributes += incr
+	s.logDropAttrsOnce.Do(logDropAttrs)
+}
+
 // addOverCapAttrs adds the attributes attrs to the span s while
 // de-duplicating the attributes of s and attrs and dropping attributes that
 // exceed the limit.
@@ -273,7 +290,7 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) {
 	for _, a := range attrs {
 		if !a.Valid() {
 			// Drop all invalid attributes.
-			s.droppedAttributes++
+			s.addDroppedAttr(1)
 			continue
 		}
 
@@ -286,7 +303,7 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) {
 		if len(s.attributes) >= limit {
 			// Do not just drop all of the remaining attributes, make sure
 			// updates are checked and performed.
-			s.droppedAttributes++
+			s.addDroppedAttr(1)
 		} else {
 			a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
 			s.attributes = append(s.attributes, a)
@@ -367,7 +384,7 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) {
 
 	// Store the end time as soon as possible to avoid artificially increasing
 	// the span's duration in case some operation below takes a while.
-	et := internal.MonotonicEndTime(s.startTime)
+	et := monotonicEndTime(s.startTime)
 
 	// Do a relatively expensive check now that we have an end time and see if we
 	// need to do any more processing.
@@ -418,6 +435,16 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) {
 	}
 }
 
+// monotonicEndTime returns the end time at present but offset from start,
+// monotonically.
+//
+// The monotonic clock is used in subtractions hence the duration since start
+// added back to start gives end as a monotonic time. See
+// https://golang.org/pkg/time/#hdr-Monotonic_Clocks
+func monotonicEndTime(start time.Time) time.Time {
+	return start.Add(time.Since(start))
+}
+
 // RecordError will record err as a span event for this span. An additional call to
 // SetStatus is required if the Status of the Span should be set to Error; this method
 // does not change the Span status.
If this span is not being recorded or err is nil @@ -585,7 +612,7 @@ func (s *recordingSpan) Links() []Link { if len(s.links.queue) == 0 { return []Link{} } - return s.interfaceArrayToLinksArray() + return s.links.copy() } // Events returns the events of this span. @@ -595,7 +622,7 @@ func (s *recordingSpan) Events() []Event { if len(s.events.queue) == 0 { return []Event{} } - return s.interfaceArrayToEventArray() + return s.events.copy() } // Status returns the status of this span. @@ -717,32 +744,16 @@ func (s *recordingSpan) snapshot() ReadOnlySpan { } sd.droppedAttributeCount = s.droppedAttributes if len(s.events.queue) > 0 { - sd.events = s.interfaceArrayToEventArray() + sd.events = s.events.copy() sd.droppedEventCount = s.events.droppedCount } if len(s.links.queue) > 0 { - sd.links = s.interfaceArrayToLinksArray() + sd.links = s.links.copy() sd.droppedLinkCount = s.links.droppedCount } return &sd } -func (s *recordingSpan) interfaceArrayToLinksArray() []Link { - linkArr := make([]Link, 0) - for _, value := range s.links.queue { - linkArr = append(linkArr, value.(Link)) - } - return linkArr -} - -func (s *recordingSpan) interfaceArrayToEventArray() []Event { - eventArr := make([]Event, 0) - for _, value := range s.events.queue { - eventArr = append(eventArr, value.(Event)) - } - return eventArr -} - func (s *recordingSpan) addChild() { if !s.IsRecording() { return diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go index 3668b1387d0..43419d3b541 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go @@ -132,8 +132,8 @@ func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr Sa spanKind: trace.ValidateSpanKind(config.SpanKind()), name: name, startTime: startTime, - events: newEvictedQueue(tr.provider.spanLimits.EventCountLimit), - links: newEvictedQueue(tr.provider.spanLimits.LinkCountLimit), + events: newEvictedQueueEvent(tr.provider.spanLimits.EventCountLimit), + links: newEvictedQueueLink(tr.provider.spanLimits.LinkCountLimit), tracer: tr, } diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index f0d8fc51a24..33d065a7cb9 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -5,5 +5,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.27.0" + return "1.28.0" } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md new file mode 100644 index 00000000000..0b6cbe960cb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.24.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.24.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.24.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go new file mode 100644 index 00000000000..6e688345cbb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go @@ -0,0 +1,4387 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. 
+ +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +import "go.opentelemetry.io/otel/attribute" + +// Describes FaaS attributes. +const ( + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like + // AWS or GCP, the region in which a function is hosted is essential to + // uniquely identify the function and also part of its endpoint. Since it's + // part of the endpoint being called, the region is always known to + // clients. In these cases, `faas.invoked_region` MUST be set accordingly. + // If the region is unknown to the client or not required for identifying + // the invoked function, setting `faas.invoked_region` is optional.) + // Stability: experimental + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") + + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function invocation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + FaaSTriggerKey = attribute.Key("faas.trigger") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. 
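// Aside (illustrative, not part of the patch): these generated semconv
// helpers produce attribute.KeyValue pairs for instrumentation. Hypothetical
// usage on a span (the span variable and values are assumed):
//
//	span.SetAttributes(
//		semconv.FaaSInvokedName("my-function"),
//		semconv.FaaSInvokedProviderAWS,
//		semconv.FaaSInvokedRegion("eu-central-1"),
//	)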
+func FaaSInvokedName(val string) attribute.KeyValue {
+	return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+	return FaaSInvokedRegionKey.String(val)
+}
+
+// Attributes for Events represented using Log Records.
+const (
+	// EventNameKey is the attribute Key conforming to the "event.name"
+	// semantic conventions. It identifies the class / type of an
+	// event.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'browser.mouse.click', 'device.app.lifecycle'
+	// Note: Event names are subject to the same rules as [attribute
+	// names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.26.0/specification/common/attribute-naming.md).
+	// Notably, event names are namespaced to avoid collisions and provide a
+	// clean separation of semantics for events in separate domains like
+	// browser, mobile, and kubernetes.
+	EventNameKey = attribute.Key("event.name")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It identifies the class / type of an
+// event.
+func EventName(val string) attribute.KeyValue {
+	return EventNameKey.String(val)
+}
+
+// The attributes described in this section are rather generic. They may be
+// used in any Log Record they apply to.
+const (
+	// LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+	// semantic conventions. It represents a unique identifier for the Log
+	// Record.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+	// Note: If an id is provided, other log records with the same id will be
+	// considered duplicates and can be removed safely. This means that two
+	// distinguishable log records MUST have different values.
+	// The id MAY be a [Universally Unique Lexicographically Sortable
+	// Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
+	// (e.g. UUID) may be used as needed.
+	LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+	return LogRecordUIDKey.String(val)
+}
+
+// Describes Log attributes
+const (
+	// LogIostreamKey is the attribute Key conforming to the "log.iostream"
+	// semantic conventions. It represents the stream associated with the log.
+	// See below for a list of well-known values.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	LogIostreamKey = attribute.Key("log.iostream")
+)
+
+var (
+	// Logs from stdout stream
+	LogIostreamStdout = LogIostreamKey.String("stdout")
+	// Logs from stderr stream
+	LogIostreamStderr = LogIostreamKey.String("stderr")
+)
+
+// A file to which log was emitted.
+const (
+	// LogFileNameKey is the attribute Key conforming to the "log.file.name"
+	// semantic conventions. It represents the basename of the file.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: experimental + // Examples: 'audit.log' + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the + // basename of the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'uuid.log' + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/log/mysql/audit.log' + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full + // path to the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/lib/docker/uuid.log' + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") +) + +// LogFileName returns an attribute KeyValue conforming to the +// "log.file.name" semantic conventions. It represents the basename of the +// file. +func LogFileName(val string) attribute.KeyValue { + return LogFileNameKey.String(val) +} + +// LogFileNameResolved returns an attribute KeyValue conforming to the +// "log.file.name_resolved" semantic conventions. It represents the basename of +// the file, with symlinks resolved. +func LogFileNameResolved(val string) attribute.KeyValue { + return LogFileNameResolvedKey.String(val) +} + +// LogFilePath returns an attribute KeyValue conforming to the +// "log.file.path" semantic conventions. It represents the full path to the +// file. +func LogFilePath(val string) attribute.KeyValue { + return LogFilePathKey.String(val) +} + +// LogFilePathResolved returns an attribute KeyValue conforming to the +// "log.file.path_resolved" semantic conventions. It represents the full path +// to the file, with symlinks resolved. +func LogFilePathResolved(val string) attribute.KeyValue { + return LogFilePathResolvedKey.String(val) +} + +// Describes Database attributes +const ( + // PoolNameKey is the attribute Key conforming to the "pool.name" semantic + // conventions. It represents the name of the connection pool; unique + // within the instrumented application. In case the connection pool + // implementation doesn't provide a name, then the + // [db.connection_string](/docs/database/database-spans.md#connection-level-attributes) + // should be used + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'myDataSource' + PoolNameKey = attribute.Key("pool.name") + + // StateKey is the attribute Key conforming to the "state" semantic + // conventions. It represents the state of a connection in the pool + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + // Examples: 'idle' + StateKey = attribute.Key("state") +) + +var ( + // idle + StateIdle = StateKey.String("idle") + // used + StateUsed = StateKey.String("used") +) + +// PoolName returns an attribute KeyValue conforming to the "pool.name" +// semantic conventions. It represents the name of the connection pool; unique +// within the instrumented application. 
In case the connection pool
+// implementation doesn't provide a name, then the
+// [db.connection_string](/docs/database/database-spans.md#connection-level-attributes)
+// should be used
+func PoolName(val string) attribute.KeyValue {
+	return PoolNameKey.String(val)
+}
+
+// ASP.NET Core attributes
+const (
+	// AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to
+	// the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+	// represents the full type name of the
+	// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+	// implementation that handled the exception.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (if and only if the exception
+	// was handled by this handler.)
+	// Stability: experimental
+	// Examples: 'Contoso.MyHandler'
+	AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type")
+
+	// AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the
+	// "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+	// the rate limiting policy name.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (if the matched endpoint for the
+	// request had a rate-limiting policy.)
+	// Stability: experimental
+	// Examples: 'fixed', 'sliding', 'token'
+	AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy")
+
+	// AspnetcoreRateLimitingResultKey is the attribute Key conforming to the
+	// "aspnetcore.rate_limiting.result" semantic conventions. It represents
+	// the rate-limiting result; it shows whether the lease was acquired or
+	// contains a rejection reason
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'acquired', 'request_canceled'
+	AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result")
+
+	// AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the
+	// "aspnetcore.request.is_unhandled" semantic conventions. It represents
+	// the flag indicating if the request was handled by the application pipeline.
+	//
+	// Type: boolean
+	// RequirementLevel: ConditionallyRequired (if and only if the request was
+	// not handled.)
+	// Stability: experimental
+	// Examples: True
+	AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled")
+
+	// AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the
+	// "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+	// value that indicates whether the matched route is a fallback route.
+	//
+	// Type: boolean
+	// RequirementLevel: ConditionallyRequired (If and only if a route was
+	// successfully matched.)
+	// Stability: experimental
+	// Examples: True
+	AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback")
+)
+
+var (
+	// Lease was acquired
+	AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired")
+	// Lease request was rejected by the endpoint limiter
+	AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter")
+	// Lease request was rejected by the global limiter
+	AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter")
+	// Lease request was canceled
+	AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled")
+)
+
+// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming
+// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+// represents the full type name of the
+// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+// implementation that handled the exception.
+func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue {
+	return AspnetcoreDiagnosticsHandlerTypeKey.String(val)
+}
+
+// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to
+// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+// the rate limiting policy name.
+func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue {
+	return AspnetcoreRateLimitingPolicyKey.String(val)
+}
+
+// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to
+// the "aspnetcore.request.is_unhandled" semantic conventions. It represents
+// the flag indicating if the request was handled by the application pipeline.
+func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue {
+	return AspnetcoreRequestIsUnhandledKey.Bool(val)
+}
+
+// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to
+// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+// value that indicates whether the matched route is a fallback route.
+func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue {
+	return AspnetcoreRoutingIsFallbackKey.Bool(val)
+}
+
+// SignalR attributes
+const (
+	// SignalrConnectionStatusKey is the attribute Key conforming to the
+	// "signalr.connection.status" semantic conventions. It represents the
+	// SignalR HTTP connection closure status.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'app_shutdown', 'timeout'
+	SignalrConnectionStatusKey = attribute.Key("signalr.connection.status")
+
+	// SignalrTransportKey is the attribute Key conforming to the
+	// "signalr.transport" semantic conventions.
It represents the [SignalR + // transport + // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'web_sockets', 'long_polling' + SignalrTransportKey = attribute.Key("signalr.transport") +) + +var ( + // The connection was closed normally + SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") + // The connection was closed due to a timeout + SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") + // The connection was closed because the app is shutting down + SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") +) + +var ( + // ServerSentEvents protocol + SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") + // LongPolling protocol + SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") + // WebSockets protocol + SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") +) + +// Describes JVM buffer metric attributes. +const ( + // JvmBufferPoolNameKey is the attribute Key conforming to the + // "jvm.buffer.pool.name" semantic conventions. It represents the name of + // the buffer pool. + // + // Type: string + // RequirementLevel: Recommended + // Stability: experimental + // Examples: 'mapped', 'direct' + // Note: Pool names are generally obtained via + // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). + JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name") +) + +// JvmBufferPoolName returns an attribute KeyValue conforming to the +// "jvm.buffer.pool.name" semantic conventions. It represents the name of the +// buffer pool. +func JvmBufferPoolName(val string) attribute.KeyValue { + return JvmBufferPoolNameKey.String(val) +} + +// Describes JVM memory metric attributes. +const ( + // JvmMemoryPoolNameKey is the attribute Key conforming to the + // "jvm.memory.pool.name" semantic conventions. It represents the name of + // the memory pool. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' + // Note: Pool names are generally obtained via + // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). + JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") + + // JvmMemoryTypeKey is the attribute Key conforming to the + // "jvm.memory.type" semantic conventions. It represents the type of + // memory. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'heap', 'non_heap' + JvmMemoryTypeKey = attribute.Key("jvm.memory.type") +) + +var ( + // Heap memory + JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap") + // Non-heap memory + JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap") +) + +// JvmMemoryPoolName returns an attribute KeyValue conforming to the +// "jvm.memory.pool.name" semantic conventions. It represents the name of the +// memory pool. +func JvmMemoryPoolName(val string) attribute.KeyValue { + return JvmMemoryPoolNameKey.String(val) +} + +// Describes System metric attributes +const ( + // SystemDeviceKey is the attribute Key conforming to the "system.device" + // semantic conventions. 
It represents the device identifier + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '(identifier)' + SystemDeviceKey = attribute.Key("system.device") +) + +// SystemDevice returns an attribute KeyValue conforming to the +// "system.device" semantic conventions. It represents the device identifier +func SystemDevice(val string) attribute.KeyValue { + return SystemDeviceKey.String(val) +} + +// Describes System CPU metric attributes +const ( + // SystemCPULogicalNumberKey is the attribute Key conforming to the + // "system.cpu.logical_number" semantic conventions. It represents the + // logical CPU number [0..n-1] + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1 + SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") + + // SystemCPUStateKey is the attribute Key conforming to the + // "system.cpu.state" semantic conventions. It represents the state of the + // CPU + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'idle', 'interrupt' + SystemCPUStateKey = attribute.Key("system.cpu.state") +) + +var ( + // user + SystemCPUStateUser = SystemCPUStateKey.String("user") + // system + SystemCPUStateSystem = SystemCPUStateKey.String("system") + // nice + SystemCPUStateNice = SystemCPUStateKey.String("nice") + // idle + SystemCPUStateIdle = SystemCPUStateKey.String("idle") + // iowait + SystemCPUStateIowait = SystemCPUStateKey.String("iowait") + // interrupt + SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") + // steal + SystemCPUStateSteal = SystemCPUStateKey.String("steal") +) + +// SystemCPULogicalNumber returns an attribute KeyValue conforming to the +// "system.cpu.logical_number" semantic conventions. It represents the logical +// CPU number [0..n-1] +func SystemCPULogicalNumber(val int) attribute.KeyValue { + return SystemCPULogicalNumberKey.Int(val) +} + +// Describes System Memory metric attributes +const ( + // SystemMemoryStateKey is the attribute Key conforming to the + // "system.memory.state" semantic conventions. It represents the memory + // state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free', 'cached' + SystemMemoryStateKey = attribute.Key("system.memory.state") +) + +var ( + // used + SystemMemoryStateUsed = SystemMemoryStateKey.String("used") + // free + SystemMemoryStateFree = SystemMemoryStateKey.String("free") + // shared + SystemMemoryStateShared = SystemMemoryStateKey.String("shared") + // buffers + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + SystemMemoryStateCached = SystemMemoryStateKey.String("cached") +) + +// Describes System Memory Paging metric attributes +const ( + // SystemPagingDirectionKey is the attribute Key conforming to the + // "system.paging.direction" semantic conventions. It represents the paging + // access direction + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'in' + SystemPagingDirectionKey = attribute.Key("system.paging.direction") + + // SystemPagingStateKey is the attribute Key conforming to the + // "system.paging.state" semantic conventions. 
It represents the memory + // paging state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free' + SystemPagingStateKey = attribute.Key("system.paging.state") + + // SystemPagingTypeKey is the attribute Key conforming to the + // "system.paging.type" semantic conventions. It represents the memory + // paging type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'minor' + SystemPagingTypeKey = attribute.Key("system.paging.type") +) + +var ( + // in + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +var ( + // used + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +var ( + // major + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Describes Filesystem metric attributes +const ( + // SystemFilesystemModeKey is the attribute Key conforming to the + // "system.filesystem.mode" semantic conventions. It represents the + // filesystem mode + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'rw, ro' + SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") + + // SystemFilesystemMountpointKey is the attribute Key conforming to the + // "system.filesystem.mountpoint" semantic conventions. It represents the + // filesystem mount path + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/mnt/data' + SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") + + // SystemFilesystemStateKey is the attribute Key conforming to the + // "system.filesystem.state" semantic conventions. It represents the + // filesystem state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'used' + SystemFilesystemStateKey = attribute.Key("system.filesystem.state") + + // SystemFilesystemTypeKey is the attribute Key conforming to the + // "system.filesystem.type" semantic conventions. It represents the + // filesystem type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ext4' + SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") +) + +var ( + // used + SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") + // free + SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") + // reserved + SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") +) + +var ( + // fat32 + SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") + // exfat + SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") + // ntfs + SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") + // refs + SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") + // hfsplus + SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") + // ext4 + SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") +) + +// SystemFilesystemMode returns an attribute KeyValue conforming to the +// "system.filesystem.mode" semantic conventions. 
It represents the filesystem
+// mode
+func SystemFilesystemMode(val string) attribute.KeyValue {
+	return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
+// the "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+	return SystemFilesystemMountpointKey.String(val)
+}
+
+// Describes Network metric attributes
+const (
+	// SystemNetworkStateKey is the attribute Key conforming to the
+	// "system.network.state" semantic conventions. It represents the state of
+	// a network connection; a stateless protocol MUST NOT set this attribute
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'close_wait'
+	SystemNetworkStateKey = attribute.Key("system.network.state")
+)
+
+var (
+	// close
+	SystemNetworkStateClose = SystemNetworkStateKey.String("close")
+	// close_wait
+	SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
+	// closing
+	SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
+	// delete
+	SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
+	// established
+	SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
+	// fin_wait_1
+	SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
+	// fin_wait_2
+	SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
+	// last_ack
+	SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
+	// listen
+	SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
+	// syn_recv
+	SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
+	// syn_sent
+	SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
+	// time_wait
+	SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
+)
+
+// Describes System Process metric attributes
+const (
+	// SystemProcessesStatusKey is the attribute Key conforming to the
+	// "system.processes.status" semantic conventions. It represents the
+	// process state, e.g., [Linux Process State
+	// Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'running'
+	SystemProcessesStatusKey = attribute.Key("system.processes.status")
+)
+
+var (
+	// running
+	SystemProcessesStatusRunning = SystemProcessesStatusKey.String("running")
+	// sleeping
+	SystemProcessesStatusSleeping = SystemProcessesStatusKey.String("sleeping")
+	// stopped
+	SystemProcessesStatusStopped = SystemProcessesStatusKey.String("stopped")
+	// defunct
+	SystemProcessesStatusDefunct = SystemProcessesStatusKey.String("defunct")
+)
+
+// These attributes may be used to describe the client in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+	// ClientAddressKey is the attribute Key conforming to the "client.address"
+	// semantic conventions.
It represents the client address - domain name if
+	// available without reverse DNS lookup; otherwise, IP address or Unix
+	// domain socket name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock'
+	// Note: When observed from the server side, and when communicating through
+	// an intermediary, `client.address` SHOULD represent the client address
+	// behind any intermediaries, for example proxies, if it's available.
+	ClientAddressKey = attribute.Key("client.address")
+
+	// ClientPortKey is the attribute Key conforming to the "client.port"
+	// semantic conventions. It represents the client port number.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	// Note: When observed from the server side, and when communicating through
+	// an intermediary, `client.port` SHOULD represent the client port behind
+	// any intermediaries, for example proxies, if it's available.
+	ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the
+// "client.address" semantic conventions. It represents the client address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func ClientAddress(val string) attribute.KeyValue {
+	return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+	return ClientPortKey.Int(val)
+}
+
+// The attributes used to describe telemetry in the context of databases.
+const (
+	// DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+	// "db.cassandra.consistency_level" semantic conventions. It represents the
+	// consistency level of the query. Based on consistency values from
+	// [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+	// DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+	// "db.cassandra.coordinator.dc" semantic conventions. It represents the
+	// data center of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'us-west-2'
+	DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+
+	// DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+	// "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+	// of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+	DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+	// DBCassandraIdempotenceKey is the attribute Key conforming to the
+	// "db.cassandra.idempotence" semantic conventions. It represents
+	// whether or not the query is idempotent.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+	// DBCassandraPageSizeKey is the attribute Key conforming to the
+	// "db.cassandra.page_size" semantic conventions. It represents the fetch
+	// size used for paging, i.e.
how many rows will be returned at once. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + + // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + + // DBCassandraTableKey is the attribute Key conforming to the + // "db.cassandra.table" semantic conventions. It represents the name of the + // primary Cassandra table that the operation is acting upon, including the + // keyspace name (if applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra + // rather than sql. It is not recommended to attempt any client-side + // parsing of `db.statement` just to get this property, but it should be + // set if it is provided by the library being instrumented. If the + // operation is acting upon an anonymous table, or more than one table, + // this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + + // DBConnectionStringKey is the attribute Key conforming to the + // "db.connection_string" semantic conventions. It represents the + // connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. It represents the unique + // Cosmos client instance id. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + + // DBCosmosDBConnectionModeKey is the attribute Key conforming to the + // "db.cosmosdb.connection_mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") + + // DBCosmosDBContainerKey is the attribute Key conforming to the + // "db.cosmosdb.container" semantic conventions. It represents the cosmos + // DB container name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'anystring' + DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") + + // DBCosmosDBOperationTypeKey is the attribute Key conforming to the + // "db.cosmosdb.operation_type" semantic conventions. It represents the + // cosmosDB Operation Type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") + + // DBCosmosDBRequestChargeKey is the attribute Key conforming to the + // "db.cosmosdb.request_charge" semantic conventions. 
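// Usage sketch (illustrative only) for the Cassandra attributes in this block;
// `span` is an assumed trace.Span and the values are samples:
//
//	span.SetAttributes(
//		DBSystemCassandra,
//		DBCassandraConsistencyLevelQuorum,
//		DBCassandraCoordinatorDC("us-west-2"),
//		DBCassandraIdempotence(true),
//		DBCassandraPageSize(5000),
//	)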
It represents the RU
+	// consumed for that operation
+	//
+	// Type: double
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 46.18, 1.0
+	DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+
+	// DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
+	// "db.cosmosdb.request_content_length" semantic conventions. It represents
+	// the request payload size in bytes
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
+
+	// DBCosmosDBStatusCodeKey is the attribute Key conforming to the
+	// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
+	// DB status code.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 200, 201
+	DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
+
+	// DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
+	// "db.cosmosdb.sub_status_code" semantic conventions. It represents the
+	// cosmos DB sub status code.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1000, 1002
+	DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
+
+	// DBElasticsearchClusterNameKey is the attribute Key conforming to the
+	// "db.elasticsearch.cluster.name" semantic conventions. It represents the
+	// identifier of an Elasticsearch cluster.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
+	DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
+
+	// DBElasticsearchNodeNameKey is the attribute Key conforming to the
+	// "db.elasticsearch.node.name" semantic conventions. It represents the
+	// human-readable identifier of the node/instance to which a
+	// request was routed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'instance-0000000001'
+	DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
+
+	// DBInstanceIDKey is the attribute Key conforming to the "db.instance.id"
+	// semantic conventions. It represents an identifier (address, unique name,
+	// or any other identifier) of the database instance that is executing
+	// queries or mutations on the current connection. This is useful in cases
+	// where the database is running in a clustered environment and the
+	// instrumentation is able to record the node executing the query. The
+	// client may obtain this value in databases like MySQL using queries like
+	// `select @@hostname`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'mysql-e26b99z.example.com'
+	DBInstanceIDKey = attribute.Key("db.instance.id")
+
+	// DBJDBCDriverClassnameKey is the attribute Key conforming to the
+	// "db.jdbc.driver_classname" semantic conventions. It represents the
+	// fully-qualified class name of the [Java Database Connectivity
+	// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+	// driver used to connect.
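// Usage sketch (illustrative only) for the Cosmos DB attributes above; `span`
// and the container name "orders" are assumptions:
//
//	span.SetAttributes(
//		DBSystemCosmosDB,
//		DBCosmosDBOperationTypeQuery,
//		DBCosmosDBContainer("orders"),
//		DBCosmosDBRequestCharge(46.18),
//		DBCosmosDBStatusCode(200),
//	)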
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'org.postgresql.Driver',
+	// 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+	DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+	// DBMongoDBCollectionKey is the attribute Key conforming to the
+	// "db.mongodb.collection" semantic conventions. It represents the MongoDB
+	// collection being accessed within the database stated in `db.name`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'customers', 'products'
+	DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
+
+	// DBMSSQLInstanceNameKey is the attribute Key conforming to the
+	// "db.mssql.instance_name" semantic conventions. It represents the
+	// Microsoft SQL Server [instance
+	// name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+	// connecting to. This name is used to determine the port of a named
+	// instance.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MSSQLSERVER'
+	// Note: If setting a `db.mssql.instance_name`, `server.port` is no longer
+	// required (but still recommended if non-standard).
+	DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+
+	// DBNameKey is the attribute Key conforming to the "db.name" semantic
+	// conventions. It represents the name of the database being accessed. For
+	// commands that switch the database, this should be set to the target
+	// database (even if the command fails).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'customers', 'main'
+	// Note: In some SQL databases, the database name to be used is called
+	// "schema name". In case there are multiple layers that could be
+	// considered for database name (e.g. Oracle instance name and schema
+	// name), the database name to be used is the more specific layer (e.g.
+	// Oracle schema name).
+	DBNameKey = attribute.Key("db.name")
+
+	// DBOperationKey is the attribute Key conforming to the "db.operation"
+	// semantic conventions. It represents the name of the operation being
+	// executed, e.g. the [MongoDB command
+	// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+	// such as `findAndModify`, or the SQL keyword.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'findAndModify', 'HMSET', 'SELECT'
+	// Note: When setting this to an SQL keyword, it is not recommended to
+	// attempt any client-side parsing of `db.statement` just to get this
+	// property, but it should be set if the operation name is provided by the
+	// library being instrumented. If the SQL statement has an ambiguous
+	// operation, or performs more than one operation, this value may be
+	// omitted.
+	DBOperationKey = attribute.Key("db.operation")
+
+	// DBRedisDBIndexKey is the attribute Key conforming to the
+	// "db.redis.database_index" semantic conventions. It represents the index
+	// of the database being accessed as used in the [`SELECT`
+	// command](https://redis.io/commands/select), provided as an integer. To
+	// be used instead of the generic `db.name` attribute.
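// Usage sketch (illustrative only) for the generic database attributes above;
// note that `db.operation` should come from the instrumented client, not from
// parsing `db.statement`. `span` is an assumed trace.Span:
//
//	span.SetAttributes(
//		DBSystemPostgreSQL,
//		DBName("customers"),
//		DBOperation("SELECT"),
//		DBStatement("SELECT * FROM wuser_table"),
//	)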
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") + + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") + + // DBStatementKey is the attribute Key conforming to the "db.statement" + // semantic conventions. It represents the database statement being + // executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + DBStatementKey = attribute.Key("db.statement") + + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents an identifier for the database management + // system (DBMS) product being used. See below for a list of well-known + // identifiers. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBSystemKey = attribute.Key("db.system") + + // DBUserKey is the attribute Key conforming to the "db.user" semantic + // conventions. It represents the username for accessing the database. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +var ( + // Gateway (HTTP) connections mode + DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") + // Direct connection + DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") +) + +var ( + // invalid + DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") + // create + DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") + // patch + DBCosmosDBOperationTypePatch = 
DBCosmosDBOperationTypeKey.String("Patch") + // read + DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") + // read_feed + DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") + // delete + DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") + // replace + DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") + // execute + DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") + // query + DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") + // head + DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") + // head_feed + DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") + // upsert + DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") + // batch + DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") + // query_plan + DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") + // execute_javascript + DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") +) + +var ( + // Some other SQL database. Fallback only. See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = 
DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") + // Cloud Spanner + DBSystemSpanner = DBSystemKey.String("spanner") + // Trino + DBSystemTrino = DBSystemKey.String("trino") +) + +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. +func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents the whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} + +// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. +func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} + +// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively. +func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// DBCassandraTable returns an attribute KeyValue conforming to the +// "db.cassandra.table" semantic conventions. It represents the name of the +// primary Cassandra table that the operation is acting upon, including the +// keyspace name (if applicable). +func DBCassandraTable(val string) attribute.KeyValue { + return DBCassandraTableKey.String(val) +} + +// DBConnectionString returns an attribute KeyValue conforming to the +// "db.connection_string" semantic conventions. It represents the connection +// string used to connect to the database. It is recommended to remove embedded +// credentials. 
+func DBConnectionString(val string) attribute.KeyValue {
+	return DBConnectionStringKey.String(val)
+}
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+	return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBContainer returns an attribute KeyValue conforming to the
+// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB
+// container name.
+func DBCosmosDBContainer(val string) attribute.KeyValue {
+	return DBCosmosDBContainerKey.String(val)
+}
+
+// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
+// "db.cosmosdb.request_charge" semantic conventions. It represents the RU
+// consumed for that operation
+func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
+	return DBCosmosDBRequestChargeKey.Float64(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+// represents the request payload size in bytes
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+	return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+	return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+	return DBCosmosDBSubStatusCodeKey.Int(val)
+}
+
+// DBElasticsearchClusterName returns an attribute KeyValue conforming to
+// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
+// identifier of an Elasticsearch cluster.
+func DBElasticsearchClusterName(val string) attribute.KeyValue {
+	return DBElasticsearchClusterNameKey.String(val)
+}
+
+// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "db.elasticsearch.node.name" semantic conventions. It represents the
+// human-readable identifier of the node/instance to which a
+// request was routed.
+func DBElasticsearchNodeName(val string) attribute.KeyValue {
+	return DBElasticsearchNodeNameKey.String(val)
+}
+
+// DBInstanceID returns an attribute KeyValue conforming to the
+// "db.instance.id" semantic conventions. It represents an identifier (address,
+// unique name, or any other identifier) of the database instance that is
+// executing queries or mutations on the current connection. This is useful in
+// cases where the database is running in a clustered environment and the
+// instrumentation is able to record the node executing the query. The client
+// may obtain this value in databases like MySQL using queries like `select
+// @@hostname`.
+func DBInstanceID(val string) attribute.KeyValue {
+	return DBInstanceIDKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
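// Usage sketch (illustrative only) for the Elasticsearch helpers above; `span`
// is an assumed trace.Span and the identifiers are the documented examples:
//
//	span.SetAttributes(
//		DBSystemElasticsearch,
//		DBElasticsearchClusterName("e9106fc68e3044f0b1475b04bf4ffd5f"),
//		DBElasticsearchNodeName("instance-0000000001"),
//	)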
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+	return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBMongoDBCollection returns an attribute KeyValue conforming to the
+// "db.mongodb.collection" semantic conventions. It represents the MongoDB
+// collection being accessed within the database stated in `db.name`.
+func DBMongoDBCollection(val string) attribute.KeyValue {
+	return DBMongoDBCollectionKey.String(val)
+}
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// connecting to. This name is used to determine the port of a named instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+	return DBMSSQLInstanceNameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this should be set to the target database
+// (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+	return DBNameKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+	return DBOperationKey.String(val)
+}
+
+// DBRedisDBIndex returns an attribute KeyValue conforming to the
+// "db.redis.database_index" semantic conventions. It represents the index of
+// the database being accessed as used in the [`SELECT`
+// command](https://redis.io/commands/select), provided as an integer. To be
+// used instead of the generic `db.name` attribute.
+func DBRedisDBIndex(val int) attribute.KeyValue {
+	return DBRedisDBIndexKey.Int(val)
+}
+
+// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
+// semantic conventions. It represents the name of the primary table that the
+// operation is acting upon, including the database name (if applicable).
+func DBSQLTable(val string) attribute.KeyValue {
+	return DBSQLTableKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+	return DBStatementKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+	return DBUserKey.String(val)
+}
+
+// Describes deprecated HTTP attributes.
+const (
+	// HTTPFlavorKey is the attribute Key conforming to the "http.flavor"
+	// semantic conventions.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Deprecated: use `network.protocol.name` instead.
+	HTTPFlavorKey = attribute.Key("http.flavor")
+
+	// HTTPMethodKey is the attribute Key conforming to the "http.method"
+	// semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'GET', 'POST', 'HEAD'
+	// Deprecated: use `http.request.method` instead.
+ HTTPMethodKey = attribute.Key("http.method") + + // HTTPRequestContentLengthKey is the attribute Key conforming to the + // "http.request_content_length" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 3495 + // Deprecated: use `http.request.header.content-length` instead. + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + + // HTTPResponseContentLengthKey is the attribute Key conforming to the + // "http.response_content_length" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 3495 + // Deprecated: use `http.response.header.content-length` instead. + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") + + // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'http', 'https' + // Deprecated: use `url.scheme` instead. + HTTPSchemeKey = attribute.Key("http.scheme") + + // HTTPStatusCodeKey is the attribute Key conforming to the + // "http.status_code" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 200 + // Deprecated: use `http.response.status_code` instead. + HTTPStatusCodeKey = attribute.Key("http.status_code") + + // HTTPTargetKey is the attribute Key conforming to the "http.target" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '/search?q=OpenTelemetry#SemConv' + // Deprecated: use `url.path` and `url.query` instead. + HTTPTargetKey = attribute.Key("http.target") + + // HTTPURLKey is the attribute Key conforming to the "http.url" semantic + // conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Deprecated: use `url.full` instead. + HTTPURLKey = attribute.Key("http.url") + + // HTTPUserAgentKey is the attribute Key conforming to the + // "http.user_agent" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU + // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1' + // Deprecated: use `user_agent.original` instead. + HTTPUserAgentKey = attribute.Key("http.user_agent") +) + +var ( + // HTTP/1.0 + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") + // HTTP/1.1 + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") + // HTTP/2 + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") + // HTTP/3 + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") + // SPDY protocol + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") + // QUIC protocol + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") +) + +// HTTPMethod returns an attribute KeyValue conforming to the "http.method" +// semantic conventions. +// +// Deprecated: use `http.request.method` instead. 
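// Migration sketch (illustrative only): the deprecated helpers in this section
// still compile, but new code should use the replacements named in the
// deprecation notices; `span` is an assumed trace.Span:
//
//	// before (deprecated):
//	span.SetAttributes(HTTPMethod("GET"), HTTPStatusCode(200))
//	// after:
//	span.SetAttributes(HTTPRequestMethodGet, HTTPResponseStatusCode(200))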
+func HTTPMethod(val string) attribute.KeyValue { + return HTTPMethodKey.String(val) +} + +// HTTPRequestContentLength returns an attribute KeyValue conforming to the +// "http.request_content_length" semantic conventions. +// +// Deprecated: use `http.request.header.content-length` instead. +func HTTPRequestContentLength(val int) attribute.KeyValue { + return HTTPRequestContentLengthKey.Int(val) +} + +// HTTPResponseContentLength returns an attribute KeyValue conforming to the +// "http.response_content_length" semantic conventions. +// +// Deprecated: use `http.response.header.content-length` instead. +func HTTPResponseContentLength(val int) attribute.KeyValue { + return HTTPResponseContentLengthKey.Int(val) +} + +// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" +// semantic conventions. +// +// Deprecated: use `url.scheme` instead. +func HTTPScheme(val string) attribute.KeyValue { + return HTTPSchemeKey.String(val) +} + +// HTTPStatusCode returns an attribute KeyValue conforming to the +// "http.status_code" semantic conventions. +// +// Deprecated: use `http.response.status_code` instead. +func HTTPStatusCode(val int) attribute.KeyValue { + return HTTPStatusCodeKey.Int(val) +} + +// HTTPTarget returns an attribute KeyValue conforming to the "http.target" +// semantic conventions. +// +// Deprecated: use `url.path` and `url.query` instead. +func HTTPTarget(val string) attribute.KeyValue { + return HTTPTargetKey.String(val) +} + +// HTTPURL returns an attribute KeyValue conforming to the "http.url" +// semantic conventions. +// +// Deprecated: use `url.full` instead. +func HTTPURL(val string) attribute.KeyValue { + return HTTPURLKey.String(val) +} + +// HTTPUserAgent returns an attribute KeyValue conforming to the +// "http.user_agent" semantic conventions. +// +// Deprecated: use `user_agent.original` instead. +func HTTPUserAgent(val string) attribute.KeyValue { + return HTTPUserAgentKey.String(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'example.com' + // Deprecated: use `server.address`. + NetHostNameKey = attribute.Key("net.host.name") + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 8080 + // Deprecated: use `server.port`. + NetHostPortKey = attribute.Key("net.host.port") + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'example.com' + // Deprecated: use `server.address` on client spans and `client.address` on + // server spans. + NetPeerNameKey = attribute.Key("net.peer.name") + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 8080 + // Deprecated: use `server.port` on client spans and `client.port` on + // server spans. + NetPeerPortKey = attribute.Key("net.peer.port") + + // NetProtocolNameKey is the attribute Key conforming to the + // "net.protocol.name" semantic conventions. 
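// Migration sketch (illustrative only) for the deprecated net.* attributes in
// this section; ServerAddress and ServerPort are assumed to be the replacement
// helpers defined elsewhere in this package, per the deprecation notices:
//
//	// before (deprecated, client span):
//	span.SetAttributes(NetPeerName("example.com"), NetPeerPort(8080))
//	// after:
//	span.SetAttributes(ServerAddress("example.com"), ServerPort(8080))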
+ // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'amqp', 'http', 'mqtt' + // Deprecated: use `network.protocol.name`. + NetProtocolNameKey = attribute.Key("net.protocol.name") + + // NetProtocolVersionKey is the attribute Key conforming to the + // "net.protocol.version" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '3.1.1' + // Deprecated: use `network.protocol.version`. + NetProtocolVersionKey = attribute.Key("net.protocol.version") + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: deprecated + // Deprecated: use `network.transport` and `network.type`. + NetSockFamilyKey = attribute.Key("net.sock.family") + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '/var/my.sock' + // Deprecated: use `network.local.address`. + NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 8080 + // Deprecated: use `network.local.port`. + NetSockHostPortKey = attribute.Key("net.sock.host.port") + + // NetSockPeerAddrKey is the attribute Key conforming to the + // "net.sock.peer.addr" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '192.168.0.1' + // Deprecated: use `network.peer.address`. + NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") + + // NetSockPeerNameKey is the attribute Key conforming to the + // "net.sock.peer.name" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '/var/my.sock' + // Deprecated: no replacement at this time. + NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + + // NetSockPeerPortKey is the attribute Key conforming to the + // "net.sock.peer.port" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 65531 + // Deprecated: use `network.peer.port`. + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + + // NetTransportKey is the attribute Key conforming to the "net.transport" + // semantic conventions. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: deprecated + // Deprecated: use `network.transport`. + NetTransportKey = attribute.Key("net.transport") +) + +var ( + // IPv4 address + // + // Deprecated: use `network.transport` and `network.type`. + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + // + // Deprecated: use `network.transport` and `network.type`. + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + // + // Deprecated: use `network.transport` and `network.type`. + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +var ( + // ip_tcp + // + // Deprecated: use `network.transport`. + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + // + // Deprecated: use `network.transport`. + NetTransportUDP = NetTransportKey.String("ip_udp") + // Named or anonymous pipe + // + // Deprecated: use `network.transport`. 
+ NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + // + // Deprecated: use `network.transport`. + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + // + // Deprecated: use `network.transport`. + NetTransportOther = NetTransportKey.String("other") +) + +// NetHostName returns an attribute KeyValue conforming to the +// "net.host.name" semantic conventions. +// +// Deprecated: use `server.address`. +func NetHostName(val string) attribute.KeyValue { + return NetHostNameKey.String(val) +} + +// NetHostPort returns an attribute KeyValue conforming to the +// "net.host.port" semantic conventions. +// +// Deprecated: use `server.port`. +func NetHostPort(val int) attribute.KeyValue { + return NetHostPortKey.Int(val) +} + +// NetPeerName returns an attribute KeyValue conforming to the +// "net.peer.name" semantic conventions. +// +// Deprecated: use `server.address` on client spans and `client.address` on +// server spans. +func NetPeerName(val string) attribute.KeyValue { + return NetPeerNameKey.String(val) +} + +// NetPeerPort returns an attribute KeyValue conforming to the +// "net.peer.port" semantic conventions. +// +// Deprecated: use `server.port` on client spans and `client.port` on server +// spans. +func NetPeerPort(val int) attribute.KeyValue { + return NetPeerPortKey.Int(val) +} + +// NetProtocolName returns an attribute KeyValue conforming to the +// "net.protocol.name" semantic conventions. +// +// Deprecated: use `network.protocol.name`. +func NetProtocolName(val string) attribute.KeyValue { + return NetProtocolNameKey.String(val) +} + +// NetProtocolVersion returns an attribute KeyValue conforming to the +// "net.protocol.version" semantic conventions. +// +// Deprecated: use `network.protocol.version`. +func NetProtocolVersion(val string) attribute.KeyValue { + return NetProtocolVersionKey.String(val) +} + +// NetSockHostAddr returns an attribute KeyValue conforming to the +// "net.sock.host.addr" semantic conventions. +// +// Deprecated: use `network.local.address`. +func NetSockHostAddr(val string) attribute.KeyValue { + return NetSockHostAddrKey.String(val) +} + +// NetSockHostPort returns an attribute KeyValue conforming to the +// "net.sock.host.port" semantic conventions. +// +// Deprecated: use `network.local.port`. +func NetSockHostPort(val int) attribute.KeyValue { + return NetSockHostPortKey.Int(val) +} + +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. +// +// Deprecated: use `network.peer.address`. +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) +} + +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. +// +// Deprecated: no replacement at this time. +func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) +} + +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. +// +// Deprecated: use `network.peer.port`. +func NetSockPeerPort(val int) attribute.KeyValue { + return NetSockPeerPortKey.Int(val) +} + +// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. 
packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API doesn't expose a clear notion of
+// client and server.
+const (
+	// DestinationAddressKey is the attribute Key conforming to the
+	// "destination.address" semantic conventions. It represents the
+	// destination address - domain name if available without reverse DNS
+	// lookup; otherwise, IP address or Unix domain socket name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock'
+	// Note: When observed from the source side, and when communicating through
+	// an intermediary, `destination.address` SHOULD represent the destination
+	// address behind any intermediaries, for example proxies, if it's
+	// available.
+	DestinationAddressKey = attribute.Key("destination.address")
+
+	// DestinationPortKey is the attribute Key conforming to the
+	// "destination.port" semantic conventions. It represents the destination
+	// port number
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3389, 2888
+	DestinationPortKey = attribute.Key("destination.port")
+)
+
+// DestinationAddress returns an attribute KeyValue conforming to the
+// "destination.address" semantic conventions. It represents the destination
+// address - domain name if available without reverse DNS lookup; otherwise, IP
+// address or Unix domain socket name.
+func DestinationAddress(val string) attribute.KeyValue {
+	return DestinationAddressKey.String(val)
+}
+
+// DestinationPort returns an attribute KeyValue conforming to the
+// "destination.port" semantic conventions. It represents the destination port
+// number
+func DestinationPort(val int) attribute.KeyValue {
+	return DestinationPortKey.Int(val)
+}
+
+// These attributes may be used for any disk related operation.
+const (
+	// DiskIoDirectionKey is the attribute Key conforming to the
+	// "disk.io.direction" semantic conventions. It represents the disk IO
+	// operation direction.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'read'
+	DiskIoDirectionKey = attribute.Key("disk.io.direction")
+)
+
+var (
+	// read
+	DiskIoDirectionRead = DiskIoDirectionKey.String("read")
+	// write
+	DiskIoDirectionWrite = DiskIoDirectionKey.String("write")
+)
+
+// The shared attributes used to report an error.
+const (
+	// ErrorTypeKey is the attribute Key conforming to the "error.type"
+	// semantic conventions. It describes a class of error the
+	// operation ended with.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'timeout', 'java.net.UnknownHostException',
+	// 'server_certificate_invalid', '500'
+	// Note: The `error.type` SHOULD be predictable and SHOULD have low
+	// cardinality.
+	// Instrumentations SHOULD document the list of errors they report.
+	//
+	// The cardinality of `error.type` within one instrumentation library
+	// SHOULD be low.
+	// Telemetry consumers that aggregate data from multiple instrumentation
+	// libraries and applications
+	// should be prepared for `error.type` to have high cardinality at query
+	// time when no
+	// additional filters are applied.
+	//
+	// If the operation has completed successfully, instrumentations SHOULD NOT
+	// set `error.type`.
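// Usage sketch (illustrative only) for `error.type`, following the note above:
// pair a domain-specific attribute with `error.type`, and fall back to
// `_OTHER` when nothing more specific is known. `span` is an assumed
// trace.Span:
//
//	span.SetAttributes(HTTPResponseStatusCode(500), ErrorTypeKey.String("500"))
//	// or, with no better identifier:
//	span.SetAttributes(ErrorTypeOther)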
+	//
+	// If a specific domain defines its own set of error identifiers (such as
+	// HTTP or gRPC status codes),
+	// it's RECOMMENDED to:
+	//
+	// * Use a domain-specific attribute
+	// * Set `error.type` to capture all errors, regardless of whether they are
+	// defined within the domain-specific set or not.
+	ErrorTypeKey = attribute.Key("error.type")
+)
+
+var (
+	// A fallback error value to be used when the instrumentation doesn't define a custom value
+	ErrorTypeOther = ErrorTypeKey.String("_OTHER")
+)
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+	// ExceptionEscapedKey is the attribute Key conforming to the
+	// "exception.escaped" semantic conventions. It SHOULD be
+	// set to true if the exception event is recorded at a point where it is
+	// known that the exception is escaping the scope of the span.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: An exception is considered to have escaped (or left) the scope of
+	// a span,
+	// if that span is ended while the exception is still logically "in
+	// flight".
+	// This may be actually "in flight" in some languages (e.g. if the
+	// exception
+	// is passed to a Context manager's `__exit__` method in Python) but will
+	// usually be caught at the point of recording the exception in most
+	// languages.
+	//
+	// It is usually not possible to determine at the point where an exception
+	// is thrown
+	// whether it will escape the scope of a span.
+	// However, it is trivial to know that an exception
+	// will escape, if one checks for an active exception just before ending
+	// the span,
+	// as done in the [example for recording span
+	// exceptions](#recording-an-exception).
+	//
+	// It follows that an exception may still escape the scope of the span
+	// even if the `exception.escaped` attribute was not set or set to false,
+	// since the event might have been recorded at a time where it was not
+	// clear whether the exception will escape.
+	ExceptionEscapedKey = attribute.Key("exception.escaped")
+
+	// ExceptionMessageKey is the attribute Key conforming to the
+	// "exception.message" semantic conventions. It represents the exception
+	// message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Division by zero', "Can't convert 'int' object to str
+	// implicitly"
+	ExceptionMessageKey = attribute.Key("exception.message")
+
+	// ExceptionStacktraceKey is the attribute Key conforming to the
+	// "exception.stacktrace" semantic conventions. It represents a stacktrace
+	// as a string in the natural representation for the language runtime. The
+	// representation is to be determined and documented by each language SIG.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+	// exception\\n at '
+	// 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+	// 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+	// 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+	ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+
+	// ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+	// semantic conventions. It represents the type of the exception (its
+	// fully-qualified class name, if applicable).
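// Usage sketch (illustrative only): recording an exception event with the
// attributes above, via the helper functions defined below; `span` is an
// assumed trace.Span and trace.WithAttributes comes from the OTel trace API:
//
//	span.AddEvent("exception", trace.WithAttributes(
//		ExceptionType("OSError"),
//		ExceptionMessage("Division by zero"),
//		ExceptionEscaped(true),
//	))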
The dynamic type of the
+	// exception should be preferred over the static type in languages that
+	// support it.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'java.net.ConnectException', 'OSError'
+	ExceptionTypeKey = attribute.Key("exception.type")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It SHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+	return ExceptionEscapedKey.Bool(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+	return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+	return ExceptionStacktraceKey.String(val)
+}
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+	return ExceptionTypeKey.String(val)
+}
+
+// Semantic convention attributes in the HTTP namespace.
+const (
+	// HTTPRequestBodySizeKey is the attribute Key conforming to the
+	// "http.request.body.size" semantic conventions. It represents the size of
+	// the request payload body in bytes. This is the number of bytes
+	// transferred excluding headers and is often, but not always, present as
+	// the
+	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+	// header. For requests using transport encoding, this should be the
+	// compressed size.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3495
+	HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+	// HTTPRequestMethodKey is the attribute Key conforming to the
+	// "http.request.method" semantic conventions. It represents the HTTP
+	// request method.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'GET', 'POST', 'HEAD'
+	// Note: HTTP request method value SHOULD be "known" to the
+	// instrumentation.
+	// By default, this convention defines "known" methods as the ones listed
+	// in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
+	// and the PATCH method defined in
+	// [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
+	//
+	// If the HTTP request method is not known to instrumentation, it MUST set
+	// the `http.request.method` attribute to `_OTHER`.
+	//
+	// If the HTTP instrumentation could end up converting valid HTTP request
+	// methods to `_OTHER`, then it MUST provide a way to override
+	// the list of known HTTP methods.
If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated + // list of case-sensitive known HTTP methods + // (this list MUST be a full override of the default known method, it is + // not a list of known methods in addition to the defaults). + // + // HTTP method names are case-sensitive and `http.request.method` attribute + // value MUST match a known HTTP method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods + // to be case insensitive, SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so, MUST also set + // `http.request.method_original` to the original value. + HTTPRequestMethodKey = attribute.Key("http.request.method") + + // HTTPRequestMethodOriginalKey is the attribute Key conforming to the + // "http.request.method_original" semantic conventions. It represents the + // original HTTP method sent by the client in the request line. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'GeT', 'ACL', 'foo' + HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") + + // HTTPRequestResendCountKey is the attribute Key conforming to the + // "http.request.resend_count" semantic conventions. It represents the + // ordinal number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") + + // HTTPResponseBodySizeKey is the attribute Key conforming to the + // "http.response.body.size" semantic conventions. It represents the size + // of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3495 + HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") + + // HTTPResponseStatusCodeKey is the attribute Key conforming to the + // "http.response.status_code" semantic conventions. It represents the + // [HTTP response status + // code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 200 + HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" + // semantic conventions. It represents the matched route, that is, the path + // template in the format used by the respective server framework. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: MUST NOT be populated when this is not supported by the HTTP + // server framework as the route attribute should have low-cardinality and + // the URI path can NOT substitute it. 
+ // SHOULD include the [application + // root](/docs/http/http-spans.md#http-server-definitions) if there is one. + HTTPRouteKey = attribute.Key("http.route") +) + +var ( + // CONNECT method + HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") + // DELETE method + HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") + // GET method + HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") + // HEAD method + HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") + // OPTIONS method + HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") + // PATCH method + HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") + // POST method + HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") + // PUT method + HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") + // TRACE method + HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") + // Any HTTP method that the instrumentation has no prior knowledge of + HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") +) + +// HTTPRequestBodySize returns an attribute KeyValue conforming to the +// "http.request.body.size" semantic conventions. It represents the size of the +// request payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestBodySize(val int) attribute.KeyValue { + return HTTPRequestBodySizeKey.Int(val) +} + +// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the +// "http.request.method_original" semantic conventions. It represents the +// original HTTP method sent by the client in the request line. +func HTTPRequestMethodOriginal(val string) attribute.KeyValue { + return HTTPRequestMethodOriginalKey.String(val) +} + +// HTTPRequestResendCount returns an attribute KeyValue conforming to the +// "http.request.resend_count" semantic conventions. It represents the ordinal +// number of request resending attempt (for any reason, including redirects). +func HTTPRequestResendCount(val int) attribute.KeyValue { + return HTTPRequestResendCountKey.Int(val) +} + +// HTTPResponseBodySize returns an attribute KeyValue conforming to the +// "http.response.body.size" semantic conventions. It represents the size of +// the response payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseBodySize(val int) attribute.KeyValue { + return HTTPResponseBodySizeKey.Int(val) +} + +// HTTPResponseStatusCode returns an attribute KeyValue conforming to the +// "http.response.status_code" semantic conventions. It represents the [HTTP +// response status code](https://tools.ietf.org/html/rfc7231#section-6). +func HTTPResponseStatusCode(val int) attribute.KeyValue { + return HTTPResponseStatusCodeKey.Int(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route, that is, the path +// template in the format used by the respective server framework. 
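The `_OTHER` note above is prescriptive enough to sketch. Assuming the same semconv import as before and an abbreviated known-method table, an instrumentation might apply it like this:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// knownMethods maps case-sensitive method names to their enum values.
var knownMethods = map[string]attribute.KeyValue{
	"GET":  semconv.HTTPRequestMethodGet,
	"POST": semconv.HTTPRequestMethodPost,
	"HEAD": semconv.HTTPRequestMethodHead,
	// (the remaining RFC 9110 methods plus PATCH would be listed here)
}

// methodAttrs returns `_OTHER` plus http.request.method_original for any
// method this instrumentation does not recognize.
func methodAttrs(raw string) []attribute.KeyValue {
	if kv, ok := knownMethods[raw]; ok {
		return []attribute.KeyValue{kv}
	}
	return []attribute.KeyValue{
		semconv.HTTPRequestMethodOther,
		semconv.HTTPRequestMethodOriginal(raw),
	}
}

func main() {
	fmt.Println(methodAttrs("GET"))
	fmt.Println(methodAttrs("GeT")) // unknown spelling -> _OTHER plus the original value
}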
+func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Attributes describing telemetry around messaging systems and messaging +// activities. +const ( + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") + + // MessagingClientIDKey is the attribute Key conforming to the + // "messaging.client_id" semantic conventions. It represents a unique + // identifier for the client that consumes or produces a message. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'client-5', 'myhost@8742@s8083jm' + MessagingClientIDKey = attribute.Key("messaging.client_id") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") + + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. 
+ // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationPublishAnonymousKey is the attribute Key conforming + // to the "messaging.destination_publish.anonymous" semantic conventions. + // It represents a boolean that is true if the publish message destination + // is anonymous (could be unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous") + + // MessagingDestinationPublishNameKey is the attribute Key conforming to + // the "messaging.destination_publish.name" semantic conventions. It + // represents the name of the original destination the message was + // published to + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: The name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker doesn't have such notion, the original destination name + // SHOULD uniquely identify the broker. + MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name") + + // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. + // It represents the ordering key for a given message. If the attribute is + // not present, the message does not have an ordering key. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ordering_key' + MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") + + // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the + // "messaging.kafka.consumer.group" semantic conventions. It represents the + // name of the Kafka Consumer Group that is handling the message. Only + // applies to consumers, not producers. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") + + // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to + // the "messaging.kafka.destination.partition" semantic conventions. It + // represents the partition the message is sent to. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2 + MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") + + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the + // message keys in Kafka are used for grouping alike messages to ensure + // they're processed on the same partition. They differ from + // `messaging.message.id` in that they're not unique. If the key is `null`, + // the attribute MUST NOT be set. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myKey' + // Note: If the key type is not string, it's string representation has to + // be supplied for the attribute. If the key has no unambiguous, canonical + // string form, don't include its value. 
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the + // "messaging.kafka.message.offset" semantic conventions. It represents the + // offset of a record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions. It represents + // a boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") + + // MessagingMessageBodySizeKey is the attribute Key conforming to the + // "messaging.message.body.size" semantic conventions. It represents the + // size of the message body in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1439 + // Note: This can refer to both the compressed or uncompressed body size. + // If both sizes are known, the uncompressed + // body size should be used. + MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the conversation ID identifying the conversation to which the message + // belongs, represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the + // "messaging.message.envelope.size" semantic conventions. It represents + // the size of the message body and metadata in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2738 + // Note: This can refer to both the compressed or uncompressed size. If + // both sizes are known, the uncompressed + // size should be used. + MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") + + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingOperationKey is the attribute Key conforming to the + // "messaging.operation" semantic conventions. It represents a string + // identifying the kind of messaging operation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationKey = attribute.Key("messaging.operation") + + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. 
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myKey'
+ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+
+ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_group" semantic conventions. It represents
+ // the name of the RocketMQ producer/consumer group that is handling the
+ // message. The client type is identified by the SpanKind.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.consumption_model" semantic conventions. It
+ // represents the model of message consumption. This only applies to
+ // consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for delay message, which
+ // determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds that
+ // the delay message is expected to be delivered to consumer.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the message group; it is essential for FIFO messages. Messages that
+ // belong to the same message group are always processed one by one within
+ // the same consumer group.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of message, another way to mark message besides message id.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of message besides topic.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents + // the type of message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketmqNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents an identifier for + // the messaging system being used. See below for a list of well-known + // identifiers. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingSystemKey = attribute.Key("messaging.system") +) + +var ( + // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created + MessagingOperationPublish = MessagingOperationKey.String("publish") + // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios + MessagingOperationCreate = MessagingOperationKey.String("create") + // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages + MessagingOperationReceive = MessagingOperationKey.String("receive") + // One or more messages are passed to a consumer. 
This operation refers to push-based scenarios, where consumer register callbacks which get called by messaging SDKs + MessagingOperationDeliver = MessagingOperationKey.String("deliver") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +var ( + // Apache ActiveMQ + MessagingSystemActivemq = MessagingSystemKey.String("activemq") + // Amazon Simple Queue Service (SQS) + MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") + // Azure Event Grid + MessagingSystemAzureEventgrid = MessagingSystemKey.String("azure_eventgrid") + // Azure Event Hubs + MessagingSystemAzureEventhubs = MessagingSystemKey.String("azure_eventhubs") + // Azure Service Bus + MessagingSystemAzureServicebus = MessagingSystemKey.String("azure_servicebus") + // Google Cloud Pub/Sub + MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") + // Java Message Service + MessagingSystemJms = MessagingSystemKey.String("jms") + // Apache Kafka + MessagingSystemKafka = MessagingSystemKey.String("kafka") + // RabbitMQ + MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") + // Apache RocketMQ + MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") +) + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client_id" semantic conventions. It represents a unique +// identifier for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. 
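A hedged sketch of how the messaging values above combine on a producer-side publish span; the queue name, message ID, and body size are illustrative, and the semconv import is the same assumption as earlier:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	// Attributes a producer-side "publish" span might carry.
	attrs := []attribute.KeyValue{
		semconv.MessagingSystemRabbitmq,
		semconv.MessagingOperationPublish,
		semconv.MessagingDestinationName("MyQueue"),
		semconv.MessagingMessageID("452a7c7c7c7048c2f887f61572b18fc2"),
		semconv.MessagingMessageBodySize(1439),
	}
	fmt.Println(attrs)
}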
It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationPublishAnonymous returns an attribute KeyValue +// conforming to the "messaging.destination_publish.anonymous" semantic +// conventions. It represents a boolean that is true if the publish message +// destination is anonymous (could be unnamed or have auto-generated name). +func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationPublishAnonymousKey.Bool(val) +} + +// MessagingDestinationPublishName returns an attribute KeyValue conforming +// to the "messaging.destination_publish.name" semantic conventions. It +// represents the name of the original destination the message was published to +func MessagingDestinationPublishName(val string) attribute.KeyValue { + return MessagingDestinationPublishNameKey.String(val) +} + +// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic +// conventions. It represents the ordering key for a given message. If the +// attribute is not present, the message does not have an ordering key. +func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue { + return MessagingGCPPubsubMessageOrderingKeyKey.String(val) +} + +// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to +// the "messaging.kafka.consumer.group" semantic conventions. It represents the +// name of the Kafka Consumer Group that is handling the message. Only applies +// to consumers, not producers. +func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { + return MessagingKafkaConsumerGroupKey.String(val) +} + +// MessagingKafkaDestinationPartition returns an attribute KeyValue +// conforming to the "messaging.kafka.destination.partition" semantic +// conventions. It represents the partition the message is sent to. +func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { + return MessagingKafkaDestinationPartitionKey.Int(val) +} + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the +// message keys in Kafka are used for grouping alike messages to ensure they're +// processed on the same partition. They differ from `messaging.message.id` in +// that they're not unique. If the key is `null`, the attribute MUST NOT be +// set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. It represents the +// offset of a record in the corresponding Kafka partition. 
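Similarly, a Kafka consumer instrumentation might attach the messaging.kafka.* helpers defined in this file; the values below are illustrative only:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	// Attributes for a span processing one Kafka record.
	attrs := []attribute.KeyValue{
		semconv.MessagingSystemKafka,
		semconv.MessagingKafkaConsumerGroup("my-group"),
		semconv.MessagingKafkaDestinationPartition(2),
		semconv.MessagingKafkaMessageOffset(42),
		semconv.MessagingKafkaMessageTombstone(false),
	}
	fmt.Println(attrs)
}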
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// MessagingMessageBodySize returns an attribute KeyValue conforming to the +// "messaging.message.body.size" semantic conventions. It represents the size +// of the message body in bytes. +func MessagingMessageBodySize(val int) attribute.KeyValue { + return MessagingMessageBodySizeKey.Int(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the conversation ID identifying the conversation to which the +// message belongs, represented as a string. Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to +// the "messaging.message.envelope.size" semantic conventions. It represents +// the size of the message body and metadata in bytes. +func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { + return MessagingMessageEnvelopeSizeKey.Int(val) +} + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitmqDestinationRoutingKeyKey.String(val) +} + +// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.client_group" semantic conventions. It represents +// the name of the RocketMQ producer/consumer group that is handling the +// message. The client type is identified by the SpanKind. +func MessagingRocketmqClientGroup(val string) attribute.KeyValue { + return MessagingRocketmqClientGroupKey.String(val) +} + +// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. It represents the delay time level for delay message, which +// determines the message delay time. +func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. It represents the timestamp in milliseconds that the delay +// message is expected to be delivered to consumer. 
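And a RocketMQ sketch combining the rocketmq-specific helpers with the shared messaging.system value; the group names are placeholders taken from the examples above:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.MessagingSystemRocketmq,
		semconv.MessagingRocketmqClientGroup("myConsumerGroup"),
		semconv.MessagingRocketmqMessageTypeFifo,
		semconv.MessagingRocketmqMessageGroup("myMessageGroup"),
	}
	fmt.Println(attrs)
}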
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group; it is essential for FIFO messages. Messages that belong
+// to the same message group are always processed one by one within the same
+// consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of message, another way to mark message besides message id.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources, resources in different namespaces are
+// individual.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+ return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+ // NetworkCarrierIccKey is the attribute Key conforming to the
+ // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+ // alpha-2 2-character country code associated with the mobile carrier
+ // network.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'DE'
+ NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
+
+ // NetworkCarrierMccKey is the attribute Key conforming to the
+ // "network.carrier.mcc" semantic conventions. It represents the mobile
+ // carrier country code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '310'
+ NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
+
+ // NetworkCarrierMncKey is the attribute Key conforming to the
+ // "network.carrier.mnc" semantic conventions. It represents the mobile
+ // carrier network code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '001'
+ NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
+
+ // NetworkCarrierNameKey is the attribute Key conforming to the
+ // "network.carrier.name" semantic conventions. It represents the name of
+ // the mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'sprint'
+ NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+ // NetworkConnectionSubtypeKey is the attribute Key conforming to the
+ // "network.connection.subtype" semantic conventions. It describes more
+ // details regarding the connection.type. It may be the type of cell
+ // technology connection, but it could be used for describing details
+ // about a wifi connection.
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'LTE' + NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") + + // NetworkConnectionTypeKey is the attribute Key conforming to the + // "network.connection.type" semantic conventions. It represents the + // internet connection type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'wifi' + NetworkConnectionTypeKey = attribute.Key("network.connection.type") + + // NetworkIoDirectionKey is the attribute Key conforming to the + // "network.io.direction" semantic conventions. It represents the network + // IO operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'transmit' + NetworkIoDirectionKey = attribute.Key("network.io.direction") + + // NetworkLocalAddressKey is the attribute Key conforming to the + // "network.local.address" semantic conventions. It represents the local + // address of the network connection - IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + NetworkLocalAddressKey = attribute.Key("network.local.address") + + // NetworkLocalPortKey is the attribute Key conforming to the + // "network.local.port" semantic conventions. It represents the local port + // number of the network connection. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + NetworkLocalPortKey = attribute.Key("network.local.port") + + // NetworkPeerAddressKey is the attribute Key conforming to the + // "network.peer.address" semantic conventions. It represents the peer + // address of the network connection - IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + NetworkPeerAddressKey = attribute.Key("network.peer.address") + + // NetworkPeerPortKey is the attribute Key conforming to the + // "network.peer.port" semantic conventions. It represents the peer port + // number of the network connection. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + NetworkPeerPortKey = attribute.Key("network.peer.port") + + // NetworkProtocolNameKey is the attribute Key conforming to the + // "network.protocol.name" semantic conventions. It represents the [OSI + // application layer](https://osi-model.com/application-layer/) or non-OSI + // equivalent. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + // Note: The value SHOULD be normalized to lowercase. + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the + // version of the protocol specified in `network.protocol.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3.1.1' + // Note: `network.protocol.version` refers to the version of the protocol + // used and might be different from the protocol client's version. If the + // HTTP client has a version of `0.27.2`, but sends HTTP version `1.1`, + // this attribute should be set to `1.1`. 
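The protocol-version note just above is easy to get wrong; a sketch of the intended outcome for an HTTP/1.1 exchange, under the same assumed semconv import:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	// An HTTP client library at version 0.27.2 that speaks HTTP/1.1 reports
	// the wire protocol version, not its own release version.
	attrs := []attribute.KeyValue{
		semconv.NetworkProtocolName("http"), // normalized to lowercase
		semconv.NetworkProtocolVersion("1.1"),
	}
	fmt.Println(attrs)
}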
+ NetworkProtocolVersionKey = attribute.Key("network.protocol.version") + + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the [OSI + // transport layer](https://osi-model.com/transport-layer/) or + // [inter-process communication + // method](https://wikipedia.org/wiki/Inter-process_communication). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tcp', 'udp' + // Note: The value SHOULD be normalized to lowercase. + // + // Consider always setting the transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port + // 12345. + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" + // semantic conventions. It represents the [OSI network + // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ipv4', 'ipv6' + // Note: The value SHOULD be normalized to lowercase. + NetworkTypeKey = attribute.Key("network.type") +) + +var ( + // GPRS + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") + // LTE + NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") + // EHRPD + NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") + // GSM + NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") +) + +var ( + // wifi + NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") + // wired + NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") + // cell + NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") + // unavailable + NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") + // unknown + NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") +) + +var ( + // transmit + NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") + // receive + NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") +) + +var ( + // TCP + NetworkTransportTCP = NetworkTransportKey.String("tcp") + // UDP + NetworkTransportUDP = NetworkTransportKey.String("udp") + // Named or anonymous pipe + NetworkTransportPipe = NetworkTransportKey.String("pipe") + // Unix domain socket + NetworkTransportUnix = NetworkTransportKey.String("unix") +) + +var ( + // IPv4 + NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") + // IPv6 + NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") +) + +// NetworkCarrierIcc returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierIcc(val string) attribute.KeyValue { + return NetworkCarrierIccKey.String(val) +} + +// NetworkCarrierMcc returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. +func NetworkCarrierMcc(val string) attribute.KeyValue { + return NetworkCarrierMccKey.String(val) +} + +// NetworkCarrierMnc returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMnc(val string) attribute.KeyValue { + return NetworkCarrierMncKey.String(val) +} + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkLocalAddress returns an attribute KeyValue conforming to the +// "network.local.address" semantic conventions. It represents the local +// address of the network connection - IP address or Unix domain socket name. 
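A connection-level sketch tying the transport note above together, since a port number is ambiguous without knowing the transport; the addresses are illustrative:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	// Set the transport alongside any port number.
	attrs := []attribute.KeyValue{
		semconv.NetworkTransportTCP,
		semconv.NetworkTypeIpv4,
		semconv.NetworkPeerAddress("10.1.2.80"),
		semconv.NetworkPeerPort(65123),
	}
	fmt.Println(attrs)
}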
+func NetworkLocalAddress(val string) attribute.KeyValue { + return NetworkLocalAddressKey.String(val) +} + +// NetworkLocalPort returns an attribute KeyValue conforming to the +// "network.local.port" semantic conventions. It represents the local port +// number of the network connection. +func NetworkLocalPort(val int) attribute.KeyValue { + return NetworkLocalPortKey.Int(val) +} + +// NetworkPeerAddress returns an attribute KeyValue conforming to the +// "network.peer.address" semantic conventions. It represents the peer address +// of the network connection - IP address or Unix domain socket name. +func NetworkPeerAddress(val string) attribute.KeyValue { + return NetworkPeerAddressKey.String(val) +} + +// NetworkPeerPort returns an attribute KeyValue conforming to the +// "network.peer.port" semantic conventions. It represents the peer port number +// of the network connection. +func NetworkPeerPort(val int) attribute.KeyValue { + return NetworkPeerPortKey.Int(val) +} + +// NetworkProtocolName returns an attribute KeyValue conforming to the +// "network.protocol.name" semantic conventions. It represents the [OSI +// application layer](https://osi-model.com/application-layer/) or non-OSI +// equivalent. +func NetworkProtocolName(val string) attribute.KeyValue { + return NetworkProtocolNameKey.String(val) +} + +// NetworkProtocolVersion returns an attribute KeyValue conforming to the +// "network.protocol.version" semantic conventions. It represents the version +// of the protocol specified in `network.protocol.name`. +func NetworkProtocolVersion(val string) attribute.KeyValue { + return NetworkProtocolVersionKey.String(val) +} + +// Attributes for remote procedure calls. +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes](https://connect.build/docs/protocol/#error-codes) of the + // Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") + + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. 
Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // doesn't specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers. 
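A sketch of the logical-name guidance above for rpc.method and rpc.service; with these values the span name would typically be myservice.EchoService/exampleMethod:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.RPCSystemGRPC,
		// Logical names from the RPC interface, not the implementing
		// class or function.
		semconv.RPCService("myservice.EchoService"),
		semconv.RPCMethod("exampleMethod"),
		semconv.RPCGRPCStatusCodeOk,
	}
	fmt.Println(attrs)
}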
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCSystemKey = attribute.Key("rpc.system") +) + +var ( + // cancelled + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + RPCSystemConnectRPC = 
RPCSystemKey.String("connect_rpc") +) + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// doesn't specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// These attributes may be used to describe the server in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. It represents the server domain name if available + // without reverse DNS lookup; otherwise, IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.address` SHOULD represent the server address + // behind any intermediaries, for example proxies, if it's available. 
+ ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" + // semantic conventions. It represents the server port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.port` SHOULD represent the server port behind + // any intermediaries, for example proxies, if it's available. + ServerPortKey = attribute.Key("server.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the +// "server.address" semantic conventions. It represents the server domain name +// if available without reverse DNS lookup; otherwise, IP address or Unix +// domain socket name. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the server port number. +func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// These attributes may be used to describe the sender of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the destination side, and when communicating + // through an intermediary, `source.address` SHOULD represent the source + // address behind any intermediaries, for example proxies, if it's + // available. + SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" + // semantic conventions. It represents the source port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceAddress returns an attribute KeyValue conforming to the +// "source.address" semantic conventions. It represents the source address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number +func SourcePort(val int) attribute.KeyValue { + return SourcePortKey.Int(val) +} + +// Semantic convention attributes in the TLS namespace. +const ( + // TLSCipherKey is the attribute Key conforming to the "tls.cipher" + // semantic conventions. 
It represents the string indicating the + // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) + // used during the current connection. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', + // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' + // Note: The values allowed for `tls.cipher` MUST be one of the + // `Descriptions` of the [registered TLS Cipher + // Suits](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4). + TLSCipherKey = attribute.Key("tls.cipher") + + // TLSClientCertificateKey is the attribute Key conforming to the + // "tls.client.certificate" semantic conventions. It represents the + // pEM-encoded stand-alone certificate offered by the client. This is + // usually mutually-exclusive of `client.certificate_chain` since this + // value also exists in that list. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...' + TLSClientCertificateKey = attribute.Key("tls.client.certificate") + + // TLSClientCertificateChainKey is the attribute Key conforming to the + // "tls.client.certificate_chain" semantic conventions. It represents the + // array of PEM-encoded certificates that make up the certificate chain + // offered by the client. This is usually mutually-exclusive of + // `client.certificate` since that value should be the first certificate in + // the chain. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") + + // TLSClientHashMd5Key is the attribute Key conforming to the + // "tls.client.hash.md5" semantic conventions. It represents the + // certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") + + // TLSClientHashSha1Key is the attribute Key conforming to the + // "tls.client.hash.sha1" semantic conventions. It represents the + // certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") + + // TLSClientHashSha256Key is the attribute Key conforming to the + // "tls.client.hash.sha256" semantic conventions. It represents the + // certificate fingerprint using the SHA256 digest of DER-encoded version + // of certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") + + // TLSClientIssuerKey is the attribute Key conforming to the + // "tls.client.issuer" semantic conventions. 
It represents the
+ // distinguished name of
+ // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+ // of the issuer of the x.509 certificate presented by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+ // DC=com'
+ TLSClientIssuerKey = attribute.Key("tls.client.issuer")
+
+ // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
+ // semantic conventions. It represents a hash that identifies clients based
+ // on how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'd4e5b18d6b55c71272893221c96ba240'
+ TLSClientJa3Key = attribute.Key("tls.client.ja3")
+
+ // TLSClientNotAfterKey is the attribute Key conforming to the
+ // "tls.client.not_after" semantic conventions. It represents the date/time
+ // indicating when the client certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021-01-01T00:00:00.000Z'
+ TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
+
+ // TLSClientNotBeforeKey is the attribute Key conforming to the
+ // "tls.client.not_before" semantic conventions. It represents the
+ // date/time indicating when the client certificate is first considered
+ // valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
+
+ // TLSClientServerNameKey is the attribute Key conforming to the
+ // "tls.client.server_name" semantic conventions. It represents the server
+ // name (also called an SNI) telling the server which hostname the client
+ // is attempting to connect to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry.io'
+ TLSClientServerNameKey = attribute.Key("tls.client.server_name")
+
+ // TLSClientSubjectKey is the attribute Key conforming to the
+ // "tls.client.subject" semantic conventions. It represents the
+ // distinguished name of subject of the x.509 certificate presented by the
+ // client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
+ TLSClientSubjectKey = attribute.Key("tls.client.subject")
+
+ // TLSClientSupportedCiphersKey is the attribute Key conforming to the
+ // "tls.client.supported_ciphers" semantic conventions. It represents the
+ // array of ciphers offered by the client during the client hello.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
+ TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
+
+ // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
+ // conventions. It represents the string indicating the curve used for the
+ // given cipher, when applicable.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'secp256r1'
+ TLSCurveKey = attribute.Key("tls.curve")
+
+ // TLSEstablishedKey is the attribute Key conforming to the
+ // "tls.established" semantic conventions.
It represents the boolean flag + // indicating if the TLS negotiation was successful and transitioned to an + // encrypted tunnel. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Examples: True + TLSEstablishedKey = attribute.Key("tls.established") + + // TLSNextProtocolKey is the attribute Key conforming to the + // "tls.next_protocol" semantic conventions. It represents the string + // indicating the protocol being tunneled. Per the values in the [IANA + // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), + // this string should be lower case. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'http/1.1' + TLSNextProtocolKey = attribute.Key("tls.next_protocol") + + // TLSProtocolNameKey is the attribute Key conforming to the + // "tls.protocol.name" semantic conventions. It represents the normalized + // lowercase protocol name parsed from original string of the negotiated + // [SSL/TLS protocol + // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + TLSProtocolNameKey = attribute.Key("tls.protocol.name") + + // TLSProtocolVersionKey is the attribute Key conforming to the + // "tls.protocol.version" semantic conventions. It represents the numeric + // part of the version parsed from the original string of the negotiated + // [SSL/TLS protocol + // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2', '3' + TLSProtocolVersionKey = attribute.Key("tls.protocol.version") + + // TLSResumedKey is the attribute Key conforming to the "tls.resumed" + // semantic conventions. It represents the boolean flag indicating if this + // TLS connection was resumed from an existing TLS negotiation. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Examples: True + TLSResumedKey = attribute.Key("tls.resumed") + + // TLSServerCertificateKey is the attribute Key conforming to the + // "tls.server.certificate" semantic conventions. It represents the + // pEM-encoded stand-alone certificate offered by the server. This is + // usually mutually-exclusive of `server.certificate_chain` since this + // value also exists in that list. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...' + TLSServerCertificateKey = attribute.Key("tls.server.certificate") + + // TLSServerCertificateChainKey is the attribute Key conforming to the + // "tls.server.certificate_chain" semantic conventions. It represents the + // array of PEM-encoded certificates that make up the certificate chain + // offered by the server. This is usually mutually-exclusive of + // `server.certificate` since that value should be the first certificate in + // the chain. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") + + // TLSServerHashMd5Key is the attribute Key conforming to the + // "tls.server.hash.md5" semantic conventions. It represents the + // certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the server. 
For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") + + // TLSServerHashSha1Key is the attribute Key conforming to the + // "tls.server.hash.sha1" semantic conventions. It represents the + // certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") + + // TLSServerHashSha256Key is the attribute Key conforming to the + // "tls.server.hash.sha256" semantic conventions. It represents the + // certificate fingerprint using the SHA256 digest of DER-encoded version + // of certificate offered by the server. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") + + // TLSServerIssuerKey is the attribute Key conforming to the + // "tls.server.issuer" semantic conventions. It represents the + // distinguished name of + // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) + // of the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, + // DC=com' + TLSServerIssuerKey = attribute.Key("tls.server.issuer") + + // TLSServerJa3sKey is the attribute Key conforming to the + // "tls.server.ja3s" semantic conventions. It represents a hash that + // identifies servers based on how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + TLSServerJa3sKey = attribute.Key("tls.server.ja3s") + + // TLSServerNotAfterKey is the attribute Key conforming to the + // "tls.server.not_after" semantic conventions. It represents the date/Time + // indicating when server certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + TLSServerNotAfterKey = attribute.Key("tls.server.not_after") + + // TLSServerNotBeforeKey is the attribute Key conforming to the + // "tls.server.not_before" semantic conventions. It represents the + // date/Time indicating when server certificate is first considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") + + // TLSServerSubjectKey is the attribute Key conforming to the + // "tls.server.subject" semantic conventions. It represents the + // distinguished name of subject of the x.509 certificate presented by the + // server. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' + TLSServerSubjectKey = attribute.Key("tls.server.subject") +) + +var ( + // ssl + TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") + // tls + TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") +) + +// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" +// semantic conventions. It represents the string indicating the +// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used +// during the current connection. +func TLSCipher(val string) attribute.KeyValue { + return TLSCipherKey.String(val) +} + +// TLSClientCertificate returns an attribute KeyValue conforming to the +// "tls.client.certificate" semantic conventions. It represents the pEM-encoded +// stand-alone certificate offered by the client. This is usually +// mutually-exclusive of `client.certificate_chain` since this value also +// exists in that list. +func TLSClientCertificate(val string) attribute.KeyValue { + return TLSClientCertificateKey.String(val) +} + +// TLSClientCertificateChain returns an attribute KeyValue conforming to the +// "tls.client.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by +// the client. This is usually mutually-exclusive of `client.certificate` since +// that value should be the first certificate in the chain. +func TLSClientCertificateChain(val ...string) attribute.KeyValue { + return TLSClientCertificateChainKey.StringSlice(val) +} + +// TLSClientHashMd5 returns an attribute KeyValue conforming to the +// "tls.client.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashMd5(val string) attribute.KeyValue { + return TLSClientHashMd5Key.String(val) +} + +// TLSClientHashSha1 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha1(val string) attribute.KeyValue { + return TLSClientHashSha1Key.String(val) +} + +// TLSClientHashSha256 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha256(val string) attribute.KeyValue { + return TLSClientHashSha256Key.String(val) +} + +// TLSClientIssuer returns an attribute KeyValue conforming to the +// "tls.client.issuer" semantic conventions. It represents the distinguished +// name of +// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of +// the issuer of the x.509 certificate presented by the client. +func TLSClientIssuer(val string) attribute.KeyValue { + return TLSClientIssuerKey.String(val) +} + +// TLSClientJa3 returns an attribute KeyValue conforming to the +// "tls.client.ja3" semantic conventions. 
It represents a hash that identifies
+// clients based on how they perform an SSL/TLS handshake.
+func TLSClientJa3(val string) attribute.KeyValue {
+ return TLSClientJa3Key.String(val)
+}
+
+// TLSClientNotAfter returns an attribute KeyValue conforming to the
+// "tls.client.not_after" semantic conventions. It represents the date/time
+// indicating when the client certificate is no longer considered valid.
+func TLSClientNotAfter(val string) attribute.KeyValue {
+ return TLSClientNotAfterKey.String(val)
+}
+
+// TLSClientNotBefore returns an attribute KeyValue conforming to the
+// "tls.client.not_before" semantic conventions. It represents the date/time
+// indicating when the client certificate is first considered valid.
+func TLSClientNotBefore(val string) attribute.KeyValue {
+ return TLSClientNotBeforeKey.String(val)
+}
+
+// TLSClientServerName returns an attribute KeyValue conforming to the
+// "tls.client.server_name" semantic conventions. It represents the server
+// name (also called an SNI) telling the server which hostname the client is
+// attempting to connect to.
+func TLSClientServerName(val string) attribute.KeyValue {
+ return TLSClientServerNameKey.String(val)
+}
+
+// TLSClientSubject returns an attribute KeyValue conforming to the
+// "tls.client.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the client.
+func TLSClientSubject(val string) attribute.KeyValue {
+ return TLSClientSubjectKey.String(val)
+}
+
+// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
+// "tls.client.supported_ciphers" semantic conventions. It represents the array
+// of ciphers offered by the client during the client hello.
+func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
+ return TLSClientSupportedCiphersKey.StringSlice(val)
+}
+
+// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
+// semantic conventions. It represents the string indicating the curve used for
+// the given cipher, when applicable.
+func TLSCurve(val string) attribute.KeyValue {
+ return TLSCurveKey.String(val)
+}
+
+// TLSEstablished returns an attribute KeyValue conforming to the
+// "tls.established" semantic conventions. It represents the boolean flag
+// indicating if the TLS negotiation was successful and transitioned to an
+// encrypted tunnel.
+func TLSEstablished(val bool) attribute.KeyValue {
+ return TLSEstablishedKey.Bool(val)
+}
+
+// TLSNextProtocol returns an attribute KeyValue conforming to the
+// "tls.next_protocol" semantic conventions. It represents the string
+// indicating the protocol being tunneled. Per the values in the [IANA
+// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+// this string should be lower case.
+func TLSNextProtocol(val string) attribute.KeyValue {
+ return TLSNextProtocolKey.String(val)
+}
+
+// TLSProtocolVersion returns an attribute KeyValue conforming to the
+// "tls.protocol.version" semantic conventions. It represents the numeric part
+// of the version parsed from the original string of the negotiated [SSL/TLS
+// protocol
+// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+func TLSProtocolVersion(val string) attribute.KeyValue {
+ return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions.
It represents the boolean flag indicating if this TLS +// connection was resumed from an existing TLS negotiation. +func TLSResumed(val bool) attribute.KeyValue { + return TLSResumedKey.Bool(val) +} + +// TLSServerCertificate returns an attribute KeyValue conforming to the +// "tls.server.certificate" semantic conventions. It represents the pEM-encoded +// stand-alone certificate offered by the server. This is usually +// mutually-exclusive of `server.certificate_chain` since this value also +// exists in that list. +func TLSServerCertificate(val string) attribute.KeyValue { + return TLSServerCertificateKey.String(val) +} + +// TLSServerCertificateChain returns an attribute KeyValue conforming to the +// "tls.server.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by +// the server. This is usually mutually-exclusive of `server.certificate` since +// that value should be the first certificate in the chain. +func TLSServerCertificateChain(val ...string) attribute.KeyValue { + return TLSServerCertificateChainKey.StringSlice(val) +} + +// TLSServerHashMd5 returns an attribute KeyValue conforming to the +// "tls.server.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashMd5(val string) attribute.KeyValue { + return TLSServerHashMd5Key.String(val) +} + +// TLSServerHashSha1 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashSha1(val string) attribute.KeyValue { + return TLSServerHashSha1Key.String(val) +} + +// TLSServerHashSha256 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashSha256(val string) attribute.KeyValue { + return TLSServerHashSha256Key.String(val) +} + +// TLSServerIssuer returns an attribute KeyValue conforming to the +// "tls.server.issuer" semantic conventions. It represents the distinguished +// name of +// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of +// the issuer of the x.509 certificate presented by the client. +func TLSServerIssuer(val string) attribute.KeyValue { + return TLSServerIssuerKey.String(val) +} + +// TLSServerJa3s returns an attribute KeyValue conforming to the +// "tls.server.ja3s" semantic conventions. It represents a hash that identifies +// servers based on how they perform an SSL/TLS handshake. +func TLSServerJa3s(val string) attribute.KeyValue { + return TLSServerJa3sKey.String(val) +} + +// TLSServerNotAfter returns an attribute KeyValue conforming to the +// "tls.server.not_after" semantic conventions. It represents the date/Time +// indicating when server certificate is no longer considered valid. 
+func TLSServerNotAfter(val string) attribute.KeyValue { + return TLSServerNotAfterKey.String(val) +} + +// TLSServerNotBefore returns an attribute KeyValue conforming to the +// "tls.server.not_before" semantic conventions. It represents the date/Time +// indicating when server certificate is first considered valid. +func TLSServerNotBefore(val string) attribute.KeyValue { + return TLSServerNotBeforeKey.String(val) +} + +// TLSServerSubject returns an attribute KeyValue conforming to the +// "tls.server.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the server. +func TLSServerSubject(val string) attribute.KeyValue { + return TLSServerSubjectKey.String(val) +} + +// Attributes describing URL. +const ( + // URLFragmentKey is the attribute Key conforming to the "url.fragment" + // semantic conventions. It represents the [URI + // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'SemConv' + URLFragmentKey = attribute.Key("url.fragment") + + // URLFullKey is the attribute Key conforming to the "url.full" semantic + // conventions. It represents the absolute URL describing a network + // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // '//localhost' + // Note: For network calls, URL usually has + // `scheme://host[:port][path][?query][#fragment]` format, where the + // fragment is not transmitted over HTTP, but if it is known, it SHOULD be + // included nevertheless. + // `url.full` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case username and + // password SHOULD be redacted and attribute's value SHOULD be + // `https://REDACTED:REDACTED@www.example.com/`. + // `url.full` SHOULD capture the absolute URL when it is available (or can + // be reconstructed) and SHOULD NOT be validated or modified except for + // sanitizing purposes. + URLFullKey = attribute.Key("url.full") + + // URLPathKey is the attribute Key conforming to the "url.path" semantic + // conventions. It represents the [URI + // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/search' + URLPathKey = attribute.Key("url.path") + + // URLQueryKey is the attribute Key conforming to the "url.query" semantic + // conventions. It represents the [URI + // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'q=OpenTelemetry' + // Note: Sensitive content provided in query string SHOULD be scrubbed when + // instrumentations can identify it. + URLQueryKey = attribute.Key("url.query") + + // URLSchemeKey is the attribute Key conforming to the "url.scheme" + // semantic conventions. It represents the [URI + // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component + // identifying the used protocol. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'https', 'ftp', 'telnet' + URLSchemeKey = attribute.Key("url.scheme") +) + +// URLFragment returns an attribute KeyValue conforming to the +// "url.fragment" semantic conventions. 
It represents the [URI +// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component +func URLFragment(val string) attribute.KeyValue { + return URLFragmentKey.String(val) +} + +// URLFull returns an attribute KeyValue conforming to the "url.full" +// semantic conventions. It represents the absolute URL describing a network +// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) +func URLFull(val string) attribute.KeyValue { + return URLFullKey.String(val) +} + +// URLPath returns an attribute KeyValue conforming to the "url.path" +// semantic conventions. It represents the [URI +// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component +func URLPath(val string) attribute.KeyValue { + return URLPathKey.String(val) +} + +// URLQuery returns an attribute KeyValue conforming to the "url.query" +// semantic conventions. It represents the [URI +// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component +func URLQuery(val string) attribute.KeyValue { + return URLQueryKey.String(val) +} + +// URLScheme returns an attribute KeyValue conforming to the "url.scheme" +// semantic conventions. It represents the [URI +// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component +// identifying the used protocol. +func URLScheme(val string) attribute.KeyValue { + return URLSchemeKey.String(val) +} + +// Describes user-agent attributes. +const ( + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of + // the [HTTP + // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) + // header sent by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU + // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1' + UserAgentOriginalKey = attribute.Key("user_agent.original") +) + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client. +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} + +// Session is defined as the period of time encompassing all activities +// performed by the application and the actions executed by the end user. +// Consequently, a Session is represented as a collection of Logs, Events, and +// Spans emitted by the Client Application throughout the Session's duration. +// Each Session is assigned a unique identifier, which is included as an +// attribute in the Logs, Events, and Spans generated during the Session's +// lifecycle. +// When a session reaches end of life, typically due to user inactivity or +// session timeout, a new session identifier will be assigned. The previous +// session identifier may be provided by the instrumentation so that telemetry +// backends can link the two sessions. +const ( + // SessionIDKey is the attribute Key conforming to the "session.id" + // semantic conventions. It represents a unique id to identify a session. 
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '00112233-4455-6677-8899-aabbccddeeff'
+ SessionIDKey = attribute.Key("session.id")
+
+ // SessionPreviousIDKey is the attribute Key conforming to the
+ // "session.previous_id" semantic conventions. It represents the previous
+ // `session.id` for this user, when known.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '00112233-4455-6677-8899-aabbccddeeff'
+ SessionPreviousIDKey = attribute.Key("session.previous_id")
+)
+
+// SessionID returns an attribute KeyValue conforming to the "session.id"
+// semantic conventions. It represents a unique id to identify a session.
+func SessionID(val string) attribute.KeyValue {
+ return SessionIDKey.String(val)
+}
+
+// SessionPreviousID returns an attribute KeyValue conforming to the
+// "session.previous_id" semantic conventions. It represents the previous
+// `session.id` for this user, when known.
+func SessionPreviousID(val string) attribute.KeyValue {
+ return SessionPreviousIDKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go
new file mode 100644
index 00000000000..d27e8a8f8b3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the v1.24.0
+// version of the OpenTelemetry semantic conventions.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go
new file mode 100644
index 00000000000..6c019aafc3e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go
@@ -0,0 +1,200 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This event represents an occurrence of a lifecycle transition on the iOS
+// platform.
+const (
+ // IosStateKey is the attribute Key conforming to the "ios.state" semantic
+ // conventions. It represents the state the application has transitioned
+ // into at the occurrence of the event.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Note: The iOS lifecycle states are defined in the [UIApplicationDelegate
+ // documentation](https://developer.apple.com/documentation/uikit/uiapplicationdelegate#1656902),
+ // from which the `OS terminology` column values are derived.
+ IosStateKey = attribute.Key("ios.state")
+)
+
+var (
+ // The app has become `active`. Associated with UIKit notification `applicationDidBecomeActive`
+ IosStateActive = IosStateKey.String("active")
+ // The app is now `inactive`. Associated with UIKit notification `applicationWillResignActive`
+ IosStateInactive = IosStateKey.String("inactive")
+ // The app is now in the background. This value is associated with UIKit notification `applicationDidEnterBackground`
+ IosStateBackground = IosStateKey.String("background")
+ // The app is now in the foreground. This value is associated with UIKit notification `applicationWillEnterForeground`
+ IosStateForeground = IosStateKey.String("foreground")
+ // The app is about to terminate. Associated with UIKit notification `applicationWillTerminate`
+ IosStateTerminate = IosStateKey.String("terminate")
+)
+
+// This event represents an occurrence of a lifecycle transition on the Android
+// platform.
+const (
+ // AndroidStateKey is the attribute Key conforming to the "android.state"
+ // semantic conventions. It represents the state the application has
+ // transitioned into at the occurrence of the event.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Note: The Android lifecycle states are defined in [Activity lifecycle
+ // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
+ // from which the `OS identifiers` are derived.
+ AndroidStateKey = attribute.Key("android.state")
+)
+
+var (
+ // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time
+ AndroidStateCreated = AndroidStateKey.String("created")
+ // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state
+ AndroidStateBackground = AndroidStateKey.String("background")
+ // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states
+ AndroidStateForeground = AndroidStateKey.String("foreground")
+)
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+ // FeatureFlagKeyKey is the attribute Key conforming to the
+ // "feature_flag.key" semantic conventions. It represents the unique
+ // identifier of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'logo-color'
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider_name" semantic conventions. It represents the
+ // name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: experimental
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. It SHOULD be a semantic
+ // identifier for a value. If one is unavailable, a stringified version of
+ // the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: experimental
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means
+ // for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant `red` may be used for the value `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
+// identifier for a value. If one is unavailable, a stringified version of the
+// value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
+
+// RPC received/sent message.
+const (
+ // MessageCompressedSizeKey is the attribute Key conforming to the
+ // "message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+ // MessageIDKey is the attribute Key conforming to the "message.id"
+ // semantic conventions. It MUST be calculated as two different counters
+ // starting from `1`, one for sent messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ MessageIDKey = attribute.Key("message.id")
+
+ // MessageTypeKey is the attribute Key conforming to the "message.type"
+ // semantic conventions. It represents whether this is a received or sent
+ // message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessageTypeKey = attribute.Key("message.type")
+
+ // MessageUncompressedSizeKey is the attribute Key conforming to the
+ // "message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+ // sent
+ MessageTypeSent = MessageTypeKey.String("SENT")
+ // received
+ MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+ return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It MUST be calculated as two different counters
+// starting from `1`, one for sent messages and one for received messages.
+func MessageID(val int) attribute.KeyValue {
+ return MessageIDKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions.
It represents the +// uncompressed size of the message in bytes. +func MessageUncompressedSize(val int) attribute.KeyValue { + return MessageUncompressedSizeKey.Int(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go new file mode 100644 index 00000000000..7235bb51d9a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go new file mode 100644 index 00000000000..a6b953f625e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go @@ -0,0 +1,1071 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +const ( + + // DBClientConnectionsUsage is the metric conforming to the + // "db.client.connections.usage" semantic conventions. It represents the number + // of connections that are currently in state described by the `state` + // attribute. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsUsageName = "db.client.connections.usage" + DBClientConnectionsUsageUnit = "{connection}" + DBClientConnectionsUsageDescription = "The number of connections that are currently in state described by the `state` attribute" + + // DBClientConnectionsIdleMax is the metric conforming to the + // "db.client.connections.idle.max" semantic conventions. It represents the + // maximum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsIdleMaxName = "db.client.connections.idle.max" + DBClientConnectionsIdleMaxUnit = "{connection}" + DBClientConnectionsIdleMaxDescription = "The maximum number of idle open connections allowed" + + // DBClientConnectionsIdleMin is the metric conforming to the + // "db.client.connections.idle.min" semantic conventions. It represents the + // minimum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsIdleMinName = "db.client.connections.idle.min" + DBClientConnectionsIdleMinUnit = "{connection}" + DBClientConnectionsIdleMinDescription = "The minimum number of idle open connections allowed" + + // DBClientConnectionsMax is the metric conforming to the + // "db.client.connections.max" semantic conventions. It represents the maximum + // number of open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsMaxName = "db.client.connections.max" + DBClientConnectionsMaxUnit = "{connection}" + DBClientConnectionsMaxDescription = "The maximum number of open connections allowed" + + // DBClientConnectionsPendingRequests is the metric conforming to the + // "db.client.connections.pending_requests" semantic conventions. It represents + // the number of pending requests for an open connection, cumulative for the + // entire pool. 
+ // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests" + DBClientConnectionsPendingRequestsUnit = "{request}" + DBClientConnectionsPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" + + // DBClientConnectionsTimeouts is the metric conforming to the + // "db.client.connections.timeouts" semantic conventions. It represents the + // number of connection timeouts that have occurred trying to obtain a + // connection from the pool. + // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + DBClientConnectionsTimeoutsName = "db.client.connections.timeouts" + DBClientConnectionsTimeoutsUnit = "{timeout}" + DBClientConnectionsTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool" + + // DBClientConnectionsCreateTime is the metric conforming to the + // "db.client.connections.create_time" semantic conventions. It represents the + // time it took to create a new connection. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsCreateTimeName = "db.client.connections.create_time" + DBClientConnectionsCreateTimeUnit = "ms" + DBClientConnectionsCreateTimeDescription = "The time it took to create a new connection" + + // DBClientConnectionsWaitTime is the metric conforming to the + // "db.client.connections.wait_time" semantic conventions. It represents the + // time it took to obtain an open connection from the pool. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsWaitTimeName = "db.client.connections.wait_time" + DBClientConnectionsWaitTimeUnit = "ms" + DBClientConnectionsWaitTimeDescription = "The time it took to obtain an open connection from the pool" + + // DBClientConnectionsUseTime is the metric conforming to the + // "db.client.connections.use_time" semantic conventions. It represents the + // time between borrowing a connection and returning it to the pool. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsUseTimeName = "db.client.connections.use_time" + DBClientConnectionsUseTimeUnit = "ms" + DBClientConnectionsUseTimeDescription = "The time between borrowing a connection and returning it to the pool" + + // AspnetcoreRoutingMatchAttempts is the metric conforming to the + // "aspnetcore.routing.match_attempts" semantic conventions. It represents the + // number of requests that were attempted to be matched to an endpoint. + // Instrument: counter + // Unit: {match_attempt} + // Stability: Experimental + AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" + AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" + AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." + + // AspnetcoreDiagnosticsExceptions is the metric conforming to the + // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the + // number of exceptions caught by exception handling middleware. + // Instrument: counter + // Unit: {exception} + // Stability: Experimental + AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" + AspnetcoreDiagnosticsExceptionsUnit = "{exception}" + AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." 
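+
+ // Usage sketch (not part of the generated constants): one way these
+ // Name/Unit/Description values might be wired into an instrument with the
+ // go.opentelemetry.io/otel/metric API; `meter` (a metric.Meter) and `ctx`
+ // are assumed to come from the caller.
+ //
+ //	exceptions, err := meter.Int64Counter(
+ //		AspnetcoreDiagnosticsExceptionsName,
+ //		metric.WithUnit(AspnetcoreDiagnosticsExceptionsUnit),
+ //		metric.WithDescription(AspnetcoreDiagnosticsExceptionsDescription),
+ //	)
+ //	if err == nil {
+ //		exceptions.Add(ctx, 1)
+ //	}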
+
+ // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the
+ // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It
+ // represents the number of requests that are currently active on the server
+ // that hold a rate limiting lease.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases"
+ AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}"
+ AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease."
+
+ // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the
+ // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It
+ // represents the duration of rate limiting lease held by requests on the
+ // server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration"
+ AspnetcoreRateLimitingRequestLeaseDurationUnit = "s"
+ AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server."
+
+ // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the
+ // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It
+ // represents the time the request spent in a queue waiting to acquire a rate
+ // limiting lease.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue"
+ AspnetcoreRateLimitingRequestTimeInQueueUnit = "s"
+ AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease."
+
+ // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the
+ // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It
+ // represents the number of requests that are currently queued, waiting to
+ // acquire a rate limiting lease.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests"
+ AspnetcoreRateLimitingQueuedRequestsUnit = "{request}"
+ AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease."
+
+ // AspnetcoreRateLimitingRequests is the metric conforming to the
+ // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the
+ // number of requests that tried to acquire a rate limiting lease.
+ // Instrument: counter
+ // Unit: {request}
+ // Stability: Experimental
+ AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests"
+ AspnetcoreRateLimitingRequestsUnit = "{request}"
+ AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease."
+
+ // DNSLookupDuration is the metric conforming to the "dns.lookup.duration"
+ // semantic conventions. It represents the time taken to perform a DNS
+ // lookup.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DNSLookupDurationName = "dns.lookup.duration"
+ DNSLookupDurationUnit = "s"
+ DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
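+
+ // Usage sketch (not part of the generated constants): a histogram metric
+ // such as "dns.lookup.duration" would typically be created once and have
+ // measurements recorded in the metric's unit, here seconds; `meter`, `ctx`,
+ // and `elapsed` (a time.Duration) are assumed.
+ //
+ //	dnsLookup, err := meter.Float64Histogram(
+ //		DNSLookupDurationName,
+ //		metric.WithUnit(DNSLookupDurationUnit),
+ //		metric.WithDescription(DNSLookupDurationDescription),
+ //	)
+ //	if err == nil {
+ //		dnsLookup.Record(ctx, elapsed.Seconds())
+ //	}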
+ + // HTTPClientOpenConnections is the metric conforming to the + // "http.client.open_connections" semantic conventions. It represents the + // number of outbound HTTP connections that are currently active or idle on the + // client. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + HTTPClientOpenConnectionsName = "http.client.open_connections" + HTTPClientOpenConnectionsUnit = "{connection}" + HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." + + // HTTPClientConnectionDuration is the metric conforming to the + // "http.client.connection.duration" semantic conventions. It represents the + // duration of the successfully established outbound HTTP connections. + // Instrument: histogram + // Unit: s + // Stability: Experimental + HTTPClientConnectionDurationName = "http.client.connection.duration" + HTTPClientConnectionDurationUnit = "s" + HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." + + // HTTPClientActiveRequests is the metric conforming to the + // "http.client.active_requests" semantic conventions. It represents the number + // of active HTTP requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPClientActiveRequestsName = "http.client.active_requests" + HTTPClientActiveRequestsUnit = "{request}" + HTTPClientActiveRequestsDescription = "Number of active HTTP requests." + + // HTTPClientRequestTimeInQueue is the metric conforming to the + // "http.client.request.time_in_queue" semantic conventions. It represents the + // amount of time requests spent on a queue waiting for an available + // connection. + // Instrument: histogram + // Unit: s + // Stability: Experimental + HTTPClientRequestTimeInQueueName = "http.client.request.time_in_queue" + HTTPClientRequestTimeInQueueUnit = "s" + HTTPClientRequestTimeInQueueDescription = "The amount of time requests spent on a queue waiting for an available connection." + + // KestrelActiveConnections is the metric conforming to the + // "kestrel.active_connections" semantic conventions. It represents the number + // of connections that are currently active on the server. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + KestrelActiveConnectionsName = "kestrel.active_connections" + KestrelActiveConnectionsUnit = "{connection}" + KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." + + // KestrelConnectionDuration is the metric conforming to the + // "kestrel.connection.duration" semantic conventions. It represents the + // duration of connections on the server. + // Instrument: histogram + // Unit: s + // Stability: Experimental + KestrelConnectionDurationName = "kestrel.connection.duration" + KestrelConnectionDurationUnit = "s" + KestrelConnectionDurationDescription = "The duration of connections on the server." + + // KestrelRejectedConnections is the metric conforming to the + // "kestrel.rejected_connections" semantic conventions. It represents the + // number of connections rejected by the server. + // Instrument: counter + // Unit: {connection} + // Stability: Experimental + KestrelRejectedConnectionsName = "kestrel.rejected_connections" + KestrelRejectedConnectionsUnit = "{connection}" + KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." 
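+
+ // Usage sketch (not part of the generated constants): updowncounter metrics
+ // such as "kestrel.active_connections" track a value that can go up and
+ // down, so the instrument is incremented on start and decremented on end;
+ // `meter` and `ctx` are assumed as above.
+ //
+ //	active, err := meter.Int64UpDownCounter(
+ //		KestrelActiveConnectionsName,
+ //		metric.WithUnit(KestrelActiveConnectionsUnit),
+ //		metric.WithDescription(KestrelActiveConnectionsDescription),
+ //	)
+ //	if err == nil {
+ //		active.Add(ctx, 1)        // connection opened
+ //		defer active.Add(ctx, -1) // connection closed
+ //	}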
+
+	// KestrelQueuedConnections is the metric conforming to the
+	// "kestrel.queued_connections" semantic conventions. It represents the number
+	// of connections that are currently queued and are waiting to start.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	KestrelQueuedConnectionsName = "kestrel.queued_connections"
+	KestrelQueuedConnectionsUnit = "{connection}"
+	KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start."
+
+	// KestrelQueuedRequests is the metric conforming to the
+	// "kestrel.queued_requests" semantic conventions. It represents the number of
+	// HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are
+	// currently queued and are waiting to start.
+	// Instrument: updowncounter
+	// Unit: {request}
+	// Stability: Experimental
+	KestrelQueuedRequestsName = "kestrel.queued_requests"
+	KestrelQueuedRequestsUnit = "{request}"
+	KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start."
+
+	// KestrelUpgradedConnections is the metric conforming to the
+	// "kestrel.upgraded_connections" semantic conventions. It represents the
+	// number of connections that are currently upgraded (WebSockets).
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	KestrelUpgradedConnectionsName = "kestrel.upgraded_connections"
+	KestrelUpgradedConnectionsUnit = "{connection}"
+	KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets)."
+
+	// KestrelTLSHandshakeDuration is the metric conforming to the
+	// "kestrel.tls_handshake.duration" semantic conventions. It represents the
+	// duration of TLS handshakes on the server.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration"
+	KestrelTLSHandshakeDurationUnit = "s"
+	KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server."
+
+	// KestrelActiveTLSHandshakes is the metric conforming to the
+	// "kestrel.active_tls_handshakes" semantic conventions. It represents the
+	// number of TLS handshakes that are currently in progress on the server.
+	// Instrument: updowncounter
+	// Unit: {handshake}
+	// Stability: Experimental
+	KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes"
+	KestrelActiveTLSHandshakesUnit = "{handshake}"
+	KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server."
+
+	// SignalrServerConnectionDuration is the metric conforming to the
+	// "signalr.server.connection.duration" semantic conventions. It represents the
+	// duration of connections on the server.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	SignalrServerConnectionDurationName = "signalr.server.connection.duration"
+	SignalrServerConnectionDurationUnit = "s"
+	SignalrServerConnectionDurationDescription = "The duration of connections on the server."
+
+	// SignalrServerActiveConnections is the metric conforming to the
+	// "signalr.server.active_connections" semantic conventions. It represents the
+	// number of connections that are currently active on the server.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	SignalrServerActiveConnectionsName = "signalr.server.active_connections"
+	SignalrServerActiveConnectionsUnit = "{connection}"
+	SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server."
+
+	// FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration"
+	// semantic conventions. It represents the duration of the function's logic
+	// execution.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	FaaSInvokeDurationName = "faas.invoke_duration"
+	FaaSInvokeDurationUnit = "s"
+	FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution"
+
+	// FaaSInitDuration is the metric conforming to the "faas.init_duration"
+	// semantic conventions. It represents the duration of the function's
+	// initialization, such as a cold start.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	FaaSInitDurationName = "faas.init_duration"
+	FaaSInitDurationUnit = "s"
+	FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start"
+
+	// FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic
+	// conventions. It represents the number of invocation cold starts.
+	// Instrument: counter
+	// Unit: {coldstart}
+	// Stability: Experimental
+	FaaSColdstartsName = "faas.coldstarts"
+	FaaSColdstartsUnit = "{coldstart}"
+	FaaSColdstartsDescription = "Number of invocation cold starts"
+
+	// FaaSErrors is the metric conforming to the "faas.errors" semantic
+	// conventions. It represents the number of invocation errors.
+	// Instrument: counter
+	// Unit: {error}
+	// Stability: Experimental
+	FaaSErrorsName = "faas.errors"
+	FaaSErrorsUnit = "{error}"
+	FaaSErrorsDescription = "Number of invocation errors"
+
+	// FaaSInvocations is the metric conforming to the "faas.invocations" semantic
+	// conventions. It represents the number of successful invocations.
+	// Instrument: counter
+	// Unit: {invocation}
+	// Stability: Experimental
+	FaaSInvocationsName = "faas.invocations"
+	FaaSInvocationsUnit = "{invocation}"
+	FaaSInvocationsDescription = "Number of successful invocations"
+
+	// FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic
+	// conventions. It represents the number of invocation timeouts.
+	// Instrument: counter
+	// Unit: {timeout}
+	// Stability: Experimental
+	FaaSTimeoutsName = "faas.timeouts"
+	FaaSTimeoutsUnit = "{timeout}"
+	FaaSTimeoutsDescription = "Number of invocation timeouts"
+
+	// FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic
+	// conventions. It represents the distribution of max memory usage per
+	// invocation.
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	FaaSMemUsageName = "faas.mem_usage"
+	FaaSMemUsageUnit = "By"
+	FaaSMemUsageDescription = "Distribution of max memory usage per invocation"
+
+	// FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic
+	// conventions. It represents the distribution of CPU usage per invocation.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	FaaSCPUUsageName = "faas.cpu_usage"
+	FaaSCPUUsageUnit = "s"
+	FaaSCPUUsageDescription = "Distribution of CPU usage per invocation"
+
+	// FaaSNetIo is the metric conforming to the "faas.net_io" semantic
+	// conventions. It represents the distribution of net I/O usage per invocation.
+ // Instrument: histogram + // Unit: By + // Stability: Experimental + FaaSNetIoName = "faas.net_io" + FaaSNetIoUnit = "By" + FaaSNetIoDescription = "Distribution of net I/O usage per invocation" + + // HTTPServerRequestDuration is the metric conforming to the + // "http.server.request.duration" semantic conventions. It represents the + // duration of HTTP server requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPServerRequestDurationName = "http.server.request.duration" + HTTPServerRequestDurationUnit = "s" + HTTPServerRequestDurationDescription = "Duration of HTTP server requests." + + // HTTPServerActiveRequests is the metric conforming to the + // "http.server.active_requests" semantic conventions. It represents the number + // of active HTTP server requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPServerActiveRequestsName = "http.server.active_requests" + HTTPServerActiveRequestsUnit = "{request}" + HTTPServerActiveRequestsDescription = "Number of active HTTP server requests." + + // HTTPServerRequestBodySize is the metric conforming to the + // "http.server.request.body.size" semantic conventions. It represents the size + // of HTTP server request bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerRequestBodySizeName = "http.server.request.body.size" + HTTPServerRequestBodySizeUnit = "By" + HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies." + + // HTTPServerResponseBodySize is the metric conforming to the + // "http.server.response.body.size" semantic conventions. It represents the + // size of HTTP server response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerResponseBodySizeName = "http.server.response.body.size" + HTTPServerResponseBodySizeUnit = "By" + HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies." + + // HTTPClientRequestDuration is the metric conforming to the + // "http.client.request.duration" semantic conventions. It represents the + // duration of HTTP client requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPClientRequestDurationName = "http.client.request.duration" + HTTPClientRequestDurationUnit = "s" + HTTPClientRequestDurationDescription = "Duration of HTTP client requests." + + // HTTPClientRequestBodySize is the metric conforming to the + // "http.client.request.body.size" semantic conventions. It represents the size + // of HTTP client request bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientRequestBodySizeName = "http.client.request.body.size" + HTTPClientRequestBodySizeUnit = "By" + HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies." + + // HTTPClientResponseBodySize is the metric conforming to the + // "http.client.response.body.size" semantic conventions. It represents the + // size of HTTP client response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientResponseBodySizeName = "http.client.response.body.size" + HTTPClientResponseBodySizeUnit = "By" + HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." + + // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic + // conventions. It represents the measure of initial memory requested. 
+ // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmMemoryInitName = "jvm.memory.init" + JvmMemoryInitUnit = "By" + JvmMemoryInitDescription = "Measure of initial memory requested." + + // JvmSystemCPUUtilization is the metric conforming to the + // "jvm.system.cpu.utilization" semantic conventions. It represents the recent + // CPU utilization for the whole system as reported by the JVM. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization" + JvmSystemCPUUtilizationUnit = "1" + JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM." + + // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m" + // semantic conventions. It represents the average CPU load of the whole system + // for the last minute as reported by the JVM. + // Instrument: gauge + // Unit: {run_queue_item} + // Stability: Experimental + JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m" + JvmSystemCPULoad1mUnit = "{run_queue_item}" + JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM." + + // JvmBufferMemoryUsage is the metric conforming to the + // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of + // memory used by buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryUsageName = "jvm.buffer.memory.usage" + JvmBufferMemoryUsageUnit = "By" + JvmBufferMemoryUsageDescription = "Measure of memory used by buffers." + + // JvmBufferMemoryLimit is the metric conforming to the + // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of + // total memory capacity of buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryLimitName = "jvm.buffer.memory.limit" + JvmBufferMemoryLimitUnit = "By" + JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers." + + // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic + // conventions. It represents the number of buffers in the pool. + // Instrument: updowncounter + // Unit: {buffer} + // Stability: Experimental + JvmBufferCountName = "jvm.buffer.count" + JvmBufferCountUnit = "{buffer}" + JvmBufferCountDescription = "Number of buffers in the pool." + + // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic + // conventions. It represents the measure of memory used. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryUsedName = "jvm.memory.used" + JvmMemoryUsedUnit = "By" + JvmMemoryUsedDescription = "Measure of memory used." + + // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed" + // semantic conventions. It represents the measure of memory committed. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryCommittedName = "jvm.memory.committed" + JvmMemoryCommittedUnit = "By" + JvmMemoryCommittedDescription = "Measure of memory committed." + + // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic + // conventions. It represents the measure of max obtainable memory. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryLimitName = "jvm.memory.limit" + JvmMemoryLimitUnit = "By" + JvmMemoryLimitDescription = "Measure of max obtainable memory." 
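The JVM memory updowncounters above are typically observed asynchronously rather than recorded inline. A sketch using an observable instrument and a collection callback, assuming the same imports as the earlier example; readHeapUsed is a hypothetical bridge into JVM runtime data, stubbed here:

// readHeapUsed is a stub; a real exporter bridge would query the JVM.
func readHeapUsed() int64 { return 0 }

func registerJVMMemory(meter metric.Meter) error {
	used, err := meter.Int64ObservableUpDownCounter(
		semconv.JvmMemoryUsedName,
		metric.WithUnit(semconv.JvmMemoryUsedUnit),
		metric.WithDescription(semconv.JvmMemoryUsedDescription),
	)
	if err != nil {
		return err
	}
	// The callback runs on every metric collection cycle.
	_, err = meter.RegisterCallback(func(ctx context.Context, o metric.Observer) error {
		o.ObserveInt64(used, readHeapUsed())
		return nil
	}, used)
	return err
}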
+
+	// JvmMemoryUsedAfterLastGc is the metric conforming to the
+	// "jvm.memory.used_after_last_gc" semantic conventions. It represents the
+	// measure of memory used, as measured after the most recent garbage collection
+	// event on this pool.
+	// Instrument: updowncounter
+	// Unit: By
+	// Stability: Stable
+	JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc"
+	JvmMemoryUsedAfterLastGcUnit = "By"
+	JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool."
+
+	// JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic
+	// conventions. It represents the duration of JVM garbage collection actions.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Stable
+	JvmGcDurationName = "jvm.gc.duration"
+	JvmGcDurationUnit = "s"
+	JvmGcDurationDescription = "Duration of JVM garbage collection actions."
+
+	// JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic
+	// conventions. It represents the number of executing platform threads.
+	// Instrument: updowncounter
+	// Unit: {thread}
+	// Stability: Stable
+	JvmThreadCountName = "jvm.thread.count"
+	JvmThreadCountUnit = "{thread}"
+	JvmThreadCountDescription = "Number of executing platform threads."
+
+	// JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic
+	// conventions. It represents the number of classes loaded since JVM start.
+	// Instrument: counter
+	// Unit: {class}
+	// Stability: Stable
+	JvmClassLoadedName = "jvm.class.loaded"
+	JvmClassLoadedUnit = "{class}"
+	JvmClassLoadedDescription = "Number of classes loaded since JVM start."
+
+	// JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded"
+	// semantic conventions. It represents the number of classes unloaded since JVM
+	// start.
+	// Instrument: counter
+	// Unit: {class}
+	// Stability: Stable
+	JvmClassUnloadedName = "jvm.class.unloaded"
+	JvmClassUnloadedUnit = "{class}"
+	JvmClassUnloadedDescription = "Number of classes unloaded since JVM start."
+
+	// JvmClassCount is the metric conforming to the "jvm.class.count" semantic
+	// conventions. It represents the number of classes currently loaded.
+	// Instrument: updowncounter
+	// Unit: {class}
+	// Stability: Stable
+	JvmClassCountName = "jvm.class.count"
+	JvmClassCountUnit = "{class}"
+	JvmClassCountDescription = "Number of classes currently loaded."
+
+	// JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic
+	// conventions. It represents the number of processors available to the Java
+	// virtual machine.
+	// Instrument: updowncounter
+	// Unit: {cpu}
+	// Stability: Stable
+	JvmCPUCountName = "jvm.cpu.count"
+	JvmCPUCountUnit = "{cpu}"
+	JvmCPUCountDescription = "Number of processors available to the Java virtual machine."
+
+	// JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic
+	// conventions. It represents the CPU time used by the process as reported by
+	// the JVM.
+	// Instrument: counter
+	// Unit: s
+	// Stability: Stable
+	JvmCPUTimeName = "jvm.cpu.time"
+	JvmCPUTimeUnit = "s"
+	JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM."
+
+	// JvmCPURecentUtilization is the metric conforming to the
+	// "jvm.cpu.recent_utilization" semantic conventions. It represents the recent
+	// CPU utilization for the process as reported by the JVM.
+	// Instrument: gauge
+	// Unit: 1
+	// Stability: Stable
+	JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization"
+	JvmCPURecentUtilizationUnit = "1"
+	JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM."
+
+	// MessagingPublishDuration is the metric conforming to the
+	// "messaging.publish.duration" semantic conventions. It represents the
+	// duration of publish operation.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	MessagingPublishDurationName = "messaging.publish.duration"
+	MessagingPublishDurationUnit = "s"
+	MessagingPublishDurationDescription = "Measures the duration of publish operation."
+
+	// MessagingReceiveDuration is the metric conforming to the
+	// "messaging.receive.duration" semantic conventions. It represents the
+	// duration of receive operation.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	MessagingReceiveDurationName = "messaging.receive.duration"
+	MessagingReceiveDurationUnit = "s"
+	MessagingReceiveDurationDescription = "Measures the duration of receive operation."
+
+	// MessagingDeliverDuration is the metric conforming to the
+	// "messaging.deliver.duration" semantic conventions. It represents the
+	// duration of deliver operation.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	MessagingDeliverDurationName = "messaging.deliver.duration"
+	MessagingDeliverDurationUnit = "s"
+	MessagingDeliverDurationDescription = "Measures the duration of deliver operation."
+
+	// MessagingPublishMessages is the metric conforming to the
+	// "messaging.publish.messages" semantic conventions. It represents the
+	// number of published messages.
+	// Instrument: counter
+	// Unit: {message}
+	// Stability: Experimental
+	MessagingPublishMessagesName = "messaging.publish.messages"
+	MessagingPublishMessagesUnit = "{message}"
+	MessagingPublishMessagesDescription = "Measures the number of published messages."
+
+	// MessagingReceiveMessages is the metric conforming to the
+	// "messaging.receive.messages" semantic conventions. It represents the
+	// number of received messages.
+	// Instrument: counter
+	// Unit: {message}
+	// Stability: Experimental
+	MessagingReceiveMessagesName = "messaging.receive.messages"
+	MessagingReceiveMessagesUnit = "{message}"
+	MessagingReceiveMessagesDescription = "Measures the number of received messages."
+
+	// MessagingDeliverMessages is the metric conforming to the
+	// "messaging.deliver.messages" semantic conventions. It represents the
+	// number of delivered messages.
+	// Instrument: counter
+	// Unit: {message}
+	// Stability: Experimental
+	MessagingDeliverMessagesName = "messaging.deliver.messages"
+	MessagingDeliverMessagesUnit = "{message}"
+	MessagingDeliverMessagesDescription = "Measures the number of delivered messages."
+
+	// RPCServerDuration is the metric conforming to the "rpc.server.duration"
+	// semantic conventions. It represents the duration of inbound RPC.
+	// Instrument: histogram
+	// Unit: ms
+	// Stability: Experimental
+	RPCServerDurationName = "rpc.server.duration"
+	RPCServerDurationUnit = "ms"
+	RPCServerDurationDescription = "Measures the duration of inbound RPC."
+
+	// RPCServerRequestSize is the metric conforming to the
+	// "rpc.server.request.size" semantic conventions. It represents the size of
+	// RPC request messages (uncompressed).
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	RPCServerRequestSizeName = "rpc.server.request.size"
+	RPCServerRequestSizeUnit = "By"
+	RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
+
+	// RPCServerResponseSize is the metric conforming to the
+	// "rpc.server.response.size" semantic conventions. It represents the size of
+	// RPC response messages (uncompressed).
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	RPCServerResponseSizeName = "rpc.server.response.size"
+	RPCServerResponseSizeUnit = "By"
+	RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
+
+	// RPCServerRequestsPerRPC is the metric conforming to the
+	// "rpc.server.requests_per_rpc" semantic conventions. It represents the
+	// number of messages received per RPC.
+	// Instrument: histogram
+	// Unit: {count}
+	// Stability: Experimental
+	RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc"
+	RPCServerRequestsPerRPCUnit = "{count}"
+	RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC."
+
+	// RPCServerResponsesPerRPC is the metric conforming to the
+	// "rpc.server.responses_per_rpc" semantic conventions. It represents the
+	// number of messages sent per RPC.
+	// Instrument: histogram
+	// Unit: {count}
+	// Stability: Experimental
+	RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc"
+	RPCServerResponsesPerRPCUnit = "{count}"
+	RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
+
+	// RPCClientDuration is the metric conforming to the "rpc.client.duration"
+	// semantic conventions. It represents the duration of outbound RPC.
+	// Instrument: histogram
+	// Unit: ms
+	// Stability: Experimental
+	RPCClientDurationName = "rpc.client.duration"
+	RPCClientDurationUnit = "ms"
+	RPCClientDurationDescription = "Measures the duration of outbound RPC."
+
+	// RPCClientRequestSize is the metric conforming to the
+	// "rpc.client.request.size" semantic conventions. It represents the size of
+	// RPC request messages (uncompressed).
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	RPCClientRequestSizeName = "rpc.client.request.size"
+	RPCClientRequestSizeUnit = "By"
+	RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
+
+	// RPCClientResponseSize is the metric conforming to the
+	// "rpc.client.response.size" semantic conventions. It represents the size of
+	// RPC response messages (uncompressed).
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	RPCClientResponseSizeName = "rpc.client.response.size"
+	RPCClientResponseSizeUnit = "By"
+	RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
+
+	// RPCClientRequestsPerRPC is the metric conforming to the
+	// "rpc.client.requests_per_rpc" semantic conventions. It represents the
+	// number of messages received per RPC.
+	// Instrument: histogram
+	// Unit: {count}
+	// Stability: Experimental
+	RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc"
+	RPCClientRequestsPerRPCUnit = "{count}"
+	RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC."
+
+	// RPCClientResponsesPerRPC is the metric conforming to the
+	// "rpc.client.responses_per_rpc" semantic conventions. It represents the
+	// number of messages sent per RPC.
+	// Instrument: histogram
+	// Unit: {count}
+	// Stability: Experimental
+	RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc"
+	RPCClientResponsesPerRPCUnit = "{count}"
+	RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
+
+	// SystemCPUTime is the metric conforming to the "system.cpu.time" semantic
+	// conventions. It represents the seconds each logical CPU spent on each mode.
+	// Instrument: counter
+	// Unit: s
+	// Stability: Experimental
+	SystemCPUTimeName = "system.cpu.time"
+	SystemCPUTimeUnit = "s"
+	SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode"
+
+	// SystemCPUUtilization is the metric conforming to the
+	// "system.cpu.utilization" semantic conventions. It represents the difference
+	// in system.cpu.time since the last measurement, divided by the elapsed time
+	// and number of logical CPUs.
+	// Instrument: gauge
+	// Unit: 1
+	// Stability: Experimental
+	SystemCPUUtilizationName = "system.cpu.utilization"
+	SystemCPUUtilizationUnit = "1"
+	SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs"
+
+	// SystemCPUFrequency is the metric conforming to the "system.cpu.frequency"
+	// semantic conventions. It represents the current frequency of the CPU in Hz.
+	// Instrument: gauge
+	// Unit: {Hz}
+	// Stability: Experimental
+	SystemCPUFrequencyName = "system.cpu.frequency"
+	SystemCPUFrequencyUnit = "{Hz}"
+	SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz"
+
+	// SystemCPUPhysicalCount is the metric conforming to the
+	// "system.cpu.physical.count" semantic conventions. It represents the number
+	// of actual physical processor cores on the hardware.
+	// Instrument: updowncounter
+	// Unit: {cpu}
+	// Stability: Experimental
+	SystemCPUPhysicalCountName = "system.cpu.physical.count"
+	SystemCPUPhysicalCountUnit = "{cpu}"
+	SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware"
+
+	// SystemCPULogicalCount is the metric conforming to the
+	// "system.cpu.logical.count" semantic conventions. It represents the number
+	// of logical (virtual) processor cores created by the operating system to
+	// manage multitasking.
+	// Instrument: updowncounter
+	// Unit: {cpu}
+	// Stability: Experimental
+	SystemCPULogicalCountName = "system.cpu.logical.count"
+	SystemCPULogicalCountUnit = "{cpu}"
+	SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking"
+
+	// SystemMemoryUsage is the metric conforming to the "system.memory.usage"
+	// semantic conventions. It represents memory in use by state.
+	// Instrument: updowncounter
+	// Unit: By
+	// Stability: Experimental
+	SystemMemoryUsageName = "system.memory.usage"
+	SystemMemoryUsageUnit = "By"
+	SystemMemoryUsageDescription = "Reports memory in use by state."
+
+	// SystemMemoryLimit is the metric conforming to the "system.memory.limit"
+	// semantic conventions. It represents the total memory available in the
+	// system.
+	// Instrument: updowncounter
+	// Unit: By
+	// Stability: Experimental
+	SystemMemoryLimitName = "system.memory.limit"
+	SystemMemoryLimitUnit = "By"
+	SystemMemoryLimitDescription = "Total memory available in the system."
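system.cpu.time is a cumulative counter usually reported once per CPU mode, which maps onto an observable counter in the Go API. A sketch, assuming the earlier imports plus go.opentelemetry.io/otel/attribute; the "system.cpu.state" attribute key and the cpuSeconds reader are illustrative assumptions, not definitions from this file:

// cpuSeconds is a hypothetical reader returning cumulative CPU seconds for
// one mode; a real implementation would read /proc/stat or equivalent.
func cpuSeconds(mode string) float64 { return 0 }

func registerCPUTime(meter metric.Meter) error {
	cpuTime, err := meter.Float64ObservableCounter(
		semconv.SystemCPUTimeName,
		metric.WithUnit(semconv.SystemCPUTimeUnit),
		metric.WithDescription(semconv.SystemCPUTimeDescription),
	)
	if err != nil {
		return err
	}
	_, err = meter.RegisterCallback(func(ctx context.Context, o metric.Observer) error {
		for _, mode := range []string{"user", "system", "idle"} {
			// The attribute key is assumed here for illustration.
			o.ObserveFloat64(cpuTime, cpuSeconds(mode),
				metric.WithAttributes(attribute.String("system.cpu.state", mode)))
		}
		return nil
	}, cpuTime)
	return err
}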
+ + // SystemMemoryUtilization is the metric conforming to the + // "system.memory.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemMemoryUtilizationName = "system.memory.utilization" + SystemMemoryUtilizationUnit = "1" + + // SystemPagingUsage is the metric conforming to the "system.paging.usage" + // semantic conventions. It represents the unix swap or windows pagefile usage. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemPagingUsageName = "system.paging.usage" + SystemPagingUsageUnit = "By" + SystemPagingUsageDescription = "Unix swap or windows pagefile usage" + + // SystemPagingUtilization is the metric conforming to the + // "system.paging.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingUtilizationName = "system.paging.utilization" + SystemPagingUtilizationUnit = "1" + + // SystemPagingFaults is the metric conforming to the "system.paging.faults" + // semantic conventions. + // Instrument: counter + // Unit: {fault} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingFaultsName = "system.paging.faults" + SystemPagingFaultsUnit = "{fault}" + + // SystemPagingOperations is the metric conforming to the + // "system.paging.operations" semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingOperationsName = "system.paging.operations" + SystemPagingOperationsUnit = "{operation}" + + // SystemDiskIo is the metric conforming to the "system.disk.io" semantic + // conventions. + // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskIoName = "system.disk.io" + SystemDiskIoUnit = "By" + + // SystemDiskOperations is the metric conforming to the + // "system.disk.operations" semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskOperationsName = "system.disk.operations" + SystemDiskOperationsUnit = "{operation}" + + // SystemDiskIoTime is the metric conforming to the "system.disk.io_time" + // semantic conventions. It represents the time disk spent activated. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskIoTimeName = "system.disk.io_time" + SystemDiskIoTimeUnit = "s" + SystemDiskIoTimeDescription = "Time disk spent activated" + + // SystemDiskOperationTime is the metric conforming to the + // "system.disk.operation_time" semantic conventions. It represents the sum of + // the time each operation took to complete. 
+ // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskOperationTimeName = "system.disk.operation_time" + SystemDiskOperationTimeUnit = "s" + SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete" + + // SystemDiskMerged is the metric conforming to the "system.disk.merged" + // semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskMergedName = "system.disk.merged" + SystemDiskMergedUnit = "{operation}" + + // SystemFilesystemUsage is the metric conforming to the + // "system.filesystem.usage" semantic conventions. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUsageName = "system.filesystem.usage" + SystemFilesystemUsageUnit = "By" + + // SystemFilesystemUtilization is the metric conforming to the + // "system.filesystem.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUtilizationName = "system.filesystem.utilization" + SystemFilesystemUtilizationUnit = "1" + + // SystemNetworkDropped is the metric conforming to the + // "system.network.dropped" semantic conventions. It represents the count of + // packets that are dropped or discarded even though there was no error. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + SystemNetworkDroppedName = "system.network.dropped" + SystemNetworkDroppedUnit = "{packet}" + SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error" + + // SystemNetworkPackets is the metric conforming to the + // "system.network.packets" semantic conventions. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkPacketsName = "system.network.packets" + SystemNetworkPacketsUnit = "{packet}" + + // SystemNetworkErrors is the metric conforming to the "system.network.errors" + // semantic conventions. It represents the count of network errors detected. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + SystemNetworkErrorsName = "system.network.errors" + SystemNetworkErrorsUnit = "{error}" + SystemNetworkErrorsDescription = "Count of network errors detected" + + // SystemNetworkIo is the metric conforming to the "system.network.io" semantic + // conventions. + // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkIoName = "system.network.io" + SystemNetworkIoUnit = "By" + + // SystemNetworkConnections is the metric conforming to the + // "system.network.connections" semantic conventions. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. 
+ SystemNetworkConnectionsName = "system.network.connections" + SystemNetworkConnectionsUnit = "{connection}" + + // SystemProcessesCount is the metric conforming to the + // "system.processes.count" semantic conventions. It represents the total + // number of processes in each state. + // Instrument: updowncounter + // Unit: {process} + // Stability: Experimental + SystemProcessesCountName = "system.processes.count" + SystemProcessesCountUnit = "{process}" + SystemProcessesCountDescription = "Total number of processes in each state" + + // SystemProcessesCreated is the metric conforming to the + // "system.processes.created" semantic conventions. It represents the total + // number of processes created over uptime of the host. + // Instrument: counter + // Unit: {process} + // Stability: Experimental + SystemProcessesCreatedName = "system.processes.created" + SystemProcessesCreatedUnit = "{process}" + SystemProcessesCreatedDescription = "Total number of processes created over uptime of the host" + + // SystemLinuxMemoryAvailable is the metric conforming to the + // "system.linux.memory.available" semantic conventions. It represents an + // estimate of how much memory is available for starting new applications, + // without causing swapping. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemLinuxMemoryAvailableName = "system.linux.memory.available" + SystemLinuxMemoryAvailableUnit = "By" + SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go new file mode 100644 index 00000000000..d66bbe9c23d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go @@ -0,0 +1,2545 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +import "go.opentelemetry.io/otel/attribute" + +// A cloud environment (e.g. GCP, Azure, AWS). +const ( + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. It represents the cloud + // regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the + // resource is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and Google + // Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. 
+ CloudPlatformKey = attribute.Key("cloud.platform") + + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud + // regions](https://www.tencentcloud.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + + // CloudResourceIDKey is the attribute Key conforming to the + // "cloud.resource_id" semantic conventions. It represents the cloud + // provider-specific native identifier of the monitored cloud resource + // (e.g. an + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // on AWS, a [fully qualified resource + // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) + // on Azure, a [full resource + // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) + // on GCP) + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', + // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', + // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, + // so it may be necessary to set `cloud.resource_id` as a span attribute + // instead. + // + // The exact value to use for `cloud.resource_id` depends on the cloud + // provider. + // The following well-known definitions MUST be used if you set this + // attribute and they apply: + // + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias + // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + // with the resolved function version, as the same runtime instance may + // be invokable with + // multiple different aliases. + // * **GCP:** The [URI of the + // resource](https://cloud.google.com/iam/docs/full-resource-names) + // * **Azure:** The [Fully Qualified Resource + // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) + // of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider. 
+ CloudResourceIDKey = attribute.Key("cloud.resource_id") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Bare Metal Solution (BMS) + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + CloudProviderHeroku = CloudProviderKey.String("heroku") + // IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + 
CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+	return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the cloud
+// regions often have multiple, isolated locations known as zones to increase
+// availability. Availability zone represents the zone where the resource is
+// running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+	return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+	return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on
+// Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+	return CloudResourceIDKey.String(val)
+}
+
+// A container instance.
+const (
+	// ContainerCommandKey is the attribute Key conforming to the
+	// "container.command" semantic conventions. It represents the command used
+	// to run the container (i.e. the command name).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol'
+	// Note: If using embedded credentials or sensitive data, it is recommended
+	// to remove them to prevent potential leakage.
+	ContainerCommandKey = attribute.Key("container.command")
+
+	// ContainerCommandArgsKey is the attribute Key conforming to the
+	// "container.command_args" semantic conventions. It represents all the
+	// command arguments (including the command/executable itself) run by the
+	// container. [2]
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol, --config, config.yaml'
+	ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+	// ContainerCommandLineKey is the attribute Key conforming to the
+	// "container.command_line" semantic conventions. It represents the full
+	// command run by the container as a single string representing the full
+	// command. [2]
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol --config config.yaml'
+	ContainerCommandLineKey = attribute.Key("container.command_line")
+
+	// ContainerIDKey is the attribute Key conforming to the "container.id"
+	// semantic conventions. It represents the container ID. Usually a UUID, as
+	// for example used to [identify Docker
+	// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+	// The UUID might be abbreviated.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'a3bf90e006b2'
+	ContainerIDKey = attribute.Key("container.id")
+
+	// ContainerImageIDKey is the attribute Key conforming to the
+	// "container.image.id" semantic conventions. It represents the runtime
+	// specific image identifier. Usually a hash algorithm followed by a UUID.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
+	// Note: Docker defines a sha256 of the image id; `container.image.id`
+	// corresponds to the `Image` field from the Docker container inspect
+	// [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
+	// endpoint.
+	// K8S defines a link to the container registry repository with digest
+	// `"imageID": "registry.azurecr.io
+	// /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+	// The ID is assigned by the container runtime and can vary in different
+	// environments. Consider using `oci.manifest.digest` if it is important to
+	// identify the same image in different environments/runtimes.
+	ContainerImageIDKey = attribute.Key("container.image.id")
+
+	// ContainerImageNameKey is the attribute Key conforming to the
+	// "container.image.name" semantic conventions. It represents the name of
+	// the image the container was built on.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'gcr.io/opentelemetry/operator'
+	ContainerImageNameKey = attribute.Key("container.image.name")
+
+	// ContainerImageRepoDigestsKey is the attribute Key conforming to the
+	// "container.image.repo_digests" semantic conventions. It represents the
+	// repo digests of the container image as provided by the container
+	// runtime.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
+	// 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
+	// Note:
+	// [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
+	// and
+	// [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
+	// report those under the `RepoDigests` field.
+	ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+	// ContainerImageTagsKey is the attribute Key conforming to the
+	// "container.image.tags" semantic conventions. It represents the container
+	// image tags. An example can be found in [Docker Image
+	// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+	// Should be only the `` section of the full name for example from
+	// `registry.example.com/my-org/my-image:`.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'v1.27.1', '3.5.7-0'
+	ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+	// ContainerNameKey is the attribute Key conforming to the "container.name"
+	// semantic conventions. It represents the container name used by container
+	// runtime.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-autoconf'
+	ContainerNameKey = attribute.Key("container.name")
+
+	// ContainerRuntimeKey is the attribute Key conforming to the
+	// "container.runtime" semantic conventions. It represents the container
+	// runtime managing this container.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'docker', 'containerd', 'rkt'
+	ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+	return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) run by the
+// container. [2]
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+	return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full
+// command run by the container as a single string representing the full
+// command. [2]
+func ContainerCommandLine(val string) attribute.KeyValue {
+	return ContainerCommandLineKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+	return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime
+// specific image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+	return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+	return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+	return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container
+// image tags. An example can be found in [Docker Image
+// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+// Should be only the `` section of the full name for example from
+// `registry.example.com/my-org/my-image:`.
+func ContainerImageTags(val ...string) attribute.KeyValue { + return ContainerImageTagsKey.StringSlice(val) +} + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// Describes device attributes. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine-readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human-readable version of + // the device model rather than a machine-readable alternative. 
+ DeviceModelNameKey = attribute.Key("device.model.name") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// A host is defined as a computing instance. For example, physical servers, +// virtual machines, switches, or disk arrays. +const ( + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + HostArchKey = attribute.Key("host.arch") + + // HostCPUCacheL2SizeKey is the attribute Key conforming to the + // "host.cpu.cache.l2.size" semantic conventions. It represents the amount + // of level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 12288000 + HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") + + // HostCPUFamilyKey is the attribute Key conforming to the + // "host.cpu.family" semantic conventions. It represents the family or + // generation of the CPU. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '6', 'PA-RISC 1.1e' + HostCPUFamilyKey = attribute.Key("host.cpu.family") + + // HostCPUModelIDKey is the attribute Key conforming to the + // "host.cpu.model.id" semantic conventions. It represents the model + // identifier. It provides more granular information about the CPU, + // distinguishing it from other CPUs within the same family. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '6', '9000/778/B180L' + HostCPUModelIDKey = attribute.Key("host.cpu.model.id") + + // HostCPUModelNameKey is the attribute Key conforming to the + // "host.cpu.model.name" semantic conventions. It represents the model + // designation of the processor. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' + HostCPUModelNameKey = attribute.Key("host.cpu.model.name") + + // HostCPUSteppingKey is the attribute Key conforming to the + // "host.cpu.stepping" semantic conventions. It represents the stepping or + // core revisions.
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1 + HostCPUSteppingKey = attribute.Key("host.cpu.stepping") + + // HostCPUVendorIDKey is the attribute Key conforming to the + // "host.cpu.vendor.id" semantic conventions. It represents the processor + // manufacturer identifier. A maximum 12-character string. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'GenuineIntel' + // Note: The [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor + // ID string in EBX, EDX and ECX registers. Writing these to memory in this + // order results in a 12-character string. + HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") + + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // systems, this should be the `machine-id`. See the table below for the + // sources to use to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the VM image ID or host OS image ID. + // For Cloud, this value is from the provider. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageNameKey is the attribute Key conforming to the + // "host.image.name" semantic conventions. It represents the name of the VM + // image or OS install the host was instantiated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version + // string of the VM image or host OS as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") + + // HostIPKey is the attribute Key conforming to the "host.ip" semantic + // conventions. It represents the available IP addresses of the host, + // excluding loopback interfaces. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC + // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, + // excluding loopback interfaces.
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal + // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): + // as hyphen-separated octets in uppercase hexadecimal form from most to + // least significant. + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or +// generation of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model +// identifier. It provides more granular information about the CPU, +// distinguishing it from other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. +func HostCPUModelName(val string) attribute.KeyValue { + return HostCPUModelNameKey.String(val) +} + +// HostCPUStepping returns an attribute KeyValue conforming to the +// "host.cpu.stepping" semantic conventions. It represents the stepping or core +// revisions. +func HostCPUStepping(val int) attribute.KeyValue { + return HostCPUSteppingKey.Int(val) +} + +// HostCPUVendorID returns an attribute KeyValue conforming to the +// "host.cpu.vendor.id" semantic conventions. It represents the processor +// manufacturer identifier. A maximum 12-character string. 
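+//
+// A hedged sketch (editorial): combining the host.cpu.* helpers; the
+// values mirror the examples above and are illustrative only:
+//
+//    attrs := []attribute.KeyValue{
+//        HostCPUVendorID("GenuineIntel"),
+//        HostCPUFamily("6"),
+//        HostCPUModelName("11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz"),
+//        HostCPUStepping(1),
+//    }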
+func HostCPUVendorID(val string) attribute.KeyValue { + return HostCPUVendorIDKey.String(val) +} + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. See the table below for the sources to use +// to determine the `machine-id` based on operating system. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the +// "host.image.id" semantic conventions. It represents the VM image ID or host +// OS image ID. For Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM +// image or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string +// of the VM image or host OS as defined in [Version +// Attributes](/docs/resource/README.md#version-attributes). +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic +// conventions. It represents the available IP addresses of the host, excluding +// loopback interfaces. +func HostIP(val ...string) attribute.KeyValue { + return HostIPKey.StringSlice(val) +} + +// HostMac returns an attribute KeyValue conforming to the "host.mac" +// semantic conventions. It represents the available MAC addresses of the host, +// excluding loopback interfaces. +func HostMac(val ...string) attribute.KeyValue { + return HostMacKey.StringSlice(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" +// semantic conventions. It represents the name of the host. On Unix systems, +// it may contain what the hostname command returns, or the fully qualified +// hostname, or another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" +// semantic conventions. It represents the type of host. For Cloud, this must +// be the machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// Kubernetes resource attributes. +const ( + // K8SClusterNameKey is the attribute Key conforming to the + // "k8s.cluster.name" semantic conventions. It represents the name of the + // cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") + + // K8SClusterUIDKey is the attribute Key conforming to the + // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for + // the cluster, set to the UID of the `kube-system` namespace. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' + // Note: K8S doesn't have support for obtaining a cluster ID.
If this is + // ever + // added, we will recommend collecting the `k8s.cluster.uid` through the + // official APIs. In the meantime, we are able to use the `uid` of the + // `kube-system` namespace as a proxy for cluster ID. Read on for the + // rationale. + // + // Every object created in a K8S cluster is assigned a distinct UID. The + // `kube-system` namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the `uid` of the `kube-system` + // namespace is a reasonable proxy for the K8S ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // [ISO/IEC 9834-8 and ITU-T + // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html), + // which states: + // + // > If generated according to one of the mechanisms defined in Rec. + // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // different from all other UUIDs generated before 3603 A.D., or is + // extremely likely to be different (depending on the mechanism chosen). + // + // Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") + + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses a different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") + + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") + + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") + + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// K8SClusterUID returns an attribute KeyValue conforming to the +// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the +// cluster, set to the UID of the `kube-system` namespace. +func K8SClusterUID(val string) attribute.KeyValue { + return K8SClusterUIDKey.String(val) +} + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses a different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet.
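+//
+// A usage sketch (editorial, not generated): identifying a workload with
+// the k8s.* helpers; the names and counts are illustrative:
+//
+//    attrs := []attribute.KeyValue{
+//        K8SNamespaceName("default"),
+//        K8SPodName("opentelemetry-pod-autoconf"),
+//        K8SContainerName("redis"),
+//        K8SContainerRestartCount(0),
+//    }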
+func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// An OCI image manifest. +const ( + // OciManifestDigestKey is the attribute Key conforming to the + // "oci.manifest.digest" semantic conventions. 
It represents the digest of + // the OCI image manifest. For container images specifically, this is the + // digest by which the container image is known. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' + // Note: Follows [OCI Image Manifest + // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md), + // and specifically the [Digest + // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests). + // An example can be found in [Example Image + // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest). + OciManifestDigestKey = attribute.Key("oci.manifest.digest") +) + +// OciManifestDigest returns an attribute KeyValue conforming to the +// "oci.manifest.digest" semantic conventions. It represents the digest of the +// OCI image manifest. For container images specifically, this is the digest by +// which the container image is known. +func OciManifestDigest(val string) attribute.KeyValue { + return OciManifestDigestKey.String(val) +} + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSBuildIDKey is the attribute Key conforming to the "os.build_id" + // semantic conventions. It represents the unique identifier for a + // particular build or compilation of the operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' + OSBuildIDKey = attribute.Key("os.build_id") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, as reported by, for example, the + // `ver` or `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + OSTypeKey = attribute.Key("os.type") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes).
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the operating system. +func OSBuildID(val string) attribute.KeyValue { + return OSBuildIDKey.String(val) +} + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. It represents the human readable (not +// intended to be parsed) OS version information, as reported by, for example, +// the `ver` or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](/docs/resource/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// An operating system process. +const ( + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'cmd/otelcol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions.
It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // process identifier (PPID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") +) + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`. On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent process +// identifier (PPID).
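+//
+// A hedged sketch (editorial): describing the current process with these
+// helpers and the standard library os package:
+//
+//    attrs := []attribute.KeyValue{
+//        ProcessPID(os.Getpid()),
+//        ProcessParentPID(os.Getppid()),
+//        ProcessCommandArgs(os.Args...),
+//    }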
+func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// The Android platform on which the Android application is running. +const ( + // AndroidOSAPILevelKey is the attribute Key conforming to the + // "android.os.api_level" semantic conventions. It uniquely identifies the + // framework API revision offered by a version (`os.version`) of the + // Android operating system. More information can be found + // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '33', '32' + AndroidOSAPILevelKey = attribute.Key("android.os.api_level") +) + +// AndroidOSAPILevel returns an attribute KeyValue conforming to the +// "android.os.api_level" semantic conventions. It uniquely identifies the +// framework API revision offered by a version (`os.version`) of the Android +// operating system. More information can be found +// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). +func AndroidOSAPILevel(val string) attribute.KeyValue { + return AndroidOSAPILevelKey.String(val) +} + +// The web browser in which the application represented by the resource is +// running. The `browser.*` attributes MUST be used only for resources that +// represent applications running in a web browser (regardless of whether +// running on a mobile or desktop device). +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions.
It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an + // [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the task + // definition family this task definition is a member of. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for this task definition. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). 
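+//
+// A usage sketch (editorial): the ECS helpers together; the ARN and the
+// family/revision values are illustrative, not real resources:
+//
+//    attrs := []attribute.KeyValue{
+//        AWSECSLaunchtypeFargate,
+//        AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
+//        AWSECSTaskFamily("opentelemetry-family"),
+//        AWSECSTaskRevision("8"),
+//    }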
+func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Resources specific to Amazon Web Services. +const ( + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each writes to its own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions.
It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") +) + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// Resource used by Google Cloud Run. +const ( + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the + // name of the Cloud Run + // [execution](https://cloud.google.com/run/docs/managing/job-executions) + // being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the + // index for a task within an execution as provided by the + // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1 + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") +) + +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name +// of the Cloud Run +// [execution](https://cloud.google.com/run/docs/managing/job-executions) being +// run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) +} + +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index +// for a task within an execution as provided by the +// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) +} + +// Resources used by Google Compute Engine (GCE). +const ( + // GCPGceInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the + // hostname of a GCE instance. This is the full value of the default or + // [custom + // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-host1234.example.com', + // 'sample-vm.us-west1-b.c.my-project.internal' + GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") + + // GCPGceInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. It represents the instance + // name of a GCE instance. This is the value provided by `host.name`, the + // visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the [default internal + // DNS + // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") +) + +// GCPGceInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. This is the full value of the default or [custom +// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). +func GCPGceInstanceHostname(val string) attribute.KeyValue { + return GCPGceInstanceHostnameKey.String(val) +} + +// GCPGceInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance +// name of a GCE instance. This is the value provided by `host.name`, the +// visible name of the instance in the Cloud Console UI, and the prefix for the +// default hostname of the instance as defined by the [default internal DNS +// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). +func GCPGceInstanceName(val string) attribute.KeyValue { + return GCPGceInstanceNameKey.String(val) +} + +// Heroku dyno metadata +const ( + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + HerokuAppIDKey = attribute.Key("heroku.app.id") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit + // hash for the current release + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. 
It represents + // the time and date the release was created + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2022-10-23T18:00:42Z' + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") +) + +// HerokuAppID returns an attribute KeyValue conforming to the +// "heroku.app.id" semantic conventions. It represents the unique identifier +// for the application +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming +// to the "heroku.release.creation_timestamp" semantic conventions. It +// represents the time and date the release was created +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// The software deployment. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'staging', 'production' + // Note: `deployment.environment` does not affect the uniqueness + // constraints defined through + // the `service.namespace`, `service.name` and `service.instance.id` + // resource attributes. + // This implies that resources carrying the following attribute + // combinations MUST be + // considered to be identifying the same service: + // + // * `service.name=frontend`, `deployment.environment=production` + // * `service.name=frontend`, `deployment.environment=staging`. + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment environment](https://wikipedia.org/wiki/Deployment_environment) +// (aka deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// A serverless instance. +const ( + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function converted to Bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. 
On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must + // be multiplied by 1,048,576). + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") + + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run (Services):** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") +) + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes.
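The `faas.max_memory` note above hides an easy mistake: `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` is reported in MiB while the attribute wants bytes. A minimal sketch of wiring these constructors up on Lambda; only `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` is named by the convention itself, the other variable names and the package name are assumptions:

package lambdares

import (
	"os"
	"strconv"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// lambdaResource assembles the faas.* resource attributes documented above.
// AWS_LAMBDA_FUNCTION_MEMORY_SIZE reports MiB, so it is multiplied by
// 1,048,576 to satisfy the "converted to Bytes" wording of faas.max_memory.
// The other AWS_LAMBDA_* variable names are assumptions, not part of the
// convention text.
func lambdaResource() (*resource.Resource, error) {
	memMiB, err := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE"))
	if err != nil {
		return nil, err
	}
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.FaaSName(os.Getenv("AWS_LAMBDA_FUNCTION_NAME")),
		semconv.FaaSVersion(os.Getenv("AWS_LAMBDA_FUNCTION_VERSION")),
		// Per the faas.instance note: use the (full) log stream name.
		semconv.FaaSInstance(os.Getenv("AWS_LAMBDA_LOG_STREAM_NAME")),
		semconv.FaaSMaxMemory(memMiB*1048576),
	), nil
}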
+func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// A service instance. +const ( + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. The format is not defined by these + // conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2.0.0', 'a01dbef8a' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// A service instance. +const ( + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-k8s-pod-deployment-1', + // '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to distinguish instances of the same + // service that exist at the same time (e.g. instances of a horizontally + // scaled service). It is preferable for the ID to be persistent and stay + // the same for the lifetime of the service instance, however it is + // acceptable that the ID is ephemeral and changes during important + // lifetime events for the service (e.g. service restarts). If the service + // has no inherent unique ID that can be used as the value of this + // attribute it is recommended to generate a random Version 1 or Version 4 + // RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. 
It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") +) + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'opentelemetry' + // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute + // to `opentelemetry`. + // If another SDK, like a fork or a vendor-provided implementation, is + // used, this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module + // name of this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this + // case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. 
+ // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of + // the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'parts-unlimited-java' + // Note: Official auto instrumentation agents and distributions SHOULD set + // the `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the + // version string of the auto instrumentation agent or distribution, if + // used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2.3' + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") +) + +// TelemetryDistroName returns an attribute KeyValue conforming to the +// "telemetry.distro.name" semantic conventions. It represents the name of the +// auto instrumentation agent or distribution, if used. +func TelemetryDistroName(val string) attribute.KeyValue { + return TelemetryDistroNameKey.String(val) +} + +// TelemetryDistroVersion returns an attribute KeyValue conforming to the +// "telemetry.distro.version" semantic conventions. It represents the version +// string of the auto instrumentation agent or distribution, if used. 
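The `telemetry.sdk.*` attributes are populated by the SDK itself; only a distribution would normally set `telemetry.distro.*`. A sketch of how that could look, assuming the OTel Go SDK's `resource` package; the distro name is made up, and the distro attributes are added schemaless so that `resource.Merge` cannot fail on a schema-URL mismatch with whatever semconv version the installed SDK defaults to:

package distrores

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// withDistro merges a (made-up) distribution identity into the SDK's default
// resource, which already carries telemetry.sdk.name, telemetry.sdk.language
// and telemetry.sdk.version.
func withDistro() (*resource.Resource, error) {
	return resource.Merge(
		resource.Default(),
		resource.NewSchemaless(
			// Per the note above, official distributions start the name
			// with "opentelemetry-"; this one is purely illustrative.
			semconv.TelemetryDistroName("opentelemetry-example-distro"),
			semconv.TelemetryDistroVersion("1.2.3"),
		),
	)
}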
+func TelemetryDistroVersion(val string) attribute.KeyValue { + return TelemetryDistroVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") +) + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OTelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.0.0' + OTelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. 
It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry +// Scope's concepts. +const ( + // OTelLibraryNameKey is the attribute Key conforming to the + // "otel.library.name" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'io.opentelemetry.contrib.mongodb' + // Deprecated: use the `otel.scope.name` attribute. + OTelLibraryNameKey = attribute.Key("otel.library.name") + + // OTelLibraryVersionKey is the attribute Key conforming to the + // "otel.library.version" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '1.0.0' + // Deprecated: use the `otel.scope.version` attribute. + OTelLibraryVersionKey = attribute.Key("otel.library.version") +) + +// OTelLibraryName returns an attribute KeyValue conforming to the +// "otel.library.name" semantic conventions. +// +// Deprecated: use the `otel.scope.name` attribute. +func OTelLibraryName(val string) attribute.KeyValue { + return OTelLibraryNameKey.String(val) +} + +// OTelLibraryVersion returns an attribute KeyValue conforming to the +// "otel.library.version" semantic conventions. +// +// Deprecated: use the `otel.scope.version` attribute. +func OTelLibraryVersion(val string) attribute.KeyValue { + return OTelLibraryVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go new file mode 100644 index 00000000000..fe80b1731d0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/<version> +const SchemaURL = "https://opentelemetry.io/schemas/1.24.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go new file mode 100644 index 00000000000..c1718234e52 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go @@ -0,0 +1,1323 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +import "go.opentelemetry.io/otel/attribute" + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](/docs/resource/README.md#service) of the remote + // service. SHOULD be equal to the actual `service.name` resource attribute + // of the remote service if any. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions.
It represents the +// [`service.name`](/docs/resource/README.md#service) of the remote service. +// SHOULD be equal to the actual `service.name` resource attribute of the +// remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. +func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). +func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // CodeColumnKey is the attribute Key conforming to the "code.column" + // semantic conventions. 
It represents the column number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeStacktraceKey is the attribute Key conforming to the + // "code.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'at + // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. 
It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). +func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeStacktrace returns an attribute KeyValue conforming to the +// "code.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func CodeStacktrace(val string) attribute.KeyValue { + return CodeStacktraceKey.String(val) +} + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed + // to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" + // semantic conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" +// semantic conventions. It represents the current "managed" thread ID (as +// opposed to OS thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// Span attributes used by AWS Lambda (in addition to general `faas` +// attributes). +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `cloud.resource_id` if an alias is + // involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. 
It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for CloudEvents. CloudEvents is a specification on how to define +// event data in a standard way. These attributes can be attached to spans when +// performing operations with CloudEvents, regardless of the protocol being +// used. +const ( + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) + // that uniquely identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) + // that identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the + // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) + // of the event in the context of the event producer (identified by + // source). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") + + // CloudeventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the + // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) + // that contains a value describing the type of event related to the + // originating occurrence. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.github.pull_request.opened', + // 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") +) + +// CloudeventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the +// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) +// that uniquely identifies the event.
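Both `cloudevents.event_id` and `cloudevents.event_source` are Required, so a consumer span should always carry them. A hedged sketch of attaching these attributes when handling one delivery; the `event` struct, span name and tracer name are stand-ins, not part of any CloudEvents SDK:

package eventspan

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// event is a stand-in for whatever a CloudEvents SDK delivers; only the
// fields needed for the attributes used below are modelled.
type event struct{ ID, Source, Type string }

// processEvent starts a span for one delivery, records the two Required
// attributes (cloudevents.event_id, cloudevents.event_source) plus the
// recommended type, and runs the handler under that span's context.
func processEvent(ctx context.Context, ev event, handle func(context.Context) error) error {
	ctx, span := otel.Tracer("cloudevents-demo").Start(ctx, "process "+ev.Type,
		trace.WithAttributes(
			semconv.CloudeventsEventID(ev.ID),
			semconv.CloudeventsEventSource(ev.Source),
			semconv.CloudeventsEventType(ev.Type),
		))
	defer span.End()
	return handle(ctx)
}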
+func CloudeventsEventID(val string) attribute.KeyValue { + return CloudeventsEventIDKey.String(val) +} + +// CloudeventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the +// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) +// that identifies the context in which an event happened. +func CloudeventsEventSource(val string) attribute.KeyValue { + return CloudeventsEventSourceKey.String(val) +} + +// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to +// the "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents +// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) +// which the event uses. +func CloudeventsEventSpecVersion(val string) attribute.KeyValue { + return CloudeventsEventSpecVersionKey.String(val) +} + +// CloudeventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the +// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) +// of the event in the context of the event producer (identified by source). +func CloudeventsEventSubject(val string) attribute.KeyValue { + return CloudeventsEventSubjectKey.String(val) +} + +// CloudeventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the +// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) +// that contains a value describing the type of event related to the +// originating occurrence. +func CloudeventsEventType(val string) attribute.KeyValue { + return CloudeventsEventTypeKey.String(val) +} + +// Semantic conventions for the OpenTracing Shim +const ( + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The causal relationship between a child Span and a parent Span. + OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span doesn't depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // OTelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'resource not found' + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// This semantic convention describes an instance of a function that runs +// without provisioning or managing of servers (also known as serverless +// functions or Function as a Service (FaaS)) with spans. +const ( + // FaaSInvocationIDKey is the attribute Key conforming to the + // "faas.invocation_id" semantic conventions. It represents the invocation + // ID of the current function invocation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSInvocationIDKey = attribute.Key("faas.invocation_id") +) + +// FaaSInvocationID returns an attribute KeyValue conforming to the +// "faas.invocation_id" semantic conventions. It represents the invocation ID +// of the current function invocation. +func FaaSInvocationID(val string) attribute.KeyValue { + return FaaSInvocationIDKey.String(val) +} + +// Semantic Convention for FaaS triggered as a response to some data source +// operation such as a database or filesystem read/write. +const ( + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name + // of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in + // Cosmos DB to the database name. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or + // S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It represents the + // type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string + // containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") + + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") +) + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). 
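Since `faas.time` and `faas.document.time` both want ISO 8601 in UTC, Go's `time.RFC3339` applied to a UTC time yields exactly the example form ('2020-01-23T13:47:06Z'). A small sketch for a timer-triggered invocation; the package, tracer and span names are illustrative, and the cron expression is the one from the `faas.cron` example:

package faastimer

import (
	"context"
	"time"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// onTimer records one scheduled invocation with the faas.time and faas.cron
// attributes documented above.
func onTimer(ctx context.Context) {
	_, span := otel.Tracer("faas-demo").Start(ctx, "timer",
		trace.WithAttributes(
			// UTC + RFC 3339 matches the ISO 8601 form in the examples.
			semconv.FaaSTime(time.Now().UTC().Format(time.RFC3339)),
			semconv.FaaSCron("0/5 * * * ? *"),
		))
	defer span.End()
	// ... scheduled work goes here ...
}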
+func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// Contains additional attributes for incoming FaaS spans. +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// The `aws` conventions apply to operations using the AWS SDK. They map +// request or response parameters in AWS SDK API calls to attributes on a Span. +// The conventions have been collected over time based on feedback from AWS +// users of tracing and will continue to evolve as new interesting conventions +// are found. +// Some descriptions are also provided for populating general OpenTelemetry +// semantic conventions based on these APIs. +const ( + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. It represents the AWS request ID as returned in + // the response headers `x-amz-request-id` or `x-amz-requestid`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
+ // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") +) + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. 
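+//
+// For example, a caller might attach the returned KeyValue to the span that
+// records the DynamoDB call (a sketch; `span` is assumed to be an active
+// trace.Span from go.opentelemetry.io/otel/trace):
+//
+//	span.SetAttributes(semconv.AWSDynamoDBProvisionedReadCapacity(5.0))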
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// DynamoDB.CreateTable +const ( + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") +) + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. 
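+//
+// As an illustrative sketch only (not prescribed by the generated
+// convention), a caller would typically JSON-marshal each index description
+// before passing it here; `idx` and `span` are assumed caller-side values:
+//
+//	b, err := json.Marshal(idx) // one LocalSecondaryIndex description
+//	if err == nil {
+//		span.SetAttributes(semconv.AWSDynamoDBLocalSecondaryIndexes(string(b)))
+//	}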
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+ // the value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the
+ // number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the
+// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// DynamoDB.Query
+const (
+ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+ // "aws.dynamodb.scan_forward" semantic conventions. It represents the
+ // value of the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+ return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// DynamoDB.Scan
+const (
+ // AWSDynamoDBCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.count" semantic conventions. It represents the value of
+ // the `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.scanned_count" semantic conventions. It represents the
+ // value of the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+
+ // AWSDynamoDBSegmentKey is the attribute Key conforming to the
+ // "aws.dynamodb.segment" semantic conventions. It represents the value of
+ // the `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+ // "aws.dynamodb.total_segments" semantic conventions. It represents the
+ // value of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+)
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
+// DynamoDB.UpdateTable
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+ // conventions. It represents the JSON-serialized value of each item in
+ // the `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
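+//
+// The variadic parameter takes one JSON document per attribute definition,
+// mirroring the example above; a sketch of a call site:
+//
+//	semconv.AWSDynamoDBAttributeDefinitions(
+//		`{"AttributeName":"id","AttributeType":"S"}`,
+//	)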
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Attributes that exist for S3 request types.
+const (
+ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+ // semantic conventions. It represents the S3 bucket name the request
+ // refers to. Corresponds to the `--bucket` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'some-bucket-name'
+ // Note: The `bucket` attribute is applicable to all S3 operations that
+ // reference a bucket, i.e. that require the bucket name as a mandatory
+ // parameter.
+ // This applies to almost all S3 operations except `list-buckets`.
+ AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+ // AWSS3CopySourceKey is the attribute Key conforming to the
+ // "aws.s3.copy_source" semantic conventions. It represents the source
+ // object (in the form `bucket`/`key`) for the copy operation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'someFile.yml'
+ // Note: The `copy_source` attribute applies to S3 copy operations and
+ // corresponds to the `--copy-source` parameter
+ // of the [copy-object operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+ // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+ // semantic conventions. It represents the delete request container that
+ // specifies the objects to be deleted.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
+ // Note: The `delete` attribute is only applicable to the
+ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+ // operation.
+ // The `delete` attribute corresponds to the `--delete` parameter of the
+ // [delete-objects operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
+ AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+ // conventions. It represents the S3 object key the request refers to.
+ // Corresponds to the `--key` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `key` attribute is applicable to all object-related S3 + // operations, i.e. that require the object key as a mandatory parameter. + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. +func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. +const ( + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + GraphqlDocumentKey = attribute.Key("graphql.document") + + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of + // the operation being executed. 
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+)
+
+var (
+ // GraphQL query
+ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+ // GraphQL mutation
+ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+ return GraphqlDocumentKey.String(val)
+}
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+ return GraphqlOperationNameKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
new file mode 100644
index 00000000000..2de1fc3c6be
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.26.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
new file mode 100644
index 00000000000..d8dc822b263
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
@@ -0,0 +1,8996 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The Android platform on which the Android application is running.
+const (
+ // AndroidOSAPILevelKey is the attribute Key conforming to the
+ // "android.os.api_level" semantic conventions. It uniquely identifies
+ // the framework API revision offered by a version
+ // (`os.version`) of the Android operating system. More information can be
+ // found
+ // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '33', '32'
+ AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
+)
+
+// AndroidOSAPILevel returns an attribute KeyValue conforming to the
+// "android.os.api_level" semantic conventions. It uniquely identifies
+// the framework API revision offered by a version (`os.version`) of
+// the Android operating system. More information can be found
+// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
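+//
+// As a sketch of typical use, this attribute would be set on the SDK
+// resource rather than on individual spans (assuming the
+// go.opentelemetry.io/otel/sdk/resource package):
+//
+//	res, err := resource.New(ctx,
+//		resource.WithAttributes(semconv.AndroidOSAPILevel("33")),
+//	)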
+func AndroidOSAPILevel(val string) attribute.KeyValue {
+ return AndroidOSAPILevelKey.String(val)
+}
+
+// ASP.NET Core attributes
+const (
+ // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the
+ // "aspnetcore.rate_limiting.result" semantic conventions. It represents
+ // the rate-limiting result, showing whether the lease was acquired or
+ // why it was rejected
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'acquired', 'request_canceled'
+ AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result")
+
+ // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to
+ // the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+ // represents the full type name of the
+ // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+ // implementation that handled the exception.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (if and only if the exception
+ // was handled by this handler.)
+ // Stability: stable
+ // Examples: 'Contoso.MyHandler'
+ AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type")
+
+ // AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming
+ // to the "aspnetcore.diagnostics.exception.result" semantic conventions.
+ // It represents the ASP.NET Core exception middleware handling result
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'handled', 'unhandled'
+ AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result")
+
+ // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the
+ // "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+ // the rate limiting policy name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'fixed', 'sliding', 'token'
+ AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy")
+
+ // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the
+ // "aspnetcore.request.is_unhandled" semantic conventions. It represents
+ // the flag indicating if the request was handled by the application pipeline.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: True
+ AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled")
+
+ // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the
+ // "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+ // value that indicates whether the matched route is a fallback route.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: True
+ AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback")
+
+ // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the
+ // "aspnetcore.routing.match_status" semantic conventions.
It represents
+ // the match result - success or failure
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'success', 'failure'
+ AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status")
+)
+
+var (
+ // Lease was acquired
+ AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired")
+ // Lease request was rejected by the endpoint limiter
+ AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter")
+ // Lease request was rejected by the global limiter
+ AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter")
+ // Lease request was canceled
+ AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled")
+)
+
+var (
+ // Exception was handled by the exception handling middleware
+ AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled")
+ // Exception was not handled by the exception handling middleware
+ AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled")
+ // Exception handling was skipped because the response had started
+ AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped")
+ // Exception handling didn't run because the request was aborted
+ AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted")
+)
+
+var (
+ // Match succeeded
+ AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success")
+ // Match failed
+ AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure")
+)
+
+// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming
+// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+// represents the full type name of the
+// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+// implementation that handled the exception.
+func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue {
+ return AspnetcoreDiagnosticsHandlerTypeKey.String(val)
+}
+
+// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to
+// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+// the rate limiting policy name.
+func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue {
+ return AspnetcoreRateLimitingPolicyKey.String(val)
+}
+
+// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to
+// the "aspnetcore.request.is_unhandled" semantic conventions. It represents
+// the flag indicating if the request was handled by the application pipeline.
+func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue {
+ return AspnetcoreRequestIsUnhandledKey.Bool(val)
+}
+
+// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to
+// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+// value that indicates whether the matched route is a fallback route.
+func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue {
+ return AspnetcoreRoutingIsFallbackKey.Bool(val)
+}
+
+// Generic attributes for AWS services.
+const (
+ // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+ // semantic conventions.
It represents the AWS request ID as returned in + // the response headers `x-amz-request-id` or `x-amz-requestid`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes for AWS DynamoDB. +const ( + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") + + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") + + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the + // number of items in the `TableNames` response parameter. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") +) + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. 
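+//
+// For illustration, an UpdateTable span might carry this attribute together
+// with the table name (a sketch; `span` is assumed to be the active
+// trace.Span):
+//
+//	span.SetAttributes(
+//		semconv.AWSDynamoDBTableNames("Users"),
+//		semconv.AWSDynamoDBGlobalSecondaryIndexUpdates(`{"Create":{"IndexName":"idx"}}`),
+//	)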
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. 
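+//
+// A sketch of recording the value taken from a Scan response, where
+// `scannedCount` stands for the `ScannedCount` value returned by the SDK:
+//
+//	span.SetAttributes(semconv.AWSDynamoDBScannedCount(scannedCount))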
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the number of +// items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// Attributes for AWS Elastic Container Service (ECS). +const ( + // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" + // semantic conventions. It represents the ID of a running ECS task. The ID + // MUST be extracted from `task.arn`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is + // populated.) + // Stability: experimental + // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', + // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a + // running [ECS + // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', + // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the family + // name of the [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) + // used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for the task definition used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSTaskID returns an attribute KeyValue conforming to the +// "aws.ecs.task.id" semantic conventions. It represents the ID of a running +// ECS task. The ID MUST be extracted from `task.arn`. +func AWSECSTaskID(val string) attribute.KeyValue { + return AWSECSTaskIDKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running +// [ECS +// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the family name of +// the [ECS task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) +// used to create the ECS task. 
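+//
+// ECS attributes describe the running workload, so they are usually attached
+// to the resource rather than to spans; a sketch using the OpenTelemetry SDK
+// resource package:
+//
+//	res := resource.NewWithAttributes(semconv.SchemaURL,
+//		semconv.AWSECSTaskFamily("opentelemetry-family"),
+//	)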
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+	return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// the task definition used to create the ECS task.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+	return AWSECSTaskRevisionKey.String(val)
+}
+
+// Attributes for AWS Elastic Kubernetes Service (EKS).
+const (
+	// AWSEKSClusterARNKey is the attribute Key conforming to the
+	// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
+	// EKS cluster.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+	AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+	return AWSEKSClusterARNKey.String(val)
+}
+
+// Attributes for AWS Logs.
+const (
+	// AWSLogGroupARNsKey is the attribute Key conforming to the
+	// "aws.log.group.arns" semantic conventions. It represents the Amazon
+	// Resource Name(s) (ARN) of the AWS log group(s).
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+	// Note: See the [log group ARN format
+	// documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+	AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+	// AWSLogGroupNamesKey is the attribute Key conforming to the
+	// "aws.log.group.names" semantic conventions. It represents the name(s) of
+	// the AWS log group(s) an application is writing to.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+	// Note: Multiple log groups must be supported for cases like
+	// multi-container applications, where a single application has sidecar
+	// containers, and each writes to its own log group.
+	AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+	// AWSLogStreamARNsKey is the attribute Key conforming to the
+	// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+	// the AWS log stream(s).
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+	// Note: See the [log stream ARN format
+	// documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+	// One log group can contain several log streams, so these ARNs necessarily
+	// identify both a log group and a log stream.
+	AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+
+	// AWSLogStreamNamesKey is the attribute Key conforming to the
+	// "aws.log.stream.names" semantic conventions. It represents the name(s)
+	// of the AWS log stream(s) an application is writing to.
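+	//
+	// A hedged usage sketch; `span` is assumed to be an OpenTelemetry
+	// trace.Span obtained elsewhere, not something this package provides:
+	//
+	//	span.SetAttributes(
+	//		AWSLogStreamNames("logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"),
+	//	)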
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+	AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+)
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+	return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+	return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+	return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+	return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// Attributes for AWS Lambda.
+const (
+	// AWSLambdaInvokedARNKey is the attribute Key conforming to the
+	// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+	// invoked ARN as provided on the `Context` passed to the function
+	// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+	// `/runtime/invocation/next` endpoint, where applicable).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+	// Note: This may be different from `cloud.resource_id` if an alias is
+	// involved.
+	AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` endpoint, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+	return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for AWS S3.
+const (
+	// AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+	// semantic conventions. It represents the S3 bucket name the request
+	// refers to. Corresponds to the `--bucket` parameter of the [S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+	// operations.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'some-bucket-name'
+	// Note: The `bucket` attribute is applicable to all S3 operations that
+	// reference a bucket, i.e. that require the bucket name as a mandatory
+	// parameter.
+	// This applies to almost all S3 operations except `list-buckets`.
+	AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+	// AWSS3CopySourceKey is the attribute Key conforming to the
+	// "aws.s3.copy_source" semantic conventions. It represents the source
+	// object (in the form `bucket`/`key`) for the copy operation.
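+	//
+	// A minimal sketch of recording this attribute on an S3 copy span; the
+	// values are the example values documented here and `span` is an assumed
+	// trace.Span, not part of this package:
+	//
+	//	span.SetAttributes(
+	//		AWSS3Bucket("some-bucket-name"),
+	//		AWSS3CopySource("someFile.yml"),
+	//	)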
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' + // Note: The `delete` attribute is only applicable to the + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `key` attribute is applicable to all object-related S3 + // operations, i.e. that require the object key as a mandatory parameter. 
+ // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. +func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// The web browser attributes +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. 
It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). 
This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+	// ClientAddressKey is the attribute Key conforming to the "client.address"
+	// semantic conventions. It represents the client address - domain name if
+	// available without reverse DNS lookup; otherwise, IP address or Unix
+	// domain socket name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock'
+	// Note: When observed from the server side, and when communicating through
+	// an intermediary, `client.address` SHOULD represent the client address
+	// behind any intermediaries, for example proxies, if it's available.
+	ClientAddressKey = attribute.Key("client.address")
+
+	// ClientPortKey is the attribute Key conforming to the "client.port"
+	// semantic conventions. It represents the client port number.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	// Note: When observed from the server side, and when communicating through
+	// an intermediary, `client.port` SHOULD represent the client port behind
+	// any intermediaries, for example proxies, if it's available.
+	ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the
+// "client.address" semantic conventions. It represents the client address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func ClientAddress(val string) attribute.KeyValue {
+	return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+	return ClientPortKey.Int(val)
+}
+
+// A cloud environment (e.g. GCP, Azure, AWS).
+const (
+	// CloudAccountIDKey is the attribute Key conforming to the
+	// "cloud.account.id" semantic conventions. It represents the cloud account
+	// ID the resource is assigned to.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '111111111111', 'opentelemetry'
+	CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+	// CloudAvailabilityZoneKey is the attribute Key conforming to the
+	// "cloud.availability_zone" semantic conventions. It represents the
+	// availability zone where the resource is running. Cloud regions often
+	// have multiple, isolated locations known as zones to increase
+	// availability.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'us-east-1c'
+	// Note: Availability zones are called "zones" on Alibaba Cloud and Google
+	// Cloud.
+	CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+	// CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+	// semantic conventions. It represents the cloud platform in use.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: The prefix of the service SHOULD match the one specified in
+	// `cloud.provider`.
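+	//
+	// A hedged sketch of keeping `cloud.platform` consistent with
+	// `cloud.provider` (the enum values are defined later in this file):
+	//
+	//	attrs := []attribute.KeyValue{
+	//		CloudProviderAWS,
+	//		CloudPlatformAWSEC2, // prefix matches the provider above
+	//	}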
+ CloudPlatformKey = attribute.Key("cloud.platform") + + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud + // regions](https://www.tencentcloud.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + + // CloudResourceIDKey is the attribute Key conforming to the + // "cloud.resource_id" semantic conventions. It represents the cloud + // provider-specific native identifier of the monitored cloud resource + // (e.g. an + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // on AWS, a [fully qualified resource + // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) + // on Azure, a [full resource + // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) + // on GCP) + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', + // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', + // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, + // so it may be necessary to set `cloud.resource_id` as a span attribute + // instead. + // + // The exact value to use for `cloud.resource_id` depends on the cloud + // provider. + // The following well-known definitions MUST be used if you set this + // attribute and they apply: + // + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias + // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + // with the resolved function version, as the same runtime instance may + // be invokable with + // multiple different aliases. + // * **GCP:** The [URI of the + // resource](https://cloud.google.com/iam/docs/full-resource-names) + // * **Azure:** The [Fully Qualified Resource + // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) + // of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider. 
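+	//
+	// A minimal sketch for the AWS Lambda case described above; the ARN is
+	// the example value from this documentation and `span` is an assumed
+	// trace.Span:
+	//
+	//	span.SetAttributes(
+	//		CloudResourceID("arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function"),
+	//	)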
+ CloudResourceIDKey = attribute.Key("cloud.resource_id") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Apps + CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Bare Metal Solution (BMS) + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + CloudProviderHeroku = CloudProviderKey.String("heroku") + 
// IBM Cloud
+	CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+	// Tencent Cloud
+	CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+	return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the
+// availability zone where the resource is running. Cloud regions often have
+// multiple, isolated locations known as zones to increase availability.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+	return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+	return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on
+// Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+	return CloudResourceIDKey.String(val)
+}
+
+// Attributes for CloudEvents.
+const (
+	// CloudeventsEventIDKey is the attribute Key conforming to the
+	// "cloudevents.event_id" semantic conventions. It represents the
+	// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+	// which uniquely identifies the event.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+	CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+	// CloudeventsEventSourceKey is the attribute Key conforming to the
+	// "cloudevents.event_source" semantic conventions. It represents the
+	// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+	// which identifies the context in which an event happened.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'https://github.com/cloudevents',
+	// '/cloudevents/spec/pull/123', 'my-service'
+	CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+	// CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+	// "cloudevents.event_spec_version" semantic conventions. It represents the
+	// [version of the CloudEvents
+	// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+	// which the event uses.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1.0'
+	CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+	// CloudeventsEventSubjectKey is the attribute Key conforming to the
+	// "cloudevents.event_subject" semantic conventions. It represents the
+	// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+	// of the event in the context of the event producer (identified by
+	// source).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'mynewfile.jpg'
+	CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+
+	// CloudeventsEventTypeKey is the attribute Key conforming to the
+	// "cloudevents.event_type" semantic conventions. It represents the
+	// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+	// which contains a value describing the type of event related to the
+	// originating occurrence.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'com.github.pull_request.opened',
+	// 'com.example.object.deleted.v2'
+	CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+// which uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+	return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+// which identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+	return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+	return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+	return CloudeventsEventSubjectKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+// which contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+	return CloudeventsEventTypeKey.String(val)
+}
+
+// These attributes allow reporting this unit of code and therefore provide
+// more context about the span.
+const (
+	// CodeColumnKey is the attribute Key conforming to the "code.column"
+	// semantic conventions. It represents the column number in `code.filepath`
+	// best representing the operation. It SHOULD point within the code unit
+	// named in `code.function`.
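+	//
+	// A hedged sketch combining the code location attributes (the
+	// constructor functions are defined later in this file, the values are
+	// the documented example values, and `span` is an assumed trace.Span):
+	//
+	//	span.SetAttributes(
+	//		CodeFunction("serveRequest"),
+	//		CodeLineNumber(42),
+	//		CodeColumn(16),
+	//	)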
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeStacktraceKey is the attribute Key conforming to the + // "code.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'at + // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). 
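+//
+// A minimal, illustrative call (the value is the documented example;
+// typically set alongside CodeNamespace and CodeFilepath):
+//
+//	kv := CodeFunction("serveRequest")
+//	_ = kv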
+func CodeFunction(val string) attribute.KeyValue {
+	return CodeFunctionKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+	return CodeLineNumberKey.Int(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+	return CodeNamespaceKey.String(val)
+}
+
+// CodeStacktrace returns an attribute KeyValue conforming to the
+// "code.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func CodeStacktrace(val string) attribute.KeyValue {
+	return CodeStacktraceKey.String(val)
+}
+
+// A container instance.
+const (
+	// ContainerCommandKey is the attribute Key conforming to the
+	// "container.command" semantic conventions. It represents the command used
+	// to run the container (i.e. the command name).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol'
+	// Note: If using embedded credentials or sensitive data, it is recommended
+	// to remove them to prevent potential leakage.
+	ContainerCommandKey = attribute.Key("container.command")
+
+	// ContainerCommandArgsKey is the attribute Key conforming to the
+	// "container.command_args" semantic conventions. It represents all the
+	// command arguments (including the command/executable itself) run by the
+	// container.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol, --config, config.yaml'
+	ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+	// ContainerCommandLineKey is the attribute Key conforming to the
+	// "container.command_line" semantic conventions. It represents the full
+	// command run by the container as a single string representing the full
+	// command.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol --config config.yaml'
+	ContainerCommandLineKey = attribute.Key("container.command_line")
+
+	// ContainerCPUStateKey is the attribute Key conforming to the
+	// "container.cpu.state" semantic conventions. It represents the CPU state
+	// for this data point.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'user', 'kernel'
+	ContainerCPUStateKey = attribute.Key("container.cpu.state")
+
+	// ContainerIDKey is the attribute Key conforming to the "container.id"
+	// semantic conventions. It represents the container ID. Usually a UUID, as
+	// for example used to [identify Docker
+	// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+	// The UUID might be abbreviated.
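+	//
+	// A hedged usage sketch for container resource attributes (the ID below
+	// is the documented, abbreviated example value):
+	//
+	//	attrs := []attribute.KeyValue{ContainerID("a3bf90e006b2")}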
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'a3bf90e006b2'
+	ContainerIDKey = attribute.Key("container.id")
+
+	// ContainerImageIDKey is the attribute Key conforming to the
+	// "container.image.id" semantic conventions. It represents the runtime
+	// specific image identifier. Usually a hash algorithm followed by a UUID.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
+	// Note: Docker defines a sha256 of the image id; `container.image.id`
+	// corresponds to the `Image` field from the Docker container inspect
+	// [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
+	// endpoint.
+	// K8S defines a link to the container registry repository with digest
+	// `"imageID": "registry.azurecr.io
+	// /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+	// The ID is assigned by the container runtime and can vary in different
+	// environments. Consider using `oci.manifest.digest` if it is important to
+	// identify the same image in different environments/runtimes.
+	ContainerImageIDKey = attribute.Key("container.image.id")
+
+	// ContainerImageNameKey is the attribute Key conforming to the
+	// "container.image.name" semantic conventions. It represents the name of
+	// the image the container was built on.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'gcr.io/opentelemetry/operator'
+	ContainerImageNameKey = attribute.Key("container.image.name")
+
+	// ContainerImageRepoDigestsKey is the attribute Key conforming to the
+	// "container.image.repo_digests" semantic conventions. It represents the
+	// repo digests of the container image as provided by the container
+	// runtime.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
+	// 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
+	// Note:
+	// [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
+	// and
+	// [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
+	// report those under the `RepoDigests` field.
+	ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+	// ContainerImageTagsKey is the attribute Key conforming to the
+	// "container.image.tags" semantic conventions. It represents the container
+	// image tags. An example can be found in [Docker Image
+	// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+	// Should be only the `<tag>` section of the full name, for example from
+	// `registry.example.com/my-org/my-image:<tag>`.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'v1.27.1', '3.5.7-0'
+	ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+	// ContainerNameKey is the attribute Key conforming to the "container.name"
+	// semantic conventions. It represents the container name used by container
+	// runtime.
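+	//
+	// A minimal sketch pairing the name with the runtime attribute defined
+	// in this group (both values are the documented examples):
+	//
+	//	attrs := []attribute.KeyValue{
+	//		ContainerName("opentelemetry-autoconf"),
+	//		ContainerRuntime("docker"),
+	//	}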
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-autoconf'
+	ContainerNameKey = attribute.Key("container.name")
+
+	// ContainerRuntimeKey is the attribute Key conforming to the
+	// "container.runtime" semantic conventions. It represents the container
+	// runtime managing this container.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'docker', 'containerd', 'rkt'
+	ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+var (
+	// When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows)
+	ContainerCPUStateUser = ContainerCPUStateKey.String("user")
+	// When CPU is used by the system (host OS)
+	ContainerCPUStateSystem = ContainerCPUStateKey.String("system")
+	// When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows)
+	ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+	return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) run by the
+// container.
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+	return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full
+// command run by the container as a single string representing the full
+// command.
+func ContainerCommandLine(val string) attribute.KeyValue {
+	return ContainerCommandLineKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+	return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime
+// specific image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+	return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+	return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+	return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container
+// image tags. An example can be found in [Docker Image
+// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+// Should be only the `<tag>` section of the full name, for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+func ContainerImageTags(val ...string) attribute.KeyValue {
+	return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+	return ContainerNameKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+	return ContainerRuntimeKey.String(val)
+}
+
+// This group defines the attributes used to describe telemetry in the context
+// of databases.
+const (
+	// DBClientConnectionsPoolNameKey is the attribute Key conforming to the
+	// "db.client.connections.pool.name" semantic conventions. It represents
+	// the name of the connection pool; unique within the instrumented
+	// application. In case the connection pool implementation doesn't provide
+	// a name, instrumentation should use a combination of `server.address` and
+	// `server.port` attributes formatted as `server.address:server.port`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myDataSource'
+	DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name")
+
+	// DBClientConnectionsStateKey is the attribute Key conforming to the
+	// "db.client.connections.state" semantic conventions. It represents the
+	// state of a connection in the pool
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'idle'
+	DBClientConnectionsStateKey = attribute.Key("db.client.connections.state")
+
+	// DBCollectionNameKey is the attribute Key conforming to the
+	// "db.collection.name" semantic conventions. It represents the name of a
+	// collection (table, container) within the database.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'public.users', 'customers'
+	// Note: If the collection name is parsed from the query, it SHOULD match
+	// the value provided in the query and may be qualified with the schema and
+	// database name.
+	// It is RECOMMENDED to capture the value as provided by the application
+	// without attempting to do any case normalization.
+	DBCollectionNameKey = attribute.Key("db.collection.name")
+
+	// DBNamespaceKey is the attribute Key conforming to the "db.namespace"
+	// semantic conventions. It represents the name of the database, fully
+	// qualified within the server address and port.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'customers', 'test.users'
+	// Note: If a database system has multiple namespace components, they
+	// SHOULD be concatenated (potentially using database system specific
+	// conventions) from most general to most specific namespace component, and
+	// more specific namespaces SHOULD NOT be captured without the more general
+	// namespaces, to ensure that "startswith" queries for the more general
+	// namespaces will be valid.
+ // Semantic conventions for individual database systems SHOULD document + // what `db.namespace` means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + DBNamespaceKey = attribute.Key("db.namespace") + + // DBOperationNameKey is the attribute Key conforming to the + // "db.operation.name" semantic conventions. It represents the name of the + // operation or command being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: It is RECOMMENDED to capture the value as provided by the + // application without attempting to do any case normalization. + DBOperationNameKey = attribute.Key("db.operation.name") + + // DBQueryTextKey is the attribute Key conforming to the "db.query.text" + // semantic conventions. It represents the database query being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey + // "WuValue"' + DBQueryTextKey = attribute.Key("db.query.text") + + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents the database management system (DBMS) product + // as identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The actual DBMS may differ from the one identified by the client. + // For example, when using PostgreSQL client libraries to connect to a + // CockroachDB, the `db.system` is set to `postgresql` based on the + // instrumentation's best knowledge. + DBSystemKey = attribute.Key("db.system") +) + +var ( + // idle + DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle") + // used + DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used") +) + +var ( + // Some other SQL database. Fallback only. 
See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") + // Cloud Spanner + DBSystemSpanner = DBSystemKey.String("spanner") + // Trino + DBSystemTrino = DBSystemKey.String("trino") +) + +// DBClientConnectionsPoolName returns 
an attribute KeyValue conforming to +// the "db.client.connections.pool.name" semantic conventions. It represents +// the name of the connection pool; unique within the instrumented application. +// In case the connection pool implementation doesn't provide a name, +// instrumentation should use a combination of `server.address` and +// `server.port` attributes formatted as `server.address:server.port`. +func DBClientConnectionsPoolName(val string) attribute.KeyValue { + return DBClientConnectionsPoolNameKey.String(val) +} + +// DBCollectionName returns an attribute KeyValue conforming to the +// "db.collection.name" semantic conventions. It represents the name of a +// collection (table, container) within the database. +func DBCollectionName(val string) attribute.KeyValue { + return DBCollectionNameKey.String(val) +} + +// DBNamespace returns an attribute KeyValue conforming to the +// "db.namespace" semantic conventions. It represents the name of the database, +// fully qualified within the server address and port. +func DBNamespace(val string) attribute.KeyValue { + return DBNamespaceKey.String(val) +} + +// DBOperationName returns an attribute KeyValue conforming to the +// "db.operation.name" semantic conventions. It represents the name of the +// operation or command being executed. +func DBOperationName(val string) attribute.KeyValue { + return DBOperationNameKey.String(val) +} + +// DBQueryText returns an attribute KeyValue conforming to the +// "db.query.text" semantic conventions. It represents the database query being +// executed. +func DBQueryText(val string) attribute.KeyValue { + return DBQueryTextKey.String(val) +} + +// This group defines attributes for Cassandra. +const ( + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + + // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") + + // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + + // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents + // whether or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + + // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e.
how many rows will be returned at once. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + + // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. +func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} + +// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. +func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} + +// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively.
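+// +// A minimal usage sketch, assuming a trace.Span named span is in scope and +// this package is imported as semconv: +// +//	span.SetAttributes( +//		semconv.DBSystemCassandra, +//		semconv.DBCassandraSpeculativeExecutionCount(2), +//	)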
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// This group defines attributes for Azure Cosmos DB. +const ( + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. It represents the unique + // Cosmos client instance id. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + + // DBCosmosDBConnectionModeKey is the attribute Key conforming to the + // "db.cosmosdb.connection_mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") + + // DBCosmosDBOperationTypeKey is the attribute Key conforming to the + // "db.cosmosdb.operation_type" semantic conventions. It represents the + // cosmosDB Operation Type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") + + // DBCosmosDBRequestChargeKey is the attribute Key conforming to the + // "db.cosmosdb.request_charge" semantic conventions. It represents the rU + // consumed for that operation + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 46.18, 1.0 + DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") + + // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the + // "db.cosmosdb.request_content_length" semantic conventions. It represents + // the request payload size in bytes + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") + + // DBCosmosDBStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos + // DB status code. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 200, 201 + DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") + + // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.sub_status_code" semantic conventions. It represents the + // cosmos DB sub status code. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1000, 1002 + DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") +) + +var ( + // Gateway (HTTP) connections mode + DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") + // Direct connection + DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") +) + +var ( + // invalid + DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") + // create + DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") + // patch + DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") + // read + DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") + // read_feed + DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") + // delete + DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") + // replace + DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") + // execute + DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") + // query + DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") + // head + DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") + // head_feed + DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") + // upsert + DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") + // batch + DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") + // query_plan + DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") + // execute_javascript + DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") +) + +// DBCosmosDBClientID returns an attribute KeyValue conforming to the +// "db.cosmosdb.client_id" semantic conventions. It represents the unique +// Cosmos client instance id. +func DBCosmosDBClientID(val string) attribute.KeyValue { + return DBCosmosDBClientIDKey.String(val) +} + +// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the +// "db.cosmosdb.request_charge" semantic conventions. It represents the rU +// consumed for that operation +func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { + return DBCosmosDBRequestChargeKey.Float64(val) +} + +// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming +// to the "db.cosmosdb.request_content_length" semantic conventions. It +// represents the request payload size in bytes +func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { + return DBCosmosDBRequestContentLengthKey.Int(val) +} + +// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB +// status code. +func DBCosmosDBStatusCode(val int) attribute.KeyValue { + return DBCosmosDBStatusCodeKey.Int(val) +} + +// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos +// DB sub status code. +func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { + return DBCosmosDBSubStatusCodeKey.Int(val) +} + +// This group defines attributes for Elasticsearch. +const ( + // DBElasticsearchClusterNameKey is the attribute Key conforming to the + // "db.elasticsearch.cluster.name" semantic conventions. 
It represents + // the identifier of an Elasticsearch cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' + DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name") + + // DBElasticsearchNodeNameKey is the attribute Key conforming to the + // "db.elasticsearch.node.name" semantic conventions. It represents the + // human-readable identifier of the node/instance to which a + // request was routed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-0000000001' + DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name") +) + +// DBElasticsearchClusterName returns an attribute KeyValue conforming to +// the "db.elasticsearch.cluster.name" semantic conventions. It represents the +// identifier of an Elasticsearch cluster. +func DBElasticsearchClusterName(val string) attribute.KeyValue { + return DBElasticsearchClusterNameKey.String(val) +} + +// DBElasticsearchNodeName returns an attribute KeyValue conforming to the +// "db.elasticsearch.node.name" semantic conventions. It represents the +// human-readable identifier of the node/instance to which a +// request was routed. +func DBElasticsearchNodeName(val string) attribute.KeyValue { + return DBElasticsearchNodeNameKey.String(val) +} + +// Attributes for software deployments. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'staging', 'production' + // Note: `deployment.environment` does not affect the uniqueness + // constraints defined through + // the `service.namespace`, `service.name` and `service.instance.id` + // resource attributes. + // This implies that resources carrying the following attribute + // combinations MUST be + // considered to be identifying the same service: + // + // * `service.name=frontend`, `deployment.environment=production` + // * `service.name=frontend`, `deployment.environment=staging`. + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment environment](https://wikipedia.org/wiki/Deployment_environment) +// (aka deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// Attributes that represent an occurrence of a lifecycle transition on the +// Android platform. +const ( + // AndroidStateKey is the attribute Key conforming to the "android.state" + // semantic conventions. It is deprecated; use the + // `device.app.lifecycle` event definition, including `android.state` as a + // payload field, instead. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The Android lifecycle states are defined in [Activity lifecycle + // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc), + // and from which the `OS identifiers` are derived.
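+ // + // A minimal usage sketch, assuming a trace.Span named span is in scope and + // this package is imported as semconv; one of the enum values declared + // below would be attached as: + // + //	span.SetAttributes(semconv.AndroidStateForeground)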
+ AndroidStateKey = attribute.Key("android.state") +) + +var ( + // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time + AndroidStateCreated = AndroidStateKey.String("created") + // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state + AndroidStateBackground = AndroidStateKey.String("background") + // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states + AndroidStateForeground = AndroidStateKey.String("foreground") +) + +// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // DestinationAddressKey is the attribute Key conforming to the + // "destination.address" semantic conventions. It represents the + // destination address - domain name if available without reverse DNS + // lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the source side, and when communicating through + // an intermediary, `destination.address` SHOULD represent the destination + // address behind any intermediaries, for example proxies, if it's + // available. + DestinationAddressKey = attribute.Key("destination.address") + + // DestinationPortKey is the attribute Key conforming to the + // "destination.port" semantic conventions. It represents the destination + // port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + DestinationPortKey = attribute.Key("destination.port") +) + +// DestinationAddress returns an attribute KeyValue conforming to the +// "destination.address" semantic conventions. It represents the destination +// address - domain name if available without reverse DNS lookup; otherwise, IP +// address or Unix domain socket name. +func DestinationAddress(val string) attribute.KeyValue { + return DestinationAddressKey.String(val) +} + +// DestinationPort returns an attribute KeyValue conforming to the +// "destination.port" semantic conventions. It represents the destination port +// number +func DestinationPort(val int) attribute.KeyValue { + return DestinationPortKey.Int(val) +} + +// Describes device attributes. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. 
On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine-readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human-readable version of + // the device model rather than a machine-readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// These attributes may be used for any disk related operation. 
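+// +// A minimal recording sketch, assuming an Int64Counter named diskIOCounter +// built from go.opentelemetry.io/otel/metric, a context.Context named ctx, and +// this package imported as semconv; the enum values declared below would be +// attached as: +// +//	diskIOCounter.Add(ctx, 1, +//		metric.WithAttributes(semconv.DiskIoDirectionRead))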
+const ( + // DiskIoDirectionKey is the attribute Key conforming to the + // "disk.io.direction" semantic conventions. It represents the disk IO + // operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read' + DiskIoDirectionKey = attribute.Key("disk.io.direction") +) + +var ( + // read + DiskIoDirectionRead = DiskIoDirectionKey.String("read") + // write + DiskIoDirectionWrite = DiskIoDirectionKey.String("write") +) + +// The shared attributes used to report a DNS query. +const ( + // DNSQuestionNameKey is the attribute Key conforming to the + // "dns.question.name" semantic conventions. It represents the name being + // queried. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'www.example.com', 'opentelemetry.io' + // Note: If the name field contains non-printable characters (below 32 or + // above 126), those characters should be represented as escaped base 10 + // integers (\DDD). Back slashes and quotes should be escaped. Tabs, + // carriage returns, and line feeds should be converted to \t, \r, and \n + // respectively. + DNSQuestionNameKey = attribute.Key("dns.question.name") +) + +// DNSQuestionName returns an attribute KeyValue conforming to the +// "dns.question.name" semantic conventions. It represents the name being +// queried. +func DNSQuestionName(val string) attribute.KeyValue { + return DNSQuestionNameKey.String(val) +} + +// Attributes for operations with an authenticated and/or authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. 
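+// +// A minimal usage sketch, assuming a trace.Span named span is in scope and +// this package is imported as semconv: +// +//	span.SetAttributes(semconv.EnduserID("username"))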
+func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). +func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// The shared attributes used to report an error. +const ( + // ErrorTypeKey is the attribute Key conforming to the "error.type" + // semantic conventions. It describes a class of error the + // operation ended with. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'timeout', 'java.net.UnknownHostException', + // 'server_certificate_invalid', '500' + // Note: The `error.type` SHOULD be predictable, and SHOULD have low + // cardinality. + // + // When `error.type` is set to a type (e.g., an exception type), its + // canonical class name identifying the type within the artifact SHOULD be + // used. + // + // Instrumentations SHOULD document the list of errors they report. + // + // The cardinality of `error.type` within one instrumentation library + // SHOULD be low. + // Telemetry consumers that aggregate data from multiple instrumentation + // libraries and applications + // should be prepared for `error.type` to have high cardinality at query + // time when no + // additional filters are applied. + // + // If the operation has completed successfully, instrumentations SHOULD NOT + // set `error.type`. + // + // If a specific domain defines its own set of error identifiers (such as + // HTTP or gRPC status codes), + // it's RECOMMENDED to: + // + // * Use a domain-specific attribute + // * Set `error.type` to capture all errors, regardless of whether they are + // defined within the domain-specific set or not. + ErrorTypeKey = attribute.Key("error.type") +) + +var ( + // A fallback error value to be used when the instrumentation doesn't define a custom value + ErrorTypeOther = ErrorTypeKey.String("_OTHER") +) + +// Attributes for Events represented using Log Records. +const ( + // EventNameKey is the attribute Key conforming to the "event.name" + // semantic conventions. It identifies the class / type of + // event. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'browser.mouse.click', 'device.app.lifecycle' + // Note: Event names are subject to the same rules as [attribute + // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md). + // Notably, event names are namespaced to avoid collisions and provide a + // clean separation of semantics for events in separate domains like + // browser, mobile, and kubernetes.
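+ // + // A minimal usage sketch, assuming a trace.Span named span is in scope, + // trace is go.opentelemetry.io/otel/trace, and this package is imported as + // semconv: + // + //	span.AddEvent("browser.mouse.click", + //		trace.WithAttributes(semconv.EventName("browser.mouse.click")))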
+ EventNameKey = attribute.Key("event.name") +) + +// EventName returns an attribute KeyValue conforming to the "event.name" +// semantic conventions. It identifies the class / type of +// event. +func EventName(val string) attribute.KeyValue { + return EventNameKey.String(val) +} + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // ExceptionEscapedKey is the attribute Key conforming to the + // "exception.escaped" semantic conventions. It SHOULD be + // set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of + // a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's `__exit__` method in Python) but will + // usually be caught at the point of recording the exception in most + // languages. + // + // It is usually not possible to determine at the point where an exception + // is thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending + // the span, + // as done in the [example for recording span + // exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception). + // + // It follows that an exception may still escape the scope of the span + // even if the `exception.escaped` attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + ExceptionEscapedKey = attribute.Key("exception.escaped") + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the + // exception should be preferred over the static type in languages that + // support it.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") +) + +// ExceptionEscaped returns an attribute KeyValue conforming to the +// "exception.escaped" semantic conventions. It SHOULD be set to +// true if the exception event is recorded at a point where it is known that +// the exception is escaping the scope of the span. +func ExceptionEscaped(val bool) attribute.KeyValue { + return ExceptionEscapedKey.Bool(val) +} + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// FaaS attributes +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + FaaSColdstartKey = attribute.Key("faas.coldstart") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") + + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name + // of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in + // Cosmos DB to the database name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or + // S3 is the name of the file, and in Cosmos DB the table name.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It + // describes the type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string + // containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSInvocationIDKey is the attribute Key conforming to the + // "faas.invocation_id" semantic conventions. It represents the invocation + // ID of the current function invocation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSInvocationIDKey = attribute.Key("faas.invocation_id") + + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function converted to Bytes.
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must + // be multiplied by 1,048,576). + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") + + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function invocation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run (Services):** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable.
Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. 
It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSInvocationID returns an attribute KeyValue conforming to the +// "faas.invocation_id" semantic conventions. It represents the invocation ID +// of the current function invocation. +func FaaSInvocationID(val string) attribute.KeyValue { + return FaaSInvocationIDKey.String(val) +} + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// Attributes for Feature Flags. +const ( + // FeatureFlagKeyKey is the attribute Key conforming to the + // "feature_flag.key" semantic conventions. It represents the unique + // identifier of the feature flag. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'logo-color' + FeatureFlagKeyKey = attribute.Key("feature_flag.key") + + // FeatureFlagProviderNameKey is the attribute Key conforming to the + // "feature_flag.provider_name" semantic conventions. It represents the + // name of the service provider that performs the flag evaluation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Flag Manager' + FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") + + // FeatureFlagVariantKey is the attribute Key conforming to the + // "feature_flag.variant" semantic conventions. It SHOULD be + // a semantic identifier for a value. If one is unavailable, a stringified + // version of the value can be used.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'red', 'true', 'on' + // Note: A semantic identifier, commonly referred to as a variant, provides + // a means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant `red` may be used for the value `#c05543`. + // + // A stringified version of the value can be used in situations where a + // semantic identifier is unavailable. String representation of the value + // should be determined by the implementer. + FeatureFlagVariantKey = attribute.Key("feature_flag.variant") +) + +// FeatureFlagKey returns an attribute KeyValue conforming to the +// "feature_flag.key" semantic conventions. It represents the unique identifier +// of the feature flag. +func FeatureFlagKey(val string) attribute.KeyValue { + return FeatureFlagKeyKey.String(val) +} + +// FeatureFlagProviderName returns an attribute KeyValue conforming to the +// "feature_flag.provider_name" semantic conventions. It represents the name of +// the service provider that performs the flag evaluation. +func FeatureFlagProviderName(val string) attribute.KeyValue { + return FeatureFlagProviderNameKey.String(val) +} + +// FeatureFlagVariant returns an attribute KeyValue conforming to the +// "feature_flag.variant" semantic conventions. It SHOULD be a +// semantic identifier for a value. If one is unavailable, a stringified +// version of the value can be used. +func FeatureFlagVariant(val string) attribute.KeyValue { + return FeatureFlagVariantKey.String(val) +} + +// Describes file attributes. +const ( + // FileDirectoryKey is the attribute Key conforming to the "file.directory" + // semantic conventions. It represents the directory where the file is + // located. It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/home/user', 'C:\\Program Files\\MyApp' + FileDirectoryKey = attribute.Key("file.directory") + + // FileExtensionKey is the attribute Key conforming to the "file.extension" + // semantic conventions. It represents the file extension, excluding the + // leading dot. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: When the file name has multiple extensions (example.tar.gz), only + // the last one should be captured ("gz", not "tar.gz"). + FileExtensionKey = attribute.Key("file.extension") + + // FileNameKey is the attribute Key conforming to the "file.name" semantic + // conventions. It represents the name of the file including the extension, + // without the directory. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'example.png' + FileNameKey = attribute.Key("file.name") + + // FilePathKey is the attribute Key conforming to the "file.path" semantic + // conventions. It represents the full path to the file, including the file + // name. It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/home/alice/example.png', 'C:\\Program + // Files\\MyApp\\myapp.exe' + FilePathKey = attribute.Key("file.path") + + // FileSizeKey is the attribute Key conforming to the "file.size" semantic + // conventions. It represents the file size in bytes.
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + FileSizeKey = attribute.Key("file.size") +) + +// FileDirectory returns an attribute KeyValue conforming to the +// "file.directory" semantic conventions. It represents the directory where the +// file is located. It should include the drive letter, when appropriate. +func FileDirectory(val string) attribute.KeyValue { + return FileDirectoryKey.String(val) +} + +// FileExtension returns an attribute KeyValue conforming to the +// "file.extension" semantic conventions. It represents the file extension, +// excluding the leading dot. +func FileExtension(val string) attribute.KeyValue { + return FileExtensionKey.String(val) +} + +// FileName returns an attribute KeyValue conforming to the "file.name" +// semantic conventions. It represents the name of the file including the +// extension, without the directory. +func FileName(val string) attribute.KeyValue { + return FileNameKey.String(val) +} + +// FilePath returns an attribute KeyValue conforming to the "file.path" +// semantic conventions. It represents the full path to the file, including the +// file name. It should include the drive letter, when appropriate. +func FilePath(val string) attribute.KeyValue { + return FilePathKey.String(val) +} + +// FileSize returns an attribute KeyValue conforming to the "file.size" +// semantic conventions. It represents the file size in bytes. +func FileSize(val int) attribute.KeyValue { + return FileSizeKey.Int(val) +} + +// Attributes for Google Cloud Run. +const ( + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the + // name of the Cloud Run + // [execution](https://cloud.google.com/run/docs/managing/job-executions) + // being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the + // index for a task within an execution as provided by the + // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1 + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") +) + +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name +// of the Cloud Run +// [execution](https://cloud.google.com/run/docs/managing/job-executions) being +// run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) +} + +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index +// for a task within an execution as provided by the +// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) +} + +// Attributes for Google Compute Engine (GCE). +const ( + // GCPGceInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the + // hostname of a GCE instance. This is the full value of the default or + // [custom + // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-host1234.example.com', + // 'sample-vm.us-west1-b.c.my-project.internal' + GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") + + // GCPGceInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. It represents the instance + // name of a GCE instance. This is the value provided by `host.name`, the + // visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the [default internal + // DNS + // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") +) + +// GCPGceInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. This is the full value of the default or [custom +// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). +func GCPGceInstanceHostname(val string) attribute.KeyValue { + return GCPGceInstanceHostnameKey.String(val) +} + +// GCPGceInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance +// name of a GCE instance. This is the value provided by `host.name`, the +// visible name of the instance in the Cloud Console UI, and the prefix for the +// default hostname of the instance as defined by the [default internal DNS +// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). +func GCPGceInstanceName(val string) attribute.KeyValue { + return GCPGceInstanceNameKey.String(val) +} + +// The attributes used to describe telemetry in the context of LLM (Large +// Language Models) requests and responses. +const ( + // GenAiCompletionKey is the attribute Key conforming to the + // "gen_ai.completion" semantic conventions. It represents the full + // response received from the LLM. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: "[{'role': 'assistant', 'content': 'The capital of France is + // Paris.'}]" + // Note: It's RECOMMENDED to format completions as JSON string matching + // [OpenAI messages + // format](https://platform.openai.com/docs/guides/text-generation) + GenAiCompletionKey = attribute.Key("gen_ai.completion") + + // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt" + // semantic conventions. It represents the full prompt sent to an LLM. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: "[{'role': 'user', 'content': 'What is the capital of + // France?'}]" + // Note: It's RECOMMENDED to format prompts as JSON string matching [OpenAI + // messages + // format](https://platform.openai.com/docs/guides/text-generation) + GenAiPromptKey = attribute.Key("gen_ai.prompt") + + // GenAiRequestMaxTokensKey is the attribute Key conforming to the + // "gen_ai.request.max_tokens" semantic conventions. It represents the + // maximum number of tokens the LLM generates for a request. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") + + // GenAiRequestModelKey is the attribute Key conforming to the + // "gen_ai.request.model" semantic conventions. It represents the name of + // the LLM a request is being made to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gpt-4' + GenAiRequestModelKey = attribute.Key("gen_ai.request.model") + + // GenAiRequestTemperatureKey is the attribute Key conforming to the + // "gen_ai.request.temperature" semantic conventions. It represents the + // temperature setting for the LLM request. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0.0 + GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") + + // GenAiRequestTopPKey is the attribute Key conforming to the + // "gen_ai.request.top_p" semantic conventions. It represents the top_p + // sampling setting for the LLM request. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0 + GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p") + + // GenAiResponseFinishReasonsKey is the attribute Key conforming to the + // "gen_ai.response.finish_reasons" semantic conventions. It represents the + // array of reasons the model stopped generating tokens, corresponding to + // each generation received. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'stop' + GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") + + // GenAiResponseIDKey is the attribute Key conforming to the + // "gen_ai.response.id" semantic conventions. It represents the unique + // identifier for the completion. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'chatcmpl-123' + GenAiResponseIDKey = attribute.Key("gen_ai.response.id") + + // GenAiResponseModelKey is the attribute Key conforming to the + // "gen_ai.response.model" semantic conventions. It represents the name of + // the LLM a response was generated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gpt-4-0613' + GenAiResponseModelKey = attribute.Key("gen_ai.response.model") + + // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system" + // semantic conventions. It represents the Generative AI product as + // identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'openai' + // Note: The actual GenAI product may differ from the one identified by the + // client. 
For example, when using OpenAI client libraries to communicate + // with Mistral, the `gen_ai.system` is set to `openai` based on the + // instrumentation's best knowledge. + GenAiSystemKey = attribute.Key("gen_ai.system") + + // GenAiUsageCompletionTokensKey is the attribute Key conforming to the + // "gen_ai.usage.completion_tokens" semantic conventions. It represents the + // number of tokens used in the LLM response (completion). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 180 + GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens") + + // GenAiUsagePromptTokensKey is the attribute Key conforming to the + // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the + // number of tokens used in the LLM prompt. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens") +) + +var ( + // OpenAI + GenAiSystemOpenai = GenAiSystemKey.String("openai") +) + +// GenAiCompletion returns an attribute KeyValue conforming to the +// "gen_ai.completion" semantic conventions. It represents the full response +// received from the LLM. +func GenAiCompletion(val string) attribute.KeyValue { + return GenAiCompletionKey.String(val) +} + +// GenAiPrompt returns an attribute KeyValue conforming to the +// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to +// an LLM. +func GenAiPrompt(val string) attribute.KeyValue { + return GenAiPromptKey.String(val) +} + +// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the +// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum +// number of tokens the LLM generates for a request. +func GenAiRequestMaxTokens(val int) attribute.KeyValue { + return GenAiRequestMaxTokensKey.Int(val) +} + +// GenAiRequestModel returns an attribute KeyValue conforming to the +// "gen_ai.request.model" semantic conventions. It represents the name of the +// LLM a request is being made to. +func GenAiRequestModel(val string) attribute.KeyValue { + return GenAiRequestModelKey.String(val) +} + +// GenAiRequestTemperature returns an attribute KeyValue conforming to the +// "gen_ai.request.temperature" semantic conventions. It represents the +// temperature setting for the LLM request. +func GenAiRequestTemperature(val float64) attribute.KeyValue { + return GenAiRequestTemperatureKey.Float64(val) +} + +// GenAiRequestTopP returns an attribute KeyValue conforming to the +// "gen_ai.request.top_p" semantic conventions. It represents the top_p +// sampling setting for the LLM request. +func GenAiRequestTopP(val float64) attribute.KeyValue { + return GenAiRequestTopPKey.Float64(val) +} + +// GenAiResponseFinishReasons returns an attribute KeyValue conforming to +// the "gen_ai.response.finish_reasons" semantic conventions. It represents the +// array of reasons the model stopped generating tokens, corresponding to each +// generation received. +func GenAiResponseFinishReasons(val ...string) attribute.KeyValue { + return GenAiResponseFinishReasonsKey.StringSlice(val) +} + +// GenAiResponseID returns an attribute KeyValue conforming to the +// "gen_ai.response.id" semantic conventions. It represents the unique +// identifier for the completion. 
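+//
+// For illustration only (an editorial sketch, not part of the generated
+// conventions): assuming this package is imported as semconv and a live trace
+// span is at hand, related gen_ai attributes might be set together:
+//
+//	span.SetAttributes(
+//		semconv.GenAiSystemOpenai,
+//		semconv.GenAiRequestModel("gpt-4"),
+//		semconv.GenAiResponseID("chatcmpl-123"),
+//		semconv.GenAiUsagePromptTokens(100),
+//	)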
+func GenAiResponseID(val string) attribute.KeyValue {
+	return GenAiResponseIDKey.String(val)
+}
+
+// GenAiResponseModel returns an attribute KeyValue conforming to the
+// "gen_ai.response.model" semantic conventions. It represents the name of the
+// LLM a response was generated from.
+func GenAiResponseModel(val string) attribute.KeyValue {
+	return GenAiResponseModelKey.String(val)
+}
+
+// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to
+// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the
+// number of tokens used in the LLM response (completion).
+func GenAiUsageCompletionTokens(val int) attribute.KeyValue {
+	return GenAiUsageCompletionTokensKey.Int(val)
+}
+
+// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the
+// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number
+// of tokens used in the LLM prompt.
+func GenAiUsagePromptTokens(val int) attribute.KeyValue {
+	return GenAiUsagePromptTokensKey.Int(val)
+}
+
+// Attributes for GraphQL.
+const (
+	// GraphqlDocumentKey is the attribute Key conforming to the
+	// "graphql.document" semantic conventions. It represents the GraphQL
+	// document being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+	// Note: The value may be sanitized to exclude sensitive information.
+	GraphqlDocumentKey = attribute.Key("graphql.document")
+
+	// GraphqlOperationNameKey is the attribute Key conforming to the
+	// "graphql.operation.name" semantic conventions. It represents the name of
+	// the operation being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'findBookByID'
+	GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+	// GraphqlOperationTypeKey is the attribute Key conforming to the
+	// "graphql.operation.type" semantic conventions. It represents the type of
+	// the operation being executed.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'query', 'mutation', 'subscription'
+	GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+)
+
+var (
+	// GraphQL query
+	GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+	// GraphQL mutation
+	GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+	// GraphQL subscription
+	GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+	return GraphqlDocumentKey.String(val)
+}
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+	return GraphqlOperationNameKey.String(val)
+}
+
+// Attributes for the Heroku platform on which the application is running.
+const (
+	// HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+	// semantic conventions. It represents the unique identifier for the
+	// application.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
+	HerokuAppIDKey = attribute.Key("heroku.app.id")
+
+	// HerokuReleaseCommitKey is the attribute Key conforming to the
+	// "heroku.release.commit" semantic conventions. It represents the commit
+	// hash for the current release.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
+	HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+	// HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+	// "heroku.release.creation_timestamp" semantic conventions. It represents
+	// the time and date the release was created.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2022-10-23T18:00:42Z'
+	HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+)
+
+// HerokuAppID returns an attribute KeyValue conforming to the
+// "heroku.app.id" semantic conventions. It represents the unique identifier
+// for the application.
+func HerokuAppID(val string) attribute.KeyValue {
+	return HerokuAppIDKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release.
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+	return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
+// to the "heroku.release.creation_timestamp" semantic conventions. It
+// represents the time and date the release was created.
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+	return HerokuReleaseCreationTimestampKey.String(val)
+}
+
+// A host is defined as a computing instance; for example, physical servers,
+// virtual machines, switches, or disk arrays.
+const (
+	// HostArchKey is the attribute Key conforming to the "host.arch" semantic
+	// conventions. It represents the CPU architecture the host system is
+	// running on.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	HostArchKey = attribute.Key("host.arch")
+
+	// HostCPUCacheL2SizeKey is the attribute Key conforming to the
+	// "host.cpu.cache.l2.size" semantic conventions. It represents the amount
+	// of level 2 memory cache available to the processor (in Bytes).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 12288000
+	HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
+
+	// HostCPUFamilyKey is the attribute Key conforming to the
+	// "host.cpu.family" semantic conventions. It represents the family or
+	// generation of the CPU.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '6', 'PA-RISC 1.1e'
+	HostCPUFamilyKey = attribute.Key("host.cpu.family")
+
+	// HostCPUModelIDKey is the attribute Key conforming to the
+	// "host.cpu.model.id" semantic conventions. It represents the model
+	// identifier. It provides more granular information about the CPU,
+	// distinguishing it from other CPUs within the same family.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '6', '9000/778/B180L'
+	HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
+
+	// HostCPUModelNameKey is the attribute Key conforming to the
+	// "host.cpu.model.name" semantic conventions. It represents the model
+	// designation of the processor.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
+	HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
+
+	// HostCPUSteppingKey is the attribute Key conforming to the
+	// "host.cpu.stepping" semantic conventions. It represents the stepping or
+	// core revisions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1', 'r1p1'
+	HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
+
+	// HostCPUVendorIDKey is the attribute Key conforming to the
+	// "host.cpu.vendor.id" semantic conventions. It represents the processor
+	// manufacturer identifier. A maximum 12-character string.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'GenuineIntel'
+	// Note: The [CPUID](https://wiki.osdev.org/CPUID) command returns the
+	// vendor ID string in EBX, EDX and ECX registers. Writing these to memory
+	// in this order results in a 12-character string.
+	HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
+
+	// HostIDKey is the attribute Key conforming to the "host.id" semantic
+	// conventions. It represents the unique host ID. For Cloud, this must be
+	// the instance_id assigned by the cloud provider. For non-containerized
+	// systems, this should be the `machine-id`. See the table below for the
+	// sources to use to determine the `machine-id` based on operating system.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+	HostIDKey = attribute.Key("host.id")
+
+	// HostImageIDKey is the attribute Key conforming to the "host.image.id"
+	// semantic conventions. It represents the VM image ID or host OS image ID.
+	// For Cloud, this value is from the provider.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ami-07b06b442921831e5'
+	HostImageIDKey = attribute.Key("host.image.id")
+
+	// HostImageNameKey is the attribute Key conforming to the
+	// "host.image.name" semantic conventions. It represents the name of the VM
+	// image or OS install the host was instantiated from.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+	HostImageNameKey = attribute.Key("host.image.name")
+
+	// HostImageVersionKey is the attribute Key conforming to the
+	// "host.image.version" semantic conventions. It represents the version
+	// string of the VM image or host OS as defined in [Version
+	// Attributes](/docs/resource/README.md#version-attributes).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '0.1'
+	HostImageVersionKey = attribute.Key("host.image.version")
+
+	// HostIPKey is the attribute Key conforming to the "host.ip" semantic
+	// conventions. It represents the available IP addresses of the host,
+	// excluding loopback interfaces.
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC + // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, + // excluding loopback interfaces. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal + // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): + // as hyphen-separated octets in uppercase hexadecimal form from most to + // least significant. + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or +// generation of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model +// identifier. It provides more granular information about the CPU, +// distinguishing it from other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. 
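+//
+// As an editorial sketch (the OTel SDK's resource package and the semconv
+// import alias are assumptions, not defined in this file), host attributes
+// such as this one are typically attached to a resource:
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.HostName("opentelemetry-test"),
+//		semconv.HostCPUModelName("11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz"),
+//	)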
+func HostCPUModelName(val string) attribute.KeyValue {
+	return HostCPUModelNameKey.String(val)
+}
+
+// HostCPUStepping returns an attribute KeyValue conforming to the
+// "host.cpu.stepping" semantic conventions. It represents the stepping or core
+// revisions.
+func HostCPUStepping(val string) attribute.KeyValue {
+	return HostCPUSteppingKey.String(val)
+}
+
+// HostCPUVendorID returns an attribute KeyValue conforming to the
+// "host.cpu.vendor.id" semantic conventions. It represents the processor
+// manufacturer identifier. A maximum 12-character string.
+func HostCPUVendorID(val string) attribute.KeyValue {
+	return HostCPUVendorIDKey.String(val)
+}
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+	return HostIDKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the VM image ID or host
+// OS image ID. For Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+	return HostImageIDKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+	return HostImageNameKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image or host OS as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+	return HostImageVersionKey.String(val)
+}
+
+// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
+// conventions. It represents the available IP addresses of the host, excluding
+// loopback interfaces.
+func HostIP(val ...string) attribute.KeyValue {
+	return HostIPKey.StringSlice(val)
+}
+
+// HostMac returns an attribute KeyValue conforming to the "host.mac"
+// semantic conventions. It represents the available MAC addresses of the host,
+// excluding loopback interfaces.
+func HostMac(val ...string) attribute.KeyValue {
+	return HostMacKey.StringSlice(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+	return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+	return HostTypeKey.String(val)
+}
+
+// Semantic convention attributes in the HTTP namespace.
+const (
+	// HTTPConnectionStateKey is the attribute Key conforming to the
+	// "http.connection.state" semantic conventions. It represents the state of
+	// the HTTP connection in the HTTP connection pool.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'active', 'idle'
+	HTTPConnectionStateKey = attribute.Key("http.connection.state")
+
+	// HTTPRequestBodySizeKey is the attribute Key conforming to the
+	// "http.request.body.size" semantic conventions. It represents the size of
+	// the request payload body in bytes. This is the number of bytes
+	// transferred excluding headers and is often, but not always, present as
+	// the
+	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+	// header. For requests using transport encoding, this should be the
+	// compressed size.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3495
+	HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+	// HTTPRequestMethodKey is the attribute Key conforming to the
+	// "http.request.method" semantic conventions. It represents the HTTP
+	// request method.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'GET', 'POST', 'HEAD'
+	// Note: HTTP request method value SHOULD be "known" to the
+	// instrumentation.
+	// By default, this convention defines "known" methods as the ones listed
+	// in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
+	// and the PATCH method defined in
+	// [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
+	//
+	// If the HTTP request method is not known to instrumentation, it MUST set
+	// the `http.request.method` attribute to `_OTHER`.
+	//
+	// If the HTTP instrumentation could end up converting valid HTTP request
+	// methods to `_OTHER`, then it MUST provide a way to override
+	// the list of known HTTP methods. If this override is done via environment
+	// variable, then the environment variable MUST be named
+	// OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
+	// list of case-sensitive known HTTP methods
+	// (this list MUST be a full override of the default known methods; it is
+	// not a list of known methods in addition to the defaults).
+	//
+	// HTTP method names are case-sensitive and the `http.request.method`
+	// attribute value MUST match a known HTTP method name exactly.
+	// Instrumentations for specific web frameworks that consider HTTP methods
+	// to be case-insensitive SHOULD populate a canonical equivalent.
+	// Tracing instrumentations that do so MUST also set
+	// `http.request.method_original` to the original value.
+	HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+	// HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+	// "http.request.method_original" semantic conventions. It represents the
+	// original HTTP method sent by the client in the request line.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'GeT', 'ACL', 'foo'
+	HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+	// HTTPRequestResendCountKey is the attribute Key conforming to the
+	// "http.request.resend_count" semantic conventions. It represents the
+	// ordinal number of request resending attempt (for any reason, including
+	// redirects).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 3
+	// Note: The resend count SHOULD be updated each time an HTTP request gets
+	// resent by the client, regardless of the cause of the resending
+	// (e.g. redirection, authorization failure, 503 Service Unavailable,
+	// network issues, or any other).
+	HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
+
+	// HTTPRequestSizeKey is the attribute Key conforming to the
+	// "http.request.size" semantic conventions. It represents the total size
+	// of the request in bytes. This should be the total number of bytes sent
+	// over the wire, including the request line (HTTP/1.1), framing (HTTP/2
+	// and HTTP/3), headers, and request body if any.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1437
+	HTTPRequestSizeKey = attribute.Key("http.request.size")
+
+	// HTTPResponseBodySizeKey is the attribute Key conforming to the
+	// "http.response.body.size" semantic conventions. It represents the size
+	// of the response payload body in bytes. This is the number of bytes
+	// transferred excluding headers and is often, but not always, present as
+	// the
+	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+	// header. For requests using transport encoding, this should be the
+	// compressed size.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3495
+	HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
+
+	// HTTPResponseSizeKey is the attribute Key conforming to the
+	// "http.response.size" semantic conventions. It represents the total size
+	// of the response in bytes. This should be the total number of bytes sent
+	// over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and
+	// HTTP/3), headers, and response body and trailers if any.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1437
+	HTTPResponseSizeKey = attribute.Key("http.response.size")
+
+	// HTTPResponseStatusCodeKey is the attribute Key conforming to the
+	// "http.response.status_code" semantic conventions. It represents the
+	// [HTTP response status
+	// code](https://tools.ietf.org/html/rfc7231#section-6).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 200
+	HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
+
+	// HTTPRouteKey is the attribute Key conforming to the "http.route"
+	// semantic conventions. It represents the matched route, that is, the path
+	// template in the format used by the respective server framework.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+	// Note: MUST NOT be populated when this is not supported by the HTTP
+	// server framework, as the route attribute should have low cardinality and
+	// the URI path can NOT substitute it.
+	// SHOULD include the [application
+	// root](/docs/http/http-spans.md#http-server-definitions) if there is one.
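+	//
+	// A brief editorial sketch (the server span setup is assumed, not shown
+	// here): instrumentation records the matched template, never the raw path:
+	//
+	//	span.SetAttributes(
+	//		semconv.HTTPRoute("/users/:userID?"),
+	//		semconv.HTTPResponseStatusCode(200),
+	//	)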
+ HTTPRouteKey = attribute.Key("http.route") +) + +var ( + // active state + HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") + // idle state + HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") +) + +var ( + // CONNECT method + HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") + // DELETE method + HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") + // GET method + HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") + // HEAD method + HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") + // OPTIONS method + HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") + // PATCH method + HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") + // POST method + HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") + // PUT method + HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") + // TRACE method + HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") + // Any HTTP method that the instrumentation has no prior knowledge of + HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") +) + +// HTTPRequestBodySize returns an attribute KeyValue conforming to the +// "http.request.body.size" semantic conventions. It represents the size of the +// request payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestBodySize(val int) attribute.KeyValue { + return HTTPRequestBodySizeKey.Int(val) +} + +// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the +// "http.request.method_original" semantic conventions. It represents the +// original HTTP method sent by the client in the request line. +func HTTPRequestMethodOriginal(val string) attribute.KeyValue { + return HTTPRequestMethodOriginalKey.String(val) +} + +// HTTPRequestResendCount returns an attribute KeyValue conforming to the +// "http.request.resend_count" semantic conventions. It represents the ordinal +// number of request resending attempt (for any reason, including redirects). +func HTTPRequestResendCount(val int) attribute.KeyValue { + return HTTPRequestResendCountKey.Int(val) +} + +// HTTPRequestSize returns an attribute KeyValue conforming to the +// "http.request.size" semantic conventions. It represents the total size of +// the request in bytes. This should be the total number of bytes sent over the +// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), +// headers, and request body if any. +func HTTPRequestSize(val int) attribute.KeyValue { + return HTTPRequestSizeKey.Int(val) +} + +// HTTPResponseBodySize returns an attribute KeyValue conforming to the +// "http.response.body.size" semantic conventions. It represents the size of +// the response payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseBodySize(val int) attribute.KeyValue { + return HTTPResponseBodySizeKey.Int(val) +} + +// HTTPResponseSize returns an attribute KeyValue conforming to the +// "http.response.size" semantic conventions. 
It represents the total size of
+// the response in bytes. This should be the total number of bytes sent over
+// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+// headers, and response body and trailers if any.
+func HTTPResponseSize(val int) attribute.KeyValue {
+	return HTTPResponseSizeKey.Int(val)
+}
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the [HTTP
+// response status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+	return HTTPResponseStatusCodeKey.Int(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route, that is, the path
+// template in the format used by the respective server framework.
+func HTTPRoute(val string) attribute.KeyValue {
+	return HTTPRouteKey.String(val)
+}
+
+// Java Virtual Machine (JVM) related attributes.
+const (
+	// JvmBufferPoolNameKey is the attribute Key conforming to the
+	// "jvm.buffer.pool.name" semantic conventions. It represents the name of
+	// the buffer pool.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'mapped', 'direct'
+	// Note: Pool names are generally obtained via
+	// [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()).
+	JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name")
+
+	// JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action"
+	// semantic conventions. It represents the name of the garbage collector
+	// action.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'end of minor GC', 'end of major GC'
+	// Note: Garbage collector action is generally obtained via
+	// [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()).
+	JvmGcActionKey = attribute.Key("jvm.gc.action")
+
+	// JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name"
+	// semantic conventions. It represents the name of the garbage collector.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'G1 Young Generation', 'G1 Old Generation'
+	// Note: Garbage collector name is generally obtained via
+	// [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()).
+	JvmGcNameKey = attribute.Key("jvm.gc.name")
+
+	// JvmMemoryPoolNameKey is the attribute Key conforming to the
+	// "jvm.memory.pool.name" semantic conventions. It represents the name of
+	// the memory pool.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
+	// Note: Pool names are generally obtained via
+	// [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
+	JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name")
+
+	// JvmMemoryTypeKey is the attribute Key conforming to the
+	// "jvm.memory.type" semantic conventions. It represents the type of
+	// memory.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'heap', 'non_heap'
+	JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
+
+	// JvmThreadDaemonKey is the attribute Key conforming to the
+	// "jvm.thread.daemon" semantic conventions. It represents whether the
+	// thread is a daemon or not.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon")
+
+	// JvmThreadStateKey is the attribute Key conforming to the
+	// "jvm.thread.state" semantic conventions. It represents the state of the
+	// thread.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'runnable', 'blocked'
+	JvmThreadStateKey = attribute.Key("jvm.thread.state")
+)
+
+var (
+	// Heap memory
+	JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
+	// Non-heap memory
+	JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
+)
+
+var (
+	// A thread that has not yet started is in this state
+	JvmThreadStateNew = JvmThreadStateKey.String("new")
+	// A thread executing in the Java virtual machine is in this state
+	JvmThreadStateRunnable = JvmThreadStateKey.String("runnable")
+	// A thread that is blocked waiting for a monitor lock is in this state
+	JvmThreadStateBlocked = JvmThreadStateKey.String("blocked")
+	// A thread that is waiting indefinitely for another thread to perform a particular action is in this state
+	JvmThreadStateWaiting = JvmThreadStateKey.String("waiting")
+	// A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state
+	JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting")
+	// A thread that has exited is in this state
+	JvmThreadStateTerminated = JvmThreadStateKey.String("terminated")
+)
+
+// JvmBufferPoolName returns an attribute KeyValue conforming to the
+// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
+// buffer pool.
+func JvmBufferPoolName(val string) attribute.KeyValue {
+	return JvmBufferPoolNameKey.String(val)
+}
+
+// JvmGcAction returns an attribute KeyValue conforming to the
+// "jvm.gc.action" semantic conventions. It represents the name of the garbage
+// collector action.
+func JvmGcAction(val string) attribute.KeyValue {
+	return JvmGcActionKey.String(val)
+}
+
+// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name"
+// semantic conventions. It represents the name of the garbage collector.
+func JvmGcName(val string) attribute.KeyValue {
+	return JvmGcNameKey.String(val)
+}
+
+// JvmMemoryPoolName returns an attribute KeyValue conforming to the
+// "jvm.memory.pool.name" semantic conventions. It represents the name of the
+// memory pool.
+func JvmMemoryPoolName(val string) attribute.KeyValue {
+	return JvmMemoryPoolNameKey.String(val)
+}
+
+// JvmThreadDaemon returns an attribute KeyValue conforming to the
+// "jvm.thread.daemon" semantic conventions. It represents whether the thread
+// is a daemon or not.
+func JvmThreadDaemon(val bool) attribute.KeyValue {
+	return JvmThreadDaemonKey.Bool(val)
+}
+
+// Kubernetes resource attributes.
+const (
+	// K8SClusterNameKey is the attribute Key conforming to the
+	// "k8s.cluster.name" semantic conventions. It represents the name of the
+	// cluster.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-cluster'
+	K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+	// K8SClusterUIDKey is the attribute Key conforming to the
+	// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
+	// the cluster, set to the UID of the `kube-system` namespace.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
+	// Note: K8S doesn't have support for obtaining a cluster ID. If this is
+	// ever
+	// added, we will recommend collecting the `k8s.cluster.uid` through the
+	// official APIs. In the meantime, we are able to use the `uid` of the
+	// `kube-system` namespace as a proxy for cluster ID. Read on for the
+	// rationale.
+	//
+	// Every object created in a K8S cluster is assigned a distinct UID. The
+	// `kube-system` namespace is used by Kubernetes itself and will exist
+	// for the lifetime of the cluster. Using the `uid` of the `kube-system`
+	// namespace is a reasonable proxy for the K8S ClusterID as it will only
+	// change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+	// UUIDs as standardized by
+	// [ISO/IEC 9834-8 and ITU-T
+	// X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html),
+	// which states:
+	//
+	// > If generated according to one of the mechanisms defined in Rec.
+	// ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+	// different from all other UUIDs generated before 3603 A.D., or is
+	// extremely likely to be different (depending on the mechanism chosen).
+	//
+	// Therefore, UIDs between clusters should be extremely unlikely to
+	// conflict.
+	K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+
+	// K8SContainerNameKey is the attribute Key conforming to the
+	// "k8s.container.name" semantic conventions. It represents the name of the
+	// Container from the Pod specification; it must be unique within a Pod.
+	// The container runtime usually uses a different, globally unique name
+	// (`container.name`).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'redis'
+	K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+	// K8SContainerRestartCountKey is the attribute Key conforming to the
+	// "k8s.container.restart_count" semantic conventions. It represents the
+	// number of times the container was restarted. This attribute can be used
+	// to identify a particular container (running or stopped) within a
+	// container spec.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+
+	// K8SContainerStatusLastTerminatedReasonKey is the attribute Key
+	// conforming to the "k8s.container.status.last_terminated_reason" semantic
+	// conventions. It represents the last terminated reason of the Container.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Evicted', 'Error'
+	K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason")
+
+	// K8SCronJobNameKey is the attribute Key conforming to the
+	// "k8s.cronjob.name" semantic conventions. It represents the name of the
+	// CronJob.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. 
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+	K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+
+	// K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+	// semantic conventions. It represents the name of the Pod.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-pod-autoconf'
+	K8SPodNameKey = attribute.Key("k8s.pod.name")
+
+	// K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+	// semantic conventions. It represents the UID of the Pod.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+	// K8SReplicaSetNameKey is the attribute Key conforming to the
+	// "k8s.replicaset.name" semantic conventions. It represents the name of
+	// the ReplicaSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry'
+	K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+
+	// K8SReplicaSetUIDKey is the attribute Key conforming to the
+	// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+	// ReplicaSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+	// K8SStatefulSetNameKey is the attribute Key conforming to the
+	// "k8s.statefulset.name" semantic conventions. It represents the name of
+	// the StatefulSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry'
+	K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+
+	// K8SStatefulSetUIDKey is the attribute Key conforming to the
+	// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+	// StatefulSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+	return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+	return K8SClusterUIDKey.String(val)
+}
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod. The
+// container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+	return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
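+//
+// A minimal editorial sketch (the surrounding setup is assumed, not defined
+// here): such Kubernetes attributes are typically combined in one set
+// describing the pod and container:
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.K8SNamespaceName("default"),
+//		semconv.K8SPodName("opentelemetry-pod-autoconf"),
+//		semconv.K8SContainerRestartCount(2),
+//	}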
+func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue +// conforming to the "k8s.container.status.last_terminated_reason" semantic +// conventions. It represents the last terminated reason of the Container. +func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { + return K8SContainerStatusLastTerminatedReasonKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. 
+func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// Log attributes +const ( + // LogIostreamKey is the attribute Key conforming to the "log.iostream" + // semantic conventions. It represents the stream associated with the log. + // See below for a list of well-known values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + LogIostreamKey = attribute.Key("log.iostream") +) + +var ( + // Logs from stdout stream + LogIostreamStdout = LogIostreamKey.String("stdout") + // Events from stderr stream + LogIostreamStderr = LogIostreamKey.String("stderr") +) + +// Attributes for a file to which log was emitted. +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'audit.log' + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the + // basename of the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'uuid.log' + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/log/mysql/audit.log' + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full + // path to the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/lib/docker/uuid.log' + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") +) + +// LogFileName returns an attribute KeyValue conforming to the +// "log.file.name" semantic conventions. It represents the basename of the +// file. 
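The `log.*` attributes are usually attached by log collectors, but they can also annotate a span event when an application touches a log file directly. A small sketch (the event name and file paths are made up for illustration):

```go
package example

import (
	"context"

	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
	"go.opentelemetry.io/otel/trace"
)

// recordRotation marks a log-rotation step on the span in ctx, tagging the
// event with the log.file.* attributes defined above.
func recordRotation(ctx context.Context) {
	span := trace.SpanFromContext(ctx)
	span.AddEvent("log.rotation", trace.WithAttributes(
		semconv.LogFileName("audit.log"),                // basename only
		semconv.LogFilePath("/var/log/mysql/audit.log"), // full path
		semconv.LogIostreamStdout,                       // pre-built enum member
	))
}
```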
+func LogFileName(val string) attribute.KeyValue {
+	return LogFileNameKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+	return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the
+// "log.file.path" semantic conventions. It represents the full path to the
+// file.
+func LogFilePath(val string) attribute.KeyValue {
+	return LogFilePathKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path
+// to the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+	return LogFilePathResolvedKey.String(val)
+}
+
+// The generic attributes that may be used in any Log Record.
+const (
+	// LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+	// semantic conventions. It represents a unique identifier for the Log
+	// Record.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+	// Note: If an id is provided, other log records with the same id will be
+	// considered duplicates and can be removed safely. This means that two
+	// distinguishable log records MUST have different values.
+	// The id MAY be a [Universally Unique Lexicographically Sortable
+	// Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
+	// (e.g. UUID) may be used as needed.
+	LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+	return LogRecordUIDKey.String(val)
+}
+
+// Attributes describing telemetry around messaging systems and messaging
+// activities.
+const (
+	// MessagingBatchMessageCountKey is the attribute Key conforming to the
+	// "messaging.batch.message_count" semantic conventions. It represents the
+	// number of messages sent, received, or processed in the scope of the
+	// batching operation.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 0, 1, 2
+	// Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+	// spans that operate with a single message. When a messaging client
+	// library supports both batch and single-message API for the same
+	// operation, instrumentations SHOULD use `messaging.batch.message_count`
+	// for batching APIs and SHOULD NOT use it for single-message APIs.
+	MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+	// MessagingClientIDKey is the attribute Key conforming to the
+	// "messaging.client.id" semantic conventions. It represents a unique
+	// identifier for the client that consumes or produces a message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'client-5', 'myhost@8742@s8083jm'
+	MessagingClientIDKey = attribute.Key("messaging.client.id")
+
+	// MessagingDestinationAnonymousKey is the attribute Key conforming to the
+	// "messaging.destination.anonymous" semantic conventions.
It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") + + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationPartitionIDKey is the attribute Key conforming to + // the "messaging.destination.partition.id" semantic conventions. It + // represents the identifier of the partition messages are sent to or + // received from, unique within the `messaging.destination.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1' + MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationPublishAnonymousKey is the attribute Key conforming + // to the "messaging.destination_publish.anonymous" semantic conventions. + // It represents a boolean that is true if the publish message destination + // is anonymous (could be unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous") + + // MessagingDestinationPublishNameKey is the attribute Key conforming to + // the "messaging.destination_publish.name" semantic conventions. 
It + // represents the name of the original destination the message was + // published to + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: The name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker doesn't have such notion, the original destination name + // SHOULD uniquely identify the broker. + MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name") + + // MessagingMessageBodySizeKey is the attribute Key conforming to the + // "messaging.message.body.size" semantic conventions. It represents the + // size of the message body in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1439 + // Note: This can refer to both the compressed or uncompressed body size. + // If both sizes are known, the uncompressed + // body size should be used. + MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the conversation ID identifying the conversation to which the message + // belongs, represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the + // "messaging.message.envelope.size" semantic conventions. It represents + // the size of the message body and metadata in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2738 + // Note: This can refer to both the compressed or uncompressed size. If + // both sizes are known, the uncompressed + // size should be used. + MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") + + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingOperationNameKey is the attribute Key conforming to the + // "messaging.operation.name" semantic conventions. It represents the + // system-specific name of the messaging operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ack', 'nack', 'send' + MessagingOperationNameKey = attribute.Key("messaging.operation.name") + + // MessagingOperationTypeKey is the attribute Key conforming to the + // "messaging.operation.type" semantic conventions. It represents a string + // identifying the type of the messaging operation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationTypeKey = attribute.Key("messaging.operation.type") + + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents the messaging + // system as identified by the client instrumentation. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The actual messaging system may differ from the one known by the + // client. For example, when using Kafka client libraries to communicate + // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on + // the instrumentation's best knowledge. + MessagingSystemKey = attribute.Key("messaging.system") +) + +var ( + // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created + MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") + // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios + MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") + // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages + MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") + // One or more messages are delivered to or processed by a consumer + MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process") + // One or more messages are settled + MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") +) + +var ( + // Apache ActiveMQ + MessagingSystemActivemq = MessagingSystemKey.String("activemq") + // Amazon Simple Queue Service (SQS) + MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") + // Azure Event Grid + MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid") + // Azure Event Hubs + MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs") + // Azure Service Bus + MessagingSystemServicebus = MessagingSystemKey.String("servicebus") + // Google Cloud Pub/Sub + MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") + // Java Message Service + MessagingSystemJms = MessagingSystemKey.String("jms") + // Apache Kafka + MessagingSystemKafka = MessagingSystemKey.String("kafka") + // RabbitMQ + MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") + // Apache RocketMQ + MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") +) + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client.id" semantic conventions. It represents a unique +// identifier for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). 
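Put together, producer-side instrumentation would stamp the system, operation, and destination on a PRODUCER span roughly as follows (the tracer name, topic, and version path are assumptions):

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
	"go.opentelemetry.io/otel/trace"
)

// tracePublish wraps the publication of one message to a Kafka topic.
func tracePublish(ctx context.Context, publish func(context.Context) error) error {
	tracer := otel.Tracer("example/messaging")
	ctx, span := tracer.Start(ctx, "MyTopic publish",
		trace.WithSpanKind(trace.SpanKindProducer),
		trace.WithAttributes(
			semconv.MessagingSystemKafka,          // enum member defined above
			semconv.MessagingOperationTypePublish, // ditto
			semconv.MessagingOperationName("send"), // system-specific verb
			semconv.MessagingDestinationName("MyTopic"),
		))
	defer span.End()
	return publish(ctx)
}
```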
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationPartitionID returns an attribute KeyValue conforming +// to the "messaging.destination.partition.id" semantic conventions. It +// represents the identifier of the partition messages are sent to or received +// from, unique within the `messaging.destination.name`. +func MessagingDestinationPartitionID(val string) attribute.KeyValue { + return MessagingDestinationPartitionIDKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationPublishAnonymous returns an attribute KeyValue +// conforming to the "messaging.destination_publish.anonymous" semantic +// conventions. It represents a boolean that is true if the publish message +// destination is anonymous (could be unnamed or have auto-generated name). +func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationPublishAnonymousKey.Bool(val) +} + +// MessagingDestinationPublishName returns an attribute KeyValue conforming +// to the "messaging.destination_publish.name" semantic conventions. It +// represents the name of the original destination the message was published to +func MessagingDestinationPublishName(val string) attribute.KeyValue { + return MessagingDestinationPublishNameKey.String(val) +} + +// MessagingMessageBodySize returns an attribute KeyValue conforming to the +// "messaging.message.body.size" semantic conventions. It represents the size +// of the message body in bytes. +func MessagingMessageBodySize(val int) attribute.KeyValue { + return MessagingMessageBodySizeKey.Int(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the conversation ID identifying the conversation to which the +// message belongs, represented as a string. Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to +// the "messaging.message.envelope.size" semantic conventions. It represents +// the size of the message body and metadata in bytes. 
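On the consumer side, the per-message helpers are typically applied once the payload is in hand; a sketch against a hypothetical message type (the `message` struct is a stand-in, not a real client API):

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
	"go.opentelemetry.io/otel/trace"
)

// message is a hypothetical stand-in for a client library's message type.
type message struct {
	ID   string
	Body []byte
}

// annotateMessage copies message metadata onto an already-started span.
func annotateMessage(span trace.Span, msg message) {
	span.SetAttributes(
		semconv.MessagingMessageID(msg.ID),
		semconv.MessagingMessageBodySize(len(msg.Body)), // uncompressed size preferred
	)
}
```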
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+	return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+	return MessagingMessageIDKey.String(val)
+}
+
+// MessagingOperationName returns an attribute KeyValue conforming to the
+// "messaging.operation.name" semantic conventions. It represents the
+// system-specific name of the messaging operation.
+func MessagingOperationName(val string) attribute.KeyValue {
+	return MessagingOperationNameKey.String(val)
+}
+
+// This group describes attributes specific to Apache Kafka.
+const (
+	// MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+	// "messaging.kafka.consumer.group" semantic conventions. It represents the
+	// name of the Kafka Consumer Group that is handling the message. Only
+	// applies to consumers, not producers.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'my-group'
+	MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+	// MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+	// "messaging.kafka.message.key" semantic conventions. It represents the
+	// message keys in Kafka, which are used for grouping alike messages to
+	// ensure they're processed on the same partition. They differ from
+	// `messaging.message.id` in that they're not unique. If the key is `null`,
+	// the attribute MUST NOT be set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myKey'
+	// Note: If the key type is not string, its string representation has to
+	// be supplied for the attribute. If the key has no unambiguous, canonical
+	// string form, don't include its value.
+	MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+	// MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+	// "messaging.kafka.message.offset" semantic conventions. It represents the
+	// offset of a record in the corresponding Kafka partition.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 42
+	MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+	// MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+	// "messaging.kafka.message.tombstone" semantic conventions. It represents
+	// a boolean that is true if the message is a tombstone.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+	return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message keys in Kafka, which are used for grouping alike messages to ensure
+// they're processed on the same partition.
They differ from `messaging.message.id` in +// that they're not unique. If the key is `null`, the attribute MUST NOT be +// set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. It represents the +// offset of a record in the corresponding Kafka partition. +func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// This group describes attributes specific to RabbitMQ. +const ( + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") + + // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming + // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. + // It represents the rabbitMQ message delivery tag + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 123 + MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") +) + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitmqDestinationRoutingKeyKey.String(val) +} + +// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic +// conventions. It represents the rabbitMQ message delivery tag +func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue { + return MessagingRabbitmqMessageDeliveryTagKey.Int(val) +} + +// This group describes attributes specific to RocketMQ. +const ( + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions. It represents + // the name of the RocketMQ producer/consumer group that is handling the + // message. The client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + + // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to + // the "messaging.rocketmq.consumption_model" semantic conventions. It + // represents the model of message consumption. This only applies to + // consumer spans. 
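The "MUST NOT be set when null" rule above translates into a guard in instrumentation code; a sketch against a hypothetical Kafka record type (the `kafkaRecord` struct is illustrative, not a real client API):

```go
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
	"go.opentelemetry.io/otel/trace"
)

// kafkaRecord is a hypothetical stand-in for a Kafka client record.
type kafkaRecord struct {
	Key    []byte // nil for key-less records
	Offset int64
}

// annotateKafka attaches the Kafka-specific attributes to a consumer span.
func annotateKafka(span trace.Span, group string, rec kafkaRecord) {
	attrs := []attribute.KeyValue{
		semconv.MessagingKafkaConsumerGroup(group),
		semconv.MessagingKafkaMessageOffset(int(rec.Offset)),
	}
	if rec.Key != nil { // when the key is null, the attribute MUST NOT be set
		attrs = append(attrs, semconv.MessagingKafkaMessageKey(string(rec.Key)))
	}
	span.SetAttributes(attrs...)
}
```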
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+	// MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+	// conventions. It represents the delay time level for delay message, which
+	// determines the message delay time.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3
+	MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+	// MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delivery_timestamp"
+	// semantic conventions. It represents the timestamp in milliseconds that
+	// the delay message is expected to be delivered to consumer.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1665987217045
+	MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+	// MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.group" semantic conventions. It represents
+	// the message group, which is essential for FIFO messages. Messages that
+	// belong to the same message group are always processed one by one within
+	// the same consumer group.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myMessageGroup'
+	MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+	// MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.keys" semantic conventions. It represents
+	// the key(s) of the message, another way to mark the message besides the
+	// message id.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'keyA', 'keyB'
+	MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+	// MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+	// secondary classifier of the message besides the topic.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'tagA'
+	MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+	// MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.type" semantic conventions. It represents
+	// the type of message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+	// MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+	// "messaging.rocketmq.namespace" semantic conventions. It represents the
+	// namespace of RocketMQ resources, resources in different namespaces are
+	// individual.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myNamespace'
+	MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+)
+
+var (
+	// Clustering consumption model
+	MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+	// Broadcasting consumption model
+	MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+var (
+	// Normal message
+	MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+	// FIFO message
+	MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+	// Delay message
+	MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+	// Transaction message
+	MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within the
+// same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message, another way to mark the message besides the
+// message id.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+	return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of the message besides the topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions.
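For RocketMQ, a FIFO consumer span would combine the group, type, and key helpers; the values below are placeholders:

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
	"go.opentelemetry.io/otel/trace"
)

// annotateRocketMQFifo tags a consumer span for an ordered (FIFO) message.
func annotateRocketMQFifo(span trace.Span) {
	span.SetAttributes(
		semconv.MessagingRocketmqClientGroup("myConsumerGroup"),
		semconv.MessagingRocketmqMessageTypeFifo,                // enum member
		semconv.MessagingRocketmqMessageGroup("myMessageGroup"), // essential for FIFO
		semconv.MessagingRocketmqMessageKeys("keyA", "keyB"),    // variadic -> string slice
	)
}
```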
It represents the +// namespace of RocketMQ resources, resources in different namespaces are +// individual. +func MessagingRocketmqNamespace(val string) attribute.KeyValue { + return MessagingRocketmqNamespaceKey.String(val) +} + +// This group describes attributes specific to GCP Pub/Sub. +const ( + // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. + // It represents the ack deadline in seconds set for the modify ack + // deadline request. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline") + + // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the + // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It + // represents the ack id for a given message. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ack_id' + MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id") + + // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key + // conforming to the "messaging.gcp_pubsub.message.delivery_attempt" + // semantic conventions. It represents the delivery attempt for a given + // message. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2 + MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt") + + // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. + // It represents the ordering key for a given message. If the attribute is + // not present, the message does not have an ordering key. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ordering_key' + MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") +) + +// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic +// conventions. It represents the ack deadline in seconds set for the modify +// ack deadline request. +func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue { + return MessagingGCPPubsubMessageAckDeadlineKey.Int(val) +} + +// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming +// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It +// represents the ack id for a given message. +func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue { + return MessagingGCPPubsubMessageAckIDKey.String(val) +} + +// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic +// conventions. It represents the delivery attempt for a given message. +func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue { + return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val) +} + +// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic +// conventions. It represents the ordering key for a given message. If the +// attribute is not present, the message does not have an ordering key. 
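A GCP Pub/Sub subscriber would record the ack bookkeeping in the same style, again guarding the optional ordering key (the `pubsubMessage` struct is a hypothetical stand-in, not a real client type):

```go
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
	"go.opentelemetry.io/otel/trace"
)

// pubsubMessage is a hypothetical stand-in for a Pub/Sub client message.
type pubsubMessage struct {
	AckID           string
	DeliveryAttempt int
	OrderingKey     string // empty when the message has none
}

// annotatePubsub tags a consumer span with the gcp_pubsub.* attributes.
func annotatePubsub(span trace.Span, m pubsubMessage) {
	attrs := []attribute.KeyValue{
		semconv.MessagingGCPPubsubMessageAckID(m.AckID),
		semconv.MessagingGCPPubsubMessageDeliveryAttempt(m.DeliveryAttempt),
	}
	if m.OrderingKey != "" { // an absent attribute means "no ordering key"
		attrs = append(attrs, semconv.MessagingGCPPubsubMessageOrderingKey(m.OrderingKey))
	}
	span.SetAttributes(attrs...)
}
```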
+func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
+	return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
+}
+
+// This group describes attributes specific to Azure Service Bus.
+const (
+	// MessagingServicebusDestinationSubscriptionNameKey is the attribute Key
+	// conforming to the "messaging.servicebus.destination.subscription_name"
+	// semantic conventions. It represents the name of the subscription in the
+	// topic messages are received from.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'mySubscription'
+	MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name")
+
+	// MessagingServicebusDispositionStatusKey is the attribute Key conforming
+	// to the "messaging.servicebus.disposition_status" semantic conventions.
+	// It represents the [settlement
+	// type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
+
+	// MessagingServicebusMessageDeliveryCountKey is the attribute Key
+	// conforming to the "messaging.servicebus.message.delivery_count" semantic
+	// conventions. It represents the number of deliveries that have been
+	// attempted for this message.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 2
+	MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
+
+	// MessagingServicebusMessageEnqueuedTimeKey is the attribute Key
+	// conforming to the "messaging.servicebus.message.enqueued_time" semantic
+	// conventions. It represents the UTC epoch seconds at which the message
+	// has been accepted and stored in the entity.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1701393730
+	MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
+)
+
+var (
+	// Message is completed
+	MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete")
+	// Message is abandoned
+	MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon")
+	// Message is sent to dead letter queue
+	MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter")
+	// Message is deferred
+	MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer")
+)
+
+// MessagingServicebusDestinationSubscriptionName returns an attribute
+// KeyValue conforming to the
+// "messaging.servicebus.destination.subscription_name" semantic conventions.
+// It represents the name of the subscription in the topic messages are
+// received from.
+func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue {
+	return MessagingServicebusDestinationSubscriptionNameKey.String(val)
+}
+
+// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.delivery_count" semantic
+// conventions. It represents the number of deliveries that have been attempted
+// for this message.
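On Azure Service Bus, the disposition enum pairs naturally with the settle call; a sketch with placeholder values:

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
	"go.opentelemetry.io/otel/trace"
)

// annotateServiceBusSettle tags a "settle" span for a completed message.
func annotateServiceBusSettle(span trace.Span, deliveryCount int, enqueuedUnix int64) {
	span.SetAttributes(
		semconv.MessagingServicebusDestinationSubscriptionName("mySubscription"),
		semconv.MessagingServicebusDispositionStatusComplete, // enum member
		semconv.MessagingServicebusMessageDeliveryCount(deliveryCount),
		semconv.MessagingServicebusMessageEnqueuedTime(int(enqueuedUnix)),
	)
}
```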
+func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue { + return MessagingServicebusMessageDeliveryCountKey.Int(val) +} + +// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has +// been accepted and stored in the entity. +func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingServicebusMessageEnqueuedTimeKey.Int(val) +} + +// This group describes attributes specific to Azure Event Hubs. +const ( + // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to + // the "messaging.eventhubs.consumer.group" semantic conventions. It + // represents the name of the consumer group the event consumer is + // associated with. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'indexer' + MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group") + + // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming + // to the "messaging.eventhubs.message.enqueued_time" semantic conventions. + // It represents the UTC epoch seconds at which the message has been + // accepted and stored in the entity. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1701393730 + MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") +) + +// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming +// to the "messaging.eventhubs.consumer.group" semantic conventions. It +// represents the name of the consumer group the event consumer is associated +// with. +func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue { + return MessagingEventhubsConsumerGroupKey.String(val) +} + +// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.eventhubs.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has +// been accepted and stored in the entity. +func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingEventhubsMessageEnqueuedTimeKey.Int(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetworkCarrierIccKey is the attribute Key conforming to the + // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 + // alpha-2 2-character country code associated with the mobile carrier + // network. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'DE' + NetworkCarrierIccKey = attribute.Key("network.carrier.icc") + + // NetworkCarrierMccKey is the attribute Key conforming to the + // "network.carrier.mcc" semantic conventions. It represents the mobile + // carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '310' + NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") + + // NetworkCarrierMncKey is the attribute Key conforming to the + // "network.carrier.mnc" semantic conventions. It represents the mobile + // carrier network code. 
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '001'
+	NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
+
+	// NetworkCarrierNameKey is the attribute Key conforming to the
+	// "network.carrier.name" semantic conventions. It represents the name of
+	// the mobile carrier.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'sprint'
+	NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+	// NetworkConnectionSubtypeKey is the attribute Key conforming to the
+	// "network.connection.subtype" semantic conventions. It describes more
+	// details regarding the connection.type. It may be the type of cell
+	// technology connection, but it could be used for describing details about
+	// a wifi connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'LTE'
+	NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+	// NetworkConnectionTypeKey is the attribute Key conforming to the
+	// "network.connection.type" semantic conventions. It represents the
+	// internet connection type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'wifi'
+	NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+	// NetworkIoDirectionKey is the attribute Key conforming to the
+	// "network.io.direction" semantic conventions. It represents the network
+	// IO operation direction.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'transmit'
+	NetworkIoDirectionKey = attribute.Key("network.io.direction")
+
+	// NetworkLocalAddressKey is the attribute Key conforming to the
+	// "network.local.address" semantic conventions. It represents the local
+	// address of the network connection - IP address or Unix domain socket
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10.1.2.80', '/tmp/my.sock'
+	NetworkLocalAddressKey = attribute.Key("network.local.address")
+
+	// NetworkLocalPortKey is the attribute Key conforming to the
+	// "network.local.port" semantic conventions. It represents the local port
+	// number of the network connection.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	NetworkLocalPortKey = attribute.Key("network.local.port")
+
+	// NetworkPeerAddressKey is the attribute Key conforming to the
+	// "network.peer.address" semantic conventions. It represents the peer
+	// address of the network connection - IP address or Unix domain socket
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10.1.2.80', '/tmp/my.sock'
+	NetworkPeerAddressKey = attribute.Key("network.peer.address")
+
+	// NetworkPeerPortKey is the attribute Key conforming to the
+	// "network.peer.port" semantic conventions. It represents the peer port
+	// number of the network connection.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	NetworkPeerPortKey = attribute.Key("network.peer.port")
+
+	// NetworkProtocolNameKey is the attribute Key conforming to the
+	// "network.protocol.name" semantic conventions. It represents the [OSI
+	// application layer](https://osi-model.com/application-layer/) or non-OSI
+	// equivalent.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + // Note: The value SHOULD be normalized to lowercase. + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the + // actual version of the protocol used for network communication. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.1', '2' + // Note: If protocol version is subject to negotiation (for example using + // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute + // SHOULD be set to the negotiated version. If the actual protocol version + // is not known, this attribute SHOULD NOT be set. + NetworkProtocolVersionKey = attribute.Key("network.protocol.version") + + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the [OSI + // transport layer](https://osi-model.com/transport-layer/) or + // [inter-process communication + // method](https://wikipedia.org/wiki/Inter-process_communication). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tcp', 'udp' + // Note: The value SHOULD be normalized to lowercase. + // + // Consider always setting the transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port + // 12345. + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" + // semantic conventions. It represents the [OSI network + // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ipv4', 'ipv6' + // Note: The value SHOULD be normalized to lowercase. + NetworkTypeKey = attribute.Key("network.type") +) + +var ( + // GPRS + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") + // LTE + NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") + // EHRPD + NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") + // GSM + NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") +) + +var ( + // wifi + NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") + // wired + NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") + // cell + NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") + // unavailable + NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") + // unknown + NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") +) + +var ( + // transmit + NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") + // receive + NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") +) + +var ( + // TCP + NetworkTransportTCP = NetworkTransportKey.String("tcp") + // UDP + NetworkTransportUDP = NetworkTransportKey.String("udp") + // Named or anonymous pipe + NetworkTransportPipe = NetworkTransportKey.String("pipe") + // Unix domain socket + NetworkTransportUnix = NetworkTransportKey.String("unix") +) + +var ( + // IPv4 + NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") + // IPv6 + NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") +) + +// NetworkCarrierIcc returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierIcc(val string) attribute.KeyValue { + return NetworkCarrierIccKey.String(val) +} + +// NetworkCarrierMcc returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. +func NetworkCarrierMcc(val string) attribute.KeyValue { + return NetworkCarrierMccKey.String(val) +} + +// NetworkCarrierMnc returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMnc(val string) attribute.KeyValue { + return NetworkCarrierMncKey.String(val) +} + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkLocalAddress returns an attribute KeyValue conforming to the +// "network.local.address" semantic conventions. It represents the local +// address of the network connection - IP address or Unix domain socket name. 
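The note on `network.transport` above ("always set the transport alongside a port") is easy to honor because the enum members and constructors compose; a sketch for a plain TCP/IPv4 client connection:

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
	"go.opentelemetry.io/otel/trace"
)

// annotateConn records where a client connection actually went. The transport
// is set together with the port, since a bare port number is ambiguous.
func annotateConn(span trace.Span, peerAddr string, peerPort int) {
	span.SetAttributes(
		semconv.NetworkTransportTCP, // enum member
		semconv.NetworkTypeIpv4,     // enum member
		semconv.NetworkPeerAddress(peerAddr),
		semconv.NetworkPeerPort(peerPort),
		semconv.NetworkProtocolName("http"), // lowercase per the note above
		semconv.NetworkProtocolVersion("1.1"),
	)
}
```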
+func NetworkLocalAddress(val string) attribute.KeyValue {
+	return NetworkLocalAddressKey.String(val)
+}
+
+// NetworkLocalPort returns an attribute KeyValue conforming to the
+// "network.local.port" semantic conventions. It represents the local port
+// number of the network connection.
+func NetworkLocalPort(val int) attribute.KeyValue {
+	return NetworkLocalPortKey.Int(val)
+}
+
+// NetworkPeerAddress returns an attribute KeyValue conforming to the
+// "network.peer.address" semantic conventions. It represents the peer address
+// of the network connection - IP address or Unix domain socket name.
+func NetworkPeerAddress(val string) attribute.KeyValue {
+	return NetworkPeerAddressKey.String(val)
+}
+
+// NetworkPeerPort returns an attribute KeyValue conforming to the
+// "network.peer.port" semantic conventions. It represents the peer port number
+// of the network connection.
+func NetworkPeerPort(val int) attribute.KeyValue {
+	return NetworkPeerPortKey.Int(val)
+}
+
+// NetworkProtocolName returns an attribute KeyValue conforming to the
+// "network.protocol.name" semantic conventions. It represents the [OSI
+// application layer](https://osi-model.com/application-layer/) or non-OSI
+// equivalent.
+func NetworkProtocolName(val string) attribute.KeyValue {
+	return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the actual
+// version of the protocol used for network communication.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+	return NetworkProtocolVersionKey.String(val)
+}
+
+// An OCI image manifest.
+const (
+	// OciManifestDigestKey is the attribute Key conforming to the
+	// "oci.manifest.digest" semantic conventions. It represents the digest of
+	// the OCI image manifest. For container images specifically, this is the
+	// digest by which the container image is known.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
+	// Note: Follows [OCI Image Manifest
+	// Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md),
+	// and specifically the [Digest
+	// property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
+	// An example can be found in [Example Image
+	// Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
+	OciManifestDigestKey = attribute.Key("oci.manifest.digest")
+)
+
+// OciManifestDigest returns an attribute KeyValue conforming to the
+// "oci.manifest.digest" semantic conventions. It represents the digest of the
+// OCI image manifest. For container images specifically, this is the digest by
+// which the container image is known.
+func OciManifestDigest(val string) attribute.KeyValue {
+	return OciManifestDigestKey.String(val)
+}
+
+// Attributes used by the OpenTracing Shim layer.
+const (
+	// OpentracingRefTypeKey is the attribute Key conforming to the
+	// "opentracing.ref_type" semantic conventions. It represents the
+	// parent-child Reference type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span doesn't depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSBuildIDKey is the attribute Key conforming to the "os.build_id" + // semantic conventions. It represents the unique identifier for a + // particular build or compilation of the operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' + OSBuildIDKey = attribute.Key("os.build_id") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + OSTypeKey = attribute.Key("os.type") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the operating system. +func OSBuildID(val string) attribute.KeyValue { + return OSBuildIDKey.String(val) +} + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. 
It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](/docs/resource/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// Attributes reserved for OpenTelemetry +const ( + // OTelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OTelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OTelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. 
It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](/docs/resource/README.md#service) of the remote + // service. SHOULD be equal to the actual `service.name` resource attribute + // of the remote service if any. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](/docs/resource/README.md#service) of the remote service. +// SHOULD be equal to the actual `service.name` resource attribute of the +// remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// An operating system process. +const ( + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'cmd/otelcol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessContextSwitchTypeKey is the attribute Key conforming to the + // "process.context_switch_type" semantic conventions. It specifies + // whether the context switches for this data point were + // voluntary or involuntary.
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type") + + // ProcessCreationTimeKey is the attribute Key conforming to the + // "process.creation.time" semantic conventions. It represents the date and + // time the process was created, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2023-11-21T09:25:34.853Z' + ProcessCreationTimeKey = attribute.Key("process.creation.time") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessExitCodeKey is the attribute Key conforming to the + // "process.exit.code" semantic conventions. It represents the exit code of + // the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 127 + ProcessExitCodeKey = attribute.Key("process.exit.code") + + // ProcessExitTimeKey is the attribute Key conforming to the + // "process.exit.time" semantic conventions. It represents the date and + // time the process exited, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2023-11-21T09:26:12.315Z' + ProcessExitTimeKey = attribute.Key("process.exit.time") + + // ProcessGroupLeaderPIDKey is the attribute Key conforming to the + // "process.group_leader.pid" semantic conventions. It represents the PID + // of the process's group leader. This is also the process group ID (PGID) + // of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 23 + ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid") + + // ProcessInteractiveKey is the attribute Key conforming to the + // "process.interactive" semantic conventions. It represents whether + // the process is connected to an interactive shell. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + ProcessInteractiveKey = attribute.Key("process.interactive") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") + + // ProcessPagingFaultTypeKey is the attribute Key conforming to the + // "process.paging.fault_type" semantic conventions.
It represents the type + // of page fault for this data point. Type `major` is for major/hard page + // faults, and `minor` is for minor/soft page faults. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PPID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRealUserIDKey is the attribute Key conforming to the + // "process.real_user.id" semantic conventions. It represents the real user + // ID (RUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1000 + ProcessRealUserIDKey = attribute.Key("process.real_user.id") + + // ProcessRealUserNameKey is the attribute Key conforming to the + // "process.real_user.name" semantic conventions. It represents the + // username of the real user of the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'operator' + ProcessRealUserNameKey = attribute.Key("process.real_user.name") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessSavedUserIDKey is the attribute Key conforming to the + // "process.saved_user.id" semantic conventions. It represents the saved + // user ID (SUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1002 + ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") + + // ProcessSavedUserNameKey is the attribute Key conforming to the + // "process.saved_user.name" semantic conventions. It represents the + // username of the saved user. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'operator' + ProcessSavedUserNameKey = attribute.Key("process.saved_user.name") + + // ProcessSessionLeaderPIDKey is the attribute Key conforming to the + // "process.session_leader.pid" semantic conventions. It represents the PID + // of the process's session leader. This is also the session ID (SID) of + // the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 14 + ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid") + + // ProcessUserIDKey is the attribute Key conforming to the + // "process.user.id" semantic conventions. It represents the effective user + // ID (EUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1001 + ProcessUserIDKey = attribute.Key("process.user.id") + + // ProcessUserNameKey is the attribute Key conforming to the + // "process.user.name" semantic conventions. It represents the username of + // the effective user of the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'root' + ProcessUserNameKey = attribute.Key("process.user.name") + + // ProcessVpidKey is the attribute Key conforming to the "process.vpid" + // semantic conventions. It represents the virtual process identifier. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 12 + // Note: The process ID within a PID namespace. This is not necessarily + // unique across all processes on the host but it is unique within the + // process namespace that the process exists within. + ProcessVpidKey = attribute.Key("process.vpid") +) + +var ( + // voluntary + ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary") + // involuntary + ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary") +) + +var ( + // major + ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major") + // minor + ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor") +) + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`.
Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCreationTime returns an attribute KeyValue conforming to the +// "process.creation.time" semantic conventions. It represents the date and +// time the process was created, in ISO 8601 format. +func ProcessCreationTime(val string) attribute.KeyValue { + return ProcessCreationTimeKey.String(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`. On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessExitCode returns an attribute KeyValue conforming to the +// "process.exit.code" semantic conventions. It represents the exit code of the +// process. +func ProcessExitCode(val int) attribute.KeyValue { + return ProcessExitCodeKey.Int(val) +} + +// ProcessExitTime returns an attribute KeyValue conforming to the +// "process.exit.time" semantic conventions. It represents the date and time +// the process exited, in ISO 8601 format. +func ProcessExitTime(val string) attribute.KeyValue { + return ProcessExitTimeKey.String(val) +} + +// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the +// "process.group_leader.pid" semantic conventions. It represents the PID of +// the process's group leader. This is also the process group ID (PGID) of the +// process. +func ProcessGroupLeaderPID(val int) attribute.KeyValue { + return ProcessGroupLeaderPIDKey.Int(val) +} + +// ProcessInteractive returns an attribute KeyValue conforming to the +// "process.interactive" semantic conventions. It represents whether the +// process is connected to an interactive shell. +func ProcessInteractive(val bool) attribute.KeyValue { + return ProcessInteractiveKey.Bool(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PPID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessRealUserID returns an attribute KeyValue conforming to the +// "process.real_user.id" semantic conventions. It represents the real user ID +// (RUID) of the process.
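+//
+// A minimal sketch of how several of these helpers might be attached to an
+// SDK resource (the ctx and res variables, and the resource package from the
+// OpenTelemetry Go SDK, are illustrative assumptions, not part of this file):
+//
+//	res, _ := resource.New(ctx,
+//		resource.WithAttributes(
+//			ProcessPID(1234),
+//			ProcessExecutableName("otelcol"),
+//			ProcessCommandArgs("otelcol", "--config=config.yaml"),
+//			ProcessOwner("root"),
+//		),
+//	)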
+func ProcessRealUserID(val int) attribute.KeyValue { + return ProcessRealUserIDKey.Int(val) +} + +// ProcessRealUserName returns an attribute KeyValue conforming to the +// "process.real_user.name" semantic conventions. It represents the username of +// the real user of the process. +func ProcessRealUserName(val string) attribute.KeyValue { + return ProcessRealUserNameKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessSavedUserID returns an attribute KeyValue conforming to the +// "process.saved_user.id" semantic conventions. It represents the saved user +// ID (SUID) of the process. +func ProcessSavedUserID(val int) attribute.KeyValue { + return ProcessSavedUserIDKey.Int(val) +} + +// ProcessSavedUserName returns an attribute KeyValue conforming to the +// "process.saved_user.name" semantic conventions. It represents the username +// of the saved user. +func ProcessSavedUserName(val string) attribute.KeyValue { + return ProcessSavedUserNameKey.String(val) +} + +// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the +// "process.session_leader.pid" semantic conventions. It represents the PID of +// the process's session leader. This is also the session ID (SID) of the +// process. +func ProcessSessionLeaderPID(val int) attribute.KeyValue { + return ProcessSessionLeaderPIDKey.Int(val) +} + +// ProcessUserID returns an attribute KeyValue conforming to the +// "process.user.id" semantic conventions. It represents the effective user ID +// (EUID) of the process. +func ProcessUserID(val int) attribute.KeyValue { + return ProcessUserIDKey.Int(val) +} + +// ProcessUserName returns an attribute KeyValue conforming to the +// "process.user.name" semantic conventions. It represents the username of the +// effective user of the process. +func ProcessUserName(val string) attribute.KeyValue { + return ProcessUserNameKey.String(val) +} + +// ProcessVpid returns an attribute KeyValue conforming to the +// "process.vpid" semantic conventions. It represents the virtual process +// identifier. +func ProcessVpid(val int) attribute.KeyValue { + return ProcessVpidKey.Int(val) +} + +// Attributes for process CPU +const ( + // ProcessCPUStateKey is the attribute Key conforming to the + // "process.cpu.state" semantic conventions. It represents the CPU state of + // the process. 
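+ //
+ // A brief metrics sketch (the meter variable and the metric package from
+ // the OpenTelemetry Go API are illustrative assumptions; the instrument
+ // name follows the process.cpu.time semantic convention):
+ //
+ //	cpuTime, _ := meter.Float64Counter("process.cpu.time")
+ //	cpuTime.Add(ctx, 1.5, metric.WithAttributes(ProcessCPUStateUser))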
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessCPUStateKey = attribute.Key("process.cpu.state") +) + +var ( + // system + ProcessCPUStateSystem = ProcessCPUStateKey.String("system") + // user + ProcessCPUStateUser = ProcessCPUStateKey.String("user") + // wait + ProcessCPUStateWait = ProcessCPUStateKey.String("wait") +) + +// Attributes for remote procedure calls. +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes](https://connect.build/docs/protocol/#error-codes) of the + // Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") + + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // doesn't specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCMessageCompressedSizeKey is the attribute Key conforming to the + // "rpc.message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size") + + // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id" + // semantic conventions. It MUST be calculated as two + // different counters starting from `1`, one for sent messages and one for + // received messages. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Note: This way we guarantee that the values will be consistent between + // different implementations. + RPCMessageIDKey = attribute.Key("rpc.message.id") + + // RPCMessageTypeKey is the attribute Key conforming to the + // "rpc.message.type" semantic conventions. It represents whether this + // is a received or sent message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCMessageTypeKey = attribute.Key("rpc.message.type") + + // RPCMessageUncompressedSizeKey is the attribute Key conforming to the + // "rpc.message.uncompressed_size" semantic conventions. It represents the + // uncompressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers.
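+ //
+ // A minimal client-span sketch combining the RPC attributes above (the
+ // tracer and ctx variables are illustrative assumptions, not part of this
+ // file):
+ //
+ //	ctx, span := tracer.Start(ctx, "myservice.EchoService/exampleMethod")
+ //	span.SetAttributes(
+ //		RPCSystemGRPC,
+ //		RPCService("myservice.EchoService"),
+ //		RPCMethod("exampleMethod"),
+ //		RPCGRPCStatusCodeOk,
+ //	)
+ //	span.End()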
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCSystemKey = attribute.Key("rpc.system") +) + +var ( + // cancelled + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +var ( + // sent + RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + // received + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = 
RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") +) + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// doesn't specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCMessageCompressedSize returns an attribute KeyValue conforming to the +// "rpc.message.compressed_size" semantic conventions. It represents the +// compressed size of the message in bytes. +func RPCMessageCompressedSize(val int) attribute.KeyValue { + return RPCMessageCompressedSizeKey.Int(val) +} + +// RPCMessageID returns an attribute KeyValue conforming to the +// "rpc.message.id" semantic conventions. It MUST be calculated +// as two different counters starting from `1`, one for sent messages and one +// for received messages. +func RPCMessageID(val int) attribute.KeyValue { + return RPCMessageIDKey.Int(val) +} + +// RPCMessageUncompressedSize returns an attribute KeyValue conforming to +// the "rpc.message.uncompressed_size" semantic conventions. It represents the +// uncompressed size of the message in bytes. +func RPCMessageUncompressedSize(val int) attribute.KeyValue { + return RPCMessageUncompressedSizeKey.Int(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// These attributes may be used to describe the server in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection).
This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. It represents the server domain name if available + // without reverse DNS lookup; otherwise, IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.address` SHOULD represent the server address + // behind any intermediaries, for example proxies, if it's available. + ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" + // semantic conventions. It represents the server port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.port` SHOULD represent the server port behind + // any intermediaries, for example proxies, if it's available. + ServerPortKey = attribute.Key("server.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the +// "server.address" semantic conventions. It represents the server domain name +// if available without reverse DNS lookup; otherwise, IP address or Unix +// domain socket name. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the server port number. +func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// A service instance. +const ( + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to + // distinguish instances of the same service that exist at the same time + // (e.g. instances of a horizontally scaled + // service). + // + // Implementations, such as SDKs, are recommended to generate a random + // Version 1 or Version 4 [RFC + // 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an + // inherent unique ID as the source of + // this value if stability is desirable. In that case, the ID SHOULD be + // used as source of a UUID Version 5 and + // SHOULD use the following UUID as the namespace: + // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. + // + // UUIDs are typically recommended, as only an opaque value for the + // purposes of identifying a service instance is + // needed. 
Similar to what can be seen in the man page for the + // [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html) + // file, the underlying + // data, such as pod name and namespace should be treated as confidential, + // being the user's choice to expose it + // or not via another resource attribute. + // + // For applications running behind an application server (like unicorn), we + // do not recommend using one identifier + // for all processes participating in the application. Instead, it's + // recommended that each division (e.g. a worker + // thread in unicorn) have its own instance.id. + // + // It's not recommended for a Collector to set `service.instance.id` if it + // can't unambiguously determine the + // service instance that is generating that telemetry. For instance, + // creating a UUID based on `pod.name` will + // likely be wrong, as the Collector might not know from which container + // within that pod the telemetry originated. + // However, Collectors can set the `service.instance.id` if they can + // unambiguously determine the service instance + // for that telemetry. This is typically the case for scraping receivers, + // as they know the target address and + // port. + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If + // `process.executable.name` is not available, the value MUST be set to + // `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. The format is not defined by these + // conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0', 'a01dbef8a' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance.
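+//
+// A sketch of the UUID Version 5 derivation suggested in the note above,
+// using the namespace UUID from the convention (github.com/google/uuid is an
+// assumed dependency, not part of this file; any RFC 4122 implementation
+// would do, and NewSHA1 produces a Version 5 UUID):
+//
+//	ns := uuid.MustParse("4d63009a-8d0f-11ee-aad7-4c796ed8e320")
+//	machineID, _ := os.ReadFile("/etc/machine-id")
+//	kv := ServiceInstanceID(uuid.NewSHA1(ns, bytes.TrimSpace(machineID)).String())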
+func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// Session is defined as the period of time encompassing all activities +// performed by the application and the actions executed by the end user. +// Consequently, a Session is represented as a collection of Logs, Events, and +// Spans emitted by the Client Application throughout the Session's duration. +// Each Session is assigned a unique identifier, which is included as an +// attribute in the Logs, Events, and Spans generated during the Session's +// lifecycle. +// When a session reaches end of life, typically due to user inactivity or +// session timeout, a new session identifier will be assigned. The previous +// session identifier may be provided by the instrumentation so that telemetry +// backends can link the two sessions. +const ( + // SessionIDKey is the attribute Key conforming to the "session.id" + // semantic conventions. It represents a unique id to identify a session. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + SessionIDKey = attribute.Key("session.id") + + // SessionPreviousIDKey is the attribute Key conforming to the + // "session.previous_id" semantic conventions. It represents the previous + // `session.id` for this user, when known. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + SessionPreviousIDKey = attribute.Key("session.previous_id") +) + +// SessionID returns an attribute KeyValue conforming to the "session.id" +// semantic conventions. It represents a unique id to identify a session. +func SessionID(val string) attribute.KeyValue { + return SessionIDKey.String(val) +} + +// SessionPreviousID returns an attribute KeyValue conforming to the +// "session.previous_id" semantic conventions. It represents the previous +// `session.id` for this user, when known. +func SessionPreviousID(val string) attribute.KeyValue { + return SessionPreviousIDKey.String(val) +} + +// SignalR attributes +const ( + // SignalrConnectionStatusKey is the attribute Key conforming to the + // "signalr.connection.status" semantic conventions. It represents the + // SignalR HTTP connection closure status. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'app_shutdown', 'timeout' + SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") + + // SignalrTransportKey is the attribute Key conforming to the + // "signalr.transport" semantic conventions.
It represents the [SignalR + // transport + // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'web_sockets', 'long_polling' + SignalrTransportKey = attribute.Key("signalr.transport") +) + +var ( + // The connection was closed normally + SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") + // The connection was closed due to a timeout + SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") + // The connection was closed because the app is shutting down + SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") +) + +var ( + // ServerSentEvents protocol + SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") + // LongPolling protocol + SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") + // WebSockets protocol + SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") +) + +// These attributes may be used to describe the sender of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the destination side, and when communicating + // through an intermediary, `source.address` SHOULD represent the source + // address behind any intermediaries, for example proxies, if it's + // available. + SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" + // semantic conventions. It represents the source port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceAddress returns an attribute KeyValue conforming to the +// "source.address" semantic conventions. It represents the source address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number +func SourcePort(val int) attribute.KeyValue { + return SourcePortKey.Int(val) +} + +// Describes System attributes +const ( + // SystemDeviceKey is the attribute Key conforming to the "system.device" + // semantic conventions. 
It represents the device identifier + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '(identifier)' + SystemDeviceKey = attribute.Key("system.device") +) + +// SystemDevice returns an attribute KeyValue conforming to the +// "system.device" semantic conventions. It represents the device identifier +func SystemDevice(val string) attribute.KeyValue { + return SystemDeviceKey.String(val) +} + +// Describes System CPU attributes +const ( + // SystemCPULogicalNumberKey is the attribute Key conforming to the + // "system.cpu.logical_number" semantic conventions. It represents the + // logical CPU number [0..n-1] + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1 + SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") + + // SystemCPUStateKey is the attribute Key conforming to the + // "system.cpu.state" semantic conventions. It represents the state of the + // CPU + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'idle', 'interrupt' + SystemCPUStateKey = attribute.Key("system.cpu.state") +) + +var ( + // user + SystemCPUStateUser = SystemCPUStateKey.String("user") + // system + SystemCPUStateSystem = SystemCPUStateKey.String("system") + // nice + SystemCPUStateNice = SystemCPUStateKey.String("nice") + // idle + SystemCPUStateIdle = SystemCPUStateKey.String("idle") + // iowait + SystemCPUStateIowait = SystemCPUStateKey.String("iowait") + // interrupt + SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") + // steal + SystemCPUStateSteal = SystemCPUStateKey.String("steal") +) + +// SystemCPULogicalNumber returns an attribute KeyValue conforming to the +// "system.cpu.logical_number" semantic conventions. It represents the logical +// CPU number [0..n-1] +func SystemCPULogicalNumber(val int) attribute.KeyValue { + return SystemCPULogicalNumberKey.Int(val) +} + +// Describes System Memory attributes +const ( + // SystemMemoryStateKey is the attribute Key conforming to the + // "system.memory.state" semantic conventions. It represents the memory + // state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free', 'cached' + SystemMemoryStateKey = attribute.Key("system.memory.state") +) + +var ( + // used + SystemMemoryStateUsed = SystemMemoryStateKey.String("used") + // free + SystemMemoryStateFree = SystemMemoryStateKey.String("free") + // shared + SystemMemoryStateShared = SystemMemoryStateKey.String("shared") + // buffers + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + SystemMemoryStateCached = SystemMemoryStateKey.String("cached") +) + +// Describes System Memory Paging attributes +const ( + // SystemPagingDirectionKey is the attribute Key conforming to the + // "system.paging.direction" semantic conventions. It represents the paging + // access direction + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'in' + SystemPagingDirectionKey = attribute.Key("system.paging.direction") + + // SystemPagingStateKey is the attribute Key conforming to the + // "system.paging.state" semantic conventions. 
It represents the memory + // paging state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free' + SystemPagingStateKey = attribute.Key("system.paging.state") + + // SystemPagingTypeKey is the attribute Key conforming to the + // "system.paging.type" semantic conventions. It represents the memory + // paging type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'minor' + SystemPagingTypeKey = attribute.Key("system.paging.type") +) + +var ( + // in + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +var ( + // used + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +var ( + // major + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Describes Filesystem attributes +const ( + // SystemFilesystemModeKey is the attribute Key conforming to the + // "system.filesystem.mode" semantic conventions. It represents the + // filesystem mode + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'rw, ro' + SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") + + // SystemFilesystemMountpointKey is the attribute Key conforming to the + // "system.filesystem.mountpoint" semantic conventions. It represents the + // filesystem mount path + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/mnt/data' + SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") + + // SystemFilesystemStateKey is the attribute Key conforming to the + // "system.filesystem.state" semantic conventions. It represents the + // filesystem state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'used' + SystemFilesystemStateKey = attribute.Key("system.filesystem.state") + + // SystemFilesystemTypeKey is the attribute Key conforming to the + // "system.filesystem.type" semantic conventions. It represents the + // filesystem type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ext4' + SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") +) + +var ( + // used + SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") + // free + SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") + // reserved + SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") +) + +var ( + // fat32 + SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") + // exfat + SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") + // ntfs + SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") + // refs + SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") + // hfsplus + SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") + // ext4 + SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") +) + +// SystemFilesystemMode returns an attribute KeyValue conforming to the +// "system.filesystem.mode" semantic conventions. 
It represents the filesystem +// mode +func SystemFilesystemMode(val string) attribute.KeyValue { + return SystemFilesystemModeKey.String(val) +} + +// SystemFilesystemMountpoint returns an attribute KeyValue conforming to +// the "system.filesystem.mountpoint" semantic conventions. It represents the +// filesystem mount path +func SystemFilesystemMountpoint(val string) attribute.KeyValue { + return SystemFilesystemMountpointKey.String(val) +} + +// Describes Network attributes +const ( + // SystemNetworkStateKey is the attribute Key conforming to the + // "system.network.state" semantic conventions. It represents the network + // connection state; a stateless protocol MUST NOT set this attribute + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'close_wait' + SystemNetworkStateKey = attribute.Key("system.network.state") +) + +var ( + // close + SystemNetworkStateClose = SystemNetworkStateKey.String("close") + // close_wait + SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait") + // closing + SystemNetworkStateClosing = SystemNetworkStateKey.String("closing") + // delete + SystemNetworkStateDelete = SystemNetworkStateKey.String("delete") + // established + SystemNetworkStateEstablished = SystemNetworkStateKey.String("established") + // fin_wait_1 + SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1") + // fin_wait_2 + SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2") + // last_ack + SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack") + // listen + SystemNetworkStateListen = SystemNetworkStateKey.String("listen") + // syn_recv + SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv") + // syn_sent + SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent") + // time_wait + SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait") +) + +// Describes System Process attributes +const ( + // SystemProcessStatusKey is the attribute Key conforming to the + // "system.process.status" semantic conventions. It represents the process + // state, e.g., [Linux Process State + // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'running' + SystemProcessStatusKey = attribute.Key("system.process.status") +) + +var ( + // running + SystemProcessStatusRunning = SystemProcessStatusKey.String("running") + // sleeping + SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping") + // stopped + SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped") + // defunct + SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct") +) + +// Attributes for telemetry SDK. +const ( + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'opentelemetry' + // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute + // to `opentelemetry`. 
+ // If another SDK, like a fork or a vendor-provided implementation, is + // used, this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module + // name of this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this + // case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of + // the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'parts-unlimited-java' + // Note: Official auto instrumentation agents and distributions SHOULD set + // the `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the + // version string of the auto instrumentation agent or distribution, if + // used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2.3' + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. 
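+// +// A minimal usage sketch (illustrative only, not part of the generated +// upstream file; it assumes this package's SchemaURL constant and the SDK's +// resource package, go.opentelemetry.io/otel/sdk/resource): +// +// res := resource.NewWithAttributes( +// SchemaURL, +// TelemetrySDKName("opentelemetry"), +// TelemetrySDKVersion("1.2.3"), +// TelemetrySDKLanguageGo, +// )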
+func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// TelemetryDistroName returns an attribute KeyValue conforming to the +// "telemetry.distro.name" semantic conventions. It represents the name of the +// auto instrumentation agent or distribution, if used. +func TelemetryDistroName(val string) attribute.KeyValue { + return TelemetryDistroNameKey.String(val) +} + +// TelemetryDistroVersion returns an attribute KeyValue conforming to the +// "telemetry.distro.version" semantic conventions. It represents the version +// string of the auto instrumentation agent or distribution, if used. +func TelemetryDistroVersion(val string) attribute.KeyValue { + return TelemetryDistroVersionKey.String(val) +} + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed + // to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" + // semantic conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" +// semantic conventions. It represents the current "managed" thread ID (as +// opposed to OS thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// Semantic convention attributes in the TLS namespace. +const ( + // TLSCipherKey is the attribute Key conforming to the "tls.cipher" + // semantic conventions. It represents the string indicating the + // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) + // used during the current connection. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', + // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' + // Note: The values allowed for `tls.cipher` MUST be one of the + // `Descriptions` of the [registered TLS Cipher + // Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4). + TLSCipherKey = attribute.Key("tls.cipher") + + // TLSClientCertificateKey is the attribute Key conforming to the + // "tls.client.certificate" semantic conventions. It represents the + // PEM-encoded stand-alone certificate offered by the client. This is + // usually mutually-exclusive of `client.certificate_chain` since this + // value also exists in that list. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...' + TLSClientCertificateKey = attribute.Key("tls.client.certificate") + + // TLSClientCertificateChainKey is the attribute Key conforming to the + // "tls.client.certificate_chain" semantic conventions. It represents the + // array of PEM-encoded certificates that make up the certificate chain + // offered by the client. 
This is usually mutually-exclusive of + // `client.certificate` since that value should be the first certificate in + // the chain. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") + + // TLSClientHashMd5Key is the attribute Key conforming to the + // "tls.client.hash.md5" semantic conventions. It represents the + // certificate fingerprint using the MD5 digest of the DER-encoded version + // of the certificate offered by the client. For consistency with other + // hash values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") + + // TLSClientHashSha1Key is the attribute Key conforming to the + // "tls.client.hash.sha1" semantic conventions. It represents the + // certificate fingerprint using the SHA1 digest of the DER-encoded version + // of the certificate offered by the client. For consistency with other + // hash values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") + + // TLSClientHashSha256Key is the attribute Key conforming to the + // "tls.client.hash.sha256" semantic conventions. It represents the + // certificate fingerprint using the SHA256 digest of the DER-encoded + // version of the certificate offered by the client. For consistency with + // other hash values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") + + // TLSClientIssuerKey is the attribute Key conforming to the + // "tls.client.issuer" semantic conventions. It represents the + // distinguished name of + // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) + // of the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, + // DC=com' + TLSClientIssuerKey = attribute.Key("tls.client.issuer") + + // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" + // semantic conventions. It represents a hash that identifies clients based + // on how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + TLSClientJa3Key = attribute.Key("tls.client.ja3") + + // TLSClientNotAfterKey is the attribute Key conforming to the + // "tls.client.not_after" semantic conventions. It represents the date/time + // indicating when the client certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + TLSClientNotAfterKey = attribute.Key("tls.client.not_after") + + // TLSClientNotBeforeKey is the attribute Key conforming to the + // "tls.client.not_before" semantic conventions. 
It represents the + // date/time indicating when the client certificate is first considered + // valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") + + // TLSClientServerNameKey is the attribute Key conforming to the + // "tls.client.server_name" semantic conventions. It represents the server + // name (also called SNI), which tells the server the hostname to which the + // client is attempting to connect. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry.io' + TLSClientServerNameKey = attribute.Key("tls.client.server_name") + + // TLSClientSubjectKey is the attribute Key conforming to the + // "tls.client.subject" semantic conventions. It represents the + // distinguished name of subject of the x.509 certificate presented by the + // client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com' + TLSClientSubjectKey = attribute.Key("tls.client.subject") + + // TLSClientSupportedCiphersKey is the attribute Key conforming to the + // "tls.client.supported_ciphers" semantic conventions. It represents the + // array of ciphers offered by the client during the client hello. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."' + TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") + + // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic + // conventions. It represents the string indicating the curve used for the + // given cipher, when applicable + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'secp256r1' + TLSCurveKey = attribute.Key("tls.curve") + + // TLSEstablishedKey is the attribute Key conforming to the + // "tls.established" semantic conventions. It represents the boolean flag + // indicating if the TLS negotiation was successful and transitioned to an + // encrypted tunnel. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Examples: True + TLSEstablishedKey = attribute.Key("tls.established") + + // TLSNextProtocolKey is the attribute Key conforming to the + // "tls.next_protocol" semantic conventions. It represents the string + // indicating the protocol being tunneled. Per the values in the [IANA + // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), + // this string should be lower case. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'http/1.1' + TLSNextProtocolKey = attribute.Key("tls.next_protocol") + + // TLSProtocolNameKey is the attribute Key conforming to the + // "tls.protocol.name" semantic conventions. It represents the normalized + // lowercase protocol name parsed from original string of the negotiated + // [SSL/TLS protocol + // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + TLSProtocolNameKey = attribute.Key("tls.protocol.name") + + // TLSProtocolVersionKey is the attribute Key conforming to the + // "tls.protocol.version" semantic conventions. 
It represents the numeric + // part of the version parsed from the original string of the negotiated + // [SSL/TLS protocol + // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2', '3' + TLSProtocolVersionKey = attribute.Key("tls.protocol.version") + + // TLSResumedKey is the attribute Key conforming to the "tls.resumed" + // semantic conventions. It represents the boolean flag indicating if this + // TLS connection was resumed from an existing TLS negotiation. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Examples: True + TLSResumedKey = attribute.Key("tls.resumed") + + // TLSServerCertificateKey is the attribute Key conforming to the + // "tls.server.certificate" semantic conventions. It represents the + // PEM-encoded stand-alone certificate offered by the server. This is + // usually mutually-exclusive of `server.certificate_chain` since this + // value also exists in that list. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...' + TLSServerCertificateKey = attribute.Key("tls.server.certificate") + + // TLSServerCertificateChainKey is the attribute Key conforming to the + // "tls.server.certificate_chain" semantic conventions. It represents the + // array of PEM-encoded certificates that make up the certificate chain + // offered by the server. This is usually mutually-exclusive of + // `server.certificate` since that value should be the first certificate in + // the chain. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") + + // TLSServerHashMd5Key is the attribute Key conforming to the + // "tls.server.hash.md5" semantic conventions. It represents the + // certificate fingerprint using the MD5 digest of the DER-encoded version + // of the certificate offered by the server. For consistency with other + // hash values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") + + // TLSServerHashSha1Key is the attribute Key conforming to the + // "tls.server.hash.sha1" semantic conventions. It represents the + // certificate fingerprint using the SHA1 digest of the DER-encoded version + // of the certificate offered by the server. For consistency with other + // hash values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") + + // TLSServerHashSha256Key is the attribute Key conforming to the + // "tls.server.hash.sha256" semantic conventions. It represents the + // certificate fingerprint using the SHA256 digest of the DER-encoded + // version of the certificate offered by the server. For consistency with + // other hash values, this value should be formatted as an uppercase hash. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") + + // TLSServerIssuerKey is the attribute Key conforming to the + // "tls.server.issuer" semantic conventions. It represents the + // distinguished name of + // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) + // of the issuer of the x.509 certificate presented by the server. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, + // DC=com' + TLSServerIssuerKey = attribute.Key("tls.server.issuer") + + // TLSServerJa3sKey is the attribute Key conforming to the + // "tls.server.ja3s" semantic conventions. It represents a hash that + // identifies servers based on how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + TLSServerJa3sKey = attribute.Key("tls.server.ja3s") + + // TLSServerNotAfterKey is the attribute Key conforming to the + // "tls.server.not_after" semantic conventions. It represents the date/time + // indicating when the server certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + TLSServerNotAfterKey = attribute.Key("tls.server.not_after") + + // TLSServerNotBeforeKey is the attribute Key conforming to the + // "tls.server.not_before" semantic conventions. It represents the + // date/time indicating when the server certificate is first considered + // valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") + + // TLSServerSubjectKey is the attribute Key conforming to the + // "tls.server.subject" semantic conventions. It represents the + // distinguished name of subject of the x.509 certificate presented by the + // server. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' + TLSServerSubjectKey = attribute.Key("tls.server.subject") +) + +var ( + // ssl + TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") + // tls + TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") +) + +// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" +// semantic conventions. It represents the string indicating the +// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used +// during the current connection. +func TLSCipher(val string) attribute.KeyValue { + return TLSCipherKey.String(val) +} + +// TLSClientCertificate returns an attribute KeyValue conforming to the +// "tls.client.certificate" semantic conventions. It represents the PEM-encoded +// stand-alone certificate offered by the client. This is usually +// mutually-exclusive of `client.certificate_chain` since this value also +// exists in that list. +func TLSClientCertificate(val string) attribute.KeyValue { + return TLSClientCertificateKey.String(val) +} + +// TLSClientCertificateChain returns an attribute KeyValue conforming to the +// "tls.client.certificate_chain" semantic conventions. 
It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by +// the client. This is usually mutually-exclusive of `client.certificate` since +// that value should be the first certificate in the chain. +func TLSClientCertificateChain(val ...string) attribute.KeyValue { + return TLSClientCertificateChainKey.StringSlice(val) +} + +// TLSClientHashMd5 returns an attribute KeyValue conforming to the +// "tls.client.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of the DER-encoded version of the +// certificate offered by the client. For consistency with other hash values, +// this value should be formatted as an uppercase hash. +func TLSClientHashMd5(val string) attribute.KeyValue { + return TLSClientHashMd5Key.String(val) +} + +// TLSClientHashSha1 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of the DER-encoded version of the +// certificate offered by the client. For consistency with other hash values, +// this value should be formatted as an uppercase hash. +func TLSClientHashSha1(val string) attribute.KeyValue { + return TLSClientHashSha1Key.String(val) +} + +// TLSClientHashSha256 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of the DER-encoded version of the +// certificate offered by the client. For consistency with other hash values, +// this value should be formatted as an uppercase hash. +func TLSClientHashSha256(val string) attribute.KeyValue { + return TLSClientHashSha256Key.String(val) +} + +// TLSClientIssuer returns an attribute KeyValue conforming to the +// "tls.client.issuer" semantic conventions. It represents the distinguished +// name of +// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of +// the issuer of the x.509 certificate presented by the client. +func TLSClientIssuer(val string) attribute.KeyValue { + return TLSClientIssuerKey.String(val) +} + +// TLSClientJa3 returns an attribute KeyValue conforming to the +// "tls.client.ja3" semantic conventions. It represents a hash that identifies +// clients based on how they perform an SSL/TLS handshake. +func TLSClientJa3(val string) attribute.KeyValue { + return TLSClientJa3Key.String(val) +} + +// TLSClientNotAfter returns an attribute KeyValue conforming to the +// "tls.client.not_after" semantic conventions. It represents the date/time +// indicating when the client certificate is no longer considered valid. +func TLSClientNotAfter(val string) attribute.KeyValue { + return TLSClientNotAfterKey.String(val) +} + +// TLSClientNotBefore returns an attribute KeyValue conforming to the +// "tls.client.not_before" semantic conventions. It represents the date/time +// indicating when the client certificate is first considered valid. +func TLSClientNotBefore(val string) attribute.KeyValue { + return TLSClientNotBeforeKey.String(val) +} + +// TLSClientServerName returns an attribute KeyValue conforming to the +// "tls.client.server_name" semantic conventions. It represents the server name +// (also called SNI), which tells the server the hostname to which the client +// is attempting to connect. 
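+// +// For example (an illustrative sketch, not part of the generated upstream +// file; span is an assumed active trace.Span): +// +// span.SetAttributes(TLSClientServerName("opentelemetry.io"))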
+func TLSClientServerName(val string) attribute.KeyValue { + return TLSClientServerNameKey.String(val) +} + +// TLSClientSubject returns an attribute KeyValue conforming to the +// "tls.client.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the client. +func TLSClientSubject(val string) attribute.KeyValue { + return TLSClientSubjectKey.String(val) +} + +// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the +// "tls.client.supported_ciphers" semantic conventions. It represents the array +// of ciphers offered by the client during the client hello. +func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { + return TLSClientSupportedCiphersKey.StringSlice(val) +} + +// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" +// semantic conventions. It represents the string indicating the curve used for +// the given cipher, when applicable +func TLSCurve(val string) attribute.KeyValue { + return TLSCurveKey.String(val) +} + +// TLSEstablished returns an attribute KeyValue conforming to the +// "tls.established" semantic conventions. It represents the boolean flag +// indicating if the TLS negotiation was successful and transitioned to an +// encrypted tunnel. +func TLSEstablished(val bool) attribute.KeyValue { + return TLSEstablishedKey.Bool(val) +} + +// TLSNextProtocol returns an attribute KeyValue conforming to the +// "tls.next_protocol" semantic conventions. It represents the string +// indicating the protocol being tunneled. Per the values in the [IANA +// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), +// this string should be lower case. +func TLSNextProtocol(val string) attribute.KeyValue { + return TLSNextProtocolKey.String(val) +} + +// TLSProtocolVersion returns an attribute KeyValue conforming to the +// "tls.protocol.version" semantic conventions. It represents the numeric part +// of the version parsed from the original string of the negotiated [SSL/TLS +// protocol +// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) +func TLSProtocolVersion(val string) attribute.KeyValue { + return TLSProtocolVersionKey.String(val) +} + +// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" +// semantic conventions. It represents the boolean flag indicating if this TLS +// connection was resumed from an existing TLS negotiation. +func TLSResumed(val bool) attribute.KeyValue { + return TLSResumedKey.Bool(val) +} + +// TLSServerCertificate returns an attribute KeyValue conforming to the +// "tls.server.certificate" semantic conventions. It represents the PEM-encoded +// stand-alone certificate offered by the server. This is usually +// mutually-exclusive of `server.certificate_chain` since this value also +// exists in that list. +func TLSServerCertificate(val string) attribute.KeyValue { + return TLSServerCertificateKey.String(val) +} + +// TLSServerCertificateChain returns an attribute KeyValue conforming to the +// "tls.server.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by +// the server. This is usually mutually-exclusive of `server.certificate` since +// that value should be the first certificate in the chain. 
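+// +// Illustrative sketch (not part of the generated upstream file; the PEM +// strings are placeholders, and span is an assumed active trace.Span): +// +// span.SetAttributes(TLSServerCertificateChain("MII...", "MI..."))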
+func TLSServerCertificateChain(val ...string) attribute.KeyValue { + return TLSServerCertificateChainKey.StringSlice(val) +} + +// TLSServerHashMd5 returns an attribute KeyValue conforming to the +// "tls.server.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of the DER-encoded version of the +// certificate offered by the server. For consistency with other hash values, +// this value should be formatted as an uppercase hash. +func TLSServerHashMd5(val string) attribute.KeyValue { + return TLSServerHashMd5Key.String(val) +} + +// TLSServerHashSha1 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of the DER-encoded version of the +// certificate offered by the server. For consistency with other hash values, +// this value should be formatted as an uppercase hash. +func TLSServerHashSha1(val string) attribute.KeyValue { + return TLSServerHashSha1Key.String(val) +} + +// TLSServerHashSha256 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of the DER-encoded version of the +// certificate offered by the server. For consistency with other hash values, +// this value should be formatted as an uppercase hash. +func TLSServerHashSha256(val string) attribute.KeyValue { + return TLSServerHashSha256Key.String(val) +} + +// TLSServerIssuer returns an attribute KeyValue conforming to the +// "tls.server.issuer" semantic conventions. It represents the distinguished +// name of +// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of +// the issuer of the x.509 certificate presented by the server. +func TLSServerIssuer(val string) attribute.KeyValue { + return TLSServerIssuerKey.String(val) +} + +// TLSServerJa3s returns an attribute KeyValue conforming to the +// "tls.server.ja3s" semantic conventions. It represents a hash that identifies +// servers based on how they perform an SSL/TLS handshake. +func TLSServerJa3s(val string) attribute.KeyValue { + return TLSServerJa3sKey.String(val) +} + +// TLSServerNotAfter returns an attribute KeyValue conforming to the +// "tls.server.not_after" semantic conventions. It represents the date/time +// indicating when the server certificate is no longer considered valid. +func TLSServerNotAfter(val string) attribute.KeyValue { + return TLSServerNotAfterKey.String(val) +} + +// TLSServerNotBefore returns an attribute KeyValue conforming to the +// "tls.server.not_before" semantic conventions. It represents the date/time +// indicating when the server certificate is first considered valid. +func TLSServerNotBefore(val string) attribute.KeyValue { + return TLSServerNotBeforeKey.String(val) +} + +// TLSServerSubject returns an attribute KeyValue conforming to the +// "tls.server.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the server. +func TLSServerSubject(val string) attribute.KeyValue { + return TLSServerSubjectKey.String(val) +} + +// Attributes describing URL. +const ( + // URLDomainKey is the attribute Key conforming to the "url.domain" + // semantic conventions. It represents the domain extracted from the + // `url.full`, such as "opentelemetry.io". 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2', + // '[1080:0:0:0:8:800:200C:417A]' + // Note: In some cases a URL may refer to an IP and/or port directly, + // without a domain name. In this case, the IP address would go to the + // domain field. If the URL contains a [literal IPv6 + // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by + // `[` and `]`, the `[` and `]` characters should also be captured in the + // domain field. + URLDomainKey = attribute.Key("url.domain") + + // URLExtensionKey is the attribute Key conforming to the "url.extension" + // semantic conventions. It represents the file extension extracted from + // the `url.full`, excluding the leading dot. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: The file extension is only set if it exists, as not every url has + // a file extension. When the file name has multiple extensions + // `example.tar.gz`, only the last one should be captured `gz`, not + // `tar.gz`. + URLExtensionKey = attribute.Key("url.extension") + + // URLFragmentKey is the attribute Key conforming to the "url.fragment" + // semantic conventions. It represents the [URI + // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'SemConv' + URLFragmentKey = attribute.Key("url.fragment") + + // URLFullKey is the attribute Key conforming to the "url.full" semantic + // conventions. It represents the absolute URL describing a network + // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // '//localhost' + // Note: For network calls, URL usually has + // `scheme://host[:port][path][?query][#fragment]` format, where the + // fragment is not transmitted over HTTP, but if it is known, it SHOULD be + // included nevertheless. + // `url.full` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case username and + // password SHOULD be redacted and attribute's value SHOULD be + // `https://REDACTED:REDACTED@www.example.com/`. + // `url.full` SHOULD capture the absolute URL when it is available (or can + // be reconstructed). Sensitive content provided in `url.full` SHOULD be + // scrubbed when instrumentations can identify it. + URLFullKey = attribute.Key("url.full") + + // URLOriginalKey is the attribute Key conforming to the "url.original" + // semantic conventions. It represents the unmodified original URL as seen + // in the event source. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // 'search?q=OpenTelemetry' + // Note: In network monitoring, the observed URL may be a full URL, whereas + // in access logs, the URL is often just represented as a path. This field + // is meant to represent the URL as it was observed, complete or not. + // `url.original` might contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case password and + // username SHOULD NOT be redacted and attribute's value SHOULD remain the + // same. 
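+ // + // Illustrative example (not part of the generated upstream file, assuming + // an active trace.Span named span): + // + // span.SetAttributes(URLOriginal("search?q=OpenTelemetry"))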
+ URLOriginalKey = attribute.Key("url.original") + + // URLPathKey is the attribute Key conforming to the "url.path" semantic + // conventions. It represents the [URI + // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/search' + // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when + // instrumentations can identify it. + URLPathKey = attribute.Key("url.path") + + // URLPortKey is the attribute Key conforming to the "url.port" semantic + // conventions. It represents the port extracted from the `url.full` + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 443 + URLPortKey = attribute.Key("url.port") + + // URLQueryKey is the attribute Key conforming to the "url.query" semantic + // conventions. It represents the [URI + // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'q=OpenTelemetry' + // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when + // instrumentations can identify it. + URLQueryKey = attribute.Key("url.query") + + // URLRegisteredDomainKey is the attribute Key conforming to the + // "url.registered_domain" semantic conventions. It represents the highest + // registered URL domain, stripped of the subdomain. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'example.com', 'foo.co.uk' + // Note: This value can be determined precisely with the [public suffix + // list](http://publicsuffix.org). For example, the registered domain for + // `foo.example.com` is `example.com`. Trying to approximate this by simply + // taking the last two labels will not work well for TLDs such as `co.uk`. + URLRegisteredDomainKey = attribute.Key("url.registered_domain") + + // URLSchemeKey is the attribute Key conforming to the "url.scheme" + // semantic conventions. It represents the [URI + // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component + // identifying the used protocol. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'https', 'ftp', 'telnet' + URLSchemeKey = attribute.Key("url.scheme") + + // URLSubdomainKey is the attribute Key conforming to the "url.subdomain" + // semantic conventions. It represents the subdomain portion of a fully + // qualified domain name, which includes all of the names except the host + // name under the registered_domain. In a partially qualified domain, or if + // the qualification level of the full name cannot be determined, subdomain + // contains all of the names below the registered domain. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'east', 'sub2.sub1' + // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If + // the domain has multiple levels of subdomain, such as + // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`, + // with no trailing period. + URLSubdomainKey = attribute.Key("url.subdomain") + + // URLTemplateKey is the attribute Key conforming to the "url.template" + // semantic conventions. It represents the low-cardinality template of an + // [absolute path + // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/users/{id}', '/users/:id', '/users?id={id}' + URLTemplateKey = attribute.Key("url.template") + + // URLTopLevelDomainKey is the attribute Key conforming to the + // "url.top_level_domain" semantic conventions. It represents the effective + // top level domain (eTLD), also known as the domain suffix, which is the + // last part of the domain name. For example, the top level domain for + // example.com is `com`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com', 'co.uk' + // Note: This value can be determined precisely with the [public suffix + // list](http://publicsuffix.org). + URLTopLevelDomainKey = attribute.Key("url.top_level_domain") +) + +// URLDomain returns an attribute KeyValue conforming to the "url.domain" +// semantic conventions. It represents the domain extracted from the +// `url.full`, such as "opentelemetry.io". +func URLDomain(val string) attribute.KeyValue { + return URLDomainKey.String(val) +} + +// URLExtension returns an attribute KeyValue conforming to the +// "url.extension" semantic conventions. It represents the file extension +// extracted from the `url.full`, excluding the leading dot. +func URLExtension(val string) attribute.KeyValue { + return URLExtensionKey.String(val) +} + +// URLFragment returns an attribute KeyValue conforming to the +// "url.fragment" semantic conventions. It represents the [URI +// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component +func URLFragment(val string) attribute.KeyValue { + return URLFragmentKey.String(val) +} + +// URLFull returns an attribute KeyValue conforming to the "url.full" +// semantic conventions. It represents the absolute URL describing a network +// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) +func URLFull(val string) attribute.KeyValue { + return URLFullKey.String(val) +} + +// URLOriginal returns an attribute KeyValue conforming to the +// "url.original" semantic conventions. It represents the unmodified original +// URL as seen in the event source. +func URLOriginal(val string) attribute.KeyValue { + return URLOriginalKey.String(val) +} + +// URLPath returns an attribute KeyValue conforming to the "url.path" +// semantic conventions. It represents the [URI +// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component +func URLPath(val string) attribute.KeyValue { + return URLPathKey.String(val) +} + +// URLPort returns an attribute KeyValue conforming to the "url.port" +// semantic conventions. It represents the port extracted from the `url.full` +func URLPort(val int) attribute.KeyValue { + return URLPortKey.Int(val) +} + +// URLQuery returns an attribute KeyValue conforming to the "url.query" +// semantic conventions. It represents the [URI +// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component +func URLQuery(val string) attribute.KeyValue { + return URLQueryKey.String(val) +} + +// URLRegisteredDomain returns an attribute KeyValue conforming to the +// "url.registered_domain" semantic conventions. It represents the highest +// registered URL domain, stripped of the subdomain. +func URLRegisteredDomain(val string) attribute.KeyValue { + return URLRegisteredDomainKey.String(val) +} + +// URLScheme returns an attribute KeyValue conforming to the "url.scheme" +// semantic conventions. 
It represents the [URI +// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component +// identifying the used protocol. +func URLScheme(val string) attribute.KeyValue { + return URLSchemeKey.String(val) +} + +// URLSubdomain returns an attribute KeyValue conforming to the +// "url.subdomain" semantic conventions. It represents the subdomain portion of +// a fully qualified domain name, which includes all of the names except the +// host name under the registered_domain. In a partially qualified domain, or +// if the qualification level of the full name cannot be determined, subdomain +// contains all of the names below the registered domain. +func URLSubdomain(val string) attribute.KeyValue { + return URLSubdomainKey.String(val) +} + +// URLTemplate returns an attribute KeyValue conforming to the +// "url.template" semantic conventions. It represents the low-cardinality +// template of an [absolute path +// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). +func URLTemplate(val string) attribute.KeyValue { + return URLTemplateKey.String(val) +} + +// URLTopLevelDomain returns an attribute KeyValue conforming to the +// "url.top_level_domain" semantic conventions. It represents the effective top +// level domain (eTLD), also known as the domain suffix, which is the last part +// of the domain name. For example, the top level domain for example.com is +// `com`. +func URLTopLevelDomain(val string) attribute.KeyValue { + return URLTopLevelDomainKey.String(val) +} + +// Describes user-agent attributes. +const ( + // UserAgentNameKey is the attribute Key conforming to the + // "user_agent.name" semantic conventions. It represents the name of the + // user-agent extracted from original. Usually refers to the browser's + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Safari', 'YourApp' + // Note: [Example](https://www.whatsmyua.info) of extracting browser's name + // from original string. In the case of using a user-agent for non-browser + // products, such as microservices with multiple names/versions inside the + // `user_agent.original`, the most significant name SHOULD be selected. In + // such a scenario it should align with `user_agent.version` + UserAgentNameKey = attribute.Key("user_agent.name") + + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of + // the [HTTP + // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) + // header sent by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU + // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0 + // grpc-java-okhttp/1.27.2' + UserAgentOriginalKey = attribute.Key("user_agent.original") + + // UserAgentVersionKey is the attribute Key conforming to the + // "user_agent.version" semantic conventions. It represents the version of + // the user-agent extracted from original. Usually refers to the browser's + // version + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.1.2', '1.0.0' + // Note: [Example](https://www.whatsmyua.info) of extracting browser's + // version from original string. 
In the case of using a user-agent for + // non-browser products, such as microservices with multiple names/versions + // inside the `user_agent.original`, the most significant version SHOULD be + // selected. In such a scenario it should align with `user_agent.name` + UserAgentVersionKey = attribute.Key("user_agent.version") +) + +// UserAgentName returns an attribute KeyValue conforming to the +// "user_agent.name" semantic conventions. It represents the name of the +// user-agent extracted from original. Usually refers to the browser's name. +func UserAgentName(val string) attribute.KeyValue { + return UserAgentNameKey.String(val) +} + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client. +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} + +// UserAgentVersion returns an attribute KeyValue conforming to the +// "user_agent.version" semantic conventions. It represents the version of the +// user-agent extracted from original. Usually refers to the browser's version +func UserAgentVersion(val string) attribute.KeyValue { + return UserAgentVersionKey.String(val) +} + +// The attributes used to describe the packaged software running the +// application code. +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") +) + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. 
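+// +// A short sketch (illustrative only, not part of the generated upstream file) +// of describing a web engine on a resource; it assumes this package's +// SchemaURL constant and go.opentelemetry.io/otel/sdk/resource: +// +// res := resource.NewWithAttributes( +// SchemaURL, +// WebEngineName("WildFly"), +// WebEngineVersion("21.0.0"), +// )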
+func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go new file mode 100644 index 00000000000..d031bbea784 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.26.0 +// version of the OpenTelemetry semantic conventions. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go new file mode 100644 index 00000000000..bfaee0d56e3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go new file mode 100644 index 00000000000..fcdb9f48596 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go @@ -0,0 +1,1307 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +const ( + + // ContainerCPUTime is the metric conforming to the "container.cpu.time" + // semantic conventions. It represents the total CPU time consumed. + // Instrument: counter + // Unit: s + // Stability: Experimental + ContainerCPUTimeName = "container.cpu.time" + ContainerCPUTimeUnit = "s" + ContainerCPUTimeDescription = "Total CPU time consumed" + + // ContainerMemoryUsage is the metric conforming to the + // "container.memory.usage" semantic conventions. It represents the memory + // usage of the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerMemoryUsageName = "container.memory.usage" + ContainerMemoryUsageUnit = "By" + ContainerMemoryUsageDescription = "Memory usage of the container." + + // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic + // conventions. It represents the disk bytes for the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerDiskIoName = "container.disk.io" + ContainerDiskIoUnit = "By" + ContainerDiskIoDescription = "Disk bytes for the container." + + // ContainerNetworkIo is the metric conforming to the "container.network.io" + // semantic conventions. It represents the network bytes for the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerNetworkIoName = "container.network.io" + ContainerNetworkIoUnit = "By" + ContainerNetworkIoDescription = "Network bytes for the container." + + // DBClientOperationDuration is the metric conforming to the + // "db.client.operation.duration" semantic conventions. It represents the + // duration of database client operations. 
+ // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientOperationDurationName = "db.client.operation.duration" + DBClientOperationDurationUnit = "s" + DBClientOperationDurationDescription = "Duration of database client operations." + + // DBClientConnectionCount is the metric conforming to the + // "db.client.connection.count" semantic conventions. It represents the number + // of connections that are currently in state described by the `state` + // attribute. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionCountName = "db.client.connection.count" + DBClientConnectionCountUnit = "{connection}" + DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute" + + // DBClientConnectionIdleMax is the metric conforming to the + // "db.client.connection.idle.max" semantic conventions. It represents the + // maximum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionIdleMaxName = "db.client.connection.idle.max" + DBClientConnectionIdleMaxUnit = "{connection}" + DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed" + + // DBClientConnectionIdleMin is the metric conforming to the + // "db.client.connection.idle.min" semantic conventions. It represents the + // minimum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionIdleMinName = "db.client.connection.idle.min" + DBClientConnectionIdleMinUnit = "{connection}" + DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed" + + // DBClientConnectionMax is the metric conforming to the + // "db.client.connection.max" semantic conventions. It represents the maximum + // number of open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionMaxName = "db.client.connection.max" + DBClientConnectionMaxUnit = "{connection}" + DBClientConnectionMaxDescription = "The maximum number of open connections allowed" + + // DBClientConnectionPendingRequests is the metric conforming to the + // "db.client.connection.pending_requests" semantic conventions. It represents + // the number of pending requests for an open connection, cumulative for the + // entire pool. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests" + DBClientConnectionPendingRequestsUnit = "{request}" + DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" + + // DBClientConnectionTimeouts is the metric conforming to the + // "db.client.connection.timeouts" semantic conventions. It represents the + // number of connection timeouts that have occurred trying to obtain a + // connection from the pool. 
+ // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + DBClientConnectionTimeoutsName = "db.client.connection.timeouts" + DBClientConnectionTimeoutsUnit = "{timeout}" + DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool" + + // DBClientConnectionCreateTime is the metric conforming to the + // "db.client.connection.create_time" semantic conventions. It represents the + // time it took to create a new connection. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionCreateTimeName = "db.client.connection.create_time" + DBClientConnectionCreateTimeUnit = "s" + DBClientConnectionCreateTimeDescription = "The time it took to create a new connection" + + // DBClientConnectionWaitTime is the metric conforming to the + // "db.client.connection.wait_time" semantic conventions. It represents the + // time it took to obtain an open connection from the pool. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionWaitTimeName = "db.client.connection.wait_time" + DBClientConnectionWaitTimeUnit = "s" + DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool" + + // DBClientConnectionUseTime is the metric conforming to the + // "db.client.connection.use_time" semantic conventions. It represents the time + // between borrowing a connection and returning it to the pool. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionUseTimeName = "db.client.connection.use_time" + DBClientConnectionUseTimeUnit = "s" + DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool" + + // DBClientConnectionsUsage is the metric conforming to the + // "db.client.connections.usage" semantic conventions. It is + // deprecated; use `db.client.connection.count` instead. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsUsageName = "db.client.connections.usage" + DBClientConnectionsUsageUnit = "{connection}" + DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead." + + // DBClientConnectionsIdleMax is the metric conforming to the + // "db.client.connections.idle.max" semantic conventions. It is + // deprecated; use `db.client.connection.idle.max` instead. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsIdleMaxName = "db.client.connections.idle.max" + DBClientConnectionsIdleMaxUnit = "{connection}" + DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead." + + // DBClientConnectionsIdleMin is the metric conforming to the + // "db.client.connections.idle.min" semantic conventions. It is + // deprecated; use `db.client.connection.idle.min` instead. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsIdleMinName = "db.client.connections.idle.min" + DBClientConnectionsIdleMinUnit = "{connection}" + DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead." + + // DBClientConnectionsMax is the metric conforming to the + // "db.client.connections.max" semantic conventions. It is + // deprecated; use `db.client.connection.max` instead.
+ // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsMaxName = "db.client.connections.max" + DBClientConnectionsMaxUnit = "{connection}" + DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead." + + // DBClientConnectionsPendingRequests is the metric conforming to the + // "db.client.connections.pending_requests" semantic conventions. It is + // deprecated; use `db.client.connection.pending_requests` instead. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests" + DBClientConnectionsPendingRequestsUnit = "{request}" + DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead." + + // DBClientConnectionsTimeouts is the metric conforming to the + // "db.client.connections.timeouts" semantic conventions. It is + // deprecated; use `db.client.connection.timeouts` instead. + // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + DBClientConnectionsTimeoutsName = "db.client.connections.timeouts" + DBClientConnectionsTimeoutsUnit = "{timeout}" + DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead." + + // DBClientConnectionsCreateTime is the metric conforming to the + // "db.client.connections.create_time" semantic conventions. It is + // deprecated; use `db.client.connection.create_time` instead. Note: the unit + // also changed from `ms` to `s`. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsCreateTimeName = "db.client.connections.create_time" + DBClientConnectionsCreateTimeUnit = "ms" + DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`." + + // DBClientConnectionsWaitTime is the metric conforming to the + // "db.client.connections.wait_time" semantic conventions. It is + // deprecated; use `db.client.connection.wait_time` instead. Note: the unit + // also changed from `ms` to `s`. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsWaitTimeName = "db.client.connections.wait_time" + DBClientConnectionsWaitTimeUnit = "ms" + DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`." + + // DBClientConnectionsUseTime is the metric conforming to the + // "db.client.connections.use_time" semantic conventions. It is + // deprecated; use `db.client.connection.use_time` instead. Note: the unit also + // changed from `ms` to `s`. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsUseTimeName = "db.client.connections.use_time" + DBClientConnectionsUseTimeUnit = "ms" + DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`." + + // DNSLookupDuration is the metric conforming to the "dns.lookup.duration" + // semantic conventions. It measures the time taken to perform a + // DNS lookup. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DNSLookupDurationName = "dns.lookup.duration" + DNSLookupDurationUnit = "s" + DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
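
The Name/Unit/Description triples above are designed to be passed together when an instrument is declared, so the emitted metric stays aligned with the convention. A minimal sketch, assuming a configured global MeterProvider (the meter name "example" and the helper are illustrative):

    package example

    import (
        "context"
        "time"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/metric"
        semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
    )

    func recordDNSLookup(ctx context.Context, elapsed time.Duration) error {
        // Declare the histogram with the generated name, unit, and description.
        hist, err := otel.Meter("example").Float64Histogram(
            semconv.DNSLookupDurationName,
            metric.WithUnit(semconv.DNSLookupDurationUnit),
            metric.WithDescription(semconv.DNSLookupDurationDescription),
        )
        if err != nil {
            return err
        }
        hist.Record(ctx, elapsed.Seconds()) // unit is "s", so record seconds
        return nil
    }
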
+ + // AspnetcoreRoutingMatchAttempts is the metric conforming to the + // "aspnetcore.routing.match_attempts" semantic conventions. It represents the + // number of requests that were attempted to be matched to an endpoint. + // Instrument: counter + // Unit: {match_attempt} + // Stability: Stable + AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" + AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" + AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." + + // AspnetcoreDiagnosticsExceptions is the metric conforming to the + // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the + // number of exceptions caught by exception handling middleware. + // Instrument: counter + // Unit: {exception} + // Stability: Stable + AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" + AspnetcoreDiagnosticsExceptionsUnit = "{exception}" + AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." + + // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the + // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It + // represents the number of requests that are currently active on the server + // that hold a rate limiting lease. + // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases" + AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}" + AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease." + + // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the + // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It + // represents the duration of rate limiting lease held by requests on the + // server. + // Instrument: histogram + // Unit: s + // Stability: Stable + AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration" + AspnetcoreRateLimitingRequestLeaseDurationUnit = "s" + AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server." + + // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the + // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It + // represents the time the request spent in a queue waiting to acquire a rate + // limiting lease. + // Instrument: histogram + // Unit: s + // Stability: Stable + AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue" + AspnetcoreRateLimitingRequestTimeInQueueUnit = "s" + AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease." + + // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the + // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It + // represents the number of requests that are currently queued, waiting to + // acquire a rate limiting lease. 
+ // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests" + AspnetcoreRateLimitingQueuedRequestsUnit = "{request}" + AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease." + + // AspnetcoreRateLimitingRequests is the metric conforming to the + // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the + // number of requests that tried to acquire a rate limiting lease. + // Instrument: counter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests" + AspnetcoreRateLimitingRequestsUnit = "{request}" + AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease." + + // KestrelActiveConnections is the metric conforming to the + // "kestrel.active_connections" semantic conventions. It represents the number + // of connections that are currently active on the server. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + KestrelActiveConnectionsName = "kestrel.active_connections" + KestrelActiveConnectionsUnit = "{connection}" + KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." + + // KestrelConnectionDuration is the metric conforming to the + // "kestrel.connection.duration" semantic conventions. It represents the + // duration of connections on the server. + // Instrument: histogram + // Unit: s + // Stability: Stable + KestrelConnectionDurationName = "kestrel.connection.duration" + KestrelConnectionDurationUnit = "s" + KestrelConnectionDurationDescription = "The duration of connections on the server." + + // KestrelRejectedConnections is the metric conforming to the + // "kestrel.rejected_connections" semantic conventions. It represents the + // number of connections rejected by the server. + // Instrument: counter + // Unit: {connection} + // Stability: Stable + KestrelRejectedConnectionsName = "kestrel.rejected_connections" + KestrelRejectedConnectionsUnit = "{connection}" + KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." + + // KestrelQueuedConnections is the metric conforming to the + // "kestrel.queued_connections" semantic conventions. It represents the number + // of connections that are currently queued and are waiting to start. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + KestrelQueuedConnectionsName = "kestrel.queued_connections" + KestrelQueuedConnectionsUnit = "{connection}" + KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start." + + // KestrelQueuedRequests is the metric conforming to the + // "kestrel.queued_requests" semantic conventions. It represents the number of + // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are + // currently queued and are waiting to start. + // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + KestrelQueuedRequestsName = "kestrel.queued_requests" + KestrelQueuedRequestsUnit = "{request}" + KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start." + + // KestrelUpgradedConnections is the metric conforming to the + // "kestrel.upgraded_connections" semantic conventions. 
It represents the + // number of connections that are currently upgraded (WebSockets). + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + KestrelUpgradedConnectionsName = "kestrel.upgraded_connections" + KestrelUpgradedConnectionsUnit = "{connection}" + KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets). ." + + // KestrelTLSHandshakeDuration is the metric conforming to the + // "kestrel.tls_handshake.duration" semantic conventions. It represents the + // duration of TLS handshakes on the server. + // Instrument: histogram + // Unit: s + // Stability: Stable + KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration" + KestrelTLSHandshakeDurationUnit = "s" + KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server." + + // KestrelActiveTLSHandshakes is the metric conforming to the + // "kestrel.active_tls_handshakes" semantic conventions. It represents the + // number of TLS handshakes that are currently in progress on the server. + // Instrument: updowncounter + // Unit: {handshake} + // Stability: Stable + KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes" + KestrelActiveTLSHandshakesUnit = "{handshake}" + KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server." + + // SignalrServerConnectionDuration is the metric conforming to the + // "signalr.server.connection.duration" semantic conventions. It represents the + // duration of connections on the server. + // Instrument: histogram + // Unit: s + // Stability: Stable + SignalrServerConnectionDurationName = "signalr.server.connection.duration" + SignalrServerConnectionDurationUnit = "s" + SignalrServerConnectionDurationDescription = "The duration of connections on the server." + + // SignalrServerActiveConnections is the metric conforming to the + // "signalr.server.active_connections" semantic conventions. It represents the + // number of connections that are currently active on the server. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + SignalrServerActiveConnectionsName = "signalr.server.active_connections" + SignalrServerActiveConnectionsUnit = "{connection}" + SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server." + + // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration" + // semantic conventions. It measures the duration of the + // function's logic execution. + // Instrument: histogram + // Unit: s + // Stability: Experimental + FaaSInvokeDurationName = "faas.invoke_duration" + FaaSInvokeDurationUnit = "s" + FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution" + + // FaaSInitDuration is the metric conforming to the "faas.init_duration" + // semantic conventions. It measures the duration of the + // function's initialization, such as a cold start. + // Instrument: histogram + // Unit: s + // Stability: Experimental + FaaSInitDurationName = "faas.init_duration" + FaaSInitDurationUnit = "s" + FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start" + + // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic + // conventions. It represents the number of invocation cold starts.
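
Counter-shaped entries such as faas.coldstarts map onto a monotonic Int64Counter. A hedged sketch under the same assumptions as the earlier example (meter and function names are illustrative):

    package example

    import (
        "context"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/metric"
        semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
    )

    func countColdstart(ctx context.Context) error {
        coldstarts, err := otel.Meter("example").Int64Counter(
            semconv.FaaSColdstartsName,
            metric.WithUnit(semconv.FaaSColdstartsUnit),
            metric.WithDescription(semconv.FaaSColdstartsDescription),
        )
        if err != nil {
            return err
        }
        coldstarts.Add(ctx, 1) // one cold start observed
        return nil
    }
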
+ // Instrument: counter + // Unit: {coldstart} + // Stability: Experimental + FaaSColdstartsName = "faas.coldstarts" + FaaSColdstartsUnit = "{coldstart}" + FaaSColdstartsDescription = "Number of invocation cold starts" + + // FaaSErrors is the metric conforming to the "faas.errors" semantic + // conventions. It represents the number of invocation errors. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + FaaSErrorsName = "faas.errors" + FaaSErrorsUnit = "{error}" + FaaSErrorsDescription = "Number of invocation errors" + + // FaaSInvocations is the metric conforming to the "faas.invocations" semantic + // conventions. It represents the number of successful invocations. + // Instrument: counter + // Unit: {invocation} + // Stability: Experimental + FaaSInvocationsName = "faas.invocations" + FaaSInvocationsUnit = "{invocation}" + FaaSInvocationsDescription = "Number of successful invocations" + + // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic + // conventions. It represents the number of invocation timeouts. + // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + FaaSTimeoutsName = "faas.timeouts" + FaaSTimeoutsUnit = "{timeout}" + FaaSTimeoutsDescription = "Number of invocation timeouts" + + // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic + // conventions. It represents the distribution of max memory usage per + // invocation. + // Instrument: histogram + // Unit: By + // Stability: Experimental + FaaSMemUsageName = "faas.mem_usage" + FaaSMemUsageUnit = "By" + FaaSMemUsageDescription = "Distribution of max memory usage per invocation" + + // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic + // conventions. It represents the distribution of CPU usage per invocation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + FaaSCPUUsageName = "faas.cpu_usage" + FaaSCPUUsageUnit = "s" + FaaSCPUUsageDescription = "Distribution of CPU usage per invocation" + + // FaaSNetIo is the metric conforming to the "faas.net_io" semantic + // conventions. It represents the distribution of net I/O usage per invocation. + // Instrument: histogram + // Unit: By + // Stability: Experimental + FaaSNetIoName = "faas.net_io" + FaaSNetIoUnit = "By" + FaaSNetIoDescription = "Distribution of net I/O usage per invocation" + + // HTTPServerRequestDuration is the metric conforming to the + // "http.server.request.duration" semantic conventions. It represents the + // duration of HTTP server requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPServerRequestDurationName = "http.server.request.duration" + HTTPServerRequestDurationUnit = "s" + HTTPServerRequestDurationDescription = "Duration of HTTP server requests." + + // HTTPServerActiveRequests is the metric conforming to the + // "http.server.active_requests" semantic conventions. It represents the number + // of active HTTP server requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPServerActiveRequestsName = "http.server.active_requests" + HTTPServerActiveRequestsUnit = "{request}" + HTTPServerActiveRequestsDescription = "Number of active HTTP server requests." + + // HTTPServerRequestBodySize is the metric conforming to the + // "http.server.request.body.size" semantic conventions. It represents the size + // of HTTP server request bodies. 
+ // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerRequestBodySizeName = "http.server.request.body.size" + HTTPServerRequestBodySizeUnit = "By" + HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies." + + // HTTPServerResponseBodySize is the metric conforming to the + // "http.server.response.body.size" semantic conventions. It represents the + // size of HTTP server response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerResponseBodySizeName = "http.server.response.body.size" + HTTPServerResponseBodySizeUnit = "By" + HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies." + + // HTTPClientRequestDuration is the metric conforming to the + // "http.client.request.duration" semantic conventions. It represents the + // duration of HTTP client requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPClientRequestDurationName = "http.client.request.duration" + HTTPClientRequestDurationUnit = "s" + HTTPClientRequestDurationDescription = "Duration of HTTP client requests." + + // HTTPClientRequestBodySize is the metric conforming to the + // "http.client.request.body.size" semantic conventions. It represents the size + // of HTTP client request bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientRequestBodySizeName = "http.client.request.body.size" + HTTPClientRequestBodySizeUnit = "By" + HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies." + + // HTTPClientResponseBodySize is the metric conforming to the + // "http.client.response.body.size" semantic conventions. It represents the + // size of HTTP client response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientResponseBodySizeName = "http.client.response.body.size" + HTTPClientResponseBodySizeUnit = "By" + HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." + + // HTTPClientOpenConnections is the metric conforming to the + // "http.client.open_connections" semantic conventions. It represents the + // number of outbound HTTP connections that are currently active or idle on the + // client. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + HTTPClientOpenConnectionsName = "http.client.open_connections" + HTTPClientOpenConnectionsUnit = "{connection}" + HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." + + // HTTPClientConnectionDuration is the metric conforming to the + // "http.client.connection.duration" semantic conventions. It represents the + // duration of the successfully established outbound HTTP connections. + // Instrument: histogram + // Unit: s + // Stability: Experimental + HTTPClientConnectionDurationName = "http.client.connection.duration" + HTTPClientConnectionDurationUnit = "s" + HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." + + // HTTPClientActiveRequests is the metric conforming to the + // "http.client.active_requests" semantic conventions. It represents the number + // of active HTTP requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPClientActiveRequestsName = "http.client.active_requests" + HTTPClientActiveRequestsUnit = "{request}" + HTTPClientActiveRequestsDescription = "Number of active HTTP requests." 
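
The updowncounter entries, e.g. http.client.active_requests, are incremented when work starts and decremented when it finishes. A minimal sketch under the same assumptions (in practice the instrument would be created once during setup, not per call):

    package example

    import (
        "context"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/metric"
        semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
    )

    var activeRequests, _ = otel.Meter("example").Int64UpDownCounter(
        semconv.HTTPClientActiveRequestsName,
        metric.WithUnit(semconv.HTTPClientActiveRequestsUnit),
        metric.WithDescription(semconv.HTTPClientActiveRequestsDescription),
    )

    func doRequest(ctx context.Context) {
        activeRequests.Add(ctx, 1)        // request started
        defer activeRequests.Add(ctx, -1) // request finished
        // ... perform the HTTP call ...
    }
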
+ + // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic + // conventions. It represents the measure of initial memory requested. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmMemoryInitName = "jvm.memory.init" + JvmMemoryInitUnit = "By" + JvmMemoryInitDescription = "Measure of initial memory requested." + + // JvmSystemCPUUtilization is the metric conforming to the + // "jvm.system.cpu.utilization" semantic conventions. It represents the recent + // CPU utilization for the whole system as reported by the JVM. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization" + JvmSystemCPUUtilizationUnit = "1" + JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM." + + // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m" + // semantic conventions. It represents the average CPU load of the whole system + // for the last minute as reported by the JVM. + // Instrument: gauge + // Unit: {run_queue_item} + // Stability: Experimental + JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m" + JvmSystemCPULoad1mUnit = "{run_queue_item}" + JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM." + + // JvmBufferMemoryUsage is the metric conforming to the + // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of + // memory used by buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryUsageName = "jvm.buffer.memory.usage" + JvmBufferMemoryUsageUnit = "By" + JvmBufferMemoryUsageDescription = "Measure of memory used by buffers." + + // JvmBufferMemoryLimit is the metric conforming to the + // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of + // total memory capacity of buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryLimitName = "jvm.buffer.memory.limit" + JvmBufferMemoryLimitUnit = "By" + JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers." + + // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic + // conventions. It represents the number of buffers in the pool. + // Instrument: updowncounter + // Unit: {buffer} + // Stability: Experimental + JvmBufferCountName = "jvm.buffer.count" + JvmBufferCountUnit = "{buffer}" + JvmBufferCountDescription = "Number of buffers in the pool." + + // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic + // conventions. It represents the measure of memory used. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryUsedName = "jvm.memory.used" + JvmMemoryUsedUnit = "By" + JvmMemoryUsedDescription = "Measure of memory used." + + // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed" + // semantic conventions. It represents the measure of memory committed. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryCommittedName = "jvm.memory.committed" + JvmMemoryCommittedUnit = "By" + JvmMemoryCommittedDescription = "Measure of memory committed." + + // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic + // conventions. It represents the measure of max obtainable memory. 
+ // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryLimitName = "jvm.memory.limit" + JvmMemoryLimitUnit = "By" + JvmMemoryLimitDescription = "Measure of max obtainable memory." + + // JvmMemoryUsedAfterLastGc is the metric conforming to the + // "jvm.memory.used_after_last_gc" semantic conventions. It represents the + // measure of memory used, as measured after the most recent garbage collection + // event on this pool. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc" + JvmMemoryUsedAfterLastGcUnit = "By" + JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool." + + // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic + // conventions. It represents the duration of JVM garbage collection actions. + // Instrument: histogram + // Unit: s + // Stability: Stable + JvmGcDurationName = "jvm.gc.duration" + JvmGcDurationUnit = "s" + JvmGcDurationDescription = "Duration of JVM garbage collection actions." + + // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic + // conventions. It represents the number of executing platform threads. + // Instrument: updowncounter + // Unit: {thread} + // Stability: Stable + JvmThreadCountName = "jvm.thread.count" + JvmThreadCountUnit = "{thread}" + JvmThreadCountDescription = "Number of executing platform threads." + + // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic + // conventions. It represents the number of classes loaded since JVM start. + // Instrument: counter + // Unit: {class} + // Stability: Stable + JvmClassLoadedName = "jvm.class.loaded" + JvmClassLoadedUnit = "{class}" + JvmClassLoadedDescription = "Number of classes loaded since JVM start." + + // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded" + // semantic conventions. It represents the number of classes unloaded since JVM + // start. + // Instrument: counter + // Unit: {class} + // Stability: Stable + JvmClassUnloadedName = "jvm.class.unloaded" + JvmClassUnloadedUnit = "{class}" + JvmClassUnloadedDescription = "Number of classes unloaded since JVM start." + + // JvmClassCount is the metric conforming to the "jvm.class.count" semantic + // conventions. It represents the number of classes currently loaded. + // Instrument: updowncounter + // Unit: {class} + // Stability: Stable + JvmClassCountName = "jvm.class.count" + JvmClassCountUnit = "{class}" + JvmClassCountDescription = "Number of classes currently loaded." + + // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic + // conventions. It represents the number of processors available to the Java + // virtual machine. + // Instrument: updowncounter + // Unit: {cpu} + // Stability: Stable + JvmCPUCountName = "jvm.cpu.count" + JvmCPUCountUnit = "{cpu}" + JvmCPUCountDescription = "Number of processors available to the Java virtual machine." + + // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic + // conventions. It represents the CPU time used by the process as reported by + // the JVM. + // Instrument: counter + // Unit: s + // Stability: Stable + JvmCPUTimeName = "jvm.cpu.time" + JvmCPUTimeUnit = "s" + JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM." + + // JvmCPURecentUtilization is the metric conforming to the + // "jvm.cpu.recent_utilization" semantic conventions.
It represents the recent + // CPU utilization for the process as reported by the JVM. + // Instrument: gauge + // Unit: 1 + // Stability: Stable + JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization" + JvmCPURecentUtilizationUnit = "1" + JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM." + + // MessagingPublishDuration is the metric conforming to the + // "messaging.publish.duration" semantic conventions. It + // measures the duration of publish operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingPublishDurationName = "messaging.publish.duration" + MessagingPublishDurationUnit = "s" + MessagingPublishDurationDescription = "Measures the duration of publish operation." + + // MessagingReceiveDuration is the metric conforming to the + // "messaging.receive.duration" semantic conventions. It + // measures the duration of receive operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingReceiveDurationName = "messaging.receive.duration" + MessagingReceiveDurationUnit = "s" + MessagingReceiveDurationDescription = "Measures the duration of receive operation." + + // MessagingProcessDuration is the metric conforming to the + // "messaging.process.duration" semantic conventions. It + // measures the duration of process operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingProcessDurationName = "messaging.process.duration" + MessagingProcessDurationUnit = "s" + MessagingProcessDurationDescription = "Measures the duration of process operation." + + // MessagingPublishMessages is the metric conforming to the + // "messaging.publish.messages" semantic conventions. It + // measures the number of published messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingPublishMessagesName = "messaging.publish.messages" + MessagingPublishMessagesUnit = "{message}" + MessagingPublishMessagesDescription = "Measures the number of published messages." + + // MessagingReceiveMessages is the metric conforming to the + // "messaging.receive.messages" semantic conventions. It + // measures the number of received messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingReceiveMessagesName = "messaging.receive.messages" + MessagingReceiveMessagesUnit = "{message}" + MessagingReceiveMessagesDescription = "Measures the number of received messages." + + // MessagingProcessMessages is the metric conforming to the + // "messaging.process.messages" semantic conventions. It + // measures the number of processed messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingProcessMessagesName = "messaging.process.messages" + MessagingProcessMessagesUnit = "{message}" + MessagingProcessMessagesDescription = "Measures the number of processed messages." + + // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic + // conventions. It represents the total CPU seconds broken down by different + // states. + // Instrument: counter + // Unit: s + // Stability: Experimental + ProcessCPUTimeName = "process.cpu.time" + ProcessCPUTimeUnit = "s" + ProcessCPUTimeDescription = "Total CPU seconds broken down by different states." + + // ProcessCPUUtilization is the metric conforming to the + // "process.cpu.utilization" semantic conventions.
It represents the difference + // in process.cpu.time since the last measurement, divided by the elapsed time + // and number of CPUs available to the process. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + ProcessCPUUtilizationName = "process.cpu.utilization" + ProcessCPUUtilizationUnit = "1" + ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process." + + // ProcessMemoryUsage is the metric conforming to the "process.memory.usage" + // semantic conventions. It represents the amount of physical memory in use. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + ProcessMemoryUsageName = "process.memory.usage" + ProcessMemoryUsageUnit = "By" + ProcessMemoryUsageDescription = "The amount of physical memory in use." + + // ProcessMemoryVirtual is the metric conforming to the + // "process.memory.virtual" semantic conventions. It represents the amount of + // committed virtual memory. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + ProcessMemoryVirtualName = "process.memory.virtual" + ProcessMemoryVirtualUnit = "By" + ProcessMemoryVirtualDescription = "The amount of committed virtual memory." + + // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic + // conventions. It represents the disk bytes transferred. + // Instrument: counter + // Unit: By + // Stability: Experimental + ProcessDiskIoName = "process.disk.io" + ProcessDiskIoUnit = "By" + ProcessDiskIoDescription = "Disk bytes transferred." + + // ProcessNetworkIo is the metric conforming to the "process.network.io" + // semantic conventions. It represents the network bytes transferred. + // Instrument: counter + // Unit: By + // Stability: Experimental + ProcessNetworkIoName = "process.network.io" + ProcessNetworkIoUnit = "By" + ProcessNetworkIoDescription = "Network bytes transferred." + + // ProcessThreadCount is the metric conforming to the "process.thread.count" + // semantic conventions. It represents the process threads count. + // Instrument: updowncounter + // Unit: {thread} + // Stability: Experimental + ProcessThreadCountName = "process.thread.count" + ProcessThreadCountUnit = "{thread}" + ProcessThreadCountDescription = "Process threads count." + + // ProcessOpenFileDescriptorCount is the metric conforming to the + // "process.open_file_descriptor.count" semantic conventions. It represents the + // number of file descriptors in use by the process. + // Instrument: updowncounter + // Unit: {count} + // Stability: Experimental + ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count" + ProcessOpenFileDescriptorCountUnit = "{count}" + ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process." + + // ProcessContextSwitches is the metric conforming to the + // "process.context_switches" semantic conventions. It represents the number of + // times the process has been context switched. + // Instrument: counter + // Unit: {count} + // Stability: Experimental + ProcessContextSwitchesName = "process.context_switches" + ProcessContextSwitchesUnit = "{count}" + ProcessContextSwitchesDescription = "Number of times the process has been context switched." + + // ProcessPagingFaults is the metric conforming to the "process.paging.faults" + // semantic conventions. It represents the number of page faults the process + // has made. 
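
Gauge entries like process.cpu.utilization are usually registered as asynchronous (observable) instruments with a callback. A sketch under the same assumptions; readCPUUtilization is a hypothetical sampling helper, not part of any library:

    package example

    import (
        "context"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/metric"
        semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
    )

    func readCPUUtilization() float64 { return 0.42 } // hypothetical placeholder

    func registerCPUGauge() error {
        _, err := otel.Meter("example").Float64ObservableGauge(
            semconv.ProcessCPUUtilizationName,
            metric.WithUnit(semconv.ProcessCPUUtilizationUnit),
            metric.WithDescription(semconv.ProcessCPUUtilizationDescription),
            metric.WithFloat64Callback(func(_ context.Context, o metric.Float64Observer) error {
                o.Observe(readCPUUtilization()) // sampled on each collection cycle
                return nil
            }),
        )
        return err
    }
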
+ // Instrument: counter + // Unit: {fault} + // Stability: Experimental + ProcessPagingFaultsName = "process.paging.faults" + ProcessPagingFaultsUnit = "{fault}" + ProcessPagingFaultsDescription = "Number of page faults the process has made." + + // RPCServerDuration is the metric conforming to the "rpc.server.duration" + // semantic conventions. It measures the duration of inbound + // RPC. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + RPCServerDurationName = "rpc.server.duration" + RPCServerDurationUnit = "ms" + RPCServerDurationDescription = "Measures the duration of inbound RPC." + + // RPCServerRequestSize is the metric conforming to the + // "rpc.server.request.size" semantic conventions. It measures + // the size of RPC request messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCServerRequestSizeName = "rpc.server.request.size" + RPCServerRequestSizeUnit = "By" + RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." + + // RPCServerResponseSize is the metric conforming to the + // "rpc.server.response.size" semantic conventions. It measures + // the size of RPC response messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCServerResponseSizeName = "rpc.server.response.size" + RPCServerResponseSizeUnit = "By" + RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." + + // RPCServerRequestsPerRPC is the metric conforming to the + // "rpc.server.requests_per_rpc" semantic conventions. It + // measures the number of messages received per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc" + RPCServerRequestsPerRPCUnit = "{count}" + RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC." + + // RPCServerResponsesPerRPC is the metric conforming to the + // "rpc.server.responses_per_rpc" semantic conventions. It + // measures the number of messages sent per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc" + RPCServerResponsesPerRPCUnit = "{count}" + RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC." + + // RPCClientDuration is the metric conforming to the "rpc.client.duration" + // semantic conventions. It measures the duration of outbound + // RPC. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + RPCClientDurationName = "rpc.client.duration" + RPCClientDurationUnit = "ms" + RPCClientDurationDescription = "Measures the duration of outbound RPC." + + // RPCClientRequestSize is the metric conforming to the + // "rpc.client.request.size" semantic conventions. It measures + // the size of RPC request messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCClientRequestSizeName = "rpc.client.request.size" + RPCClientRequestSizeUnit = "By" + RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." + + // RPCClientResponseSize is the metric conforming to the + // "rpc.client.response.size" semantic conventions. It measures + // the size of RPC response messages (uncompressed).
+ // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCClientResponseSizeName = "rpc.client.response.size" + RPCClientResponseSizeUnit = "By" + RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." + + // RPCClientRequestsPerRPC is the metric conforming to the + // "rpc.client.requests_per_rpc" semantic conventions. It + // measures the number of messages received per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc" + RPCClientRequestsPerRPCUnit = "{count}" + RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC." + + // RPCClientResponsesPerRPC is the metric conforming to the + // "rpc.client.responses_per_rpc" semantic conventions. It + // measures the number of messages sent per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc" + RPCClientResponsesPerRPCUnit = "{count}" + RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC." + + // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic + // conventions. It represents the seconds each logical CPU spent on each mode. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemCPUTimeName = "system.cpu.time" + SystemCPUTimeUnit = "s" + SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode" + + // SystemCPUUtilization is the metric conforming to the + // "system.cpu.utilization" semantic conventions. It represents the difference + // in system.cpu.time since the last measurement, divided by the elapsed time + // and number of logical CPUs. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + SystemCPUUtilizationName = "system.cpu.utilization" + SystemCPUUtilizationUnit = "1" + SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs" + + // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency" + // semantic conventions. It reports the current frequency of the + // CPU in Hz. + // Instrument: gauge + // Unit: {Hz} + // Stability: Experimental + SystemCPUFrequencyName = "system.cpu.frequency" + SystemCPUFrequencyUnit = "{Hz}" + SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz" + + // SystemCPUPhysicalCount is the metric conforming to the + // "system.cpu.physical.count" semantic conventions. It reports + // the number of actual physical processor cores on the hardware. + // Instrument: updowncounter + // Unit: {cpu} + // Stability: Experimental + SystemCPUPhysicalCountName = "system.cpu.physical.count" + SystemCPUPhysicalCountUnit = "{cpu}" + SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware" + + // SystemCPULogicalCount is the metric conforming to the + // "system.cpu.logical.count" semantic conventions. It reports + // the number of logical (virtual) processor cores created by the operating + // system to manage multitasking.
+ // Instrument: updowncounter + // Unit: {cpu} + // Stability: Experimental + SystemCPULogicalCountName = "system.cpu.logical.count" + SystemCPULogicalCountUnit = "{cpu}" + SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking" + + // SystemMemoryUsage is the metric conforming to the "system.memory.usage" + // semantic conventions. It reports memory in use by state. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemoryUsageName = "system.memory.usage" + SystemMemoryUsageUnit = "By" + SystemMemoryUsageDescription = "Reports memory in use by state." + + // SystemMemoryLimit is the metric conforming to the "system.memory.limit" + // semantic conventions. It represents the total memory available in the + // system. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemoryLimitName = "system.memory.limit" + SystemMemoryLimitUnit = "By" + SystemMemoryLimitDescription = "Total memory available in the system." + + // SystemMemoryShared is the metric conforming to the "system.memory.shared" + // semantic conventions. It represents the shared memory used (mostly by + // tmpfs). + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemorySharedName = "system.memory.shared" + SystemMemorySharedUnit = "By" + SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)." + + // SystemMemoryUtilization is the metric conforming to the + // "system.memory.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemMemoryUtilizationName = "system.memory.utilization" + SystemMemoryUtilizationUnit = "1" + + // SystemPagingUsage is the metric conforming to the "system.paging.usage" + // semantic conventions. It represents the Unix swap or Windows pagefile usage. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemPagingUsageName = "system.paging.usage" + SystemPagingUsageUnit = "By" + SystemPagingUsageDescription = "Unix swap or windows pagefile usage" + + // SystemPagingUtilization is the metric conforming to the + // "system.paging.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingUtilizationName = "system.paging.utilization" + SystemPagingUtilizationUnit = "1" + + // SystemPagingFaults is the metric conforming to the "system.paging.faults" + // semantic conventions. + // Instrument: counter + // Unit: {fault} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingFaultsName = "system.paging.faults" + SystemPagingFaultsUnit = "{fault}" + + // SystemPagingOperations is the metric conforming to the + // "system.paging.operations" semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingOperationsName = "system.paging.operations" + SystemPagingOperationsUnit = "{operation}" + + // SystemDiskIo is the metric conforming to the "system.disk.io" semantic + // conventions.
+ // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskIoName = "system.disk.io" + SystemDiskIoUnit = "By" + + // SystemDiskOperations is the metric conforming to the + // "system.disk.operations" semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskOperationsName = "system.disk.operations" + SystemDiskOperationsUnit = "{operation}" + + // SystemDiskIoTime is the metric conforming to the "system.disk.io_time" + // semantic conventions. It represents the time disk spent activated. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskIoTimeName = "system.disk.io_time" + SystemDiskIoTimeUnit = "s" + SystemDiskIoTimeDescription = "Time disk spent activated" + + // SystemDiskOperationTime is the metric conforming to the + // "system.disk.operation_time" semantic conventions. It represents the sum of + // the time each operation took to complete. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskOperationTimeName = "system.disk.operation_time" + SystemDiskOperationTimeUnit = "s" + SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete" + + // SystemDiskMerged is the metric conforming to the "system.disk.merged" + // semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskMergedName = "system.disk.merged" + SystemDiskMergedUnit = "{operation}" + + // SystemFilesystemUsage is the metric conforming to the + // "system.filesystem.usage" semantic conventions. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUsageName = "system.filesystem.usage" + SystemFilesystemUsageUnit = "By" + + // SystemFilesystemUtilization is the metric conforming to the + // "system.filesystem.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUtilizationName = "system.filesystem.utilization" + SystemFilesystemUtilizationUnit = "1" + + // SystemNetworkDropped is the metric conforming to the + // "system.network.dropped" semantic conventions. It represents the count of + // packets that are dropped or discarded even though there was no error. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + SystemNetworkDroppedName = "system.network.dropped" + SystemNetworkDroppedUnit = "{packet}" + SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error" + + // SystemNetworkPackets is the metric conforming to the + // "system.network.packets" semantic conventions. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. 
+ SystemNetworkPacketsName = "system.network.packets" + SystemNetworkPacketsUnit = "{packet}" + + // SystemNetworkErrors is the metric conforming to the "system.network.errors" + // semantic conventions. It represents the count of network errors detected. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + SystemNetworkErrorsName = "system.network.errors" + SystemNetworkErrorsUnit = "{error}" + SystemNetworkErrorsDescription = "Count of network errors detected" + + // SystemNetworkIo is the metric conforming to the "system.network.io" semantic + // conventions. + // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkIoName = "system.network.io" + SystemNetworkIoUnit = "By" + + // SystemNetworkConnections is the metric conforming to the + // "system.network.connections" semantic conventions. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkConnectionsName = "system.network.connections" + SystemNetworkConnectionsUnit = "{connection}" + + // SystemProcessCount is the metric conforming to the "system.process.count" + // semantic conventions. It represents the total number of processes in each + // state. + // Instrument: updowncounter + // Unit: {process} + // Stability: Experimental + SystemProcessCountName = "system.process.count" + SystemProcessCountUnit = "{process}" + SystemProcessCountDescription = "Total number of processes in each state" + + // SystemProcessCreated is the metric conforming to the + // "system.process.created" semantic conventions. It represents the total + // number of processes created over uptime of the host. + // Instrument: counter + // Unit: {process} + // Stability: Experimental + SystemProcessCreatedName = "system.process.created" + SystemProcessCreatedUnit = "{process}" + SystemProcessCreatedDescription = "Total number of processes created over uptime of the host" + + // SystemLinuxMemoryAvailable is the metric conforming to the + // "system.linux.memory.available" semantic conventions. It represents an + // estimate of how much memory is available for starting new applications, + // without causing swapping. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemLinuxMemoryAvailableName = "system.linux.memory.available" + SystemLinuxMemoryAvailableUnit = "By" + SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go new file mode 100644 index 00000000000..4c87c7adcc7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. 
Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/<version> +const SchemaURL = "https://opentelemetry.io/schemas/1.26.0" diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go index 1dfa52c5216..64a4f1b362f 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go @@ -67,11 +67,13 @@ func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) span = Span{sc: sc} } else { // No parent, return a No-Op span with an empty span context. - span = Span{} + span = noopSpanInstance } return trace.ContextWithSpan(ctx, span), span } +var noopSpanInstance trace.Span = Span{} + // Span is an OpenTelemetry No-Op Span. type Span struct { embedded.Span diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 102f2f508bf..ab28960524b 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.27.0" + return "1.28.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 60985f4362f..241cfc82a8d 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.27.0 + version: v1.28.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -29,12 +29,12 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.49.0 + version: v0.50.0 modules: - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.3.0 + version: v0.4.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go index 5577c0f939a..dc9311870a5 100644 --- a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go +++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go @@ -4,7 +4,7 @@ // Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing // algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf -package bcrypt // import "golang.org/x/crypto/bcrypt" +package bcrypt // The code is a port of Provos and Mazières's C implementation. import ( diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go index 213bf204afe..0898956807c 100644 --- a/vendor/golang.org/x/crypto/blowfish/cipher.go +++ b/vendor/golang.org/x/crypto/blowfish/cipher.go @@ -11,7 +11,7 @@ // Deprecated: any new system should use AES (from crypto/aes, if necessary in // an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from // golang.org/x/crypto/chacha20poly1305). -package blowfish // import "golang.org/x/crypto/blowfish" +package blowfish // The code is a port of Bruce Schneier's C implementation. // See https://www.schneier.com/blowfish.html.
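A note on the trace/noop hunk above: assigning a freshly constructed Span{} to the trace.Span interface on every Start call boxes the value, and since Span carries a SpanContext field it is not zero-sized, so that boxing typically heap-allocates each time. Hoisting the boxed value into the package-level noopSpanInstance pays the cost once, which is safe because the no-op span is stateless and can be shared across goroutines. A minimal standalone sketch of the same pattern; the names Doer, noop, shared, startFresh, and startShared are illustrative, not part of the vendored code:

package main

import "fmt"

// Doer stands in for an interface such as trace.Span.
type Doer interface{ Do() }

// noop is a non-zero-size value type; converting one to an interface
// generally forces a heap allocation for the boxed copy.
type noop struct{ ctx [16]byte }

func (noop) Do() {}

// Boxing the value once at package initialization lets every caller
// share a single allocation instead of paying for one per call.
var shared Doer = noop{}

func startFresh() Doer  { return noop{} } // allocates on each call
func startShared() Doer { return shared } // reuses the boxed singleton

func main() {
	fmt.Println(startFresh(), startShared())
}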
diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go index 93da7322bc4..8cf5d8112e4 100644 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go @@ -5,7 +5,7 @@ // Package chacha20poly1305 implements the ChaCha20-Poly1305 AEAD and its // extended nonce variant XChaCha20-Poly1305, as specified in RFC 8439 and // draft-irtf-cfrg-xchacha-01. -package chacha20poly1305 // import "golang.org/x/crypto/chacha20poly1305" +package chacha20poly1305 import ( "crypto/cipher" diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go index cda8e3edfd5..90ef6a241de 100644 --- a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go @@ -4,7 +4,7 @@ // Package asn1 contains supporting types for parsing and building ASN.1 // messages with the cryptobyte package. -package asn1 // import "golang.org/x/crypto/cryptobyte/asn1" +package asn1 // Tag represents an ASN.1 identifier octet, consisting of a tag number // (indicating a type) and class (such as context-specific or constructed). diff --git a/vendor/golang.org/x/crypto/cryptobyte/string.go b/vendor/golang.org/x/crypto/cryptobyte/string.go index 10692a8a315..4b0f8097f9e 100644 --- a/vendor/golang.org/x/crypto/cryptobyte/string.go +++ b/vendor/golang.org/x/crypto/cryptobyte/string.go @@ -15,7 +15,7 @@ // // See the documentation and examples for the Builder and String types to get // started. -package cryptobyte // import "golang.org/x/crypto/cryptobyte" +package cryptobyte // String represents a string of bytes. It provides methods for parsing // fixed-length and length-prefixed values from it. diff --git a/vendor/golang.org/x/crypto/hkdf/hkdf.go b/vendor/golang.org/x/crypto/hkdf/hkdf.go index f4ded5fee2f..3bee66294ec 100644 --- a/vendor/golang.org/x/crypto/hkdf/hkdf.go +++ b/vendor/golang.org/x/crypto/hkdf/hkdf.go @@ -8,7 +8,7 @@ // HKDF is a cryptographic key derivation function (KDF) with the goal of // expanding limited input keying material into one or more cryptographically // strong secret keys. -package hkdf // import "golang.org/x/crypto/hkdf" +package hkdf import ( "crypto/hmac" diff --git a/vendor/golang.org/x/crypto/md4/md4.go b/vendor/golang.org/x/crypto/md4/md4.go index d1911c2e86e..7d9281e0259 100644 --- a/vendor/golang.org/x/crypto/md4/md4.go +++ b/vendor/golang.org/x/crypto/md4/md4.go @@ -7,7 +7,7 @@ // Deprecated: MD4 is cryptographically broken and should only be used // where compatibility with legacy systems, not security, is the goal. Instead, // use a secure hash like SHA-256 (from crypto/sha256). -package md4 // import "golang.org/x/crypto/md4" +package md4 import ( "crypto" diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go index 904b57e01d7..28cd99c7f3f 100644 --- a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -16,7 +16,7 @@ Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To choose, you can pass the `New` functions from the different SHA packages to pbkdf2.Key. 
*/ -package pbkdf2 // import "golang.org/x/crypto/pbkdf2" +package pbkdf2 import ( "crypto/hmac" diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 98a49c6b6ee..61f511f97aa 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -827,10 +827,6 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) cc.peerMaxHeaderTableSize = initialHeaderTableSize - if t.AllowHTTP { - cc.nextStreamID = 3 - } - if cs, ok := c.(connectionStater); ok { state := cs.ConnectionState() cc.tlsState = &state diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index ba931c2c3de..7b82e7a0837 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -252,7 +252,10 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar // Further information about retrieving access tokens from the GCE metadata // server can be found at https://cloud.google.com/compute/docs/authentication. func ComputeTokenSource(account string, scope ...string) oauth2.TokenSource { - return computeTokenSource(account, 0, scope...) + // refresh 3 minutes and 45 seconds early. The shortest MDS cache is currently 4 minutes, so any + // refreshes earlier are a waste of compute. + earlyExpirySecs := 225 * time.Second + return computeTokenSource(account, earlyExpirySecs, scope...) } func computeTokenSource(account string, earlyExpiry time.Duration, scope ...string) oauth2.TokenSource { diff --git a/vendor/golang.org/x/sys/unix/mremap.go b/vendor/golang.org/x/sys/unix/mremap.go index fd45fe529da..3a5e776f895 100644 --- a/vendor/golang.org/x/sys/unix/mremap.go +++ b/vendor/golang.org/x/sys/unix/mremap.go @@ -50,3 +50,8 @@ func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data [ func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { return mapper.Mremap(oldData, newLength, flags) } + +func MremapPtr(oldAddr unsafe.Pointer, oldSize uintptr, newAddr unsafe.Pointer, newSize uintptr, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mremap(uintptr(oldAddr), oldSize, newSize, flags, uintptr(newAddr)) + return unsafe.Pointer(xaddr), err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 59542a897d2..4cc7b005967 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -542,6 +542,18 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { } } +//sys pthread_chdir_np(path string) (err error) + +func PthreadChdir(path string) (err error) { + return pthread_chdir_np(path) +} + +//sys pthread_fchdir_np(fd int) (err error) + +func PthreadFchdir(fd int) (err error) { + return pthread_fchdir_np(fd) +} + //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 77081de8c7d..4e92e5aa406 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -154,6 +154,15 @@ func Munmap(b []byte) (err error) { return mapper.Munmap(b) } +func MmapPtr(fd int, offset int64, 
addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset) + return unsafe.Pointer(xaddr), err +} + +func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) { + return mapper.munmap(uintptr(addr), length) +} + func Read(fd int, p []byte) (n int, err error) { n, err = read(fd, p) if raceenabled { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index ccb02f240a4..07642c308d3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -760,6 +760,39 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 8b8bb284028..923e08cb792 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -228,6 +228,16 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 1b40b997b52..7d73dda6473 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -760,6 +760,39 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path 
string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 08362c1ab74..057700111e7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -228,6 +228,16 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 6f7d2ac70a9..97651b5bd04 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -894,7 +894,7 @@ type ACL struct { aclRevision byte sbz1 byte aclSize uint16 - aceCount uint16 + AceCount uint16 sbz2 uint16 } @@ -1087,6 +1087,27 @@ type EXPLICIT_ACCESS struct { Trustee TRUSTEE } +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header +type ACE_HEADER struct { + AceType uint8 + AceFlags uint8 + AceSize uint16 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-access_allowed_ace +type ACCESS_ALLOWED_ACE struct { + Header ACE_HEADER + Mask ACCESS_MASK + SidStart uint32 +} + +const ( + // Constants for AceType + // https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header + ACCESS_ALLOWED_ACE_TYPE = 0 + ACCESS_DENIED_ACE_TYPE = 1 +) + // This type is the union inside of TRUSTEE and must be created using one of the TrusteeValueFrom* functions. 
type TrusteeValue uintptr @@ -1158,6 +1179,7 @@ type OBJECTS_AND_NAME struct { //sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD //sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW +//sys GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (ret error) = advapi32.GetAce // Control returns the security descriptor control bits. func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) { diff --git a/vendor/golang.org/x/sys/windows/svc/service.go b/vendor/golang.org/x/sys/windows/svc/service.go index c96932d962a..c4f74924dd1 100644 --- a/vendor/golang.org/x/sys/windows/svc/service.go +++ b/vendor/golang.org/x/sys/windows/svc/service.go @@ -199,9 +199,8 @@ var ( ) func ctlHandler(ctl, evtype, evdata, context uintptr) uintptr { - s := (*service)(unsafe.Pointer(context)) e := ctlEvent{cmd: Cmd(ctl), eventType: uint32(evtype), eventData: evdata, context: 123456} // Set context to 123456 to test issue #25660. - s.c <- e + theService.c <- e return 0 } @@ -210,7 +209,7 @@ var theService service // This is, unfortunately, a global, which means only one // serviceMain is the entry point called by the service manager, registered earlier by // the call to StartServiceCtrlDispatcher. func serviceMain(argc uint32, argv **uint16) uintptr { - handle, err := windows.RegisterServiceCtrlHandlerEx(windows.StringToUTF16Ptr(theService.name), ctlHandlerCallback, uintptr(unsafe.Pointer(&theService))) + handle, err := windows.RegisterServiceCtrlHandlerEx(windows.StringToUTF16Ptr(theService.name), ctlHandlerCallback, 0) if sysErr, ok := err.(windows.Errno); ok { return uintptr(sysErr) } else if err != nil { diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 9f73df75b5f..eba761018aa 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -91,6 +91,7 @@ var ( procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") procEqualSid = modadvapi32.NewProc("EqualSid") procFreeSid = modadvapi32.NewProc("FreeSid") + procGetAce = modadvapi32.NewProc("GetAce") procGetLengthSid = modadvapi32.NewProc("GetLengthSid") procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") procGetSecurityDescriptorControl = modadvapi32.NewProc("GetSecurityDescriptorControl") @@ -1224,6 +1225,14 @@ func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCE return } +func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (ret error) { + r0, _, _ := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) + if r0 == 0 { + ret = GetLastError() + } + return +} + func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) { r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) if r1 == 0 { diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json index 0c82b86a100..633d334df40 100644 --- 
a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json +++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json @@ -12,7 +12,7 @@ "baseUrl": "https://iamcredentials.googleapis.com/", "batchPath": "batch", "canonicalName": "IAM Credentials", - "description": "Creates short-lived credentials for impersonating IAM service accounts. To enable this API, you must enable the IAM API (iam.googleapis.com). ", + "description": "Creates short-lived credentials for impersonating IAM service accounts. Disabling this API also disables the IAM API (iam.googleapis.com). However, enabling this API doesn't enable the IAM API. ", "discoveryVersion": "v1", "documentationLink": "https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials", "fullyEncodeReservedExpansion": true, @@ -226,7 +226,7 @@ } } }, - "revision": "20211203", + "revision": "20240227", "rootUrl": "https://iamcredentials.googleapis.com/", "schemas": { "GenerateAccessTokenRequest": { diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index 6b709da707e..af4a038d3e2 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -195,7 +195,7 @@ func (ds *DialSettings) IsUniverseDomainGDU() bool { // GetUniverseDomain returns the default service domain for a given Cloud // universe, from google.Credentials, for comparison with the value returned by // (*DialSettings).GetUniverseDomain. This wrapper function should be removed -// to close [TODO(chrisdsmith): issue link here]. See details below. +// to close https://github.com/googleapis/google-api-go-client/issues/2399. func GetUniverseDomain(creds *google.Credentials) (string, error) { timer := time.NewTimer(time.Second) defer timer.Stop() @@ -212,9 +212,10 @@ func GetUniverseDomain(creds *google.Credentials) (string, error) { }() select { - case err := <-errors: - // An error that is returned before the timer expires is legitimate. - return "", err + case <-errors: + // An error that is returned before the timer expires is likely to be + // connection refused. Temporarily (2024-03-21) return the GDU domain. + return universeDomainDefault, nil case res := <-results: return res, nil case <-timer.C: // Timer is expired. diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index fb21d338b94..eedb1faa7ba 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.169.0" +const Version = "0.171.0" diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index cb34638d2be..e100da3e294 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -33,7 +33,7 @@ "location": "me-central2" } ], - "etag": "\"3135323132313733303039343736303631393739\"", + "etag": "\"33383639383534303336303331313132393335\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -1632,6 +1632,11 @@ "managedFolder" ], "parameters": { + "allowNonEmpty": { + "description": "Allows the deletion of a managed folder even if it is not empty. 
A managed folder is empty if there are no objects or managed folders that it applies to. Callers must have storage.managedFolders.setIamPolicy permission.", + "location": "query", + "type": "boolean" + }, "bucket": { "description": "Name of the bucket containing the managed folder.", "location": "path", @@ -3212,9 +3217,6 @@ } }, "path": "b/{bucket}/o/{object}/restore", - "request": { - "$ref": "Object" - }, "response": { "$ref": "Object" }, @@ -4040,7 +4042,7 @@ } } }, - "revision": "20240209", + "revision": "20240311", "rootUrl": "https://storage.googleapis.com/", "schemas": { "AnywhereCache": { diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 6f01795faa9..3fa41847266 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -9452,6 +9452,16 @@ func (r *ManagedFoldersService) Delete(bucket string, managedFolder string) *Man return c } +// AllowNonEmpty sets the optional parameter "allowNonEmpty": Allows the +// deletion of a managed folder even if it is not empty. A managed +// folder is empty if there are no objects or managed folders that it +// applies to. Callers must have storage.managedFolders.setIamPolicy +// permission. +func (c *ManagedFoldersDeleteCall) AllowNonEmpty(allowNonEmpty bool) *ManagedFoldersDeleteCall { + c.urlParams_.Set("allowNonEmpty", fmt.Sprint(allowNonEmpty)) + return c +} + // IfMetagenerationMatch sets the optional parameter // "ifMetagenerationMatch": If set, only deletes the managed folder if // its metageneration matches this value. @@ -9538,6 +9548,11 @@ func (c *ManagedFoldersDeleteCall) Do(opts ...googleapi.CallOption) error { // "managedFolder" // ], // "parameters": { + // "allowNonEmpty": { + // "description": "Allows the deletion of a managed folder even if it is not empty. A managed folder is empty if there are no objects or managed folders that it applies to. Callers must have storage.managedFolders.setIamPolicy permission.", + // "location": "query", + // "type": "boolean" + // }, // "bucket": { // "description": "Name of the bucket containing the managed folder.", // "location": "path", @@ -15052,7 +15067,6 @@ type ObjectsRestoreCall struct { s *Service bucket string object string - object2 *Object urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header @@ -15064,11 +15078,10 @@ type ObjectsRestoreCall struct { // - generation: Selects a specific revision of this object. // - object: Name of the object. For information about how to URL encode // object names to be path safe, see Encoding URI Path Parts. 
-func (r *ObjectsService) Restore(bucket string, object string, object2 *Object) *ObjectsRestoreCall { +func (r *ObjectsService) Restore(bucket string, object string) *ObjectsRestoreCall { c := &ObjectsRestoreCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object - c.object2 = object2 return c } @@ -15168,11 +15181,6 @@ func (c *ObjectsRestoreCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/restore") @@ -15303,9 +15311,6 @@ func (c *ObjectsRestoreCall) Do(opts ...googleapi.CallOption) (*Object, error) { // } // }, // "path": "b/{bucket}/o/{object}/restore", - // "request": { - // "$ref": "Object" - // }, // "response": { // "$ref": "Object" // }, diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index ab0fbb79b86..b572707c623 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -10,7 +10,7 @@ RPC framework that puts mobile and HTTP/2 first. For more information see the ## Prerequisites -- **[Go][]**: any one of the **three latest major** [releases][go-releases]. +- **[Go][]**: any one of the **two latest major** [releases][go-releases]. ## Installation diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index bdf93dbfeff..0adc98866c0 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // source: grpc/lb/v1/load_balancer.proto diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go index c57857ac0e1..57a792a7b48 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v4.25.2 // source: grpc/lb/v1/load_balancer.proto diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go index 8942c31310a..96a57c8c70c 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go @@ -21,14 +21,14 @@ package grpclb import ( "encoding/json" - "google.golang.org/grpc" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/serviceconfig" ) const ( roundRobinName = roundrobin.Name - pickFirstName = grpc.PickFirstBalancerName + pickFirstName = pickfirst.Name ) type grpclbServiceConfig struct { diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go index 20c5f2ec396..671bc663fcb 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go @@ -19,13 +19,13 @@ package grpclb import ( + "math/rand" "sync" "sync/atomic" "google.golang.org/grpc/balancer" lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/status" ) @@ -112,7 +112,7 @@ type rrPicker struct { func newRRPicker(readySCs []balancer.SubConn) *rrPicker { return &rrPicker{ subConns: readySCs, - subConnsNext: grpcrand.Intn(len(readySCs)), + subConnsNext: rand.Intn(len(readySCs)), } } @@ -147,7 +147,7 @@ func newLBPicker(serverList []*lbpb.Server, readySCs []balancer.SubConn, stats * return &lbPicker{ serverList: serverList, subConns: readySCs, - subConnsNext: grpcrand.Intn(len(readySCs)), + subConnsNext: rand.Intn(len(readySCs)), stats: stats, } } diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go similarity index 89% rename from vendor/google.golang.org/grpc/pickfirst.go rename to vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index 8853626614e..07527603f1d 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -16,26 +16,36 @@ * */ -package grpc +// Package pickfirst contains the pick_first load balancing policy. +package pickfirst import ( "encoding/json" "errors" "fmt" + "math/rand" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" internalgrpclog "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) +func init() { + balancer.Register(pickfirstBuilder{}) + internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } +} + +var logger = grpclog.Component("pick-first-lb") + const ( - // PickFirstBalancerName is the name of the pick_first balancer. - PickFirstBalancerName = "pick_first" - logPrefix = "[pick-first-lb %p] " + // Name is the name of the pick_first balancer. 
+ Name = "pick_first" + logPrefix = "[pick-first-lb %p] " ) type pickfirstBuilder struct{} @@ -47,7 +57,7 @@ func (pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) } func (pickfirstBuilder) Name() string { - return PickFirstBalancerName + return Name } type pfConfig struct { @@ -93,6 +103,12 @@ func (b *pickfirstBalancer) ResolverError(err error) { }) } +type Shuffler interface { + ShuffleAddressListForTesting(n int, swap func(i, j int)) +} + +func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } + func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { // The resolver reported an empty address list. Treat it like an error by @@ -124,7 +140,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // within each endpoint. - A61 if cfg.ShuffleAddressList { endpoints = append([]resolver.Endpoint{}, endpoints...) - grpcrand.Shuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) } // "Flatten the list by concatenating the ordered list of addresses for each @@ -145,7 +161,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState addrs = state.ResolverState.Addresses if cfg.ShuffleAddressList { addrs = append([]resolver.Address{}, addrs...) - grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) } } diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index f7031ad2251..260255d31b6 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,12 +22,12 @@ package roundrobin import ( + "math/rand" "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/grpcrand" ) // Name is the name of round_robin balancer. @@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list. - next: uint32(grpcrand.Intn(len(scs))), + next: uint32(rand.Intn(len(scs))), } } diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index af39b8a4c73..4161fdf47a8 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -198,6 +198,10 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { ccb.cc.mu.Lock() defer ccb.cc.mu.Unlock() + if ccb.cc.conns == nil { + // The CC has been closed; ignore this update. 
+ return + } ccb.mu.Lock() if ccb.closed { diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 1afb1e84ac0..63c639e4fe9 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // source: grpc/binlog/v1/binarylog.proto diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 2359f94b8a4..423be7b43b0 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal" @@ -72,6 +73,8 @@ var ( // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // service config. invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" + // PickFirstBalancerName is the name of the pick_first balancer. + PickFirstBalancerName = pickfirst.Name ) // The following errors are returned from Dial and DialContext @@ -152,6 +155,16 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) for _, opt := range opts { opt.apply(&cc.dopts) } + + // Determine the resolver to use. + if err := cc.initParsedTargetAndResolverBuilder(); err != nil { + return nil, err + } + + for _, opt := range globalPerTargetDialOptions { + opt.DialOptionForTarget(cc.parsedTarget.URL).apply(&cc.dopts) + } + chainUnaryClientInterceptors(cc) chainStreamClientInterceptors(cc) @@ -160,7 +173,7 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) } if cc.dopts.defaultServiceConfigRawJSON != nil { - scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) + scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON, cc.dopts.maxCallAttempts) if scpr.Err != nil { return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err) } @@ -168,25 +181,16 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) } cc.mkp = cc.dopts.copts.KeepaliveParams - // Register ClientConn with channelz. - cc.channelzRegistration(target) - - // TODO: Ideally it should be impossible to error from this function after - // channelz registration. This will require removing some channelz logs - // from the following functions that can error. Errors can be returned to - // the user, and successful logs can be emitted here, after the checks have - // passed and channelz is subsequently registered. - - // Determine the resolver to use. - if err := cc.parseTargetAndFindResolver(); err != nil { - channelz.RemoveEntry(cc.channelz.ID) - return nil, err - } - if err = cc.determineAuthority(); err != nil { - channelz.RemoveEntry(cc.channelz.ID) + if err = cc.initAuthority(); err != nil { return nil, err } + // Register ClientConn with channelz. Note that this is only done after + // channel creation cannot fail. 
+ cc.channelzRegistration(target) + channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", cc.parsedTarget) + channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority) + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) @@ -587,11 +591,11 @@ type ClientConn struct { // The following are initialized at dial time, and are read-only after that. target string // User's dial target. - parsedTarget resolver.Target // See parseTargetAndFindResolver(). - authority string // See determineAuthority(). + parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). + authority string // See initAuthority(). dopts dialOptions // Default and user specified dial options. channelz *channelz.Channel // Channelz object. - resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). + resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). idlenessMgr *idle.Manager // The following provide their own synchronization, and therefore don't @@ -692,8 +696,7 @@ func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { var emptyServiceConfig *ServiceConfig func init() { - balancer.Register(pickfirstBuilder{}) - cfg := parseServiceConfig("{}") + cfg := parseServiceConfig("{}", defaultMaxCallAttempts) if cfg.Err != nil { panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) } @@ -1673,22 +1676,19 @@ func (cc *ClientConn) connectionError() error { return cc.lastConnectionError } -// parseTargetAndFindResolver parses the user's dial target and stores the -// parsed target in `cc.parsedTarget`. +// initParsedTargetAndResolverBuilder parses the user's dial target and stores +// the parsed target in `cc.parsedTarget`. // // The resolver to use is determined based on the scheme in the parsed target // and the same is stored in `cc.resolverBuilder`. // // Doesn't grab cc.mu as this method is expected to be called only at Dial time. -func (cc *ClientConn) parseTargetAndFindResolver() error { - channelz.Infof(logger, cc.channelz, "original dial target is: %q", cc.target) +func (cc *ClientConn) initParsedTargetAndResolverBuilder() error { + logger.Infof("original dial target is: %q", cc.target) var rb resolver.Builder parsedTarget, err := parseTarget(cc.target) - if err != nil { - channelz.Infof(logger, cc.channelz, "dial target %q parse failed: %v", cc.target, err) - } else { - channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", parsedTarget) + if err == nil { rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget @@ -1707,15 +1707,12 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { defScheme = resolver.GetDefaultScheme() } - channelz.Infof(logger, cc.channelz, "fallback to scheme %q", defScheme) canonicalTarget := defScheme + ":///" + cc.target parsedTarget, err = parseTarget(canonicalTarget) if err != nil { - channelz.Infof(logger, cc.channelz, "dial target %q parse failed: %v", canonicalTarget, err) return err } - channelz.Infof(logger, cc.channelz, "parsed dial target is: %+v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb == nil { return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) @@ -1805,7 +1802,7 @@ func encodeAuthority(authority string) string { // credentials do not match the authority configured through the dial option. // // Doesn't grab cc.mu as this method is expected to be called only at Dial time. 
-func (cc *ClientConn) determineAuthority() error { +func (cc *ClientConn) initAuthority() error { dopts := cc.dopts // Historically, we had two options for users to specify the serverName or // authority for a channel. One was through the transport credentials @@ -1838,6 +1835,5 @@ func (cc *ClientConn) determineAuthority() error { } else { cc.authority = encodeAuthority(endpoint) } - channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority) return nil } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index fe4488a95ed..38cb5cf0d74 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // source: grpc/gcp/altscontext.proto diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index adbad6b2fa3..55fc7f65f10 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // source: grpc/gcp/handshaker.proto diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go index d1af55260bd..358074b6494 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v4.25.2 // source: grpc/gcp/handshaker.proto diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go index d65ffe6e7be..18cc9cfbd59 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.33.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // source: grpc/gcp/transport_security_common.proto diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 5dafd34edf9..4114358545e 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -27,9 +27,13 @@ import ( "net/url" "os" + "google.golang.org/grpc/grpclog" credinternal "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/envconfig" ) +var logger = grpclog.Component("credentials") + // TLSInfo contains the auth information for a TLS authenticated connection. // It implements the AuthInfo interface. type TLSInfo struct { @@ -112,6 +116,22 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon conn.Close() return nil, nil, ctx.Err() } + + // The negotiated protocol can be either of the following: + // 1. h2: When the server supports ALPN. Only HTTP/2 can be negotiated since + // it is the only protocol advertised by the client during the handshake. + // The tls library ensures that the server chooses a protocol advertised + // by the client. + // 2. "" (empty string): If the server doesn't support ALPN. ALPN is a requirement + // for using HTTP/2 over TLS. We can terminate the connection immediately. + np := conn.ConnectionState().NegotiatedProtocol + if np == "" { + if envconfig.EnforceALPNEnabled { + conn.Close() + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + } + logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName) + } tlsInfo := TLSInfo{ State: conn.ConnectionState(), CommonAuthInfo: CommonAuthInfo{ @@ -131,8 +151,20 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) conn.Close() return nil, nil, err } + cs := conn.ConnectionState() + // The negotiated application protocol can be empty only if the client doesn't + // support ALPN. In such cases, we can close the connection since ALPN is required + // for using HTTP/2 over TLS. + if cs.NegotiatedProtocol == "" { + if envconfig.EnforceALPNEnabled { + conn.Close() + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + } else if logger.V(2) { + logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases") + } + } tlsInfo := TLSInfo{ - State: conn.ConnectionState(), + State: cs, CommonAuthInfo: CommonAuthInfo{ SecurityLevel: PrivacyAndIntegrity, }, diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 00273702b69..f5453d48a53 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -21,6 +21,7 @@ package grpc import ( "context" "net" + "net/url" "time" "google.golang.org/grpc/backoff" @@ -36,6 +37,11 @@ import ( "google.golang.org/grpc/stats" ) +const ( + // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#limits-on-retries-and-hedges + defaultMaxCallAttempts = 5 +) + func init() { internal.AddGlobalDialOptions = func(opt ...DialOption) { globalDialOptions = append(globalDialOptions, opt...) 
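On the credentials/tls.go hunks above: both the client and server handshakes now inspect tls.ConnectionState.NegotiatedProtocol and, when envconfig.EnforceALPNEnabled is set (wired to the GRPC_ENFORCE_ALPN_ENABLED environment variable later in this patch), close connections whose peer did not negotiate ALPN, since HTTP/2 over TLS requires it. A distilled sketch of that check and of a client TLS config that satisfies it; clientTLSConfig and checkALPN are illustrative names, not the vendored API:

package main

import (
	"crypto/tls"
	"errors"
	"fmt"
)

// clientTLSConfig shows the shape of a config that passes the new check:
// advertising "h2" is what makes NegotiatedProtocol non-empty after the
// handshake (the peer may only select a protocol the client offered).
func clientTLSConfig() *tls.Config {
	return &tls.Config{
		NextProtos: []string{"h2"},
		MinVersion: tls.VersionTLS12,
	}
}

// checkALPN mirrors the gist of the vendored logic: an empty negotiated
// protocol means the peer did not speak ALPN at all.
func checkALPN(state tls.ConnectionState, enforce bool) error {
	if state.NegotiatedProtocol == "" {
		if enforce { // GRPC_ENFORCE_ALPN_ENABLED=true
			return errors.New("credentials: cannot check peer: missing selected ALPN property")
		}
		fmt.Println("warning: allowing TLS connection with ALPN disabled")
	}
	return nil
}

func main() {
	_ = clientTLSConfig()
	fmt.Println(checkALPN(tls.ConnectionState{}, false))
}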
@@ -43,6 +49,14 @@ func init() { internal.ClearGlobalDialOptions = func() { globalDialOptions = nil } + internal.AddGlobalPerTargetDialOptions = func(opt any) { + if ptdo, ok := opt.(perTargetDialOption); ok { + globalPerTargetDialOptions = append(globalPerTargetDialOptions, ptdo) + } + } + internal.ClearGlobalPerTargetDialOptions = func() { + globalPerTargetDialOptions = nil + } internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption internal.DisableGlobalDialOptions = newDisableGlobalDialOptions @@ -80,6 +94,7 @@ type dialOptions struct { idleTimeout time.Duration recvBufferPool SharedBufferPool defaultScheme string + maxCallAttempts int } // DialOption configures how we set up the connection. @@ -89,6 +104,19 @@ type DialOption interface { var globalDialOptions []DialOption +// perTargetDialOption takes a parsed target and returns a dial option to apply. +// +// This gets called after NewClient() parses the target, and allows per target +// configuration set through a returned DialOption. The DialOption will not take +// effect if specifies a resolver builder, as that Dial Option is factored in +// while parsing target. +type perTargetDialOption interface { + // DialOption returns a Dial Option to apply. + DialOptionForTarget(parsedTarget url.URL) DialOption +} + +var globalPerTargetDialOptions []perTargetDialOption + // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. // @@ -655,6 +683,7 @@ func defaultDialOptions() dialOptions { idleTimeout: 30 * time.Minute, recvBufferPool: nopBufferPool{}, defaultScheme: "dns", + maxCallAttempts: defaultMaxCallAttempts, } } @@ -712,6 +741,23 @@ func WithIdleTimeout(d time.Duration) DialOption { }) } +// WithMaxCallAttempts returns a DialOption that configures the maximum number +// of attempts per call (including retries and hedging) using the channel. +// Service owners may specify a higher value for these parameters, but higher +// values will be treated as equal to the maximum value by the client +// implementation. This mitigates security concerns related to the service +// config being transferred to the client via DNS. +// +// A value of 5 will be used if this dial option is not set or n < 2. +func WithMaxCallAttempts(n int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + if n < 2 { + n = defaultMaxCallAttempts + } + o.maxCallAttempts = n + }) +} + // WithRecvBufferPool returns a DialOption that configures the ClientConn // to use the provided shared buffer pool for parsing incoming messages. Depending // on the application's workload, this could result in reduced memory allocation. diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 6a93475a7fb..38b88350735 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.33.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // source: grpc/health/v1/health.proto diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index 8f793e6e89f..51b736ba06e 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v4.25.2 // source: grpc/health/v1/health.proto @@ -43,6 +43,10 @@ const ( // HealthClient is the client API for Health service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// Health is gRPC's mechanism for checking whether a server is able to handle +// RPCs. Its semantics are documented in +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md. type HealthClient interface { // Check gets the health of the specified service. If the requested service // is unknown, the call will fail with status NOT_FOUND. If the caller does @@ -126,6 +130,10 @@ func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { // HealthServer is the server API for Health service. // All implementations should embed UnimplementedHealthServer // for forward compatibility +// +// Health is gRPC's mechanism for checking whether a server is able to handle +// RPCs. Its semantics are documented in +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md. type HealthServer interface { // Check gets the health of the specified service. If the requested service // is unknown, the call will fail with status NOT_FOUND. If the caller does diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index fed1c011a32..b15cf482d29 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -25,10 +25,10 @@ package backoff import ( "context" "errors" + "math/rand" "time" grpcbackoff "google.golang.org/grpc/backoff" - "google.golang.org/grpc/internal/grpcrand" ) // Strategy defines the methodology for backing off after a grpc connection @@ -67,7 +67,7 @@ func (bc Exponential) Backoff(retries int) time.Duration { } // Randomize backoff delays so that if a cluster of requests start at // the same time, they won't operate in lockstep. - backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1) + backoff *= 1 + bc.Config.Jitter*(rand.Float64()*2-1) if backoff < 0 { return 0 } diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 9c915d9e4b2..d9064871394 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -40,6 +40,12 @@ var ( // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS // handshakes that can be performed. ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) + // EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled + // should be rejected. The HTTP/2 protocol requires ALPN to be enabled, this + // option is present for backward compatibility. 
This option may be overridden + // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true" + // or "false". + EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go deleted file mode 100644 index 0126d6b5108..00000000000 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ /dev/null @@ -1,100 +0,0 @@ -//go:build !go1.21 - -// TODO: when this file is deleted (after Go 1.20 support is dropped), delete -// all of grpcrand and call the rand package directly. - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcrand implements math/rand functions in a concurrent-safe way -// with a global random source, independent of math/rand's global source. -package grpcrand - -import ( - "math/rand" - "sync" - "time" -) - -var ( - r = rand.New(rand.NewSource(time.Now().UnixNano())) - mu sync.Mutex -) - -// Int implements rand.Int on the grpcrand global source. -func Int() int { - mu.Lock() - defer mu.Unlock() - return r.Int() -} - -// Int63n implements rand.Int63n on the grpcrand global source. -func Int63n(n int64) int64 { - mu.Lock() - defer mu.Unlock() - return r.Int63n(n) -} - -// Intn implements rand.Intn on the grpcrand global source. -func Intn(n int) int { - mu.Lock() - defer mu.Unlock() - return r.Intn(n) -} - -// Int31n implements rand.Int31n on the grpcrand global source. -func Int31n(n int32) int32 { - mu.Lock() - defer mu.Unlock() - return r.Int31n(n) -} - -// Float64 implements rand.Float64 on the grpcrand global source. -func Float64() float64 { - mu.Lock() - defer mu.Unlock() - return r.Float64() -} - -// Uint64 implements rand.Uint64 on the grpcrand global source. -func Uint64() uint64 { - mu.Lock() - defer mu.Unlock() - return r.Uint64() -} - -// Uint32 implements rand.Uint32 on the grpcrand global source. -func Uint32() uint32 { - mu.Lock() - defer mu.Unlock() - return r.Uint32() -} - -// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. -func ExpFloat64() float64 { - mu.Lock() - defer mu.Unlock() - return r.ExpFloat64() -} - -// Shuffle implements rand.Shuffle on the grpcrand global source. -var Shuffle = func(n int, f func(int, int)) { - mu.Lock() - defer mu.Unlock() - r.Shuffle(n, f) -} diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go deleted file mode 100644 index c37299af1ef..00000000000 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go +++ /dev/null @@ -1,73 +0,0 @@ -//go:build go1.21 - -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcrand implements math/rand functions in a concurrent-safe way -// with a global random source, independent of math/rand's global source. -package grpcrand - -import "math/rand" - -// This implementation will be used for Go version 1.21 or newer. -// For older versions, the original implementation with mutex will be used. - -// Int implements rand.Int on the grpcrand global source. -func Int() int { - return rand.Int() -} - -// Int63n implements rand.Int63n on the grpcrand global source. -func Int63n(n int64) int64 { - return rand.Int63n(n) -} - -// Intn implements rand.Intn on the grpcrand global source. -func Intn(n int) int { - return rand.Intn(n) -} - -// Int31n implements rand.Int31n on the grpcrand global source. -func Int31n(n int32) int32 { - return rand.Int31n(n) -} - -// Float64 implements rand.Float64 on the grpcrand global source. -func Float64() float64 { - return rand.Float64() -} - -// Uint64 implements rand.Uint64 on the grpcrand global source. -func Uint64() uint64 { - return rand.Uint64() -} - -// Uint32 implements rand.Uint32 on the grpcrand global source. -func Uint32() uint32 { - return rand.Uint32() -} - -// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. -func ExpFloat64() float64 { - return rand.ExpFloat64() -} - -// Shuffle implements rand.Shuffle on the grpcrand global source. -var Shuffle = func(n int, f func(int, int)) { - rand.Shuffle(n, f) -} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 48d24bdb4e6..5d665398692 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -106,6 +106,14 @@ var ( // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. ClearGlobalDialOptions func() + + // AddGlobalPerTargetDialOptions adds a PerTargetDialOption that will be + // configured for newly created ClientConns. + AddGlobalPerTargetDialOptions any // func (opt any) + // ClearGlobalPerTargetDialOptions clears the slice of global late apply + // dial options. + ClearGlobalPerTargetDialOptions func() + // JoinDialOptions combines the dial options passed as arguments into a // single dial option. JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption @@ -126,7 +134,8 @@ var ( // deleted or changed. BinaryLogger any // func(binarylog.Logger) grpc.ServerOption - // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn + // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a + // provided grpc.ClientConn. SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using @@ -184,25 +193,25 @@ var ( ChannelzTurnOffForTesting func() - // TriggerXDSResourceNameNotFoundForTesting triggers the resource-not-found - // error for a given resource type and name. This is usually triggered when - // the associated watch timer fires. 
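// A side note on the grpcrand deletions above: the top-level math/rand
// functions have always been safe for concurrent use, and since Go 1.20 the
// global source is seeded automatically, so once the module floor is a
// recent Go toolchain the mutex-wrapped singleton buys nothing. The
// replacements throughout this patch are one-for-one, e.g.:
//
//	delay := time.Duration(rand.Int63n(int64(time.Second))) // callable from any goroutine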
For testing purposes, having this - // function makes events more predictable than relying on timer events. - TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error + // TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to + // invoke resource-not-found error for the given resource type and name. + TriggerXDSResourceNotFoundForTesting any // func(xdsclient.XDSClient, xdsresource.Type, string) error - // TriggerXDSResourceNameNotFoundClient invokes the testing xDS Client - // singleton to invoke resource not found for a resource type name and - // resource name. - TriggerXDSResourceNameNotFoundClient any // func(string, string) error - - // FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD. + // FromOutgoingContextRaw returns the un-merged, intermediary contents of + // metadata.rawMD. FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool) - // UserSetDefaultScheme is set to true if the user has overridden the default resolver scheme. + // UserSetDefaultScheme is set to true if the user has overridden the + // default resolver scheme. UserSetDefaultScheme bool = false + + // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n + // is the number of elements. swap swaps the elements with indexes i and j. + ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) ) -// HealthChecker defines the signature of the client-side LB channel health checking function. +// HealthChecker defines the signature of the client-side LB channel health +// checking function. // // The implementation is expected to create a health checking RPC stream by // calling newStream(), watch for the health status of serviceName, and report diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index f3f52a59a86..4552db16b02 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -24,6 +24,7 @@ import ( "context" "encoding/json" "fmt" + "math/rand" "net" "os" "strconv" @@ -35,7 +36,6 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/resolver/dns/internal" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" @@ -63,6 +63,8 @@ var ( func init() { resolver.Register(NewBuilder()) internal.TimeAfterFunc = time.After + internal.TimeNowFunc = time.Now + internal.TimeUntilFunc = time.Until internal.NewNetResolver = newNetResolver internal.AddressDialer = addressDialer } @@ -209,12 +211,12 @@ func (d *dnsResolver) watcher() { err = d.cc.UpdateState(*state) } - var waitTime time.Duration + var nextResolutionTime time.Time if err == nil { // Success resolving, wait for the next ResolveNow. However, also wait 30 // seconds at the very least to prevent constantly re-resolving. backoffIndex = 1 - waitTime = MinResolutionInterval + nextResolutionTime = internal.TimeNowFunc().Add(MinResolutionInterval) select { case <-d.ctx.Done(): return @@ -223,13 +225,13 @@ func (d *dnsResolver) watcher() { } else { // Poll on an error found in DNS Resolver or an error received from // ClientConn. 
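// The watcher above now records an absolute nextResolutionTime instead of a
// relative wait, with time.Now, time.Until and time.After all routed through
// overridable hooks. A minimal sketch of that injectable-clock pattern; the
// names here are illustrative, not the actual grpc internals:
//
//	var (
//		timeNow   = time.Now
//		timeUntil = time.Until
//		timeAfter = time.After
//	)
//
//	func waitUntil(ctx context.Context, t time.Time) bool {
//		select {
//		case <-ctx.Done():
//			return false
//		case <-timeAfter(timeUntil(t)):
//			return true
//		}
//	}
//
// Tests can swap all three hooks for a fake clock and advance it manually,
// which makes the re-resolution loop fully deterministic.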
- waitTime = backoff.DefaultExponential.Backoff(backoffIndex) + nextResolutionTime = internal.TimeNowFunc().Add(backoff.DefaultExponential.Backoff(backoffIndex)) backoffIndex++ } select { case <-d.ctx.Done(): return - case <-internal.TimeAfterFunc(waitTime): + case <-internal.TimeAfterFunc(internal.TimeUntilFunc(nextResolutionTime)): } } } @@ -423,7 +425,7 @@ func chosenByPercentage(a *int) bool { if a == nil { return true } - return grpcrand.Intn(100)+1 <= *a + return rand.Intn(100)+1 <= *a } func canaryingSC(js string) string { diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go index a7ecaf8d522..c0eae4f5f83 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go @@ -51,11 +51,22 @@ var ( // The following vars are overridden from tests. var ( // TimeAfterFunc is used by the DNS resolver to wait for the given duration - // to elapse. In non-test code, this is implemented by time.After. In test + // to elapse. In non-test code, this is implemented by time.After. In test // code, this can be used to control the amount of time the resolver is // blocked waiting for the duration to elapse. TimeAfterFunc func(time.Duration) <-chan time.Time + // TimeNowFunc is used by the DNS resolver to get the current time. + // In non-test code, this is implemented by time.Now. In test code, + // this can be used to control the current time for the resolver. + TimeNowFunc func() time.Time + + // TimeUntilFunc is used by the DNS resolver to calculate the remaining + // wait time for re-resolution. In non-test code, this is implemented by + // time.Until. In test code, this can be used to control the remaining + // time for resolver to wait for re-resolution. + TimeUntilFunc func(time.Time) time.Duration + // NewNetResolver returns the net.Resolver instance for the given target. NewNetResolver func(string) (NetResolver, error) diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index cab0e2d3d44..b7091165b50 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -25,6 +25,7 @@ import ( "fmt" "io" "math" + "math/rand" "net" "net/http" "strconv" @@ -43,7 +44,6 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -1440,7 +1440,7 @@ func getJitter(v time.Duration) time.Duration { } // Generate a jitter between +/- 10% of the value. r := int64(v / 10) - j := grpcrand.Int63n(2*r) - r + j := rand.Int63n(2*r) - r return time.Duration(j) } diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 6c01a9b359c..1e9485fd6e2 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -90,21 +90,6 @@ func Pairs(kv ...string) MD { return md } -// String implements the Stringer interface for pretty-printing a MD. -// Ordering of the values is non-deterministic as it ranges over a map. 
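// Worked range check for getJitter above: with r = int64(v/10),
// rand.Int63n(2*r) is uniform over [0, 2r), so j = rand.Int63n(2*r) - r is
// uniform over [-r, r), i.e. roughly plus or minus 10% of v, matching the
// comment on that function.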
-func (md MD) String() string { - var sb strings.Builder - fmt.Fprintf(&sb, "MD{") - for k, v := range md { - if sb.Len() > 3 { - fmt.Fprintf(&sb, ", ") - } - fmt.Fprintf(&sb, "%s=[%s]", k, strings.Join(v, ", ")) - } - fmt.Fprintf(&sb, "}") - return sb.String() -} - // Len returns the number of items in md. func (md MD) Len() int { return len(md) diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 56e8aba783f..bdaa2130e48 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -22,7 +22,7 @@ import ( "context" "fmt" "io" - "sync" + "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" @@ -33,35 +33,43 @@ import ( "google.golang.org/grpc/status" ) +// pickerGeneration stores a picker and a channel used to signal that a picker +// newer than this one is available. +type pickerGeneration struct { + // picker is the picker produced by the LB policy. May be nil if a picker + // has never been produced. + picker balancer.Picker + // blockingCh is closed when the picker has been invalidated because there + // is a new one available. + blockingCh chan struct{} +} + // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick // actions and unblock when there's a picker update. type pickerWrapper struct { - mu sync.Mutex - done bool - blockingCh chan struct{} - picker balancer.Picker + // If pickerGen holds a nil pointer, the pickerWrapper is closed. + pickerGen atomic.Pointer[pickerGeneration] statsHandlers []stats.Handler // to record blocking picker calls } func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { - return &pickerWrapper{ - blockingCh: make(chan struct{}), + pw := &pickerWrapper{ statsHandlers: statsHandlers, } + pw.pickerGen.Store(&pickerGeneration{ + blockingCh: make(chan struct{}), + }) + return pw } -// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. +// updatePicker is called by UpdateState calls from the LB policy. It +// unblocks all blocked pick. func (pw *pickerWrapper) updatePicker(p balancer.Picker) { - pw.mu.Lock() - if pw.done { - pw.mu.Unlock() - return - } - pw.picker = p - // pw.blockingCh should never be nil. - close(pw.blockingCh) - pw.blockingCh = make(chan struct{}) - pw.mu.Unlock() + old := pw.pickerGen.Swap(&pickerGeneration{ + picker: p, + blockingCh: make(chan struct{}), + }) + close(old.blockingCh) } // doneChannelzWrapper performs the following: @@ -98,20 +106,17 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. var lastPickErr error for { - pw.mu.Lock() - if pw.done { - pw.mu.Unlock() + pg := pw.pickerGen.Load() + if pg == nil { return nil, balancer.PickResult{}, ErrClientConnClosing } - - if pw.picker == nil { - ch = pw.blockingCh + if pg.picker == nil { + ch = pg.blockingCh } - if ch == pw.blockingCh { + if ch == pg.blockingCh { // This could happen when either: // - pw.picker is nil (the previous if condition), or - // - has called pick on the current picker. - pw.mu.Unlock() + // - we have already called pick on the current picker. select { case <-ctx.Done(): var errStr string @@ -145,9 +150,8 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. 
} } - ch = pw.blockingCh - p := pw.picker - pw.mu.Unlock() + ch = pg.blockingCh + p := pg.picker pickResult, err := p.Pick(info) if err != nil { @@ -197,24 +201,15 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } func (pw *pickerWrapper) close() { - pw.mu.Lock() - defer pw.mu.Unlock() - if pw.done { - return - } - pw.done = true - close(pw.blockingCh) + old := pw.pickerGen.Swap(nil) + close(old.blockingCh) } // reset clears the pickerWrapper and prepares it for being used again when idle // mode is exited. func (pw *pickerWrapper) reset() { - pw.mu.Lock() - defer pw.mu.Unlock() - if pw.done { - return - } - pw.blockingCh = make(chan struct{}) + old := pw.pickerGen.Swap(&pickerGeneration{blockingCh: make(chan struct{})}) + close(old.blockingCh) } // dropError is a wrapper error that indicates the LB policy wishes to drop the diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go index 6c49c2333b1..666eda8e5f3 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go @@ -21,7 +21,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // source: grpc/reflection/v1/reflection.proto diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go index 6e544f81e4e..17d21fde22a 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go @@ -21,7 +21,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v4.25.2 // source: grpc/reflection/v1/reflection.proto diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go index 909b24a19da..cd032acefca 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // grpc/reflection/v1alpha/reflection.proto is a deprecated file. diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go index 3de5dc354f6..93886e38216 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v4.25.2 // grpc/reflection/v1alpha/reflection.proto is a deprecated file. 
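The picker_wrapper.go rewrite above trades a mutex and mutable fields for one atomically swapped pointer: each pickerGeneration pairs a picker with a channel that is closed exactly once, when that generation is superseded, and a nil pointer encodes "closed". A minimal generic sketch of the same pattern, with names of my own choosing and assuming Go 1.19+ for the generic atomic.Pointer:

package sketch

import "sync/atomic"

// generation pairs a value with a channel that is closed exactly once,
// when a newer generation replaces it.
type generation[T any] struct {
	val   T
	stale chan struct{}
}

// box publishes values lock-free; readers select on the stale channel
// instead of polling under a mutex.
type box[T any] struct {
	cur atomic.Pointer[generation[T]]
}

func newBox[T any]() *box[T] {
	b := &box[T]{}
	b.cur.Store(&generation[T]{stale: make(chan struct{})})
	return b
}

// set swaps in a fresh generation and wakes everyone blocked on the old one.
func (b *box[T]) set(v T) {
	old := b.cur.Swap(&generation[T]{val: v, stale: make(chan struct{})})
	close(old.stale)
}

// get returns the current value plus a channel that is closed once that
// value is stale.
func (b *box[T]) get() (T, <-chan struct{}) {
	g := b.cur.Load()
	return g.val, g.stale
}

Because Swap hands back the old generation exactly once, close never double-fires, which is what the previous done/blockingCh bookkeeping had to guard against by hand.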
diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go index 9dcc9780f89..c5fb45236fa 100644 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -171,7 +171,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { // ParseServiceConfig is called by resolver implementations to parse a JSON // representation of the service config. func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { - return parseServiceConfig(scJSON) + return parseServiceConfig(scJSON, ccr.cc.dopts.maxCallAttempts) } // addChannelzTraceEvent adds a channelz trace event containing the new diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 9da8fc8027d..2671c5ef69f 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -26,6 +26,7 @@ import ( "time" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" @@ -163,9 +164,11 @@ type jsonSC struct { } func init() { - internal.ParseServiceConfig = parseServiceConfig + internal.ParseServiceConfig = func(js string) *serviceconfig.ParseResult { + return parseServiceConfig(js, defaultMaxCallAttempts) + } } -func parseServiceConfig(js string) *serviceconfig.ParseResult { +func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { if len(js) == 0 { return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} } @@ -183,12 +186,12 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { } c := rsc.LoadBalancingConfig if c == nil { - name := PickFirstBalancerName + name := pickfirst.Name if rsc.LoadBalancingPolicy != nil { name = *rsc.LoadBalancingPolicy } if balancer.Get(name) == nil { - name = PickFirstBalancerName + name = pickfirst.Name } cfg := []map[string]any{{name: struct{}{}}} strCfg, err := json.Marshal(cfg) @@ -218,7 +221,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { WaitForReady: m.WaitForReady, Timeout: (*time.Duration)(m.Timeout), } - if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { + if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy, maxAttempts); err != nil { logger.Warningf("grpc: unmarshalling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } @@ -264,7 +267,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { return &serviceconfig.ParseResult{Config: &sc} } -func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPolicy, err error) { +func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) { if jrp == nil { return nil, nil } @@ -278,17 +281,16 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol return nil, nil } + if jrp.MaxAttempts < maxAttempts { + maxAttempts = jrp.MaxAttempts + } rp := &internalserviceconfig.RetryPolicy{ - MaxAttempts: jrp.MaxAttempts, + MaxAttempts: maxAttempts, InitialBackoff: time.Duration(jrp.InitialBackoff), MaxBackoff: time.Duration(jrp.MaxBackoff), BackoffMultiplier: jrp.BackoffMultiplier, RetryableStatusCodes: make(map[codes.Code]bool), } - if rp.MaxAttempts > 5 { - // TODO(retry): Make the max maxAttempts configurable. 
- rp.MaxAttempts = 5 - } for _, code := range jrp.RetryableStatusCodes { rp.RetryableStatusCodes[code] = true } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index b54563e81cd..8051ef5b514 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -23,6 +23,7 @@ import ( "errors" "io" "math" + "math/rand" "strconv" "sync" "time" @@ -34,7 +35,6 @@ import ( "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" iresolver "google.golang.org/grpc/internal/resolver" @@ -699,7 +699,7 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { if max := float64(rp.MaxBackoff); cur > max { cur = max } - dur = time.Duration(grpcrand.Int63n(int64(cur))) + dur = time.Duration(rand.Int63n(int64(cur))) cs.numRetriesSincePushback++ } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index e1806e76000..bafaef99be9 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.64.0" +const Version = "1.65.0" diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index f47902371a6..bb2966e3b4c 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -102,7 +102,7 @@ type decoder struct { } // newError returns an error object with position info. -func (d decoder) newError(pos int, f string, x ...interface{}) error { +func (d decoder) newError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("(line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -114,7 +114,7 @@ func (d decoder) unexpectedTokenError(tok json.Token) error { } // syntaxError returns a syntax error for given position. -func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { +func (d decoder) syntaxError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) return errors.New(head+f, x...) diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index a45f112bce3..24bc98ac422 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -84,7 +84,7 @@ type decoder struct { } // newError returns an error object with position info. -func (d decoder) newError(pos int, f string, x ...interface{}) error { +func (d decoder) newError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("(line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -96,7 +96,7 @@ func (d decoder) unexpectedTokenError(tok text.Token) error { } // syntaxError returns a syntax error for given position. 
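// On the service_config.go change above: the old hardcoded cap of five
// retry attempts is replaced by a per-channel limit threaded down from the
// dial options, and the JSON policy's maxAttempts is clamped against it:
//
//	if jrp.MaxAttempts < maxAttempts {
//		maxAttempts = jrp.MaxAttempts
//	}
//
// So a methodConfig declaring "maxAttempts": 10 still yields at most
// defaultMaxCallAttempts (to my understanding still 5) unless the channel
// raises its limit.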
-func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { +func (d decoder) syntaxError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) return errors.New(head+f, x...) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go index d2b3ac031e1..ea1d3e65a57 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -214,7 +214,7 @@ func (d *Decoder) parseNext() (Token, error) { // newSyntaxError returns an error with line and column information useful for // syntax errors. -func (d *Decoder) newSyntaxError(pos int, f string, x ...interface{}) error { +func (d *Decoder) newSyntaxError(pos int, f string, x ...any) error { e := errors.New(f, x...) line, column := d.Position(pos) return errors.New("syntax error (line %d:%d): %v", line, column, e) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index 87853e786d0..099b2bf451b 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -601,7 +601,7 @@ func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token { // newSyntaxError returns a syntax error with line and column information for // current position. -func (d *Decoder) newSyntaxError(f string, x ...interface{}) error { +func (d *Decoder) newSyntaxError(f string, x ...any) error { e := errors.New(f, x...) line, column := d.Position(len(d.orig) - len(d.in)) return errors.New("syntax error (line %d:%d): %v", line, column, e) diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go index d96719829c2..c2d6bd5265d 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/errors.go +++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go @@ -17,7 +17,7 @@ var Error = errors.New("protobuf error") // New formats a string according to the format specifier and arguments and // returns an error that has a "proto" prefix. -func New(f string, x ...interface{}) error { +func New(f string, x ...any) error { return &prefixError{s: format(f, x...)} } @@ -43,7 +43,7 @@ func (e *prefixError) Unwrap() error { // Wrap returns an error that has a "proto" prefix, the formatted string described // by the format specifier and arguments, and a suffix of err. The error wraps err. 
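// A note on the interface{} to any churn in the protobuf files here: since
// Go 1.18 the builtin is declared as
//
//	type any = interface{}
//
// an alias rather than a distinct type, so these rewrites are purely
// cosmetic and change no behavior or type identity.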
-func Wrap(err error, f string, x ...interface{}) error { +func Wrap(err error, f string, x ...any) error { return &wrapError{ s: format(f, x...), err: err, @@ -67,7 +67,7 @@ func (e *wrapError) Is(target error) bool { return target == Error } -func format(f string, x ...interface{}) string { +func format(f string, x ...any) string { // avoid "proto: " prefix when chaining for i := 0; i < len(x); i++ { switch e := x[i].(type) { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index ece53bea328..df53ff40b25 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -383,6 +383,10 @@ func (fd *Field) Message() protoreflect.MessageDescriptor { } return fd.L1.Message } +func (fd *Field) IsMapEntry() bool { + parent, ok := fd.L0.Parent.(protoreflect.MessageDescriptor) + return ok && parent.IsMapEntry() +} func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 3bc3b1cdf80..8a57d60b08c 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -534,7 +534,7 @@ func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor } var nameBuilderPool = sync.Pool{ - New: func() interface{} { return new(strs.Builder) }, + New: func() any { return new(strs.Builder) }, } func getBuilder() *strs.Builder { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 570181eb487..e56c91a8dbe 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -45,6 +45,11 @@ func (file *File) resolveMessages() { case protoreflect.MessageKind, protoreflect.GroupKind: fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx) depIdx++ + if fd.L1.Kind == protoreflect.GroupKind && (fd.IsMap() || fd.IsMapEntry()) { + // A map field might inherit delimited encoding from a file-wide default feature. + // But maps never actually use delimited encoding. (At least for now...) + fd.L1.Kind = protoreflect.MessageKind + } } // Default is resolved here since it depends on Enum being resolved. diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go index f0e38c4ef4e..ba83fea44c3 100644 --- a/vendor/google.golang.org/protobuf/internal/filetype/build.go +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -68,7 +68,7 @@ type Builder struct { // and for input and output messages referenced by service methods. // Dependencies must come after declarations, but the ordering of // dependencies themselves is unspecified. - GoTypes []interface{} + GoTypes []any // DependencyIndexes is an ordered list of indexes into GoTypes for the // dependencies of messages, extensions, or services. 
@@ -268,7 +268,7 @@ func (x depIdxs) Get(i, j int32) int32 { type ( resolverByIndex struct { - goTypes []interface{} + goTypes []any depIdxs depIdxs fileRegistry } diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 1447a11987b..f30ab6b586f 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -860,11 +860,13 @@ const ( EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" EnumValueOptions_Features_field_name protoreflect.Name = "features" EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" + EnumValueOptions_FeatureSupport_field_name protoreflect.Name = "feature_support" EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features" EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact" + EnumValueOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.feature_support" EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" ) @@ -873,6 +875,7 @@ const ( EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2 EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3 + EnumValueOptions_FeatureSupport_field_number protoreflect.FieldNumber = 4 EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go index a371f98de14..5d5771c2ed5 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -22,13 +22,13 @@ type Export struct{} // NewError formats a string according to the format specifier and arguments and // returns an error that has a "proto" prefix. -func (Export) NewError(f string, x ...interface{}) error { +func (Export) NewError(f string, x ...any) error { return errors.New(f, x...) } // enum is any enum type generated by protoc-gen-go // and must be a named int32 type. -type enum = interface{} +type enum = any // EnumOf returns the protoreflect.Enum interface over e. // It returns nil if e is nil. @@ -81,7 +81,7 @@ func (Export) EnumStringOf(ed protoreflect.EnumDescriptor, n protoreflect.EnumNu // message is any message type generated by protoc-gen-go // and must be a pointer to a named struct type. -type message = interface{} +type message = any // legacyMessageWrapper wraps a v2 message as a v1 message. 
type legacyMessageWrapper struct{ m protoreflect.ProtoMessage } diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go index bff041edc94..f29e6a8fa88 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go +++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -68,7 +68,7 @@ func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error { } for _, x := range *ext { ei := getExtensionFieldInfo(x.Type()) - if ei.funcs.isInit == nil { + if ei.funcs.isInit == nil || x.isUnexpandedLazy() { continue } v := x.Value() diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index 2b8f122c27b..4bb0a7a20ce 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -99,6 +99,28 @@ func (f *ExtensionField) canLazy(xt protoreflect.ExtensionType) bool { return false } +// isUnexpandedLazy returns true if the ExtensionField is lazy and not +// yet expanded, which means it's present and already checked for +// initialized required fields. +func (f *ExtensionField) isUnexpandedLazy() bool { return f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 } + +// lazyBuffer retrieves the buffer for a lazy extension if it's not yet expanded. + +// The returned buffer has to be kept over whatever operation we're planning, +// as re-retrieving it will fail after the message is lazily decoded. +func (f *ExtensionField) lazyBuffer() []byte { + // This function might be in the critical path, so check the atomic without + // taking a lock first, then only take the lock if needed. + if !f.isUnexpandedLazy() { + return nil + } + f.lazy.mu.Lock() + defer f.lazy.mu.Unlock() + return f.lazy.b +} + func (f *ExtensionField) lazyInit() { f.lazy.mu.Lock() defer f.lazy.mu.Unlock() diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go index b7a23faf1e4..7a16ec13dd1 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go @@ -26,6 +26,15 @@ func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int) } num, _ := protowire.DecodeTag(xi.wiretag) size += messageset.SizeField(num) + if fullyLazyExtensions(opts) { + // Don't expand the extension, instead use the buffer to calculate size + if lb := x.lazyBuffer(); lb != nil { + // We got hold of the buffer, so it's still lazy. + // Don't count the tag size in the extension buffer, it's already added. + size += protowire.SizeTag(messageset.FieldMessage) + len(lb) - xi.tagsize + continue + } + } size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts) } @@ -85,6 +94,19 @@ func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts ma xi := getExtensionFieldInfo(x.Type()) num, _ := protowire.DecodeTag(xi.wiretag) b = messageset.AppendFieldStart(b, num) + + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content.
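// The isUnexpandedLazy/lazyBuffer pair above is a check-then-lock fast
// path: a lock-free atomic load screens out already-expanded fields before
// the mutex is touched. A minimal sketch of the shape, with illustrative
// names:
//
//	type lazyField struct {
//		mu         sync.Mutex
//		atomicOnce uint32 // 0 = still raw wire bytes, 1 = expanded
//		b          []byte
//	}
//
//	func (f *lazyField) buffer() []byte {
//		if atomic.LoadUint32(&f.atomicOnce) != 0 {
//			return nil // expanded; wire bytes no longer authoritative
//		}
//		f.mu.Lock()
//		defer f.mu.Unlock()
//		return f.b
//	}
//
// The mutex still matters on the slow path: a concurrent expansion may be
// in flight, and holding the lock keeps the returned buffer consistent.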
+ if lb := x.lazyBuffer(); lb != nil { + // The tag inside the lazy buffer is a different tag (the extension + // number), but what we need here is the tag for FieldMessage: + b = protowire.AppendVarint(b, protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType)) + b = append(b, lb[xi.tagsize:]...) + b = messageset.AppendFieldEnd(b) + return b, nil + } + } + b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts) if err != nil { return b, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index 185ef2efa5b..e06ece55a26 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -14,7 +14,7 @@ import ( // unwrapper unwraps the value to the underlying value. // This is implemented by List and Map. type unwrapper interface { - protoUnwrap() interface{} + protoUnwrap() any } // A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types. diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go index f89136516f9..18cb96fd70a 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go @@ -136,6 +136,6 @@ func (ls *listReflect) NewElement() protoreflect.Value { func (ls *listReflect) IsValid() bool { return !ls.v.IsNil() } -func (ls *listReflect) protoUnwrap() interface{} { +func (ls *listReflect) protoUnwrap() any { return ls.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go index f30b0a0576d..304244a651d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -116,6 +116,6 @@ func (ms *mapReflect) NewValue() protoreflect.Value { func (ms *mapReflect) IsValid() bool { return !ms.v.IsNil() } -func (ms *mapReflect) protoUnwrap() interface{} { +func (ms *mapReflect) protoUnwrap() any { return ms.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index 845c67d6e7e..febd2122472 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -49,8 +49,11 @@ func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) { return 0 } if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() { - if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 { - return int(size) + // The size cache contains the size + 1, to allow the + // zero value to be invalid, while also allowing for a + // 0 size to be cached. 
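// Worked mapping for the size+1 scheme introduced here: a stored 0 means
// "nothing cached" (conveniently the zero value of the field), a stored 1
// means a cached size of 0, and a stored n means a cached size of n-1. That
// is why the read path checks size > 0 and returns size-1, and why the
// overflow guard below compares against math.MaxInt32 - 1 and stores 0
// where the old code stored a -1 sentinel.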
+ if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size > 0 { + return int(size - 1) } } return mi.sizePointerSlow(p, opts) @@ -60,7 +63,7 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int if flags.ProtoLegacy && mi.isMessageSet { size = sizeMessageSet(mi, p, opts) if mi.sizecacheOffset.IsValid() { - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1)) } return size } @@ -84,13 +87,16 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int } } if mi.sizecacheOffset.IsValid() { - if size > math.MaxInt32 { + if size > (math.MaxInt32 - 1) { // The size is too large for the int32 sizecache field. // We will need to recompute the size when encoding; // unfortunately expensive, but better than invalid output. - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1) + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), 0) } else { - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + // The size cache contains the size + 1, to allow the + // zero value to be invalid, while also allowing for a + // 0 size to be cached. + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1)) } } return size @@ -149,6 +155,14 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt return b, nil } +// fullyLazyExtensions returns true if we should attempt to keep extensions lazy over size and marshal. +func fullyLazyExtensions(opts marshalOptions) bool { + // When deterministic marshaling is requested, force an unmarshal for lazy + // extensions to produce a deterministic result, instead of passing through + // bytes lazily that may or may not match what Go Protobuf would produce. + return opts.flags&piface.MarshalDeterministic == 0 +} + func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) { if ext == nil { return 0 @@ -158,6 +172,14 @@ func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marsha if xi.funcs.size == nil { continue } + if fullyLazyExtensions(opts) { + // Don't expand the extension, instead use the buffer to calculate size + if lb := x.lazyBuffer(); lb != nil { + // We got hold of the buffer, so it's still lazy. + n += len(lb) + continue + } + } n += xi.funcs.size(x.Value(), xi.tagsize, opts) } return n @@ -176,6 +198,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, var err error for _, x := range *ext { xi := getExtensionFieldInfo(x.Type()) + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + b = append(b, lb...) + continue + } + } b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) } return b, err @@ -191,6 +220,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, for _, k := range keys { x := (*ext)[int32(k)] xi := getExtensionFieldInfo(x.Type()) + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + b = append(b, lb...) 
+ continue + } + } b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) if err != nil { return b, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go index cb25b0bae1d..e31249f64f7 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go @@ -53,7 +53,7 @@ type ExtensionInfo struct { // type returned by InterfaceOf may not be identical. // // Deprecated: Use InterfaceOf(xt.Zero()) instead. - ExtensionType interface{} + ExtensionType any // Field is the field number of the extension. // @@ -95,16 +95,16 @@ func (xi *ExtensionInfo) New() protoreflect.Value { func (xi *ExtensionInfo) Zero() protoreflect.Value { return xi.lazyInit().Zero() } -func (xi *ExtensionInfo) ValueOf(v interface{}) protoreflect.Value { +func (xi *ExtensionInfo) ValueOf(v any) protoreflect.Value { return xi.lazyInit().PBValueOf(reflect.ValueOf(v)) } -func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) interface{} { +func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) any { return xi.lazyInit().GoValueOf(v).Interface() } func (xi *ExtensionInfo) IsValidValue(v protoreflect.Value) bool { return xi.lazyInit().IsValidPB(v) } -func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool { +func (xi *ExtensionInfo) IsValidInterface(v any) bool { return xi.lazyInit().IsValidGo(reflect.ValueOf(v)) } func (xi *ExtensionInfo) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go index c1c33d0057e..81b2b1a763d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go @@ -97,7 +97,7 @@ func (e *legacyEnumWrapper) Number() protoreflect.EnumNumber { func (e *legacyEnumWrapper) ProtoReflect() protoreflect.Enum { return e } -func (e *legacyEnumWrapper) protoUnwrap() interface{} { +func (e *legacyEnumWrapper) protoUnwrap() any { v := reflect.New(e.goTyp).Elem() v.SetInt(int64(e.num)) return v.Interface() diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 950e9a1fe7a..bf0b6049b46 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -216,7 +216,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName } for _, fn := range methods { for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { + if vs, ok := v.Interface().([]any); ok { for _, v := range vs { oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) } @@ -567,6 +567,6 @@ func (m aberrantMessage) IsValid() bool { func (m aberrantMessage) ProtoMethods() *protoiface.Methods { return aberrantProtoMethods } -func (m aberrantMessage) protoUnwrap() interface{} { +func (m aberrantMessage) protoUnwrap() any { return m.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 629bacdcedd..019399d454d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -35,7 +35,7 @@ type MessageInfo struct { 
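// Tying the encode.go changes above together: fullyLazyExtensions gates
// every lazy fast path on the marshal options, because retained wire bytes
// are only safe to copy through verbatim when the caller did not request
// deterministic output; deterministic marshaling must expand and re-encode
// so the result matches what a fresh Go protobuf marshal would produce. In
// sketch form (deterministic() is an illustrative accessor, not the real API):
//
//	if !deterministic(opts) {
//		if lb := x.lazyBuffer(); lb != nil {
//			b = append(b, lb...) // already valid wire format
//			continue
//		}
//	}
//	b, err = marshalField(b, x) // expand and re-encode
//
// Sizing follows the same rule, which keeps Size and Marshal consistent.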
Exporter exporter // OneofWrappers is list of pointers to oneof wrapper struct types. - OneofWrappers []interface{} + OneofWrappers []any initMu sync.Mutex // protects all unexported fields initDone uint32 @@ -47,7 +47,7 @@ type MessageInfo struct { // exporter is a function that returns a reference to the ith field of v, // where v is a pointer to a struct. It returns nil if it does not support // exporting the requested field (e.g., already exported). -type exporter func(v interface{}, i int) interface{} +type exporter func(v any, i int) any // getMessageInfo returns the MessageInfo for any message type that // is generated by our implementation of protoc-gen-go (for v2 and on). @@ -201,7 +201,7 @@ fieldLoop: } for _, fn := range methods { for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { + if vs, ok := v.Interface().([]any); ok { oneofWrappers = vs } } @@ -256,7 +256,7 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType { type mapEntryType struct { desc protoreflect.MessageDescriptor - valType interface{} // zero value of enum or message type + valType any // zero value of enum or message type } func (mt mapEntryType) New() protoreflect.Message { diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index a6f0dbdade6..ecb4623d701 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -20,7 +20,7 @@ type reflectMessageInfo struct { // fieldTypes contains the zero value of an enum or message field. // For lists, it contains the element type. // For maps, it contains the entry value type. - fieldTypes map[protoreflect.FieldNumber]interface{} + fieldTypes map[protoreflect.FieldNumber]any // denseFields is a subset of fields where: // 0 < fieldDesc.Number() < len(denseFields) @@ -28,7 +28,7 @@ type reflectMessageInfo struct { denseFields []*fieldInfo // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs. - rangeInfos []interface{} // either *fieldInfo or *oneofInfo + rangeInfos []any // either *fieldInfo or *oneofInfo getUnknown func(pointer) protoreflect.RawFields setUnknown func(pointer, protoreflect.RawFields) @@ -224,7 +224,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { } if ft != nil { if mi.fieldTypes == nil { - mi.fieldTypes = make(map[protoreflect.FieldNumber]interface{}) + mi.fieldTypes = make(map[protoreflect.FieldNumber]any) } mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface() } @@ -255,6 +255,10 @@ func (m *extensionMap) Has(xd protoreflect.ExtensionTypeDescriptor) (ok bool) { if !ok { return false } + if x.isUnexpandedLazy() { + // Avoid calling x.Value(), which triggers a lazy unmarshal. + return true + } switch { case xd.IsList(): return x.Value().List().Len() > 0 @@ -389,7 +393,7 @@ var ( // MessageOf returns a reflective view over a message. The input must be a // pointer to a named Go struct. If the provided type has a ProtoReflect method, // it must be implemented by calling this method. 
-func (mi *MessageInfo) MessageOf(m interface{}) protoreflect.Message { +func (mi *MessageInfo) MessageOf(m any) protoreflect.Message { if reflect.TypeOf(m) != mi.GoReflectType { panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) } @@ -417,7 +421,7 @@ func (m *messageIfaceWrapper) Reset() { func (m *messageIfaceWrapper) ProtoReflect() protoreflect.Message { return (*messageReflectWrapper)(m) } -func (m *messageIfaceWrapper) protoUnwrap() interface{} { +func (m *messageIfaceWrapper) protoUnwrap() any { return m.p.AsIfaceOf(m.mi.GoReflectType.Elem()) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go index 29ba6bd3552..99dc23c6f0a 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go @@ -23,7 +23,7 @@ func (m *messageState) New() protoreflect.Message { func (m *messageState) Interface() protoreflect.ProtoMessage { return m.protoUnwrap().(protoreflect.ProtoMessage) } -func (m *messageState) protoUnwrap() interface{} { +func (m *messageState) protoUnwrap() any { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageState) ProtoMethods() *protoiface.Methods { @@ -154,7 +154,7 @@ func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage { } return (*messageIfaceWrapper)(m) } -func (m *messageReflectWrapper) protoUnwrap() interface{} { +func (m *messageReflectWrapper) protoUnwrap() any { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods { diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 517e94434c7..da685e8a29d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -16,7 +16,7 @@ import ( const UnsafeEnabled = false // Pointer is an opaque pointer type. -type Pointer interface{} +type Pointer any // offset represents the offset to a struct field, accessible from a pointer. // The offset is the field index into a struct. @@ -62,7 +62,7 @@ func pointerOfValue(v reflect.Value) pointer { } // pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v interface{}) pointer { +func pointerOfIface(v any) pointer { return pointer{v: reflect.ValueOf(v)} } @@ -93,7 +93,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value { // AsIfaceOf treats p as a pointer to an object of type t and returns the value. // It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) interface{} { +func (p pointer) AsIfaceOf(t reflect.Type) any { return p.AsValueOf(t).Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 4b020e31164..5f20ca5d8ab 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -50,7 +50,7 @@ func pointerOfValue(v reflect.Value) pointer { } // pointerOfIface returns the pointer portion of an interface. 
-func pointerOfIface(v interface{}) pointer { +func pointerOfIface(v any) pointer { type ifaceHeader struct { Type unsafe.Pointer Data unsafe.Pointer @@ -80,7 +80,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value { // AsIfaceOf treats p as a pointer to an object of type t and returns the value. // It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) interface{} { +func (p pointer) AsIfaceOf(t reflect.Type) any { // TODO: Use tricky unsafe magic to directly create ifaceHeader. return p.AsValueOf(t).Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go index 1665a68e5b7..a1f09162d05 100644 --- a/vendor/google.golang.org/protobuf/internal/order/range.go +++ b/vendor/google.golang.org/protobuf/internal/order/range.go @@ -18,7 +18,7 @@ type messageField struct { } var messageFieldPool = sync.Pool{ - New: func() interface{} { return new([]messageField) }, + New: func() any { return new([]messageField) }, } type ( @@ -69,7 +69,7 @@ type mapEntry struct { } var mapEntryPool = sync.Pool{ - New: func() interface{} { return new([]mapEntry) }, + New: func() any { return new([]mapEntry) }, } type ( diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index a3cba508022..dbbf1f6862c 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 34 - Patch = 1 + Patch = 2 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index c9c8721a697..d248f292846 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -39,7 +39,7 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) { // If the field is unpopulated, it returns the default value for // scalars and an immutable, empty value for lists or messages. // It panics if xt does not extend m. -func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { +func GetExtension(m Message, xt protoreflect.ExtensionType) any { // Treat nil message interface as an empty message; return the default. if m == nil { return xt.InterfaceOf(xt.Zero()) @@ -51,7 +51,7 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { // SetExtension stores the value of an extension field. // It panics if m is invalid, xt does not extend m, or if type of v // is invalid for the specified extension field. -func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { +func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { xd := xt.TypeDescriptor() pv := xt.ValueOf(v) @@ -78,7 +78,7 @@ func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { // It returns immediately if f returns false. // While iterating, mutating operations may only be performed // on the current extension field. -func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) { +func RangeExtensions(m Message, f func(protoreflect.ExtensionType, any) bool) { // Treat nil message interface as an empty message; nothing to range over. 
if m == nil { return diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index 254ca585424..f3cebab29c8 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -46,6 +46,11 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) } + if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) { + // A map field might inherit delimited encoding from a file-wide default feature. + // But maps never actually use delimited encoding. (At least for now...) + f.L1.Kind = protoreflect.MessageKind + } if fd.DefaultValue != nil { v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable) if err != nil { diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index c6293086750..6de31c2ebdb 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -116,18 +116,6 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds if m.ExtensionRanges().Len() > 0 { return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName()) } - // Verify that field names in proto3 do not conflict if lowercased - // with all underscores removed. - // See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847 - names := map[string]protoreflect.FieldDescriptor{} - for i := 0; i < m.Fields().Len(); i++ { - f1 := m.Fields().Get(i) - s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1) - if f2, ok := names[s]; ok { - return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name()) - } - names[s] = f1 - } } for j, fd := range md.GetField() { diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index 00102d31178..ea154eec44d 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -485,6 +485,8 @@ func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 3: b = p.appendSingularField(b, "debug_redact", nil) + case 4: + b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 5b80afe5204..cd8fadbaf8f 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -510,7 +510,7 @@ type ExtensionType interface { // // ValueOf is more extensive than protoreflect.ValueOf for a given field's // value as it has more type information available. 
-	ValueOf(interface{}) Value
+	ValueOf(any) Value
 
 	// InterfaceOf completely unwraps the Value to the underlying Go type.
 	// InterfaceOf panics if the input is nil or does not represent the
@@ -519,13 +519,13 @@ type ExtensionType interface {
 	//
 	// InterfaceOf is able to unwrap the Value further than Value.Interface
 	// as it has more type information available.
-	InterfaceOf(Value) interface{}
+	InterfaceOf(Value) any
 
 	// IsValidValue reports whether the Value is valid to assign to the field.
 	IsValidValue(Value) bool
 
 	// IsValidInterface reports whether the input is valid to assign to the field.
-	IsValidInterface(interface{}) bool
+	IsValidInterface(any) bool
 }
 
 // EnumDescriptor describes an enum and
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
index 7ced876f4e8..75f83a2af03 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
@@ -32,11 +32,11 @@ const (
 type value struct {
 	pragma.DoNotCompare // 0B
 
-	typ   valueType   // 8B
-	num   uint64      // 8B
-	str   string      // 16B
-	bin   []byte      // 24B
-	iface interface{} // 16B
+	typ   valueType // 8B
+	num   uint64    // 8B
+	str   string    // 16B
+	bin   []byte    // 24B
+	iface any       // 16B
 }
 
 func valueOfString(v string) Value {
@@ -45,7 +45,7 @@ func valueOfString(v string) Value {
 func valueOfBytes(v []byte) Value {
 	return Value{typ: bytesType, bin: v}
 }
-func valueOfIface(v interface{}) Value {
+func valueOfIface(v any) Value {
 	return Value{typ: ifaceType, iface: v}
 }
 
@@ -55,6 +55,6 @@ func (v Value) getString() string {
 func (v Value) getBytes() []byte {
 	return v.bin
 }
-func (v Value) getIface() interface{} {
+func (v Value) getIface() any {
 	return v.iface
 }
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
index 1603097311e..9fe83cef5a9 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
@@ -69,8 +69,8 @@ import (
 // composite Value. Modifying an empty, read-only value panics.
 type Value value
 
-// The protoreflect API uses a custom Value union type instead of interface{}
-// to keep the future open for performance optimizations. Using an interface{}
+// The protoreflect API uses a custom Value union type instead of any
+// to keep the future open for performance optimizations. Using an any
 // always incurs an allocation for primitives (e.g., int64) since it needs to
 // be boxed on the heap (as interfaces can only contain pointers natively).
 // Instead, we represent the Value union as a flat struct that internally keeps
@@ -85,7 +85,7 @@ type Value value
 // ValueOf returns a Value initialized with the concrete value stored in v.
 // This panics if the type does not match one of the allowed types in the
 // Value union.
-func ValueOf(v interface{}) Value {
+func ValueOf(v any) Value {
 	switch v := v.(type) {
 	case nil:
 		return Value{}
@@ -192,10 +192,10 @@ func (v Value) IsValid() bool {
 	return v.typ != nilType
 }
 
-// Interface returns v as an interface{}.
+// Interface returns v as an any.
 //
 // Invariant: v == ValueOf(v).Interface()
-func (v Value) Interface() interface{} {
+func (v Value) Interface() any {
 	switch v.typ {
 	case nilType:
 		return nil
@@ -406,8 +406,8 @@ func (k MapKey) IsValid() bool {
 	return Value(k).IsValid()
 }
 
-// Interface returns k as an interface{}.
-func (k MapKey) Interface() interface{} {
+// Interface returns k as an any.
+func (k MapKey) Interface() any {
 	return Value(k).Interface()
 }
 
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
index b1fdbe3e8e1..7f3583ead81 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
@@ -45,7 +45,7 @@ var (
 
 // typeOf returns a pointer to the Go type information.
 // The pointer is comparable and equal if and only if the types are identical.
-func typeOf(t interface{}) unsafe.Pointer {
+func typeOf(t any) unsafe.Pointer {
 	return (*ifaceHeader)(unsafe.Pointer(&t)).Type
 }
 
@@ -80,7 +80,7 @@ func valueOfBytes(v []byte) Value {
 	p := (*sliceHeader)(unsafe.Pointer(&v))
 	return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))}
 }
-func valueOfIface(v interface{}) Value {
+func valueOfIface(v any) Value {
 	p := (*ifaceHeader)(unsafe.Pointer(&v))
 	return Value{typ: p.Type, ptr: p.Data}
 }
@@ -93,7 +93,7 @@ func (v Value) getBytes() (x []byte) {
 	*(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)}
 	return x
 }
-func (v Value) getIface() (x interface{}) {
+func (v Value) getIface() (x any) {
 	*(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
 	return x
 }
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
index 43547011173..f7d386990a0 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
@@ -15,7 +15,7 @@ import (
 
 type (
 	ifaceHeader struct {
-		_    [0]interface{} // if interfaces have greater alignment than unsafe.Pointer, this will enforce it.
+		_    [0]any // if interfaces have greater alignment than unsafe.Pointer, this will enforce it.
 		Type unsafe.Pointer
 		Data unsafe.Pointer
 	}
@@ -37,7 +37,7 @@ var (
 
 // typeOf returns a pointer to the Go type information.
 // The pointer is comparable and equal if and only if the types are identical.
-func typeOf(t interface{}) unsafe.Pointer {
+func typeOf(t any) unsafe.Pointer {
 	return (*ifaceHeader)(unsafe.Pointer(&t)).Type
 }
 
@@ -70,7 +70,7 @@ func valueOfString(v string) Value {
 func valueOfBytes(v []byte) Value {
 	return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))}
 }
-func valueOfIface(v interface{}) Value {
+func valueOfIface(v any) Value {
 	p := (*ifaceHeader)(unsafe.Pointer(&v))
 	return Value{typ: p.Type, ptr: p.Data}
 }
@@ -81,7 +81,7 @@ func (v Value) getString() string {
 func (v Value) getBytes() []byte {
 	return unsafe.Slice((*byte)(v.ptr), v.num)
 }
-func (v Value) getIface() (x interface{}) {
+func (v Value) getIface() (x any) {
 	*(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
 	return x
 }
diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
index 6267dc52a67..de177733914 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
@@ -95,7 +95,7 @@ type Files struct {
 	// multiple files. Only top-level declarations are registered.
 	// Note that enum values are in the top-level since that are in the same
 	// scope as the parent enum.
-	descsByName map[protoreflect.FullName]interface{}
+	descsByName map[protoreflect.FullName]any
 	filesByPath map[string][]protoreflect.FileDescriptor
 	numFiles    int
 }
@@ -117,7 +117,7 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error {
 		defer globalMutex.Unlock()
 	}
 	if r.descsByName == nil {
-		r.descsByName = map[protoreflect.FullName]interface{}{
+		r.descsByName = map[protoreflect.FullName]any{
 			"": &packageDescriptor{},
 		}
 		r.filesByPath = make(map[string][]protoreflect.FileDescriptor)
@@ -485,7 +485,7 @@ type Types struct {
 }
 
 type (
-	typesByName         map[protoreflect.FullName]interface{}
+	typesByName         map[protoreflect.FullName]any
 	extensionsByMessage map[protoreflect.FullName]extensionsByNumber
 	extensionsByNumber  map[protoreflect.FieldNumber]protoreflect.ExtensionType
 )
@@ -570,7 +570,7 @@ func (r *Types) RegisterExtension(xt protoreflect.ExtensionType) error {
 	return nil
 }
 
-func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interface{}) error {
+func (r *Types) register(kind string, desc protoreflect.Descriptor, typ any) error {
 	name := desc.FullName()
 	prev := r.typesByName[name]
 	if prev != nil {
@@ -841,7 +841,7 @@ func (r *Types) RangeExtensionsByMessage(message protoreflect.FullName, f func(p
 	}
 }
 
-func typeName(t interface{}) string {
+func typeName(t any) string {
 	switch t.(type) {
 	case protoreflect.EnumType:
 		return "enum"
@@ -854,7 +854,7 @@ func typeName(t interface{}) string {
 	}
 }
 
-func amendErrorWithCaller(err error, prev, curr interface{}) error {
+func amendErrorWithCaller(err error, prev, curr any) error {
 	prevPkg := goPackage(prev)
 	currPkg := goPackage(curr)
 	if prevPkg == "" || currPkg == "" || prevPkg == currPkg {
@@ -863,7 +863,7 @@ func amendErrorWithCaller(err error, prev, curr interface{}) error {
 	return errors.New("%s\n\tpreviously from: %q\n\tcurrently from: %q", err, prevPkg, currPkg)
 }
 
-func goPackage(v interface{}) string {
+func goPackage(v any) string {
 	switch d := v.(type) {
 	case protoreflect.EnumType:
 		v = d.Descriptor()
diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index 10c9030eb03..9403eb07507 100644
---
a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -3012,6 +3012,8 @@ type EnumValueOptions struct { // out when using debug formats, e.g. when the field contains sensitive // credentials. DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + // Information about the support window of a feature value. + FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,4,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -3075,6 +3077,13 @@ func (x *EnumValueOptions) GetDebugRedact() bool { return Default_EnumValueOptions_DebugRedact } +func (x *EnumValueOptions) GetFeatureSupport() *FieldOptions_FeatureSupport { + if x != nil { + return x.FeatureSupport + } + return nil +} + func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -4706,7 +4715,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x69, 0x6e, 0x67, 0x22, 0x97, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x69, 0x6e, 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, @@ -4779,438 +4788,445 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, - 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xf4, 0x03, - 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, - 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, - 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, - 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 
0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, - 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, - 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, - 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, - 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, - 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, - 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, - 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, - 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, - 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, - 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, - 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, - 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 
0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, - 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, - 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, - 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, - 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, - 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, - 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, - 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, - 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, - 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, - 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 
0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 0x0e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, - 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, - 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0f, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, - 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, - 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, - 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, - 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, - 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, - 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, - 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, - 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, - 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, - 
0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, - 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, - 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, - 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, - 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, - 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, - 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, - 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, - 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, - 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, - 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, - 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, - 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, - 0x5f, 0x6a, 0x73, 0x6f, 
0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, - 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, - 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, + 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, + 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, + 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, + 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, + 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, + 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, + 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 
0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, - 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x81, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, - 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, + 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, + 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, + 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, + 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 
0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, + 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, - 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, + 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, + 0x55, 0x0a, 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, + 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x2a, 0x09, 
0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, - 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, + 0x0e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, + 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, + 0x64, 0x75, 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, + 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, + 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, + 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, + 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 
0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, + 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, + 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, + 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, + 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, + 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, + 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, + 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, + 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, + 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, + 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, + 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, + 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, + 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 
0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, - 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, - 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, - 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, - 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, - 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, - 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, - 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, - 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, - 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, - 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, - 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, - 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, - 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, - 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 
0x03, 0x52, - 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, - 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, - 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, - 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, - 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x0a, 0x0a, - 0x0a, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, - 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, - 0x3f, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, - 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, - 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, - 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, - 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, - 0x6c, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, - 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, - 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, - 0x08, 0xe8, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, - 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, - 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, - 0x6e, 0x67, 0x42, 0x2d, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, - 0x12, 0x08, 
0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, - 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, - 0x07, 0x52, 0x15, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, - 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, - 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, - 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, - 0x45, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, - 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, - 0x26, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, - 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, - 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, - 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, - 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, - 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, - 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, - 0x0a, 0x12, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, - 0x07, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, - 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, - 0x0a, 0x16, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, - 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, - 0x49, 0x43, 0x49, 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, - 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, - 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, - 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 
0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, - 0x45, 0x44, 0x10, 0x02, 0x22, 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, - 0x1f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, - 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, - 0x0a, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, - 0x55, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, - 0x0a, 0x17, 0x55, 0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, - 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, - 0x03, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, - 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, - 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, - 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, - 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, - 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, - 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, - 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9, 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, - 0x2a, 0x06, 0x08, 0xea, 0x07, 0x10, 0xeb, 0x07, 0x2a, 0x06, 0x08, 0x86, 0x4e, 0x10, 0x87, 0x4e, - 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, - 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xd9, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, - 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, - 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, - 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, - 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, - 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, - 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 
0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, - 0xe2, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, + 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, + 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, + 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, + 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 
0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, + 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 
0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, + 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, + 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, + 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, + 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, - 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, - 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, - 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, - 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, - 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, - 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 
0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, - 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, - 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, - 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, - 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, - 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, - 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, - 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, - 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, - 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, - 0x02, 0x2a, 0xa7, 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, - 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, - 0x47, 0x41, 0x43, 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, - 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, - 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, - 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, - 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, - 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 
0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, - 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, - 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, - 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, - 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, + 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, + 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, + 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, + 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, + 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, + 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, + 0x6e, 0x74, 0x5f, 
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, + 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, + 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, + 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, + 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, + 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, + 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, + 0x6d, 0x54, 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, + 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, + 0x09, 0x12, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, + 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, + 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, + 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, + 0x2d, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, + 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, + 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, + 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, + 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6, + 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, + 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, + 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, + 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xb2, 0x01, + 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, + 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, + 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, + 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, + 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, + 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, + 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, + 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 
0x43, 0x59, 0x5f, 0x52, 0x45, + 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, + 0x50, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, + 0x02, 0x22, 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, + 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, + 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, + 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, + 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, + 0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, + 0x46, 0x59, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, + 0x08, 0x01, 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, + 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, + 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, + 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, + 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, + 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, + 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, + 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, + 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, + 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, + 0x10, 0xe8, 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 
0x2e, + 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, + 0x62, 0x6c, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, + 0x69, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x52, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, + 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, + 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, + 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, + 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, + 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 
0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, + 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, + 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, + 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, + 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, + 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, + 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, + 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, + 0x10, 0x02, 0x2a, 0xa7, 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, + 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, + 0x45, 0x47, 0x41, 0x43, 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, + 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, + 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, + 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, + 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, + 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 
0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, + 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, + 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, + 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, + 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, + 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, } var ( @@ -5227,7 +5243,7 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33) -var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ +var file_google_protobuf_descriptor_proto_goTypes = []any{ (Edition)(0), // 0: google.protobuf.Edition (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type @@ -5329,38 +5345,39 @@ var file_google_protobuf_descriptor_proto_depIdxs = []int32{ 36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet 35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption 36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 49: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 50: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 51: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 9, // 52: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 36, // 53: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 54: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 46, // 55: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 10, // 56: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence - 11, // 57: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType - 12, // 58: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> 
google.protobuf.FeatureSet.RepeatedFieldEncoding - 13, // 59: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation - 14, // 60: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding - 15, // 61: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat - 47, // 62: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - 0, // 63: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition - 0, // 64: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition - 48, // 65: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 49, // 66: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 20, // 67: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 0, // 68: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition - 0, // 69: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition - 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition - 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition - 0, // 72: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition - 36, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet - 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet - 16, // 75: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 76, // [76:76] is the sub-list for method output_type - 76, // [76:76] is the sub-list for method input_type - 76, // [76:76] is the sub-list for extension type_name - 76, // [76:76] is the sub-list for extension extendee - 0, // [0:76] is the sub-list for field type_name + 45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence + 11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType + 12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding + 13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> 
google.protobuf.FeatureSet.Utf8Validation + 14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding + 15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat + 47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition + 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition + 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition + 0, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet + 36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet + 16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 77, // [77:77] is the sub-list for method output_type + 77, // [77:77] is the sub-list for method input_type + 77, // [77:77] is the sub-list for extension type_name + 77, // [77:77] is the sub-list for extension extendee + 0, // [0:77] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -5369,7 +5386,7 @@ func file_google_protobuf_descriptor_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*FileDescriptorSet); i { case 0: return &v.state @@ -5381,7 +5398,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*FileDescriptorProto); i { case 0: return &v.state @@ -5393,7 +5410,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto); i { case 0: return &v.state @@ -5405,7 +5422,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any { 
switch v := v.(*ExtensionRangeOptions); i { case 0: return &v.state @@ -5419,7 +5436,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*FieldDescriptorProto); i { case 0: return &v.state @@ -5431,7 +5448,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*OneofDescriptorProto); i { case 0: return &v.state @@ -5443,7 +5460,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*EnumDescriptorProto); i { case 0: return &v.state @@ -5455,7 +5472,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*EnumValueDescriptorProto); i { case 0: return &v.state @@ -5467,7 +5484,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*ServiceDescriptorProto); i { case 0: return &v.state @@ -5479,7 +5496,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*MethodDescriptorProto); i { case 0: return &v.state @@ -5491,7 +5508,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*FileOptions); i { case 0: return &v.state @@ -5505,7 +5522,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*MessageOptions); i { case 0: return &v.state @@ -5519,7 +5536,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*FieldOptions); i { case 0: return &v.state @@ -5533,7 +5550,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*OneofOptions); i { case 0: return &v.state @@ -5547,7 +5564,7 @@ 
func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*EnumOptions); i { case 0: return &v.state @@ -5561,7 +5578,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*EnumValueOptions); i { case 0: return &v.state @@ -5575,7 +5592,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*ServiceOptions); i { case 0: return &v.state @@ -5589,7 +5606,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any { switch v := v.(*MethodOptions); i { case 0: return &v.state @@ -5603,7 +5620,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any { switch v := v.(*UninterpretedOption); i { case 0: return &v.state @@ -5615,7 +5632,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any { switch v := v.(*FeatureSet); i { case 0: return &v.state @@ -5629,7 +5646,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any { switch v := v.(*FeatureSetDefaults); i { case 0: return &v.state @@ -5641,7 +5658,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any { switch v := v.(*SourceCodeInfo); i { case 0: return &v.state @@ -5653,7 +5670,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any { switch v := v.(*GeneratedCodeInfo); i { case 0: return &v.state @@ -5665,7 +5682,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto_ExtensionRange); i { case 0: return &v.state @@ -5677,7 +5694,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - 
file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto_ReservedRange); i { case 0: return &v.state @@ -5689,7 +5706,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any { switch v := v.(*ExtensionRangeOptions_Declaration); i { case 0: return &v.state @@ -5701,7 +5718,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any { switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { case 0: return &v.state @@ -5713,7 +5730,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any { switch v := v.(*FieldOptions_EditionDefault); i { case 0: return &v.state @@ -5725,7 +5742,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any { switch v := v.(*FieldOptions_FeatureSupport); i { case 0: return &v.state @@ -5737,7 +5754,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any { switch v := v.(*UninterpretedOption_NamePart); i { case 0: return &v.state @@ -5749,7 +5766,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any { switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { case 0: return &v.state @@ -5761,7 +5778,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any { switch v := v.(*SourceCodeInfo_Location); i { case 0: return &v.state @@ -5773,7 +5790,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any { switch v := v.(*GeneratedCodeInfo_Annotation); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index b0df3fb3340..a2ca940c50f 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -90,27 +90,27 @@ var 
file_google_protobuf_go_features_proto_rawDesc = []byte{ 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc9, 0x01, 0x0a, 0x0a, 0x47, 0x6f, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xba, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x47, 0x6f, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, - 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x7d, 0x88, - 0x01, 0x01, 0x98, 0x01, 0x06, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, 0x75, 0x65, 0x18, 0x84, - 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, 0xe7, 0x07, 0xb2, 0x01, - 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65, 0x20, 0x6c, 0x65, 0x67, - 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x4a, 0x53, 0x4f, - 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x65, 0x20, - 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, 0x20, 0x66, 0x75, 0x74, - 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x17, 0x6c, 0x65, - 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x4a, 0x73, 0x6f, - 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, - 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, - 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x70, 0x62, + 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01, + 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, + 0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, + 0xe7, 0x07, 0xb2, 0x01, 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65, + 0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, + 0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, + 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, + 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, + 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 
0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, + 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, } var ( @@ -126,7 +126,7 @@ func file_google_protobuf_go_features_proto_rawDescGZIP() []byte { } var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_go_features_proto_goTypes = []interface{}{ +var file_google_protobuf_go_features_proto_goTypes = []any{ (*GoFeatures)(nil), // 0: pb.GoFeatures (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet } @@ -146,7 +146,7 @@ func file_google_protobuf_go_features_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*GoFeatures); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 9de51be5403..7172b43d383 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -445,7 +445,7 @@ func file_google_protobuf_any_proto_rawDescGZIP() []byte { } var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_any_proto_goTypes = []interface{}{ +var file_google_protobuf_any_proto_goTypes = []any{ (*Any)(nil), // 0: google.protobuf.Any } var file_google_protobuf_any_proto_depIdxs = []int32{ @@ -462,7 +462,7 @@ func file_google_protobuf_any_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Any); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index df709a8dd4c..1b71bcd910a 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -323,7 +323,7 @@ func file_google_protobuf_duration_proto_rawDescGZIP() []byte { } var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_duration_proto_goTypes = []interface{}{ +var file_google_protobuf_duration_proto_goTypes = []any{ (*Duration)(nil), // 0: google.protobuf.Duration } var file_google_protobuf_duration_proto_depIdxs = []int32{ @@ -340,7 +340,7 @@ func file_google_protobuf_duration_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := 
v.(*Duration); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go index 9a7277ba394..d87b4fb8281 100644 --- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -115,7 +115,7 @@ func file_google_protobuf_empty_proto_rawDescGZIP() []byte { } var file_google_protobuf_empty_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_empty_proto_goTypes = []interface{}{ +var file_google_protobuf_empty_proto_goTypes = []any{ (*Empty)(nil), // 0: google.protobuf.Empty } var file_google_protobuf_empty_proto_depIdxs = []int32{ @@ -132,7 +132,7 @@ func file_google_protobuf_empty_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Empty); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index e8789cb331e..ac1e91bb6dd 100644 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -537,7 +537,7 @@ func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte { } var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_field_mask_proto_goTypes = []interface{}{ +var file_google_protobuf_field_mask_proto_goTypes = []any{ (*FieldMask)(nil), // 0: google.protobuf.FieldMask } var file_google_protobuf_field_mask_proto_depIdxs = []int32{ @@ -554,7 +554,7 @@ func file_google_protobuf_field_mask_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*FieldMask); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go index d2bac8b88ea..d45361cbc72 100644 --- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -49,11 +49,11 @@ // The standard Go "encoding/json" package has functionality to serialize // arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and // ListValue.AsSlice methods can convert the protobuf message representation into -// a form represented by interface{}, map[string]interface{}, and []interface{}. +// a form represented by any, map[string]any, and []any. // This form can be used with other packages that operate on such data structures // and also directly with the standard json package. // -// In order to convert the interface{}, map[string]interface{}, and []interface{} +// In order to convert the any, map[string]any, and []any // forms back as Value, Struct, and ListValue messages, use the NewStruct, // NewList, and NewValue constructor functions. 
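For readers skimming the struct.pb.go changes above: the updated doc comments describe a round trip between protobuf Value messages and plain Go values. A minimal illustrative sketch of that round trip (a standalone program, not part of the vendored diff, using only the structpb API documented in the hunk above):

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// NewValue converts a general-purpose Go value (here a map[string]any,
	// stored as a StructValue) into a *structpb.Value.
	v, err := structpb.NewValue(map[string]any{
		"service":  "query",
		"replicas": 3, // ints are stored as NumberValue (float64)
		"ready":    true,
	})
	if err != nil {
		panic(err)
	}
	// AsInterface converts back to the any / map[string]any / []any form.
	m := v.AsInterface().(map[string]any)
	fmt.Println(m["service"], m["replicas"], m["ready"]) // query 3 true
}

Note that m["replicas"] comes back as float64(3), since Value stores every number as a NumberValue; this is the precision caveat the NewValue documentation above calls out for int64 and uint64.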
// @@ -88,28 +88,28 @@ // // To construct a Value message representing the above JSON object: // -// m, err := structpb.NewValue(map[string]interface{}{ +// m, err := structpb.NewValue(map[string]any{ // "firstName": "John", // "lastName": "Smith", // "isAlive": true, // "age": 27, -// "address": map[string]interface{}{ +// "address": map[string]any{ // "streetAddress": "21 2nd Street", // "city": "New York", // "state": "NY", // "postalCode": "10021-3100", // }, -// "phoneNumbers": []interface{}{ -// map[string]interface{}{ +// "phoneNumbers": []any{ +// map[string]any{ // "type": "home", // "number": "212 555-1234", // }, -// map[string]interface{}{ +// map[string]any{ // "type": "office", // "number": "646 555-4567", // }, // }, -// "children": []interface{}{}, +// "children": []any{}, // "spouse": nil, // }) // if err != nil { @@ -197,7 +197,7 @@ type Struct struct { // NewStruct constructs a Struct from a general-purpose Go map. // The map keys must be valid UTF-8. // The map values are converted using NewValue. -func NewStruct(v map[string]interface{}) (*Struct, error) { +func NewStruct(v map[string]any) (*Struct, error) { x := &Struct{Fields: make(map[string]*Value, len(v))} for k, v := range v { if !utf8.ValidString(k) { @@ -214,9 +214,9 @@ func NewStruct(v map[string]interface{}) (*Struct, error) { // AsMap converts x to a general-purpose Go map. // The map values are converted by calling Value.AsInterface. -func (x *Struct) AsMap() map[string]interface{} { +func (x *Struct) AsMap() map[string]any { f := x.GetFields() - vs := make(map[string]interface{}, len(f)) + vs := make(map[string]any, len(f)) for k, v := range f { vs[k] = v.AsInterface() } @@ -306,13 +306,13 @@ type Value struct { // ║ float32, float64 │ stored as NumberValue ║ // ║ string │ stored as StringValue; must be valid UTF-8 ║ // ║ []byte │ stored as StringValue; base64-encoded ║ -// ║ map[string]interface{} │ stored as StructValue ║ -// ║ []interface{} │ stored as ListValue ║ +// ║ map[string]any │ stored as StructValue ║ +// ║ []any │ stored as ListValue ║ // ╚════════════════════════╧════════════════════════════════════════════╝ // // When converting an int64 or uint64 to a NumberValue, numeric precision loss // is possible since they are stored as a float64. -func NewValue(v interface{}) (*Value, error) { +func NewValue(v any) (*Value, error) { switch v := v.(type) { case nil: return NewNullValue(), nil @@ -342,13 +342,13 @@ func NewValue(v interface{}) (*Value, error) { case []byte: s := base64.StdEncoding.EncodeToString(v) return NewStringValue(s), nil - case map[string]interface{}: + case map[string]any: v2, err := NewStruct(v) if err != nil { return nil, err } return NewStructValue(v2), nil - case []interface{}: + case []any: v2, err := NewList(v) if err != nil { return nil, err @@ -396,7 +396,7 @@ func NewListValue(v *ListValue) *Value { // // Floating-point values (i.e., "NaN", "Infinity", and "-Infinity") are // converted as strings to remain compatible with MarshalJSON. -func (x *Value) AsInterface() interface{} { +func (x *Value) AsInterface() any { switch v := x.GetKind().(type) { case *Value_NumberValue: if v != nil { @@ -580,7 +580,7 @@ type ListValue struct { // NewList constructs a ListValue from a general-purpose Go slice. // The slice elements are converted using NewValue. 
-func NewList(v []interface{}) (*ListValue, error) { +func NewList(v []any) (*ListValue, error) { x := &ListValue{Values: make([]*Value, len(v))} for i, v := range v { var err error @@ -594,9 +594,9 @@ func NewList(v []interface{}) (*ListValue, error) { // AsSlice converts x to a general-purpose Go slice. // The slice elements are converted by calling Value.AsInterface. -func (x *ListValue) AsSlice() []interface{} { +func (x *ListValue) AsSlice() []any { vals := x.GetValues() - vs := make([]interface{}, len(vals)) + vs := make([]any, len(vals)) for i, v := range vals { vs[i] = v.AsInterface() } @@ -716,7 +716,7 @@ func file_google_protobuf_struct_proto_rawDescGZIP() []byte { var file_google_protobuf_struct_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_protobuf_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_google_protobuf_struct_proto_goTypes = []interface{}{ +var file_google_protobuf_struct_proto_goTypes = []any{ (NullValue)(0), // 0: google.protobuf.NullValue (*Struct)(nil), // 1: google.protobuf.Struct (*Value)(nil), // 2: google.protobuf.Value @@ -743,7 +743,7 @@ func file_google_protobuf_struct_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Struct); i { case 0: return &v.state @@ -755,7 +755,7 @@ func file_google_protobuf_struct_proto_init() { return nil } } - file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Value); i { case 0: return &v.state @@ -767,7 +767,7 @@ func file_google_protobuf_struct_proto_init() { return nil } } - file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ListValue); i { case 0: return &v.state @@ -780,7 +780,7 @@ func file_google_protobuf_struct_proto_init() { } } } - file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{ (*Value_NullValue)(nil), (*Value_NumberValue)(nil), (*Value_StringValue)(nil), diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 81511a3363e..83a5a645b08 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -332,7 +332,7 @@ func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { } var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_timestamp_proto_goTypes = []interface{}{ +var file_google_protobuf_timestamp_proto_goTypes = []any{ (*Timestamp)(nil), // 0: google.protobuf.Timestamp } var file_google_protobuf_timestamp_proto_depIdxs = []int32{ @@ -349,7 +349,7 @@ func file_google_protobuf_timestamp_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Timestamp); i { case 0: return &v.state diff --git 
a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go index 762a87130f8..e473f826aa3 100644 --- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -605,7 +605,7 @@ func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte { } var file_google_protobuf_wrappers_proto_msgTypes = make([]protoimpl.MessageInfo, 9) -var file_google_protobuf_wrappers_proto_goTypes = []interface{}{ +var file_google_protobuf_wrappers_proto_goTypes = []any{ (*DoubleValue)(nil), // 0: google.protobuf.DoubleValue (*FloatValue)(nil), // 1: google.protobuf.FloatValue (*Int64Value)(nil), // 2: google.protobuf.Int64Value @@ -630,7 +630,7 @@ func file_google_protobuf_wrappers_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*DoubleValue); i { case 0: return &v.state @@ -642,7 +642,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*FloatValue); i { case 0: return &v.state @@ -654,7 +654,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*Int64Value); i { case 0: return &v.state @@ -666,7 +666,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*UInt64Value); i { case 0: return &v.state @@ -678,7 +678,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*Int32Value); i { case 0: return &v.state @@ -690,7 +690,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*UInt32Value); i { case 0: return &v.state @@ -702,7 +702,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*BoolValue); i { case 0: return &v.state @@ -714,7 +714,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*StringValue); i { case 0: return &v.state @@ -726,7 +726,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - 
file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*BytesValue); i { case 0: return &v.state diff --git a/vendor/modules.txt b/vendor/modules.txt index daed90a44d1..7dd349d5945 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -93,9 +93,6 @@ github.com/AzureAD/microsoft-authentication-library-for-go/apps/public # github.com/IBM/sarama v1.43.2 ## explicit; go 1.19 github.com/IBM/sarama -# github.com/VividCortex/gohistogram v1.0.0 -## explicit -github.com/VividCortex/gohistogram # github.com/alecthomas/kong v0.8.0 ## explicit; go 1.18 github.com/alecthomas/kong @@ -284,12 +281,6 @@ github.com/felixge/httpsnoop # github.com/fsnotify/fsnotify v1.7.0 ## explicit; go 1.17 github.com/fsnotify/fsnotify -# github.com/go-kit/kit v0.13.0 -## explicit; go 1.17 -github.com/go-kit/kit/metrics -github.com/go-kit/kit/metrics/expvar -github.com/go-kit/kit/metrics/generic -github.com/go-kit/kit/metrics/internal/lv # github.com/go-kit/log v0.2.1 ## explicit; go 1.17 github.com/go-kit/log @@ -297,7 +288,7 @@ github.com/go-kit/log/level # github.com/go-logfmt/logfmt v0.6.0 ## explicit; go 1.17 github.com/go-logfmt/logfmt -# github.com/go-logr/logr v1.4.1 +# github.com/go-logr/logr v1.4.2 ## explicit; go 1.18 github.com/go-logr/logr github.com/go-logr/logr/funcr @@ -451,15 +442,15 @@ github.com/google/uuid ## explicit; go 1.19 github.com/googleapis/enterprise-certificate-proxy/client github.com/googleapis/enterprise-certificate-proxy/client/util -# github.com/googleapis/gax-go/v2 v2.12.2 +# github.com/googleapis/gax-go/v2 v2.12.3 ## explicit; go 1.19 github.com/googleapis/gax-go/v2 github.com/googleapis/gax-go/v2/apierror github.com/googleapis/gax-go/v2/apierror/internal/proto github.com/googleapis/gax-go/v2/callctx github.com/googleapis/gax-go/v2/internal -# github.com/gorilla/handlers v1.5.1 -## explicit; go 1.14 +# github.com/gorilla/handlers v1.5.2 +## explicit; go 1.20 github.com/gorilla/handlers # github.com/gorilla/mux v1.8.1 ## explicit; go 1.20 @@ -527,14 +518,10 @@ github.com/grafana/regexp/syntax # github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 ## explicit; go 1.14 github.com/grpc-ecosystem/go-grpc-middleware -github.com/grpc-ecosystem/go-grpc-middleware/logging -github.com/grpc-ecosystem/go-grpc-middleware/logging/settable -github.com/grpc-ecosystem/go-grpc-middleware/logging/zap -github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap -github.com/grpc-ecosystem/go-grpc-middleware/retry -github.com/grpc-ecosystem/go-grpc-middleware/tags -github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils -github.com/grpc-ecosystem/go-grpc-middleware/util/metautils +# github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 +## explicit; go 1.19 +github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry +github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata # github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 ## explicit; go 1.20 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule @@ -543,7 +530,7 @@ github.com/grpc-ecosystem/grpc-gateway/v2/utilities # github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 ## explicit github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc -# github.com/hashicorp/consul/api v1.25.1 +# github.com/hashicorp/consul/api v1.28.2 ## explicit; go 1.19 github.com/hashicorp/consul/api # github.com/hashicorp/errwrap v1.1.0 @@ -618,8 +605,8 @@ github.com/iancoleman/strcase # 
github.com/inconshreveable/mousetrap v1.1.0 ## explicit; go 1.18 github.com/inconshreveable/mousetrap -# github.com/jaegertracing/jaeger v1.57.0 -## explicit; go 1.21 +# github.com/jaegertracing/jaeger v1.59.0 +## explicit; go 1.21.0 github.com/jaegertracing/jaeger/cmd/agent/app/configmanager github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/grpc github.com/jaegertracing/jaeger/cmd/agent/app/customtransport @@ -631,10 +618,9 @@ github.com/jaegertracing/jaeger/cmd/agent/app/servers github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp github.com/jaegertracing/jaeger/cmd/all-in-one/setupcontext github.com/jaegertracing/jaeger/cmd/collector/app/sampling/model -github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore +github.com/jaegertracing/jaeger/cmd/collector/app/sampling/samplingstrategy github.com/jaegertracing/jaeger/cmd/collector/app/sanitizer/zipkin github.com/jaegertracing/jaeger/cmd/internal/flags -github.com/jaegertracing/jaeger/internal/metrics/expvar github.com/jaegertracing/jaeger/internal/metrics/metricsbuilder github.com/jaegertracing/jaeger/internal/metrics/prometheus github.com/jaegertracing/jaeger/model @@ -654,11 +640,7 @@ github.com/jaegertracing/jaeger/pkg/healthcheck github.com/jaegertracing/jaeger/pkg/metrics github.com/jaegertracing/jaeger/pkg/netutils github.com/jaegertracing/jaeger/pkg/recoveryhandler -github.com/jaegertracing/jaeger/pkg/tenancy github.com/jaegertracing/jaeger/pkg/version -github.com/jaegertracing/jaeger/plugin -github.com/jaegertracing/jaeger/plugin/storage/grpc -github.com/jaegertracing/jaeger/plugin/storage/grpc/config github.com/jaegertracing/jaeger/plugin/storage/grpc/shared github.com/jaegertracing/jaeger/ports github.com/jaegertracing/jaeger/proto-gen/api_v2 @@ -742,7 +724,7 @@ github.com/jsternberg/zap-logfmt # github.com/julienschmidt/httprouter v1.3.0 ## explicit; go 1.7 github.com/julienschmidt/httprouter -# github.com/klauspost/compress v1.17.8 +# github.com/klauspost/compress v1.17.9 ## explicit; go 1.20 github.com/klauspost/compress github.com/klauspost/compress/flate @@ -836,7 +818,9 @@ github.com/modern-go/reflect2 # github.com/mostynb/go-grpc-compression v1.2.3 ## explicit; go 1.17 github.com/mostynb/go-grpc-compression/internal/snappy +github.com/mostynb/go-grpc-compression/internal/zstd github.com/mostynb/go-grpc-compression/nonclobbering/snappy +github.com/mostynb/go-grpc-compression/nonclobbering/zstd # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 ## explicit github.com/munnerz/goautoneg @@ -852,7 +836,7 @@ github.com/oklog/ulid # github.com/olekukonko/tablewriter v0.0.5 ## explicit; go 1.12 github.com/olekukonko/tablewriter -# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.102.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.104.0 ## explicit; go 1.21.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/metadata @@ -860,10 +844,10 @@ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter ## explicit; go 1.21.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.102.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/common 
v0.104.0 ## explicit; go 1.21.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/localhostgate -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.104.0 ## explicit; go 1.21.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/idutils github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/occonventions @@ -886,14 +870,14 @@ github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filter github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/regexp github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/strict github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterspan -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.102.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.104.0 ## explicit; go 1.21.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/awsmsk # github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.102.0 ## explicit; go 1.21.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent -# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.102.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.104.0 ## explicit; go 1.21.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal # github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.97.0 @@ -908,19 +892,19 @@ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottl github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/internal/ottlcommon github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs -# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.102.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.104.0 ## explicit; go 1.21.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil -# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.102.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.104.0 ## explicit; go 1.21.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure -# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.104.0 ## explicit; go 1.21.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger # github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.102.0 ## explicit; go 1.21.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus -# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.102.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.104.0 ## explicit; go 1.21.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/internal/zipkin 
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv1 @@ -929,11 +913,11 @@ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/ ## explicit; go 1.21 github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.102.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.104.0 ## explicit; go 1.21.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.102.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.104.0 ## explicit; go 1.21.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata @@ -943,7 +927,7 @@ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusrec github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/metadata github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/ocmetrics github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/octrace -# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.102.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.104.0 ## explicit; go 1.21.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/internal/metadata @@ -991,7 +975,7 @@ github.com/parquet-go/parquet-go/internal/bytealg github.com/parquet-go/parquet-go/internal/debug github.com/parquet-go/parquet-go/internal/unsafecast github.com/parquet-go/parquet-go/sparse -# github.com/pelletier/go-toml/v2 v2.1.0 +# github.com/pelletier/go-toml/v2 v2.2.2 ## explicit; go 1.16 github.com/pelletier/go-toml/v2 github.com/pelletier/go-toml/v2/internal/characters @@ -1038,11 +1022,10 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint/validations # github.com/prometheus/client_model v0.6.1 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.53.0 +# github.com/prometheus/common v0.55.0 ## explicit; go 1.20 github.com/prometheus/common/config github.com/prometheus/common/expfmt -github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model github.com/prometheus/common/route github.com/prometheus/common/version @@ -1052,8 +1035,8 @@ github.com/prometheus/common/sigv4 # github.com/prometheus/exporter-toolkit v0.10.1-0.20230714054209-2f4150c63f97 ## explicit; go 1.18 github.com/prometheus/exporter-toolkit/web -# github.com/prometheus/procfs v0.15.0 -## explicit; go 1.21 +# github.com/prometheus/procfs v0.15.1 +## explicit; go 1.20 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util @@ -1148,14 +1131,14 @@ github.com/segmentio/fasthash/fnv1a # github.com/sercand/kuberesolver/v5 v5.1.1 ## explicit; go 1.18 github.com/sercand/kuberesolver/v5 -# 
github.com/shirou/gopsutil/v3 v3.24.4 -## explicit; go 1.15 -github.com/shirou/gopsutil/v3/common -github.com/shirou/gopsutil/v3/cpu -github.com/shirou/gopsutil/v3/internal/common -github.com/shirou/gopsutil/v3/mem -github.com/shirou/gopsutil/v3/net -github.com/shirou/gopsutil/v3/process +# github.com/shirou/gopsutil/v4 v4.24.5 +## explicit; go 1.18 +github.com/shirou/gopsutil/v4/common +github.com/shirou/gopsutil/v4/cpu +github.com/shirou/gopsutil/v4/internal/common +github.com/shirou/gopsutil/v4/mem +github.com/shirou/gopsutil/v4/net +github.com/shirou/gopsutil/v4/process # github.com/shoenig/go-m1cpu v0.1.6 ## explicit; go 1.20 github.com/shoenig/go-m1cpu @@ -1182,14 +1165,14 @@ github.com/spf13/afero/mem # github.com/spf13/cast v1.6.0 ## explicit; go 1.19 github.com/spf13/cast -# github.com/spf13/cobra v1.8.0 +# github.com/spf13/cobra v1.8.1 ## explicit; go 1.15 github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/spf13/viper v1.18.2 -## explicit; go 1.18 +# github.com/spf13/viper v1.19.0 +## explicit; go 1.20 github.com/spf13/viper github.com/spf13/viper/internal/encoding github.com/spf13/viper/internal/encoding/dotenv @@ -1272,7 +1255,7 @@ github.com/yuin/gopher-lua/pm # github.com/yusufpapurcu/wmi v1.2.4 ## explicit; go 1.16 github.com/yusufpapurcu/wmi -# go.etcd.io/etcd/api/v3 v3.5.10 +# go.etcd.io/etcd/api/v3 v3.5.12 ## explicit; go 1.20 go.etcd.io/etcd/api/v3/authpb go.etcd.io/etcd/api/v3/etcdserverpb @@ -1280,7 +1263,7 @@ go.etcd.io/etcd/api/v3/membershippb go.etcd.io/etcd/api/v3/mvccpb go.etcd.io/etcd/api/v3/v3rpc/rpctypes go.etcd.io/etcd/api/v3/version -# go.etcd.io/etcd/client/pkg/v3 v3.5.10 +# go.etcd.io/etcd/client/pkg/v3 v3.5.12 ## explicit; go 1.20 go.etcd.io/etcd/client/pkg/v3/fileutil go.etcd.io/etcd/client/pkg/v3/logutil @@ -1288,7 +1271,7 @@ go.etcd.io/etcd/client/pkg/v3/systemd go.etcd.io/etcd/client/pkg/v3/tlsutil go.etcd.io/etcd/client/pkg/v3/transport go.etcd.io/etcd/client/pkg/v3/types -# go.etcd.io/etcd/client/v3 v3.5.10 +# go.etcd.io/etcd/client/v3 v3.5.12 ## explicit; go 1.20 go.etcd.io/etcd/client/v3 go.etcd.io/etcd/client/v3/credentials @@ -1323,84 +1306,64 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# go.opentelemetry.io/collector v0.102.1 +# go.opentelemetry.io/collector v0.104.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/client go.opentelemetry.io/collector/internal/fanoutconsumer +go.opentelemetry.io/collector/internal/featuregates go.opentelemetry.io/collector/internal/httphelper go.opentelemetry.io/collector/internal/localhostgate go.opentelemetry.io/collector/internal/obsreportconfig go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics go.opentelemetry.io/collector/internal/sharedcomponent -# go.opentelemetry.io/collector/component v0.102.1 +# go.opentelemetry.io/collector/component v0.104.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/component go.opentelemetry.io/collector/component/componenttest -# go.opentelemetry.io/collector/config/configauth v0.102.1 +# go.opentelemetry.io/collector/config/configauth v0.104.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/config/configauth -# go.opentelemetry.io/collector/config/configcompression v1.9.0 +# go.opentelemetry.io/collector/config/configcompression v1.11.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/config/configcompression -# go.opentelemetry.io/collector/config/configgrpc v0.102.1 +# 
go.opentelemetry.io/collector/config/configgrpc v0.104.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/config/configgrpc -go.opentelemetry.io/collector/config/configgrpc/internal -# go.opentelemetry.io/collector/config/confighttp v0.102.1 +# go.opentelemetry.io/collector/config/confighttp v0.104.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/config/confighttp -# go.opentelemetry.io/collector/config/confignet v0.102.1 +# go.opentelemetry.io/collector/config/confignet v0.104.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/config/confignet -# go.opentelemetry.io/collector/config/configopaque v1.9.0 +# go.opentelemetry.io/collector/config/configopaque v1.11.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/config/configopaque -# go.opentelemetry.io/collector/config/configretry v0.102.1 +# go.opentelemetry.io/collector/config/configretry v1.11.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/config/configretry -# go.opentelemetry.io/collector/config/configtelemetry v0.102.1 +# go.opentelemetry.io/collector/config/configtelemetry v0.104.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/config/configtelemetry -# go.opentelemetry.io/collector/config/configtls v0.102.1 +# go.opentelemetry.io/collector/config/configtls v0.104.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/config/configtls -# go.opentelemetry.io/collector/config/internal v0.102.1 +# go.opentelemetry.io/collector/config/internal v0.104.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/config/internal -# go.opentelemetry.io/collector/confmap v0.102.1 +# go.opentelemetry.io/collector/confmap v0.104.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/confmap -go.opentelemetry.io/collector/confmap/internal/envvar +go.opentelemetry.io/collector/confmap/internal go.opentelemetry.io/collector/confmap/internal/mapstructure -go.opentelemetry.io/collector/confmap/provider/internal -go.opentelemetry.io/collector/confmap/provider/internal/configurablehttpprovider -# go.opentelemetry.io/collector/confmap/converter/expandconverter v0.102.1 -## explicit; go 1.21.0 -go.opentelemetry.io/collector/confmap/converter/expandconverter -# go.opentelemetry.io/collector/confmap/provider/envprovider v0.102.1 -## explicit; go 1.21.0 -go.opentelemetry.io/collector/confmap/provider/envprovider -# go.opentelemetry.io/collector/confmap/provider/fileprovider v0.102.1 -## explicit; go 1.21.0 -go.opentelemetry.io/collector/confmap/provider/fileprovider -# go.opentelemetry.io/collector/confmap/provider/httpprovider v0.102.1 -## explicit; go 1.21.0 -go.opentelemetry.io/collector/confmap/provider/httpprovider -# go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.102.1 -## explicit; go 1.21.0 -go.opentelemetry.io/collector/confmap/provider/httpsprovider -# go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.102.1 -## explicit; go 1.21.0 -go.opentelemetry.io/collector/confmap/provider/yamlprovider -# go.opentelemetry.io/collector/connector v0.102.1 +# go.opentelemetry.io/collector/connector v0.104.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/connector -# go.opentelemetry.io/collector/consumer v0.102.1 +# go.opentelemetry.io/collector/consumer v0.104.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/consumer go.opentelemetry.io/collector/consumer/consumererror go.opentelemetry.io/collector/consumer/consumertest -# go.opentelemetry.io/collector/exporter v0.102.1 +# go.opentelemetry.io/collector/exporter v0.104.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/exporter 
go.opentelemetry.io/collector/exporter/exporterbatcher @@ -1410,35 +1373,37 @@ go.opentelemetry.io/collector/exporter/exporterqueue go.opentelemetry.io/collector/exporter/exportertest go.opentelemetry.io/collector/exporter/internal/experr go.opentelemetry.io/collector/exporter/internal/queue -# go.opentelemetry.io/collector/exporter/otlpexporter v0.102.1 +# go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/exporter/otlpexporter go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata -# go.opentelemetry.io/collector/extension v0.102.1 +# go.opentelemetry.io/collector/extension v0.104.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/extension go.opentelemetry.io/collector/extension/experimental/storage -# go.opentelemetry.io/collector/extension/auth v0.102.1 +# go.opentelemetry.io/collector/extension/auth v0.104.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/extension/auth -# go.opentelemetry.io/collector/featuregate v1.9.0 +# go.opentelemetry.io/collector/featuregate v1.11.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/featuregate -# go.opentelemetry.io/collector/otelcol v0.102.1 +# go.opentelemetry.io/collector/otelcol v0.104.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/otelcol go.opentelemetry.io/collector/otelcol/internal/configunmarshaler go.opentelemetry.io/collector/otelcol/internal/grpclog -# go.opentelemetry.io/collector/pdata v1.9.0 +# go.opentelemetry.io/collector/pdata v1.11.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/pdata/internal go.opentelemetry.io/collector/pdata/internal/data go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1 go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1 +go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1experimental go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1 go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1 go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1 go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1 +go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1 go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1 go.opentelemetry.io/collector/pdata/internal/json @@ -1450,18 +1415,18 @@ go.opentelemetry.io/collector/pdata/pmetric go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp go.opentelemetry.io/collector/pdata/ptrace go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp -# go.opentelemetry.io/collector/processor v0.102.1 +# go.opentelemetry.io/collector/processor v0.104.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/processor go.opentelemetry.io/collector/processor/processorhelper go.opentelemetry.io/collector/processor/processorhelper/internal/metadata -# go.opentelemetry.io/collector/receiver v0.102.1 +# go.opentelemetry.io/collector/receiver v0.104.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/receiver go.opentelemetry.io/collector/receiver/receiverhelper go.opentelemetry.io/collector/receiver/receiverhelper/internal/metadata go.opentelemetry.io/collector/receiver/receivertest -# go.opentelemetry.io/collector/receiver/otlpreceiver v0.102.1 +# go.opentelemetry.io/collector/receiver/otlpreceiver v0.104.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/receiver/otlpreceiver 
go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors @@ -1469,19 +1434,21 @@ go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metadata go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics go.opentelemetry.io/collector/receiver/otlpreceiver/internal/trace -# go.opentelemetry.io/collector/semconv v0.102.1 +# go.opentelemetry.io/collector/semconv v0.104.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/semconv/v1.13.0 go.opentelemetry.io/collector/semconv/v1.18.0 +go.opentelemetry.io/collector/semconv/v1.25.0 go.opentelemetry.io/collector/semconv/v1.6.1 go.opentelemetry.io/collector/semconv/v1.9.0 -# go.opentelemetry.io/collector/service v0.102.1 +# go.opentelemetry.io/collector/service v0.104.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/service go.opentelemetry.io/collector/service/extensions go.opentelemetry.io/collector/service/internal/capabilityconsumer go.opentelemetry.io/collector/service/internal/components go.opentelemetry.io/collector/service/internal/graph +go.opentelemetry.io/collector/service/internal/metadata go.opentelemetry.io/collector/service/internal/proctelemetry go.opentelemetry.io/collector/service/internal/resource go.opentelemetry.io/collector/service/internal/servicetelemetry @@ -1493,11 +1460,11 @@ go.opentelemetry.io/collector/service/telemetry/internal # go.opentelemetry.io/contrib/config v0.7.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/config -# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 +# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv @@ -1505,7 +1472,7 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvut # go.opentelemetry.io/contrib/propagators/b3 v1.27.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/propagators/b3 -# go.opentelemetry.io/otel v1.27.0 +# go.opentelemetry.io/otel v1.28.0 ## explicit; go 1.21 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -1519,7 +1486,9 @@ go.opentelemetry.io/otel/propagation go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.21.0 +go.opentelemetry.io/otel/semconv/v1.24.0 go.opentelemetry.io/otel/semconv/v1.25.0 +go.opentelemetry.io/otel/semconv/v1.26.0 # go.opentelemetry.io/otel/bridge/opencensus v1.27.0 ## explicit; go 1.21 go.opentelemetry.io/otel/bridge/opencensus @@ -1554,48 +1523,48 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envco go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 ## explicit; go 1.21 go.opentelemetry.io/otel/exporters/otlp/otlptrace 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 ## explicit; go 1.21 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 ## explicit; go 1.21 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry -# go.opentelemetry.io/otel/exporters/prometheus v0.49.0 +# go.opentelemetry.io/otel/exporters/prometheus v0.50.0 ## explicit; go 1.21 go.opentelemetry.io/otel/exporters/prometheus # go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 ## explicit; go 1.21 go.opentelemetry.io/otel/exporters/stdout/stdoutmetric -# go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 +# go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 ## explicit; go 1.21 go.opentelemetry.io/otel/exporters/stdout/stdouttrace -# go.opentelemetry.io/otel/metric v1.27.0 +# go.opentelemetry.io/otel/metric v1.28.0 ## explicit; go 1.21 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.27.0 +# go.opentelemetry.io/otel/sdk v1.28.0 ## explicit; go 1.21 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation -go.opentelemetry.io/otel/sdk/internal go.opentelemetry.io/otel/sdk/internal/env +go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace go.opentelemetry.io/otel/sdk/trace/tracetest -# go.opentelemetry.io/otel/sdk/metric v1.27.0 +# go.opentelemetry.io/otel/sdk/metric v1.28.0 ## explicit; go 1.21 go.opentelemetry.io/otel/sdk/metric go.opentelemetry.io/otel/sdk/metric/internal @@ -1603,12 +1572,12 @@ go.opentelemetry.io/otel/sdk/metric/internal/aggregate go.opentelemetry.io/otel/sdk/metric/internal/exemplar go.opentelemetry.io/otel/sdk/metric/internal/x go.opentelemetry.io/otel/sdk/metric/metricdata -# go.opentelemetry.io/otel/trace v1.27.0 +# go.opentelemetry.io/otel/trace v1.28.0 ## explicit; go 1.21 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/noop -# go.opentelemetry.io/proto/otlp v1.2.0 +# go.opentelemetry.io/proto/otlp v1.3.1 ## explicit; go 1.17 go.opentelemetry.io/proto/otlp/collector/metrics/v1 go.opentelemetry.io/proto/otlp/collector/trace/v1 @@ -1638,8 +1607,8 @@ go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.24.0 -## explicit; go 1.18 +# golang.org/x/crypto v0.25.0 +## explicit; go 1.20 golang.org/x/crypto/argon2 golang.org/x/crypto/bcrypt golang.org/x/crypto/blake2b @@ -1666,7 +1635,7 @@ 
golang.org/x/exp/slog/internal/buffer # golang.org/x/mod v0.17.0 ## explicit; go 1.18 golang.org/x/mod/semver -# golang.org/x/net v0.26.0 +# golang.org/x/net v0.27.0 ## explicit; go 1.18 golang.org/x/net/bpf golang.org/x/net/context @@ -1686,7 +1655,7 @@ golang.org/x/net/netutil golang.org/x/net/proxy golang.org/x/net/publicsuffix golang.org/x/net/trace -# golang.org/x/oauth2 v0.20.0 +# golang.org/x/oauth2 v0.21.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/authhandler @@ -1703,7 +1672,7 @@ golang.org/x/oauth2/jwt ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.21.0 +# golang.org/x/sys v0.22.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/unix @@ -1778,7 +1747,7 @@ gonum.org/v1/gonum/lapack gonum.org/v1/gonum/lapack/gonum gonum.org/v1/gonum/lapack/lapack64 gonum.org/v1/gonum/mat -# google.golang.org/api v0.169.0 +# google.golang.org/api v0.171.0 ## explicit; go 1.19 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport @@ -1802,18 +1771,18 @@ google.golang.org/api/transport/http/internal/propagation ## explicit; go 1.19 google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr -# google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 +# google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 ## explicit; go 1.20 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 ## explicit; go 1.20 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.64.0 -## explicit; go 1.19 +# google.golang.org/grpc v1.65.0 +## explicit; go 1.21 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -1822,6 +1791,7 @@ google.golang.org/grpc/balancer/base google.golang.org/grpc/balancer/grpclb google.golang.org/grpc/balancer/grpclb/grpc_lb_v1 google.golang.org/grpc/balancer/grpclb/state +google.golang.org/grpc/balancer/pickfirst google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 google.golang.org/grpc/channelz @@ -1856,7 +1826,6 @@ google.golang.org/grpc/internal/credentials google.golang.org/grpc/internal/envconfig google.golang.org/grpc/internal/googlecloud google.golang.org/grpc/internal/grpclog -google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil google.golang.org/grpc/internal/idle @@ -1888,8 +1857,8 @@ google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap google.golang.org/grpc/test/bufconn -# google.golang.org/protobuf v1.34.1 -## explicit; go 1.17 +# google.golang.org/protobuf v1.34.2 +## explicit; go 1.20 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext
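A note on the pervasive interface{} -> any rewrites in the generated protobuf code earlier in this patch: since Go 1.18, any is a predeclared alias for interface{} (type any = interface{}), so those hunks are purely cosmetic and change no behavior or API compatibility. A minimal sketch of the equivalence (illustrative, not part of the vendored sources):

package main

import (
	"fmt"
	"reflect"
)

// takesAny is declared with the alias spelling...
func takesAny(v any) string { return fmt.Sprintf("%T", v) }

func main() {
	// ...but accepts a value declared as interface{} with no conversion,
	// because the two spellings name the identical type.
	var v interface{} = 42
	fmt.Println(takesAny(v)) // int

	// reflect agrees: the two interface types are one and the same.
	fmt.Println(reflect.TypeOf((*any)(nil)).Elem() ==
		reflect.TypeOf((*interface{})(nil)).Elem()) // true
}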