From 7306683306c971eafc937df23dce5b3ed2569f20 Mon Sep 17 00:00:00 2001 From: Bugra Gedik Date: Thu, 1 Aug 2024 17:15:54 +0000 Subject: [PATCH 01/65] Add environment variable for pod name Signed-off-by: Bugra Gedik --- .../pluginmachinery/flytek8s/k8s_resource_adds.go | 8 ++++++++ .../pluginmachinery/flytek8s/k8s_resource_adds_test.go | 10 +++++----- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go index 34e13adfa8..b0025fdddf 100644 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go @@ -60,6 +60,14 @@ func GetExecutionEnvVars(id pluginsCore.TaskExecutionID, consoleURL string) []v1 Name: "FLYTE_INTERNAL_EXECUTION_DOMAIN", Value: nodeExecutionID.Domain, }, + { + Name: "FLYTE_INTERNAL_POD_NAME", + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, { Name: "FLYTE_ATTEMPT_NUMBER", Value: attemptNumber, diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds_test.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds_test.go index 4015a8d9b8..fd4828fbbd 100644 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds_test.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds_test.go @@ -27,13 +27,13 @@ func TestGetExecutionEnvVars(t *testing.T) { }{ { "no-console-url", - 12, + 13, "", nil, }, { "with-console-url", - 13, + 14, "scheme://host/path", &v12.EnvVar{ Name: "FLYTE_EXECUTION_URL", @@ -42,7 +42,7 @@ func TestGetExecutionEnvVars(t *testing.T) { }, { "with-console-url-ending-in-single-slash", - 13, + 14, "scheme://host/path/", &v12.EnvVar{ Name: "FLYTE_EXECUTION_URL", @@ -51,7 +51,7 @@ func TestGetExecutionEnvVars(t *testing.T) { }, { "with-console-url-ending-in-multiple-slashes", - 13, + 14, "scheme://host/path////", &v12.EnvVar{ Name: "FLYTE_EXECUTION_URL", @@ -63,7 +63,7 @@ func TestGetExecutionEnvVars(t *testing.T) { envVars := GetExecutionEnvVars(mock, tt.consoleURL) assert.Len(t, envVars, tt.expectedEnvVars) if tt.expectedEnvVar != nil { - assert.True(t, proto.Equal(&envVars[4], tt.expectedEnvVar)) + assert.True(t, proto.Equal(&envVars[5], tt.expectedEnvVar)) } } } From dd2957b7696d6ae12317a92cff9136f7f8892843 Mon Sep 17 00:00:00 2001 From: Jason Parraga Date: Thu, 1 Aug 2024 15:19:05 -0700 Subject: [PATCH 02/65] [flyteadmin] Refactor panic recovery into middleware (#5546) * Refactor panic handling to middleware Signed-off-by: Jason Parraga * Remove registration of old panicCounter Signed-off-by: Jason Parraga * Add test coverage Signed-off-by: Jason Parraga --------- Signed-off-by: Jason Parraga Signed-off-by: Bugra Gedik --- flyteadmin/pkg/rpc/adminservice/attributes.go | 10 --- flyteadmin/pkg/rpc/adminservice/base.go | 13 --- flyteadmin/pkg/rpc/adminservice/base_test.go | 40 --------- .../rpc/adminservice/description_entity.go | 2 - flyteadmin/pkg/rpc/adminservice/execution.go | 10 --- .../pkg/rpc/adminservice/launch_plan.go | 7 -- flyteadmin/pkg/rpc/adminservice/metrics.go | 7 +- .../middleware/recovery_interceptor.go | 61 +++++++++++++ .../middleware/recovery_interceptor_test.go | 90 +++++++++++++++++++ .../pkg/rpc/adminservice/named_entity.go | 3 - .../pkg/rpc/adminservice/node_execution.go | 6 -- flyteadmin/pkg/rpc/adminservice/project.go | 5 -- flyteadmin/pkg/rpc/adminservice/task.go | 4 - 
.../pkg/rpc/adminservice/task_execution.go | 4 - flyteadmin/pkg/rpc/adminservice/version.go | 1 - flyteadmin/pkg/rpc/adminservice/workflow.go | 4 - flyteadmin/pkg/server/service.go | 29 +++++- 17 files changed, 177 insertions(+), 119 deletions(-) delete mode 100644 flyteadmin/pkg/rpc/adminservice/base_test.go create mode 100644 flyteadmin/pkg/rpc/adminservice/middleware/recovery_interceptor.go create mode 100644 flyteadmin/pkg/rpc/adminservice/middleware/recovery_interceptor_test.go diff --git a/flyteadmin/pkg/rpc/adminservice/attributes.go b/flyteadmin/pkg/rpc/adminservice/attributes.go index 46607da93e..62002a0e6e 100644 --- a/flyteadmin/pkg/rpc/adminservice/attributes.go +++ b/flyteadmin/pkg/rpc/adminservice/attributes.go @@ -12,7 +12,6 @@ import ( func (m *AdminService) UpdateWorkflowAttributes(ctx context.Context, request *admin.WorkflowAttributesUpdateRequest) ( *admin.WorkflowAttributesUpdateResponse, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -30,7 +29,6 @@ func (m *AdminService) UpdateWorkflowAttributes(ctx context.Context, request *ad func (m *AdminService) GetWorkflowAttributes(ctx context.Context, request *admin.WorkflowAttributesGetRequest) ( *admin.WorkflowAttributesGetResponse, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -48,7 +46,6 @@ func (m *AdminService) GetWorkflowAttributes(ctx context.Context, request *admin func (m *AdminService) DeleteWorkflowAttributes(ctx context.Context, request *admin.WorkflowAttributesDeleteRequest) ( *admin.WorkflowAttributesDeleteResponse, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -66,7 +63,6 @@ func (m *AdminService) DeleteWorkflowAttributes(ctx context.Context, request *ad func (m *AdminService) UpdateProjectDomainAttributes(ctx context.Context, request *admin.ProjectDomainAttributesUpdateRequest) ( *admin.ProjectDomainAttributesUpdateResponse, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -84,7 +80,6 @@ func (m *AdminService) UpdateProjectDomainAttributes(ctx context.Context, reques func (m *AdminService) GetProjectDomainAttributes(ctx context.Context, request *admin.ProjectDomainAttributesGetRequest) ( *admin.ProjectDomainAttributesGetResponse, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -102,7 +97,6 @@ func (m *AdminService) GetProjectDomainAttributes(ctx context.Context, request * func (m *AdminService) DeleteProjectDomainAttributes(ctx context.Context, request *admin.ProjectDomainAttributesDeleteRequest) ( *admin.ProjectDomainAttributesDeleteResponse, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -121,7 +115,6 @@ func (m *AdminService) DeleteProjectDomainAttributes(ctx context.Context, reques func (m *AdminService) UpdateProjectAttributes(ctx context.Context, request *admin.ProjectAttributesUpdateRequest) ( *admin.ProjectAttributesUpdateResponse, error) { - defer m.interceptPanic(ctx, request) if 
request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -140,7 +133,6 @@ func (m *AdminService) UpdateProjectAttributes(ctx context.Context, request *adm func (m *AdminService) GetProjectAttributes(ctx context.Context, request *admin.ProjectAttributesGetRequest) ( *admin.ProjectAttributesGetResponse, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -159,7 +151,6 @@ func (m *AdminService) GetProjectAttributes(ctx context.Context, request *admin. func (m *AdminService) DeleteProjectAttributes(ctx context.Context, request *admin.ProjectAttributesDeleteRequest) ( *admin.ProjectAttributesDeleteResponse, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -177,7 +168,6 @@ func (m *AdminService) DeleteProjectAttributes(ctx context.Context, request *adm func (m *AdminService) ListMatchableAttributes(ctx context.Context, request *admin.ListMatchableAttributesRequest) ( *admin.ListMatchableAttributesResponse, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } diff --git a/flyteadmin/pkg/rpc/adminservice/base.go b/flyteadmin/pkg/rpc/adminservice/base.go index 5a2cb2ad89..8df2c595c7 100644 --- a/flyteadmin/pkg/rpc/adminservice/base.go +++ b/flyteadmin/pkg/rpc/adminservice/base.go @@ -5,8 +5,6 @@ import ( "fmt" "runtime/debug" - "github.com/golang/protobuf/proto" - "github.com/flyteorg/flyte/flyteadmin/pkg/async/cloudevent" eventWriter "github.com/flyteorg/flyte/flyteadmin/pkg/async/events/implementations" "github.com/flyteorg/flyte/flyteadmin/pkg/async/notifications" @@ -44,17 +42,6 @@ type AdminService struct { Metrics AdminMetrics } -// Intercepts all admin requests to handle panics during execution. 
-func (m *AdminService) interceptPanic(ctx context.Context, request proto.Message) { - err := recover() - if err == nil { - return - } - - m.Metrics.PanicCounter.Inc() - logger.Fatalf(ctx, "panic-ed for request: [%+v] with err: %v with Stack: %v", request, err, string(debug.Stack())) -} - const defaultRetries = 3 func NewAdminServer(ctx context.Context, pluginRegistry *plugins.Registry, configuration runtimeIfaces.Configuration, diff --git a/flyteadmin/pkg/rpc/adminservice/base_test.go b/flyteadmin/pkg/rpc/adminservice/base_test.go deleted file mode 100644 index 9b1cb626d5..0000000000 --- a/flyteadmin/pkg/rpc/adminservice/base_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package adminservice - -import ( - "context" - "testing" - - "github.com/golang/protobuf/proto" - "github.com/stretchr/testify/assert" - - "github.com/flyteorg/flyte/flytestdlib/logger" - "github.com/flyteorg/flyte/flytestdlib/promutils" -) - -func Test_interceptPanic(t *testing.T) { - m := AdminService{ - Metrics: InitMetrics(promutils.NewTestScope()), - } - - ctx := context.Background() - - // Mute logs to avoid .Fatal() (called in interceptPanic) causing the process to close - assert.NoError(t, logger.SetConfig(&logger.Config{Mute: true})) - - func() { - defer func() { - if err := recover(); err != nil { - assert.Fail(t, "Unexpected error", err) - } - }() - - a := func() { - defer m.interceptPanic(ctx, proto.Message(nil)) - - var x *int - *x = 10 - } - - a() - }() -} diff --git a/flyteadmin/pkg/rpc/adminservice/description_entity.go b/flyteadmin/pkg/rpc/adminservice/description_entity.go index 1d08234051..bc2d794aed 100644 --- a/flyteadmin/pkg/rpc/adminservice/description_entity.go +++ b/flyteadmin/pkg/rpc/adminservice/description_entity.go @@ -13,7 +13,6 @@ import ( ) func (m *AdminService) GetDescriptionEntity(ctx context.Context, request *admin.ObjectGetRequest) (*admin.DescriptionEntity, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -36,7 +35,6 @@ func (m *AdminService) GetDescriptionEntity(ctx context.Context, request *admin. 
} func (m *AdminService) ListDescriptionEntities(ctx context.Context, request *admin.DescriptionEntityListRequest) (*admin.DescriptionEntityList, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } diff --git a/flyteadmin/pkg/rpc/adminservice/execution.go b/flyteadmin/pkg/rpc/adminservice/execution.go index 919ed851a3..15caf5aa75 100644 --- a/flyteadmin/pkg/rpc/adminservice/execution.go +++ b/flyteadmin/pkg/rpc/adminservice/execution.go @@ -13,7 +13,6 @@ import ( func (m *AdminService) CreateExecution( ctx context.Context, request *admin.ExecutionCreateRequest) (*admin.ExecutionCreateResponse, error) { - defer m.interceptPanic(ctx, request) requestedAt := time.Now() if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") @@ -32,7 +31,6 @@ func (m *AdminService) CreateExecution( func (m *AdminService) RelaunchExecution( ctx context.Context, request *admin.ExecutionRelaunchRequest) (*admin.ExecutionCreateResponse, error) { - defer m.interceptPanic(ctx, request) requestedAt := time.Now() if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") @@ -51,7 +49,6 @@ func (m *AdminService) RelaunchExecution( func (m *AdminService) RecoverExecution( ctx context.Context, request *admin.ExecutionRecoverRequest) (*admin.ExecutionCreateResponse, error) { - defer m.interceptPanic(ctx, request) requestedAt := time.Now() if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") @@ -70,7 +67,6 @@ func (m *AdminService) RecoverExecution( func (m *AdminService) CreateWorkflowEvent( ctx context.Context, request *admin.WorkflowExecutionEventRequest) (*admin.WorkflowExecutionEventResponse, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -89,7 +85,6 @@ func (m *AdminService) CreateWorkflowEvent( func (m *AdminService) GetExecution( ctx context.Context, request *admin.WorkflowExecutionGetRequest) (*admin.Execution, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -107,7 +102,6 @@ func (m *AdminService) GetExecution( func (m *AdminService) UpdateExecution( ctx context.Context, request *admin.ExecutionUpdateRequest) (*admin.ExecutionUpdateResponse, error) { - defer m.interceptPanic(ctx, request) requestedAt := time.Now() if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") @@ -126,7 +120,6 @@ func (m *AdminService) UpdateExecution( func (m *AdminService) GetExecutionData( ctx context.Context, request *admin.WorkflowExecutionGetDataRequest) (*admin.WorkflowExecutionGetDataResponse, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -144,7 +137,6 @@ func (m *AdminService) GetExecutionData( func (m *AdminService) GetExecutionMetrics( ctx context.Context, request *admin.WorkflowExecutionGetMetricsRequest) (*admin.WorkflowExecutionGetMetricsResponse, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ 
-162,7 +154,6 @@ func (m *AdminService) GetExecutionMetrics( func (m *AdminService) ListExecutions( ctx context.Context, request *admin.ResourceListRequest) (*admin.ExecutionList, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -180,7 +171,6 @@ func (m *AdminService) ListExecutions( func (m *AdminService) TerminateExecution( ctx context.Context, request *admin.ExecutionTerminateRequest) (*admin.ExecutionTerminateResponse, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } diff --git a/flyteadmin/pkg/rpc/adminservice/launch_plan.go b/flyteadmin/pkg/rpc/adminservice/launch_plan.go index ff3c2480e0..1586c3f542 100644 --- a/flyteadmin/pkg/rpc/adminservice/launch_plan.go +++ b/flyteadmin/pkg/rpc/adminservice/launch_plan.go @@ -14,7 +14,6 @@ import ( func (m *AdminService) CreateLaunchPlan( ctx context.Context, request *admin.LaunchPlanCreateRequest) (*admin.LaunchPlanCreateResponse, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -31,7 +30,6 @@ func (m *AdminService) CreateLaunchPlan( } func (m *AdminService) GetLaunchPlan(ctx context.Context, request *admin.ObjectGetRequest) (*admin.LaunchPlan, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -55,7 +53,6 @@ func (m *AdminService) GetLaunchPlan(ctx context.Context, request *admin.ObjectG } func (m *AdminService) GetActiveLaunchPlan(ctx context.Context, request *admin.ActiveLaunchPlanRequest) (*admin.LaunchPlan, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -73,7 +70,6 @@ func (m *AdminService) GetActiveLaunchPlan(ctx context.Context, request *admin.A func (m *AdminService) UpdateLaunchPlan(ctx context.Context, request *admin.LaunchPlanUpdateRequest) ( *admin.LaunchPlanUpdateResponse, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -97,7 +93,6 @@ func (m *AdminService) UpdateLaunchPlan(ctx context.Context, request *admin.Laun func (m *AdminService) ListLaunchPlans(ctx context.Context, request *admin.ResourceListRequest) ( *admin.LaunchPlanList, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Empty request. Please rephrase.") } @@ -116,7 +111,6 @@ func (m *AdminService) ListLaunchPlans(ctx context.Context, request *admin.Resou func (m *AdminService) ListActiveLaunchPlans(ctx context.Context, request *admin.ActiveLaunchPlanListRequest) ( *admin.LaunchPlanList, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Empty request. 
Please rephrase.") } @@ -135,7 +129,6 @@ func (m *AdminService) ListActiveLaunchPlans(ctx context.Context, request *admin func (m *AdminService) ListLaunchPlanIds(ctx context.Context, request *admin.NamedEntityIdentifierListRequest) ( *admin.NamedEntityIdentifierList, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Empty request. Please rephrase.") } diff --git a/flyteadmin/pkg/rpc/adminservice/metrics.go b/flyteadmin/pkg/rpc/adminservice/metrics.go index 65c6b741f3..f770665ef6 100644 --- a/flyteadmin/pkg/rpc/adminservice/metrics.go +++ b/flyteadmin/pkg/rpc/adminservice/metrics.go @@ -2,8 +2,6 @@ package adminservice import ( - "github.com/prometheus/client_golang/prometheus" - "github.com/flyteorg/flyte/flyteadmin/pkg/rpc/adminservice/util" "github.com/flyteorg/flyte/flytestdlib/promutils" ) @@ -115,8 +113,7 @@ type descriptionEntityEndpointMetrics struct { } type AdminMetrics struct { - Scope promutils.Scope - PanicCounter prometheus.Counter + Scope promutils.Scope executionEndpointMetrics executionEndpointMetrics launchPlanEndpointMetrics launchPlanEndpointMetrics @@ -137,8 +134,6 @@ type AdminMetrics struct { func InitMetrics(adminScope promutils.Scope) AdminMetrics { return AdminMetrics{ Scope: adminScope, - PanicCounter: adminScope.MustNewCounter("handler_panic", - "panics encountered while handling requests to the admin service"), executionEndpointMetrics: executionEndpointMetrics{ scope: adminScope, diff --git a/flyteadmin/pkg/rpc/adminservice/middleware/recovery_interceptor.go b/flyteadmin/pkg/rpc/adminservice/middleware/recovery_interceptor.go new file mode 100644 index 0000000000..a0a699a4f0 --- /dev/null +++ b/flyteadmin/pkg/rpc/adminservice/middleware/recovery_interceptor.go @@ -0,0 +1,61 @@ +package middleware + +import ( + "context" + "runtime/debug" + + "github.com/prometheus/client_golang/prometheus" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/flyteorg/flyte/flytestdlib/logger" + "github.com/flyteorg/flyte/flytestdlib/promutils" +) + +// RecoveryInterceptor is a struct for creating gRPC interceptors that handle panics in go +type RecoveryInterceptor struct { + panicCounter prometheus.Counter +} + +// NewRecoveryInterceptor creates a new RecoveryInterceptor with metrics under the provided scope +func NewRecoveryInterceptor(adminScope promutils.Scope) *RecoveryInterceptor { + panicCounter := adminScope.MustNewCounter("handler_panic", "panics encountered while handling gRPC requests") + return &RecoveryInterceptor{ + panicCounter: panicCounter, + } +} + +// UnaryServerInterceptor returns a new unary server interceptor for panic recovery. +func (ri *RecoveryInterceptor) UnaryServerInterceptor() grpc.UnaryServerInterceptor { + return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (_ any, err error) { + + defer func() { + if r := recover(); r != nil { + ri.panicCounter.Inc() + logger.Errorf(ctx, "panic-ed for request: [%+v] to %s with err: %v with Stack: %v", req, info.FullMethod, r, string(debug.Stack())) + // Return INTERNAL to client with no info as to not leak implementation details + err = status.Errorf(codes.Internal, "") + } + }() + + return handler(ctx, req) + } +} + +// StreamServerInterceptor returns a new streaming server interceptor for panic recovery. 
+func (ri *RecoveryInterceptor) StreamServerInterceptor() grpc.StreamServerInterceptor { + return func(srv any, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) (err error) { + + defer func() { + if r := recover(); r != nil { + ri.panicCounter.Inc() + logger.Errorf(stream.Context(), "panic-ed for stream to %s with err: %v with Stack: %v", info.FullMethod, r, string(debug.Stack())) + // Return INTERNAL to client with no info as to not leak implementation details + err = status.Errorf(codes.Internal, "") + } + }() + + return handler(srv, stream) + } +} diff --git a/flyteadmin/pkg/rpc/adminservice/middleware/recovery_interceptor_test.go b/flyteadmin/pkg/rpc/adminservice/middleware/recovery_interceptor_test.go new file mode 100644 index 0000000000..3928856067 --- /dev/null +++ b/flyteadmin/pkg/rpc/adminservice/middleware/recovery_interceptor_test.go @@ -0,0 +1,90 @@ +package middleware + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + + mockScope "github.com/flyteorg/flyte/flytestdlib/promutils" +) + +func TestRecoveryInterceptor(t *testing.T) { + ctx := context.Background() + testScope := mockScope.NewTestScope() + recoveryInterceptor := NewRecoveryInterceptor(testScope) + unaryInterceptor := recoveryInterceptor.UnaryServerInterceptor() + streamInterceptor := recoveryInterceptor.StreamServerInterceptor() + unaryInfo := &grpc.UnaryServerInfo{} + streamInfo := &grpc.StreamServerInfo{} + req := "test-request" + + t.Run("unary should recover from panic", func(t *testing.T) { + _, err := unaryInterceptor(ctx, req, unaryInfo, func(ctx context.Context, req any) (any, error) { + panic("synthetic") + }) + expectedErr := status.Errorf(codes.Internal, "") + require.Error(t, err) + require.Equal(t, expectedErr, err) + }) + + t.Run("stream should recover from panic", func(t *testing.T) { + stream := testStream{} + err := streamInterceptor(nil, &stream, streamInfo, func(srv any, stream grpc.ServerStream) error { + panic("synthetic") + }) + expectedErr := status.Errorf(codes.Internal, "") + require.Error(t, err) + require.Equal(t, expectedErr, err) + }) + + t.Run("unary should plumb response without panic", func(t *testing.T) { + mockedResponse := "test" + resp, err := unaryInterceptor(ctx, req, unaryInfo, func(ctx context.Context, req any) (any, error) { + return mockedResponse, nil + }) + require.NoError(t, err) + require.Equal(t, mockedResponse, resp) + }) + + t.Run("stream should plumb response without panic", func(t *testing.T) { + stream := testStream{} + handlerCalled := false + err := streamInterceptor(nil, &stream, streamInfo, func(srv any, stream grpc.ServerStream) error { + handlerCalled = true + return nil + }) + require.NoError(t, err) + require.True(t, handlerCalled) + }) +} + +// testStream is an implementation of grpc.ServerStream for testing. 
+type testStream struct { +} + +func (s *testStream) SendMsg(m interface{}) error { + return nil +} + +func (s *testStream) RecvMsg(m interface{}) error { + return nil +} + +func (s *testStream) SetHeader(metadata.MD) error { + return nil +} + +func (s *testStream) SendHeader(metadata.MD) error { + return nil +} + +func (s *testStream) SetTrailer(metadata.MD) {} + +func (s *testStream) Context() context.Context { + return context.Background() +} diff --git a/flyteadmin/pkg/rpc/adminservice/named_entity.go b/flyteadmin/pkg/rpc/adminservice/named_entity.go index d48a0485e2..4ef8f3ee0b 100644 --- a/flyteadmin/pkg/rpc/adminservice/named_entity.go +++ b/flyteadmin/pkg/rpc/adminservice/named_entity.go @@ -11,7 +11,6 @@ import ( ) func (m *AdminService) GetNamedEntity(ctx context.Context, request *admin.NamedEntityGetRequest) (*admin.NamedEntity, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -31,7 +30,6 @@ func (m *AdminService) GetNamedEntity(ctx context.Context, request *admin.NamedE func (m *AdminService) UpdateNamedEntity(ctx context.Context, request *admin.NamedEntityUpdateRequest) ( *admin.NamedEntityUpdateResponse, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -50,7 +48,6 @@ func (m *AdminService) UpdateNamedEntity(ctx context.Context, request *admin.Nam func (m *AdminService) ListNamedEntities(ctx context.Context, request *admin.NamedEntityListRequest) ( *admin.NamedEntityList, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } diff --git a/flyteadmin/pkg/rpc/adminservice/node_execution.go b/flyteadmin/pkg/rpc/adminservice/node_execution.go index cf17e3ff70..1b187f3a35 100644 --- a/flyteadmin/pkg/rpc/adminservice/node_execution.go +++ b/flyteadmin/pkg/rpc/adminservice/node_execution.go @@ -14,7 +14,6 @@ import ( func (m *AdminService) CreateNodeEvent( ctx context.Context, request *admin.NodeExecutionEventRequest) (*admin.NodeExecutionEventResponse, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -32,7 +31,6 @@ func (m *AdminService) CreateNodeEvent( func (m *AdminService) GetNodeExecution( ctx context.Context, request *admin.NodeExecutionGetRequest) (*admin.NodeExecution, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -49,7 +47,6 @@ func (m *AdminService) GetNodeExecution( } func (m *AdminService) GetDynamicNodeWorkflow(ctx context.Context, request *admin.GetDynamicNodeWorkflowRequest) (*admin.DynamicNodeWorkflowResponse, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -68,7 +65,6 @@ func (m *AdminService) GetDynamicNodeWorkflow(ctx context.Context, request *admi func (m *AdminService) ListNodeExecutions( ctx context.Context, request *admin.NodeExecutionListRequest) (*admin.NodeExecutionList, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -86,7 +82,6 @@ func 
(m *AdminService) ListNodeExecutions( func (m *AdminService) ListNodeExecutionsForTask( ctx context.Context, request *admin.NodeExecutionForTaskListRequest) (*admin.NodeExecutionList, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -111,7 +106,6 @@ func (m *AdminService) ListNodeExecutionsForTask( func (m *AdminService) GetNodeExecutionData( ctx context.Context, request *admin.NodeExecutionGetDataRequest) (*admin.NodeExecutionGetDataResponse, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } diff --git a/flyteadmin/pkg/rpc/adminservice/project.go b/flyteadmin/pkg/rpc/adminservice/project.go index 5e7352ad93..ab8d8e4375 100644 --- a/flyteadmin/pkg/rpc/adminservice/project.go +++ b/flyteadmin/pkg/rpc/adminservice/project.go @@ -12,7 +12,6 @@ import ( func (m *AdminService) RegisterProject(ctx context.Context, request *admin.ProjectRegisterRequest) ( *admin.ProjectRegisterResponse, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -29,7 +28,6 @@ func (m *AdminService) RegisterProject(ctx context.Context, request *admin.Proje } func (m *AdminService) ListProjects(ctx context.Context, request *admin.ProjectListRequest) (*admin.Projects, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -48,7 +46,6 @@ func (m *AdminService) ListProjects(ctx context.Context, request *admin.ProjectL func (m *AdminService) UpdateProject(ctx context.Context, request *admin.Project) ( *admin.ProjectUpdateResponse, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -65,7 +62,6 @@ func (m *AdminService) UpdateProject(ctx context.Context, request *admin.Project } func (m *AdminService) GetProject(ctx context.Context, request *admin.ProjectGetRequest) (*admin.Project, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -83,7 +79,6 @@ func (m *AdminService) GetProject(ctx context.Context, request *admin.ProjectGet } func (m *AdminService) GetDomains(ctx context.Context, request *admin.GetDomainRequest) (*admin.GetDomainsResponse, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } diff --git a/flyteadmin/pkg/rpc/adminservice/task.go b/flyteadmin/pkg/rpc/adminservice/task.go index 8899480489..7db51ed2eb 100644 --- a/flyteadmin/pkg/rpc/adminservice/task.go +++ b/flyteadmin/pkg/rpc/adminservice/task.go @@ -15,7 +15,6 @@ import ( func (m *AdminService) CreateTask( ctx context.Context, request *admin.TaskCreateRequest) (*admin.TaskCreateResponse, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -32,7 +31,6 @@ func (m *AdminService) CreateTask( } func (m *AdminService) GetTask(ctx context.Context, request *admin.ObjectGetRequest) (*admin.Task, error) { - defer m.interceptPanic(ctx, request) if request 
== nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -56,7 +54,6 @@ func (m *AdminService) GetTask(ctx context.Context, request *admin.ObjectGetRequ func (m *AdminService) ListTaskIds( ctx context.Context, request *admin.NamedEntityIdentifierListRequest) (*admin.NamedEntityIdentifierList, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -74,7 +71,6 @@ func (m *AdminService) ListTaskIds( } func (m *AdminService) ListTasks(ctx context.Context, request *admin.ResourceListRequest) (*admin.TaskList, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } diff --git a/flyteadmin/pkg/rpc/adminservice/task_execution.go b/flyteadmin/pkg/rpc/adminservice/task_execution.go index 0561a1ba36..0638c02aa3 100644 --- a/flyteadmin/pkg/rpc/adminservice/task_execution.go +++ b/flyteadmin/pkg/rpc/adminservice/task_execution.go @@ -15,7 +15,6 @@ import ( func (m *AdminService) CreateTaskEvent( ctx context.Context, request *admin.TaskExecutionEventRequest) (*admin.TaskExecutionEventResponse, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -34,7 +33,6 @@ func (m *AdminService) CreateTaskEvent( func (m *AdminService) GetTaskExecution( ctx context.Context, request *admin.TaskExecutionGetRequest) (*admin.TaskExecution, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -62,7 +60,6 @@ func (m *AdminService) GetTaskExecution( func (m *AdminService) ListTaskExecutions( ctx context.Context, request *admin.TaskExecutionListRequest) (*admin.TaskExecutionList, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Nil request") } @@ -84,7 +81,6 @@ func (m *AdminService) ListTaskExecutions( func (m *AdminService) GetTaskExecutionData( ctx context.Context, request *admin.TaskExecutionGetDataRequest) (*admin.TaskExecutionGetDataResponse, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } diff --git a/flyteadmin/pkg/rpc/adminservice/version.go b/flyteadmin/pkg/rpc/adminservice/version.go index 7fb5861e50..3049a723aa 100644 --- a/flyteadmin/pkg/rpc/adminservice/version.go +++ b/flyteadmin/pkg/rpc/adminservice/version.go @@ -8,7 +8,6 @@ import ( func (m *AdminService) GetVersion(ctx context.Context, request *admin.GetVersionRequest) (*admin.GetVersionResponse, error) { - defer m.interceptPanic(ctx, request) response, err := m.VersionManager.GetVersion(ctx, request) if err != nil { return nil, err diff --git a/flyteadmin/pkg/rpc/adminservice/workflow.go b/flyteadmin/pkg/rpc/adminservice/workflow.go index 9fcf87c453..7f6ecc4c13 100644 --- a/flyteadmin/pkg/rpc/adminservice/workflow.go +++ b/flyteadmin/pkg/rpc/adminservice/workflow.go @@ -15,7 +15,6 @@ import ( func (m *AdminService) CreateWorkflow( ctx context.Context, request *admin.WorkflowCreateRequest) (*admin.WorkflowCreateResponse, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not 
allowed") } @@ -32,7 +31,6 @@ func (m *AdminService) CreateWorkflow( } func (m *AdminService) GetWorkflow(ctx context.Context, request *admin.ObjectGetRequest) (*admin.Workflow, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -56,7 +54,6 @@ func (m *AdminService) GetWorkflow(ctx context.Context, request *admin.ObjectGet func (m *AdminService) ListWorkflowIds(ctx context.Context, request *admin.NamedEntityIdentifierListRequest) ( *admin.NamedEntityIdentifierList, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } @@ -75,7 +72,6 @@ func (m *AdminService) ListWorkflowIds(ctx context.Context, request *admin.Named } func (m *AdminService) ListWorkflows(ctx context.Context, request *admin.ResourceListRequest) (*admin.WorkflowList, error) { - defer m.interceptPanic(ctx, request) if request == nil { return nil, status.Errorf(codes.InvalidArgument, "Incorrect request, nil requests not allowed") } diff --git a/flyteadmin/pkg/server/service.go b/flyteadmin/pkg/server/service.go index ff80c343d3..bb09f9f615 100644 --- a/flyteadmin/pkg/server/service.go +++ b/flyteadmin/pkg/server/service.go @@ -12,6 +12,7 @@ import ( "github.com/gorilla/handlers" grpcmiddleware "github.com/grpc-ecosystem/go-grpc-middleware" grpcauth "github.com/grpc-ecosystem/go-grpc-middleware/auth" + grpcrecovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" "github.com/pkg/errors" @@ -35,6 +36,7 @@ import ( "github.com/flyteorg/flyte/flyteadmin/pkg/config" "github.com/flyteorg/flyte/flyteadmin/pkg/rpc" "github.com/flyteorg/flyte/flyteadmin/pkg/rpc/adminservice" + "github.com/flyteorg/flyte/flyteadmin/pkg/rpc/adminservice/middleware" runtime2 "github.com/flyteorg/flyte/flyteadmin/pkg/runtime" runtimeIfaces "github.com/flyteorg/flyte/flyteadmin/pkg/runtime/interfaces" "github.com/flyteorg/flyte/flyteadmin/plugins" @@ -98,11 +100,18 @@ func newGRPCServer(ctx context.Context, pluginRegistry *plugins.Registry, cfg *c otelgrpc.WithPropagators(propagation.TraceContext{}), ) + adminScope := scope.NewSubScope("admin") + recoveryInterceptor := middleware.NewRecoveryInterceptor(adminScope) + var chainedUnaryInterceptors grpc.UnaryServerInterceptor if cfg.Security.UseAuth { logger.Infof(ctx, "Creating gRPC server with authentication") middlewareInterceptors := plugins.Get[grpc.UnaryServerInterceptor](pluginRegistry, plugins.PluginIDUnaryServiceMiddleware) - chainedUnaryInterceptors = grpcmiddleware.ChainUnaryServer(grpcprometheus.UnaryServerInterceptor, + chainedUnaryInterceptors = grpcmiddleware.ChainUnaryServer( + // recovery interceptor should always be first in order to handle any panics in the middleware or server + recoveryInterceptor.UnaryServerInterceptor(), + grpcrecovery.UnaryServerInterceptor(), + grpcprometheus.UnaryServerInterceptor, otelUnaryServerInterceptor, auth.GetAuthenticationCustomMetadataInterceptor(authCtx), grpcauth.UnaryServerInterceptor(auth.GetAuthenticationInterceptor(authCtx)), @@ -111,11 +120,23 @@ func newGRPCServer(ctx context.Context, pluginRegistry *plugins.Registry, cfg *c ) } else { logger.Infof(ctx, "Creating gRPC server without authentication") - chainedUnaryInterceptors = grpcmiddleware.ChainUnaryServer(grpcprometheus.UnaryServerInterceptor, 
otelUnaryServerInterceptor) + chainedUnaryInterceptors = grpcmiddleware.ChainUnaryServer( + // recovery interceptor should always be first in order to handle any panics in the middleware or server + recoveryInterceptor.UnaryServerInterceptor(), + grpcprometheus.UnaryServerInterceptor, + otelUnaryServerInterceptor, + ) } + chainedStreamInterceptors := grpcmiddleware.ChainStreamServer( + // recovery interceptor should always be first in order to handle any panics in the middleware or server + recoveryInterceptor.StreamServerInterceptor(), + grpcprometheus.StreamServerInterceptor, + ) + serverOpts := []grpc.ServerOption{ - grpc.StreamInterceptor(grpcprometheus.StreamServerInterceptor), + // recovery interceptor should always be first in order to handle any panics in the middleware or server + grpc.StreamInterceptor(chainedStreamInterceptors), grpc.UnaryInterceptor(chainedUnaryInterceptors), } if cfg.GrpcConfig.MaxMessageSizeBytes > 0 { @@ -131,7 +152,7 @@ func newGRPCServer(ctx context.Context, pluginRegistry *plugins.Registry, cfg *c } configuration := runtime2.NewConfigurationProvider() - adminServer := adminservice.NewAdminServer(ctx, pluginRegistry, configuration, cfg.KubeConfig, cfg.Master, dataStorageClient, scope.NewSubScope("admin")) + adminServer := adminservice.NewAdminServer(ctx, pluginRegistry, configuration, cfg.KubeConfig, cfg.Master, dataStorageClient, adminScope) grpcService.RegisterAdminServiceServer(grpcServer, adminServer) if cfg.Security.UseAuth { grpcService.RegisterAuthMetadataServiceServer(grpcServer, authCtx.AuthMetadataService()) From d8e7491e29672537f5ebd207135d3420a86f4080 Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Fri, 2 Aug 2024 11:41:16 +0800 Subject: [PATCH 03/65] Snowflake agent Doc (#5620) * TEST build Signed-off-by: Future-Outlier * remove emphasize-lines Signed-off-by: Future-Outlier * test build Signed-off-by: Future-Outlier * revert Signed-off-by: Future-Outlier --------- Signed-off-by: Future-Outlier Signed-off-by: Bugra Gedik --- docs/deployment/agents/index.md | 6 ++++-- docs/deployment/agents/snowflake.rst | 18 +++++++++++++----- 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/docs/deployment/agents/index.md b/docs/deployment/agents/index.md index 912ab8613c..11ce607788 100644 --- a/docs/deployment/agents/index.md +++ b/docs/deployment/agents/index.md @@ -25,10 +25,12 @@ If you are using a managed deployment of Flyte, you will need to contact your de - Configuring your Flyte deployment for the BigQuery agent. * - {ref}`MMCloud Agent ` - Configuring your Flyte deployment for the MMCloud agent. -* - {ref}`Sensor Agent ` - - Configuring your Flyte deployment for the sensor agent. * - {ref}`SageMaker Inference ` - Deploy models and create, as well as trigger inference endpoints on SageMaker. +* - {ref}`Sensor Agent ` + - Configuring your Flyte deployment for the sensor agent. +* - {ref}`Snowflake Agent ` + - Configuring your Flyte deployment for the SnowFlake agent. * - {ref}`OpenAI Batch ` - Submit requests to OpenAI GPT models for asynchronous batch processing. ``` diff --git a/docs/deployment/agents/snowflake.rst b/docs/deployment/agents/snowflake.rst index fe1c8482ae..a689c748bf 100644 --- a/docs/deployment/agents/snowflake.rst +++ b/docs/deployment/agents/snowflake.rst @@ -1,16 +1,25 @@ .. _deployment-agent-setup-snowflake: Snowflake agent -================= +=============== This guide provides an overview of how to set up the Snowflake agent in your Flyte deployment. 1. Set up the key pair authentication in Snowflake. 
For more details, see the `Snowflake key-pair authentication and key-pair rotation guide `__. -2. Create a secret with the group "snowflake" and the key "private_key". For more details, see `"Using Secrets in a Task" `__. +2. Create a secret with the group "private_key" and the key "snowflake". + This is hardcoded in the flytekit sdk, since we can't know the group and key name in advance. + This is for permission to upload and download data with structured dataset in python task pod. .. code-block:: bash - kubectl create secret generic snowflake-private-key --namespace=flytesnacks-development --from-file=your_private_key_above + kubectl create secret generic private-key --from-file=snowflake= --namespace=flytesnacks-development + +3. Create a secret in the flyteagent's pod, this is for execution snowflake query in the agent pod. + +.. code-block:: bash + + ENCODED_VALUE=$(cat | base64) && kubectl patch secret flyteagent -n flyte --patch "{\"data\":{\"snowflake_private_key\":\"$ENCODED_VALUE\"}}" + Specify agent configuration ---------------------------- @@ -73,7 +82,7 @@ Specify agent configuration supportedTaskTypes: - snowflake -Ensure that the propeller has the correct service account for BigQuery. +Ensure that the propeller has the correct service account for Snowflake. Upgrade the Flyte Helm release ------------------------------ @@ -97,7 +106,6 @@ Upgrade the Flyte Helm release helm upgrade flyte/flyte-core -n --values values-override.yaml Replace ```` with the name of your release (e.g., ``flyte``) - and ```` with the name of your namespace (e.g., ``flyte``). For Snowflake agent on the Flyte cluster, see `Snowflake agent `_. From 021c606edd2cddcaef2caf01a9c1d3da86d39fa5 Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Fri, 2 Aug 2024 14:24:36 +0800 Subject: [PATCH 04/65] [flytepropeller][compiler] Error Handling when Type is not found (#5612) * FlytePropeller Compiler Avoid Crash when Type not found Signed-off-by: Future-Outlier * Update pingsu's error message advices Signed-off-by: Future-Outlier Co-authored-by: pingsutw * fix lint Signed-off-by: Future-Outlier * Trigger CI Signed-off-by: Future-Outlier * Trigger CI Signed-off-by: Future-Outlier --------- Signed-off-by: Future-Outlier Co-authored-by: pingsutw Signed-off-by: Bugra Gedik --- .../validation/launch_plan_validator_test.go | 14 ++++--- .../pkg/manager/impl/validation/validation.go | 17 +++++++++ .../impl/validation/validation_test.go | 38 +++++++++++++++++++ 3 files changed, 64 insertions(+), 5 deletions(-) diff --git a/flyteadmin/pkg/manager/impl/validation/launch_plan_validator_test.go b/flyteadmin/pkg/manager/impl/validation/launch_plan_validator_test.go index 86bfc5c6b7..178c2b497b 100644 --- a/flyteadmin/pkg/manager/impl/validation/launch_plan_validator_test.go +++ b/flyteadmin/pkg/manager/impl/validation/launch_plan_validator_test.go @@ -13,6 +13,10 @@ import ( "github.com/flyteorg/flyte/flytestdlib/utils" ) +const ( + foo = "foo" +) + var lpApplicationConfig = testutils.GetApplicationConfigWithDefaultDomains() func getWorkflowInterface() *core.TypedInterface { @@ -344,7 +348,7 @@ func TestValidateSchedule_KickoffTimeArgPointsAtWrongType(t *testing.T) { request := testutils.GetLaunchPlanRequestWithDeprecatedCronSchedule("* * * * * *") inputMap := &core.ParameterMap{ Parameters: map[string]*core.Parameter{ - "foo": { + foo: { Var: &core.Variable{ Type: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_STRING}}, }, @@ -354,7 +358,7 @@ func 
TestValidateSchedule_KickoffTimeArgPointsAtWrongType(t *testing.T) { }, }, } - request.Spec.EntityMetadata.Schedule.KickoffTimeInputArg = "foo" + request.Spec.EntityMetadata.Schedule.KickoffTimeInputArg = foo err := validateSchedule(request, inputMap) assert.NotNil(t, err) @@ -364,7 +368,7 @@ func TestValidateSchedule_NoRequired(t *testing.T) { request := testutils.GetLaunchPlanRequestWithDeprecatedCronSchedule("* * * * * *") inputMap := &core.ParameterMap{ Parameters: map[string]*core.Parameter{ - "foo": { + foo: { Var: &core.Variable{ Type: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_STRING}}, }, @@ -383,7 +387,7 @@ func TestValidateSchedule_KickoffTimeBound(t *testing.T) { request := testutils.GetLaunchPlanRequestWithDeprecatedCronSchedule("* * * * * *") inputMap := &core.ParameterMap{ Parameters: map[string]*core.Parameter{ - "foo": { + foo: { Var: &core.Variable{ Type: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_DATETIME}}, }, @@ -393,7 +397,7 @@ func TestValidateSchedule_KickoffTimeBound(t *testing.T) { }, }, } - request.Spec.EntityMetadata.Schedule.KickoffTimeInputArg = "foo" + request.Spec.EntityMetadata.Schedule.KickoffTimeInputArg = foo err := validateSchedule(request, inputMap) assert.Nil(t, err) diff --git a/flyteadmin/pkg/manager/impl/validation/validation.go b/flyteadmin/pkg/manager/impl/validation/validation.go index 1958f25021..55c45db9bb 100644 --- a/flyteadmin/pkg/manager/impl/validation/validation.go +++ b/flyteadmin/pkg/manager/impl/validation/validation.go @@ -1,6 +1,7 @@ package validation import ( + "fmt" "net/url" "strconv" "strings" @@ -282,11 +283,27 @@ func validateParameterMap(inputMap *core.ParameterMap, fieldName string) error { defaultValue := defaultInput.GetDefault() if defaultValue != nil { inputType := validators.LiteralTypeForLiteral(defaultValue) + + if inputType == nil { + return errors.NewFlyteAdminErrorf(codes.InvalidArgument, + fmt.Sprintf( + "Flyte encountered an issue while determining\n"+ + "the type of the default value for Parameter '%s' in '%s'.\n"+ + "Registered type: [%s].\n"+ + "Flyte needs to support the latest FlyteIDL to support this type.\n"+ + "Suggested solution: Please update all of your Flyte images to the latest version and "+ + "try again.", + name, fieldName, defaultInput.GetVar().GetType().String(), + ), + ) + } + if !validators.AreTypesCastable(inputType, defaultInput.GetVar().GetType()) { return errors.NewFlyteAdminErrorf(codes.InvalidArgument, "Type mismatch for Parameter %s in %s has type %s, expected %s", name, fieldName, defaultInput.GetVar().GetType().String(), inputType.String()) } + if defaultInput.GetVar().GetType().GetSimple() == core.SimpleType_DATETIME { // Make datetime specific validations return ValidateDatetime(defaultValue) diff --git a/flyteadmin/pkg/manager/impl/validation/validation_test.go b/flyteadmin/pkg/manager/impl/validation/validation_test.go index a9fed38ee9..1aa0bc7bab 100644 --- a/flyteadmin/pkg/manager/impl/validation/validation_test.go +++ b/flyteadmin/pkg/manager/impl/validation/validation_test.go @@ -320,6 +320,44 @@ func TestValidateParameterMap(t *testing.T) { err := validateParameterMap(&exampleMap, "some text") assert.NoError(t, err) }) + t.Run("invalid because inputType is nil", func(t *testing.T) { + // Create a literal that will cause LiteralTypeForLiteral to return nil. + // For example, a scalar with no value. 
+ unsupportedLiteral := &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{}, + }, + } + + name := "foo" + fieldName := "test_field_name" + exampleMap := core.ParameterMap{ + Parameters: map[string]*core.Parameter{ + name: { + Var: &core.Variable{ + // 1000 means an unsupported type + Type: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: 1000}}, + }, + Behavior: &core.Parameter_Default{ + Default: unsupportedLiteral, + }, + }, + }, + } + err := validateParameterMap(&exampleMap, fieldName) + assert.Error(t, err) + fmt.Println(err.Error()) + expectedErrMsg := fmt.Sprintf( + "Flyte encountered an issue while determining\n"+ + "the type of the default value for Parameter '%s' in '%s'.\n"+ + "Registered type: [%s].\n"+ + "Flyte needs to support the latest FlyteIDL to support this type.\n"+ + "Suggested solution: Please update all of your Flyte images to the latest version and "+ + "try again.", + name, fieldName, exampleMap.Parameters[name].GetVar().GetType().String(), + ) + assert.Equal(t, expectedErrMsg, err.Error()) + }) } func TestValidateToken(t *testing.T) { From 91d6d401babe332cb1b3ee7f971641efb064586f Mon Sep 17 00:00:00 2001 From: Jason Parraga Date: Thu, 1 Aug 2024 23:46:52 -0700 Subject: [PATCH 05/65] Fix nil pointer when task plugin load returns error (#5622) Signed-off-by: Bugra Gedik --- .../pkg/controller/nodes/task/handler.go | 7 ++++--- .../pkg/controller/nodes/task/handler_test.go | 18 ++++++++++++++++++ 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/flytepropeller/pkg/controller/nodes/task/handler.go b/flytepropeller/pkg/controller/nodes/task/handler.go index d1595890d8..9ec47985c9 100644 --- a/flytepropeller/pkg/controller/nodes/task/handler.go +++ b/flytepropeller/pkg/controller/nodes/task/handler.go @@ -248,13 +248,14 @@ func (t *Handler) Setup(ctx context.Context, sCtx interfaces.SetupContext) error logger.Infof(ctx, "Loading Plugin [%s] ENABLED", p.ID) cp, err := pluginCore.LoadPlugin(ctx, sCtxFinal, p) + if err != nil { + return regErrors.Wrapf(err, "failed to load plugin - %s", p.ID) + } + if cp.GetID() == agent.ID { t.agentService.CorePlugin = cp } - if err != nil { - return regErrors.Wrapf(err, "failed to load plugin - %s", p.ID) - } // For every default plugin for a task type specified in flytepropeller config we validate that the plugin's // static definition includes that task type as something it is registered to handle. 
for _, tt := range p.RegisteredTaskTypes { diff --git a/flytepropeller/pkg/controller/nodes/task/handler_test.go b/flytepropeller/pkg/controller/nodes/task/handler_test.go index 4e6798cfef..31e1be9a7f 100644 --- a/flytepropeller/pkg/controller/nodes/task/handler_test.go +++ b/flytepropeller/pkg/controller/nodes/task/handler_test.go @@ -126,6 +126,8 @@ func Test_task_Setup(t *testing.T) { k8sPluginDefault := &pluginK8sMocks.Plugin{} k8sPluginDefault.OnGetProperties().Return(pluginK8s.PluginProperties{}) + loadErrorPluginType := "loadError" + corePluginEntry := pluginCore.PluginEntry{ ID: corePluginType, RegisteredTaskTypes: []pluginCore.TaskType{corePluginType}, @@ -154,6 +156,13 @@ func Test_task_Setup(t *testing.T) { RegisteredTaskTypes: []pluginCore.TaskType{k8sPluginDefaultType}, ResourceToWatch: &v1.Pod{}, } + loadErrorPluginEntry := pluginCore.PluginEntry{ + ID: loadErrorPluginType, + RegisteredTaskTypes: []pluginCore.TaskType{loadErrorPluginType}, + LoadPlugin: func(ctx context.Context, iCtx pluginCore.SetupContext) (pluginCore.Plugin, error) { + return nil, fmt.Errorf("test") + }, + } type wantFields struct { pluginIDs map[pluginCore.TaskType]string @@ -232,6 +241,15 @@ func Test_task_Setup(t *testing.T) { }, }, false}, + {"load-error", + testPluginRegistry{ + core: []pluginCore.PluginEntry{loadErrorPluginEntry}, + k8s: []pluginK8s.PluginEntry{}, + }, + []string{loadErrorPluginType}, + map[string]string{corePluginType: loadErrorPluginType}, + wantFields{}, + true}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { From eae4bf57937cf245177244ff6555dae85d2cfa23 Mon Sep 17 00:00:00 2001 From: Jason Parraga Date: Thu, 1 Aug 2024 23:48:45 -0700 Subject: [PATCH 06/65] Log stack trace when refresh cache sync recovers from panic (#5623) Signed-off-by: Bugra Gedik --- flytestdlib/cache/auto_refresh.go | 5 ++-- flytestdlib/cache/auto_refresh_test.go | 37 ++++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/flytestdlib/cache/auto_refresh.go b/flytestdlib/cache/auto_refresh.go index bb23ef9369..8218e577a8 100644 --- a/flytestdlib/cache/auto_refresh.go +++ b/flytestdlib/cache/auto_refresh.go @@ -3,6 +3,7 @@ package cache import ( "context" "fmt" + "runtime/debug" "sync" "time" @@ -290,9 +291,9 @@ func (w *autoRefresh) sync(ctx context.Context) (err error) { } if err, isErr = rVal.(error); isErr { - err = fmt.Errorf("worker panic'd and is shutting down. Error: %w", err) + err = fmt.Errorf("worker panic'd and is shutting down. Error: %w with Stack: %v", err, string(debug.Stack())) } else { - err = fmt.Errorf("worker panic'd and is shutting down. Panic value: %v", rVal) + err = fmt.Errorf("worker panic'd and is shutting down. 
Panic value: %v with Stack: %v", rVal, string(debug.Stack())) } logger.Error(ctx, err) diff --git a/flytestdlib/cache/auto_refresh_test.go b/flytestdlib/cache/auto_refresh_test.go index e798300f5d..5e1c49777e 100644 --- a/flytestdlib/cache/auto_refresh_test.go +++ b/flytestdlib/cache/auto_refresh_test.go @@ -64,6 +64,15 @@ func syncTerminalItem(_ context.Context, batch Batch) ([]ItemSyncResponse, error panic("This should never be called") } +type panickingSyncer struct { + callCount atomic.Int32 +} + +func (p *panickingSyncer) sync(_ context.Context, _ Batch) ([]ItemSyncResponse, error) { + p.callCount.Inc() + panic("testing") +} + func TestCacheFour(t *testing.T) { testResyncPeriod := 10 * time.Millisecond rateLimiter := workqueue.DefaultControllerRateLimiter() @@ -172,6 +181,34 @@ func TestCacheFour(t *testing.T) { cancel() }) + + t.Run("Test panic on sync and shutdown", func(t *testing.T) { + syncer := &panickingSyncer{} + cache, err := NewAutoRefreshCache("fake3", syncer.sync, rateLimiter, testResyncPeriod, 10, 2, promutils.NewTestScope()) + assert.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + assert.NoError(t, cache.Start(ctx)) + + itemID := "dummy_id" + _, err = cache.GetOrCreate(itemID, fakeCacheItem{ + val: 0, + }) + assert.NoError(t, err) + + // wait for all workers to run + assert.Eventually(t, func() bool { + return syncer.callCount.Load() == int32(10) + }, 5*time.Second, time.Millisecond) + + // wait some more time + time.Sleep(500 * time.Millisecond) + + // all workers should have shut down. + assert.Equal(t, int32(10), syncer.callCount.Load()) + + cancel() + }) } func TestQueueBuildUp(t *testing.T) { From dcbc55a3d7b1b2c81654a03158584fdd10220f2d Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Fri, 2 Aug 2024 17:48:36 +0800 Subject: [PATCH 07/65] use private-key (#5626) Signed-off-by: Bugra Gedik --- docs/deployment/agents/snowflake.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/deployment/agents/snowflake.rst b/docs/deployment/agents/snowflake.rst index a689c748bf..d6ee74125b 100644 --- a/docs/deployment/agents/snowflake.rst +++ b/docs/deployment/agents/snowflake.rst @@ -6,7 +6,7 @@ Snowflake agent This guide provides an overview of how to set up the Snowflake agent in your Flyte deployment. 1. Set up the key pair authentication in Snowflake. For more details, see the `Snowflake key-pair authentication and key-pair rotation guide `__. -2. Create a secret with the group "private_key" and the key "snowflake". +2. Create a secret with the group "private-key" and the key "snowflake". This is hardcoded in the flytekit sdk, since we can't know the group and key name in advance. This is for permission to upload and download data with structured dataset in python task pod. 
From 61cffe82a2a0a2f352046dc194eba6695144ce49 Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Fri, 2 Aug 2024 17:55:47 +0800 Subject: [PATCH 08/65] Explain how Agent Secret Works (#5625) * first version Signed-off-by: Future-Outlier * update Signed-off-by: Future-Outlier --------- Signed-off-by: Future-Outlier Signed-off-by: Bugra Gedik --- docs/flyte_agents/how_secret_works_in_agent.md | 17 +++++++++++++++++ docs/flyte_agents/index.md | 3 +++ 2 files changed, 20 insertions(+) create mode 100644 docs/flyte_agents/how_secret_works_in_agent.md diff --git a/docs/flyte_agents/how_secret_works_in_agent.md b/docs/flyte_agents/how_secret_works_in_agent.md new file mode 100644 index 0000000000..7abada46ac --- /dev/null +++ b/docs/flyte_agents/how_secret_works_in_agent.md @@ -0,0 +1,17 @@ +--- +jupytext: + formats: md:myst + text_representation: + extension: .md + format_name: myst +--- + +(how_secret_works_in_agent)= +# How Secret Works in Agent + +In Flyte agent's deployment, we mount secrets in Kubernetes with the namespace `flyte` and the name `flyteagent`. +If you want to add secrets for agents, you can use the following command: + +```bash +SECRET_VALUE=$( | base64) && kubectl patch secret flyteagent -n flyte --patch "{\"data\":{\"your_agent_secret_name\":\"$SECRET_VALUE\"}}" +``` diff --git a/docs/flyte_agents/index.md b/docs/flyte_agents/index.md index e7d627a670..a32200cde6 100644 --- a/docs/flyte_agents/index.md +++ b/docs/flyte_agents/index.md @@ -36,6 +36,8 @@ You can create different agent services that host different agents, e.g., a prod - Once you have tested your new agent in a local development cluster and want to use it in production, you should test it in the Flyte sandbox. * - {doc}`Implementing the agent metadata service ` - If you want to develop an agent server in a language other than Python (e.g., Rust or Java), you must implement the agent metadata service in your agent server. +* - {doc}`How secret works in agent ` + - Explain how secret works in your agent server. 
``` ```{toctree} @@ -48,4 +50,5 @@ developing_agents testing_agents_in_a_local_development_cluster deploying_agents_to_the_flyte_sandbox implementing_the_agent_metadata_service +how_secret_works_in_agent ``` From 2cf52ef8f169bd38d9b39a37bbc2960db51ea8b0 Mon Sep 17 00:00:00 2001 From: ddl-rliu <140021987+ddl-rliu@users.noreply.github.com> Date: Fri, 2 Aug 2024 03:03:48 -0700 Subject: [PATCH 09/65] Fix typo in execution manager (#5619) Signed-off-by: ddl-rliu <140021987+ddl-rliu@users.noreply.github.com> Signed-off-by: Bugra Gedik --- flyteadmin/pkg/manager/impl/execution_manager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flyteadmin/pkg/manager/impl/execution_manager.go b/flyteadmin/pkg/manager/impl/execution_manager.go index 337301977e..13521cedbb 100644 --- a/flyteadmin/pkg/manager/impl/execution_manager.go +++ b/flyteadmin/pkg/manager/impl/execution_manager.go @@ -1719,7 +1719,7 @@ func (m *ExecutionManager) TerminateExecution( } if common.IsExecutionTerminal(core.WorkflowExecution_Phase(core.WorkflowExecution_Phase_value[executionModel.Phase])) { - return nil, errors.NewAlreadyInTerminalStateError(ctx, "Cannot abort an already terminate workflow execution", executionModel.Phase) + return nil, errors.NewAlreadyInTerminalStateError(ctx, "Cannot abort an already terminated workflow execution", executionModel.Phase) } err = transformers.SetExecutionAborting(&executionModel, request.Cause, getUser(ctx)) From a65a5903fcfe6522577b1300423f465e3bc632e3 Mon Sep 17 00:00:00 2001 From: Yee Hing Tong Date: Fri, 2 Aug 2024 12:53:33 -0700 Subject: [PATCH 10/65] Amend Admin to use grpc message size (#5628) * add send arg Signed-off-by: Yee Hing Tong * Add acction to remove cache in gh runner Signed-off-by: Eduardo Apolinario * Use correct checked out path Signed-off-by: Eduardo Apolinario * Path in strings Signed-off-by: Eduardo Apolinario * Checkout repo in root Signed-off-by: Eduardo Apolinario * Use the correct path to new action Signed-off-by: Eduardo Apolinario * Do not use gh var in path to clear-action-cache Signed-off-by: Eduardo Apolinario * Remove wrong invocation of clear-action-cache Signed-off-by: Eduardo Apolinario * GITHUB_WORKSPACE is implicit in the checkout action Signed-off-by: Eduardo Apolinario * Refer to local `flyte` directory Signed-off-by: Eduardo Apolinario --------- Signed-off-by: Yee Hing Tong Signed-off-by: Eduardo Apolinario Co-authored-by: Eduardo Apolinario Signed-off-by: Bugra Gedik --- .github/actions/clear-action-cache/action.yml | 11 +++++++++++ .github/workflows/tests.yml | 6 ++++-- flyteadmin/pkg/server/service.go | 2 +- 3 files changed, 16 insertions(+), 3 deletions(-) create mode 100644 .github/actions/clear-action-cache/action.yml diff --git a/.github/actions/clear-action-cache/action.yml b/.github/actions/clear-action-cache/action.yml new file mode 100644 index 0000000000..a29347b61c --- /dev/null +++ b/.github/actions/clear-action-cache/action.yml @@ -0,0 +1,11 @@ +name: 'Clear action cache' +description: 'As suggested by GitHub to prevent low disk space: https://github.com/actions/runner-images/issues/2840#issuecomment-790492173' +runs: + using: 'composite' + steps: + - shell: bash + run: | + rm -rf /usr/share/dotnet + rm -rf /opt/ghc + rm -rf "/usr/local/share/boost" + rm -rf "$AGENT_TOOLSDIRECTORY" diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index cbce9cd054..1d69466464 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -30,12 +30,14 @@ jobs: - name: Fetch flyte code uses: 
actions/checkout@v4 with: - path: "${{ github.workspace }}/flyte" + path: flyte + - name: 'Clear action cache' + uses: ./flyte/.github/actions/clear-action-cache - name: Fetch flytekit code uses: actions/checkout@v4 with: repository: flyteorg/flytekit - path: "${{ github.workspace }}/flytekit" + path: flytekit - uses: conda-incubator/setup-miniconda@v3 with: auto-update-conda: true diff --git a/flyteadmin/pkg/server/service.go b/flyteadmin/pkg/server/service.go index bb09f9f615..0a7371ef68 100644 --- a/flyteadmin/pkg/server/service.go +++ b/flyteadmin/pkg/server/service.go @@ -140,7 +140,7 @@ func newGRPCServer(ctx context.Context, pluginRegistry *plugins.Registry, cfg *c grpc.UnaryInterceptor(chainedUnaryInterceptors), } if cfg.GrpcConfig.MaxMessageSizeBytes > 0 { - serverOpts = append(serverOpts, grpc.MaxRecvMsgSize(cfg.GrpcConfig.MaxMessageSizeBytes)) + serverOpts = append(serverOpts, grpc.MaxRecvMsgSize(cfg.GrpcConfig.MaxMessageSizeBytes), grpc.MaxSendMsgSize(cfg.GrpcConfig.MaxMessageSizeBytes)) } serverOpts = append(serverOpts, opts...) grpcServer := grpc.NewServer(serverOpts...) From fe9fb6786551d872981fb61b5fb1cb56fa1a3349 Mon Sep 17 00:00:00 2001 From: Kevin Su Date: Tue, 6 Aug 2024 12:50:06 +0800 Subject: [PATCH 11/65] document the process of setting ttl for a ray cluster (#5636) Signed-off-by: Kevin Su Signed-off-by: Bugra Gedik --- docs/deployment/plugins/k8s/index.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/deployment/plugins/k8s/index.rst b/docs/deployment/plugins/k8s/index.rst index a46ec23815..64fbb41136 100644 --- a/docs/deployment/plugins/k8s/index.rst +++ b/docs/deployment/plugins/k8s/index.rst @@ -272,6 +272,10 @@ Specify plugin configuration - container: container - container_array: k8s-array - ray: ray + plugins: + ray: + // Shutdown Ray cluster after 1 hour of inactivity + ttlSecondsAfterFinished: 3600 .. group-tab:: Flyte core @@ -294,6 +298,10 @@ Specify plugin configuration sidecar: sidecar container_array: k8s-array ray: ray + plugins: + ray: + // Shutdown Ray cluster after 1 hour of inactivity + ttlSecondsAfterFinished: 3600 .. 
group-tab:: Spark From 051443086b41acfaea7bb7a93d54b5fba8b3b74d Mon Sep 17 00:00:00 2001 From: Andrew Dye Date: Wed, 7 Aug 2024 16:24:11 -0700 Subject: [PATCH 12/65] Add CustomHeaderMatcher to pass additional headers (#5563) Signed-off-by: Andrew Dye Signed-off-by: Bugra Gedik --- flyteadmin/auth/handlers.go | 20 ++++++++++++++++++++ flyteadmin/pkg/server/service.go | 3 +++ flyteadmin/plugins/registry.go | 9 +++++---- 3 files changed, 28 insertions(+), 4 deletions(-) diff --git a/flyteadmin/auth/handlers.go b/flyteadmin/auth/handlers.go index a6220db6e3..b839cf26d0 100644 --- a/flyteadmin/auth/handlers.go +++ b/flyteadmin/auth/handlers.go @@ -5,11 +5,13 @@ import ( "encoding/json" "fmt" "net/http" + "net/textproto" "net/url" "strings" "time" "github.com/grpc-ecosystem/go-grpc-middleware/util/metautils" + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" "golang.org/x/oauth2" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -22,6 +24,7 @@ import ( "github.com/flyteorg/flyte/flyteadmin/pkg/common" "github.com/flyteorg/flyte/flyteadmin/plugins" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" + "github.com/flyteorg/flyte/flytestdlib/contextutils" "github.com/flyteorg/flyte/flytestdlib/errors" "github.com/flyteorg/flyte/flytestdlib/logger" ) @@ -32,6 +35,8 @@ const ( FromHTTPVal = "true" ) +var XRequestID = textproto.CanonicalMIMEHeaderKey(contextutils.RequestIDKey.String()) + type PreRedirectHookError struct { Message string Code int @@ -533,3 +538,18 @@ func GetUserInfoForwardResponseHandler() UserInfoForwardResponseHandler { return nil } } + +func GetCustomHeaderMatcher(pluginRegistry *plugins.Registry) runtime.HeaderMatcherFunc { + if fn := plugins.Get[runtime.HeaderMatcherFunc](pluginRegistry, plugins.PluginIDCustomerHeaderMatcher); fn != nil { + return fn + } + return func(key string) (string, bool) { + canonicalKey := textproto.CanonicalMIMEHeaderKey(key) + switch canonicalKey { + case XRequestID: + return canonicalKey, true + default: + return runtime.DefaultHeaderMatcher(key) + } + } +} diff --git a/flyteadmin/pkg/server/service.go b/flyteadmin/pkg/server/service.go index 0a7371ef68..587ea86e3b 100644 --- a/flyteadmin/pkg/server/service.go +++ b/flyteadmin/pkg/server/service.go @@ -240,6 +240,9 @@ func newHTTPServer(ctx context.Context, pluginRegistry *plugins.Registry, cfg *c // This option sets subject in the user info response gwmuxOptions = append(gwmuxOptions, runtime.WithForwardResponseOption(auth.GetUserInfoForwardResponseHandler())) + // Use custom header matcher to allow additional headers to be passed through + gwmuxOptions = append(gwmuxOptions, runtime.WithIncomingHeaderMatcher(auth.GetCustomHeaderMatcher(pluginRegistry))) + if cfg.Security.UseAuth { // Add HTTP handlers for OIDC endpoints auth.RegisterHandlers(ctx, mux, authCtx, pluginRegistry) diff --git a/flyteadmin/plugins/registry.go b/flyteadmin/plugins/registry.go index 92644b1367..a89a8dfeae 100644 --- a/flyteadmin/plugins/registry.go +++ b/flyteadmin/plugins/registry.go @@ -9,12 +9,13 @@ import ( type PluginID = string const ( - PluginIDWorkflowExecutor PluginID = "WorkflowExecutor" + PluginIDAdditionalGRPCService PluginID = "AdditionalGRPCService" + PluginIDCustomerHeaderMatcher PluginID = "CustomerHeaderMatcher" PluginIDDataProxy PluginID = "DataProxy" - PluginIDUnaryServiceMiddleware PluginID = "UnaryServiceMiddleware" - PluginIDPreRedirectHook PluginID = "PreRedirectHook" PluginIDLogoutHook PluginID = "LogoutHook" - PluginIDAdditionalGRPCService PluginID = "AdditionalGRPCService" + 
PluginIDPreRedirectHook PluginID = "PreRedirectHook" + PluginIDUnaryServiceMiddleware PluginID = "UnaryServiceMiddleware" + PluginIDWorkflowExecutor PluginID = "WorkflowExecutor" ) type AtomicRegistry struct { From 80c349d45994ced638aba02c82eed3ea6e4d8d71 Mon Sep 17 00:00:00 2001 From: Eduardo Apolinario <653394+eapolinario@users.noreply.github.com> Date: Thu, 8 Aug 2024 11:57:03 -0700 Subject: [PATCH 13/65] Turn flyteidl and flytectl releases into manual gh workflows (#5635) * Make flyteidl releases go through a manual gh workflow Signed-off-by: Eduardo Apolinario * Make flytectl releases go through a manual gh workflow Signed-off-by: Eduardo Apolinario * Rewrite the documentation for `version` and clarify wording in RELEASE.md Signed-off-by: Eduardo Apolinario --------- Signed-off-by: Eduardo Apolinario Co-authored-by: Eduardo Apolinario Signed-off-by: Bugra Gedik --- .github/workflows/create_release.yml | 1 - .github/workflows/flytectl-release.yml | 27 +++++++++++++++++++--- .github/workflows/flyteidl-release.yml | 31 ++++++++++++++++++++++---- flytectl/RELEASE.md | 2 +- flyteidl/RELEASE.md | 4 ++++ 5 files changed, 56 insertions(+), 9 deletions(-) create mode 100644 flyteidl/RELEASE.md diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index 1db5986925..e00c09f2d7 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -28,7 +28,6 @@ jobs: "datacatalog", "flyteadmin", "flytecopilot", - "flyteidl", "flyteplugins", "flytepropeller", "flytestdlib", diff --git a/.github/workflows/flytectl-release.yml b/.github/workflows/flytectl-release.yml index 2bfa6f28eb..2aba67dbe9 100644 --- a/.github/workflows/flytectl-release.yml +++ b/.github/workflows/flytectl-release.yml @@ -1,13 +1,34 @@ name: Flytectl release on: - push: - tags: - - flytectl/v*.*.* + workflow_dispatch: + inputs: + version: + description: "version. Do *not* use the `flytectl/` prefix, e.g. `flytectl/v1.2.3`, instead use only `v1.2.3` (including the `v`)" + required: true jobs: + push-flytectl-tag: + name: Push git tag containing the `flyteidl/` prefix + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: '0' + - uses: actions/github-script@v6 + with: + github-token: ${{ secrets.FLYTE_BOT_PAT }} + script: | + github.rest.git.createRef({ + owner: context.repo.owner, + repo: context.repo.repo, + ref: `refs/tags/flytectl/${{ github.event.inputs.version }}`, + sha: context.sha + }) release: name: Goreleaser + needs: + - push-flytectl-tag runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/flyteidl-release.yml b/.github/workflows/flyteidl-release.yml index 94c13645b2..2b19f7942d 100644 --- a/.github/workflows/flyteidl-release.yml +++ b/.github/workflows/flyteidl-release.yml @@ -1,12 +1,33 @@ -name: Upload flyteidl to PyPI and npm +name: Release flyteidl on: - push: - tags: - - flyteidl/v*.*.* + workflow_dispatch: + inputs: + version: + description: "version. Do *not* use the `flyteidl/` prefix, e.g. 
`flyteidl/v1.2.3`, instead use only `v1.2.3` (including the `v`)" + required: true jobs: + push-flyteidl-tag: + name: Push git tag containing the `flyteidl/` prefix + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: '0' + - uses: actions/github-script@v6 + with: + github-token: ${{ secrets.FLYTE_BOT_PAT }} + script: | + github.rest.git.createRef({ + owner: context.repo.owner, + repo: context.repo.repo, + ref: `refs/tags/flyteidl/${{ github.event.inputs.version }}`, + sha: context.sha + }) deploy-to-pypi: + needs: + - push-flyteidl-tag runs-on: ubuntu-latest defaults: run: @@ -29,6 +50,8 @@ jobs: python -m build twine upload dist/* deploy-to-npm: + needs: + - push-flyteidl-tag runs-on: ubuntu-latest defaults: run: diff --git a/flytectl/RELEASE.md b/flytectl/RELEASE.md index 646f7465a6..414aa24199 100644 --- a/flytectl/RELEASE.md +++ b/flytectl/RELEASE.md @@ -2,4 +2,4 @@ Flytectl releases map to git tags with the prefix `flytectl/` followed by a semver string, e.g. [flytectl/v0.9.0](https://github.com/flyteorg/flyte/releases/tag/flytectl%2Fv0.9.0). -To release a new version of flytectl push a new git tag in the format described above. This will kick off a <[github workflow](https://github.com/flyteorg/flyte/blob/master/.github/workflows/flytectl-release.yml) responsible for releasing this new version. Note how the git tag has to be formatted a certain way for the workflow to run. +To release a new version of flytectl run the <[github workflow](https://github.com/flyteorg/flyte/blob/master/.github/workflows/flytectl-release.yml), which is responsible for releasing this new version. Remember to use valid semver versions, including adding the prefix `v`, e.g. `v1.2.3`. diff --git a/flyteidl/RELEASE.md b/flyteidl/RELEASE.md new file mode 100644 index 0000000000..eaaa4d51f6 --- /dev/null +++ b/flyteidl/RELEASE.md @@ -0,0 +1,4 @@ +# Release Process + +To release a new version of flyteidl run the <[github workflow](https://github.com/flyteorg/flyte/blob/master/.github/workflows/flyteidl-release.yml), which is responsible for releasing this new version. Remember to use valid semver versions, including adding the prefix `v`, e.g. `v1.2.3`. 
+ From ee724b14ad027abea5a264a388bb4cead313a5f7 Mon Sep 17 00:00:00 2001 From: Christina <156356273+cratiu222@users.noreply.github.com> Date: Thu, 8 Aug 2024 22:42:44 +0300 Subject: [PATCH 14/65] docs: fix typo (#5643) * fix CHANGELOG-v0.2.0.md Signed-off-by: Christina <156356273+cratiu222@users.noreply.github.com> * fix CHANGELOG-v1.0.2-b1.md Signed-off-by: Christina <156356273+cratiu222@users.noreply.github.com> * fix CHANGELOG-v1.1.0.md Signed-off-by: Christina <156356273+cratiu222@users.noreply.github.com> * fix CHANGELOG-v1.3.0.md Signed-off-by: Christina <156356273+cratiu222@users.noreply.github.com> --------- Signed-off-by: Christina <156356273+cratiu222@users.noreply.github.com> Signed-off-by: Bugra Gedik --- CHANGELOG/CHANGELOG-v0.2.0.md | 2 +- CHANGELOG/CHANGELOG-v1.0.2-b1.md | 2 +- CHANGELOG/CHANGELOG-v1.1.0.md | 2 +- CHANGELOG/CHANGELOG-v1.3.0.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CHANGELOG/CHANGELOG-v0.2.0.md b/CHANGELOG/CHANGELOG-v0.2.0.md index 4c16bb0742..d3c85ff0fb 100644 --- a/CHANGELOG/CHANGELOG-v0.2.0.md +++ b/CHANGELOG/CHANGELOG-v0.2.0.md @@ -16,7 +16,7 @@ - RawOutputDirectories created in FlytePropeller - Improve visibility and observability - User/System error differentiation -- Optional interruptible tasks (lets use spot instances, reduce cost) +- Optional interruptible tasks (let's use spot instances, to reduce cost) - Caps on queue time for workflows - Multi cluster improvements - Visibility into execution cluster for the execution diff --git a/CHANGELOG/CHANGELOG-v1.0.2-b1.md b/CHANGELOG/CHANGELOG-v1.0.2-b1.md index 3ade2ab25e..ef216a0c25 100644 --- a/CHANGELOG/CHANGELOG-v1.0.2-b1.md +++ b/CHANGELOG/CHANGELOG-v1.0.2-b1.md @@ -5,7 +5,7 @@ 1. [Bugfix](https://github.com/flyteorg/flyte/issues/2444) With GRPC v1.46.0 non-ascii chars are not permitted in grpc metadata 1. [Housekeeping](https://github.com/flyteorg/flyte/issues/1698) Configure grpc_health_prob in admin 1. [Feature](https://github.com/flyteorg/flyte/issues/2329) In Flytectl use Launchplan with latest version for scheduled workflows -1. [Bugfix](https://github.com/flyteorg/flyte/issues/2262) Pods started before InjectFinalizer is disabled are never deleted +1. [Bugfix](https://github.com/flyteorg/flyte/issues/2262) Pods started before InjectFinalizer was disabled are never deleted 1. [Housekeeping](https://github.com/flyteorg/flyte/issues/2504) Checksum grpc_health_probe 1. [Feature](https://github.com/flyteorg/flyte/issues/2284) Allow to choose Spot Instances at workflow start time 1. [Feature](https://github.com/flyteorg/flyte/pull/2439) Use the same pod annotation formatting in syncresources cronjob diff --git a/CHANGELOG/CHANGELOG-v1.1.0.md b/CHANGELOG/CHANGELOG-v1.1.0.md index 9236270965..1cbad29584 100644 --- a/CHANGELOG/CHANGELOG-v1.1.0.md +++ b/CHANGELOG/CHANGELOG-v1.1.0.md @@ -17,7 +17,7 @@ Support for [Optional types](https://github.com/flyteorg/flyte/issues/2426). Wit ### Bug Fixes * [Propeller](https://github.com/flyteorg/flyte/issues/2298) calling finalize rather than abort -* [Propeller](https://github.com/flyteorg/flyte/issues/2404) correctly identify error when requesting a launch plan that does not exist. +* [Propeller](https://github.com/flyteorg/flyte/issues/2404) correctly identifies an error when requesting a launch plan that does not exist. * Better handle [execution CRDs](https://github.com/flyteorg/flyte/issues/2275) that don't exist in Admin. * [Fix panic](https://github.com/flyteorg/flyte/issues/2597) when creating additional label options. 
* Check [validity](https://github.com/flyteorg/flyte/issues/2601) of notifications. diff --git a/CHANGELOG/CHANGELOG-v1.3.0.md b/CHANGELOG/CHANGELOG-v1.3.0.md index c15224d4a6..0591f676ef 100644 --- a/CHANGELOG/CHANGELOG-v1.3.0.md +++ b/CHANGELOG/CHANGELOG-v1.3.0.md @@ -7,7 +7,7 @@ The main features of this 1.3 release are * Signaling/gate node support (human in the loop tasks) * User documentation support (backend and flytekit only, limited types) -The latter two are pending some work in Flyte console, they will be piped through fully by the end of Q1. Support for setting and approving gate nodes is supported in `FlyteRemote` however, though only a limited set of types can be passed in. +The latter two are pending some work in Flyte console, they will be piped through fully by the end of Q1. Support for setting and approving gate nodes is supported in `FlyteRemote` however, only a limited set of types can be passed in. ## Notes There are a couple things to point out with this release. From a9beb65c340088c2a6be92c6885bc2387470cbf4 Mon Sep 17 00:00:00 2001 From: "Thomas J. Fan" Date: Thu, 8 Aug 2024 15:53:22 -0400 Subject: [PATCH 15/65] Use enable_deck=True in docs (#5645) Signed-off-by: Bugra Gedik --- docs/core_use_cases/analytics.md | 2 +- docs/core_use_cases/machine_learning.md | 2 +- .../visualizing_task_input_and_output.md | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/core_use_cases/analytics.md b/docs/core_use_cases/analytics.md index 886b75618d..71b5530c03 100644 --- a/docs/core_use_cases/analytics.md +++ b/docs/core_use_cases/analytics.md @@ -48,7 +48,7 @@ of the map. In this case, we normalize the `people_vaccinated` by the `population` count of each country: ```{code-cell} ipython3 -@task(disable_deck=False) +@task(enable_deck=True) def plot(df: pd.DataFrame): """Render a Choropleth map.""" df["text"] = df["location"] + "
" + "Last updated on: " + df["date"] diff --git a/docs/core_use_cases/machine_learning.md b/docs/core_use_cases/machine_learning.md index 489b8b05f9..6368b0aa54 100644 --- a/docs/core_use_cases/machine_learning.md +++ b/docs/core_use_cases/machine_learning.md @@ -112,7 +112,7 @@ There are many ways to extend your workloads: {ref}`Kubeflow Pytorch` and {doc}`more <_tags/DistributedComputing>` to do distributed training. * - **🔎 Experiment Tracking** - Auto-capture training logs with the {py:func}`~flytekitplugins.mlflow.mlflow_autolog` - decorator, which can be viewed as Flyte Decks with `@task(disable_decks=False)`. + decorator, which can be viewed as Flyte Decks with `@task(enable_deck=True)`. * - **⏩ Inference Acceleration** - Serialize your models in ONNX format using the {ref}`ONNX plugin `, which supports ScikitLearn, TensorFlow, and PyTorch. diff --git a/docs/flyte_fundamentals/visualizing_task_input_and_output.md b/docs/flyte_fundamentals/visualizing_task_input_and_output.md index 487d1627c9..0390d6cf44 100644 --- a/docs/flyte_fundamentals/visualizing_task_input_and_output.md +++ b/docs/flyte_fundamentals/visualizing_task_input_and_output.md @@ -22,14 +22,14 @@ how to generate an HTML report from some Python object. ## Enabling Flyte decks -To enable Flyte decks, simply set `disable_deck=False` in the `@task` decorator: +To enable Flyte decks, simply set `enable_deck=True` in the `@task` decorator: ```{code-cell} ipython3 import pandas as pd from flytekit import task, workflow -@task(disable_deck=False) +@task(enable_deck=True) def iris_data() -> pd.DataFrame: ... ``` @@ -51,7 +51,7 @@ from typing import Optional from flytekit import task, workflow -@task(disable_deck=False) +@task(enable_deck=True) def iris_data( sample_frac: Optional[float] = None, random_state: Optional[int] = None, @@ -168,7 +168,7 @@ function. 
In the following example, we extend the `iris_data` task with: import flytekit from flytekitplugins.deck.renderer import MarkdownRenderer, BoxRenderer -@task(disable_deck=False) +@task(enable_deck=True) def iris_data( sample_frac: Optional[float] = None, random_state: Optional[int] = None, @@ -220,7 +220,7 @@ except ImportError: from typing_extensions import Annotated -@task(disable_deck=False) +@task(enable_deck=True) def iris_data( sample_frac: Optional[float] = None, random_state: Optional[int] = None, From 392632d30a6c65ea3309ec412d85bc5cd90072ba Mon Sep 17 00:00:00 2001 From: Eduardo Apolinario <653394+eapolinario@users.noreply.github.com> Date: Thu, 8 Aug 2024 14:44:11 -0700 Subject: [PATCH 16/65] Fix flyteidl release checkout all tags (#5646) * Fetch all tags in flyteidl-release.yml Signed-off-by: Eduardo Apolinario * Fix sed expression for npm job Signed-off-by: Eduardo Apolinario --------- Signed-off-by: Eduardo Apolinario Co-authored-by: Eduardo Apolinario Signed-off-by: Bugra Gedik --- .github/workflows/flyteidl-release.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/flyteidl-release.yml b/.github/workflows/flyteidl-release.yml index 2b19f7942d..c895beba4b 100644 --- a/.github/workflows/flyteidl-release.yml +++ b/.github/workflows/flyteidl-release.yml @@ -34,6 +34,8 @@ jobs: working-directory: flyteidl steps: - uses: actions/checkout@v4 + with: + fetch-depth: '0' - name: Set up Python uses: actions/setup-python@v1 with: @@ -64,8 +66,8 @@ jobs: registry-url: "https://registry.npmjs.org" - name: Set version in npm package run: | - # from refs/tags/v1.2.3 get 1.2.3 - VERSION=$(echo $GITHUB_REF | sed 's#.*/v##') + # v1.2.3 get 1.2.3 + VERSION=$(echo ${{ inputs.version }} | sed 's#.*v##') VERSION=$VERSION make update_npmversion shell: bash - run: | From ee4783e81fbff669b1419645b7ab20ce08e9fa02 Mon Sep 17 00:00:00 2001 From: Eduardo Apolinario <653394+eapolinario@users.noreply.github.com> Date: Thu, 8 Aug 2024 15:57:30 -0700 Subject: [PATCH 17/65] Install pyarrow in sandbox functional tests (#5647) Signed-off-by: Eduardo Apolinario Co-authored-by: Eduardo Apolinario Signed-off-by: Bugra Gedik --- .github/workflows/single-binary.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/single-binary.yml b/.github/workflows/single-binary.yml index 23d438b322..d4cb79f4d5 100644 --- a/.github/workflows/single-binary.yml +++ b/.github/workflows/single-binary.yml @@ -175,7 +175,7 @@ jobs: run: | python -m pip install --upgrade pip pip install uv - uv pip install --system flytekit flytekitplugins-deck-standard flytekitplugins-envd "numpy<2.0.0" + uv pip install --system flytekit flytekitplugins-deck-standard flytekitplugins-envd "numpy<2.0.0" pyarrow uv pip freeze - name: Checkout flytesnacks uses: actions/checkout@v4 From b21d674641cce0c44395b6a22bbda0a9003c0e6d Mon Sep 17 00:00:00 2001 From: desihsu <43691987+desihsu@users.noreply.github.com> Date: Fri, 9 Aug 2024 15:19:30 -0700 Subject: [PATCH 18/65] docs: add documentation for configuring notifications in GCP (#5545) * update Signed-off-by: Desi Hsu * dco Signed-off-by: Desi Hsu * dco Signed-off-by: Desi Hsu * typo Signed-off-by: Desi Hsu --------- Signed-off-by: Desi Hsu Signed-off-by: Bugra Gedik --- .../configuration/notifications.rst | 81 +++++++++++++++++-- 1 file changed, 73 insertions(+), 8 deletions(-) diff --git a/docs/deployment/configuration/notifications.rst b/docs/deployment/configuration/notifications.rst index 2e4a77ac53..fecad482fe 100644 --- 
a/docs/deployment/configuration/notifications.rst +++ b/docs/deployment/configuration/notifications.rst @@ -1,7 +1,8 @@ .. _deployment-configuration-notifications: +############# Notifications -------------- +############# .. tags:: Infrastructure, Advanced @@ -62,10 +63,10 @@ The ``notifications`` top-level portion of the FlyteAdmin config specifies how t As with schedules, the notifications handling is composed of two parts. One handles enqueuing notifications asynchronously and the second part handles processing pending notifications and actually firing off emails and alerts. -This is only supported for Flyte instances running on AWS. +This is only supported for Flyte instances running on AWS or GCP. -Config -======= +AWS Config +========== To publish notifications, you'll need to set up an `SNS topic `_. @@ -80,9 +81,7 @@ Let's look at the following config section and explain what each value represent .. code-block:: yaml notifications: - # Because AWS is the only cloud back-end supported for executing scheduled - # workflows in this case, only ``"aws"`` is a valid value. By default, the - #no-op executor is used. + # By default, the no-op executor is used. type: "aws" # This specifies which region AWS clients will use when creating SNS and SQS clients. @@ -126,10 +125,76 @@ into `code `__. .. rli:: https://raw.githubusercontent.com/flyteorg/flyteadmin/master/flyteadmin_config.yaml :caption: flyteadmin/flyteadmin_config.yaml :lines: 91-105 + +GCP Config +========== + +You'll need to set up a `Pub/Sub topic `__ to publish notifications to, +and a `Pub/Sub subscriber `__ to consume from that topic +and process notifications. The GCP service account used by FlyteAdmin must also have Pub/Sub publish and subscribe permissions. + +Email service +------------- + +In order to actually publish notifications, you'll need an account with an external email service which will be +used to send notification emails and alerts using email APIs. + +Currently, `SendGrid `__ is the only supported external email service, +and you will need to have a verified SendGrid sender. Create a SendGrid API key with ``Mail Send`` permissions +and save it to a file ``key``. + +Create a K8s secret in FlyteAdmin's cluster with that file: + +.. prompt:: bash $ + + kubectl create secret generic -n flyte --from-file key sendgrid-key + +Mount the secret by adding the following to the ``flyte-core`` values YAML: + +.. code-block:: yaml + + flyteadmin: + additionalVolumes: + - name: sendgrid-key + secret: + secretName: sendgrid-key + items: + - key: key + path: key + additionalVolumeMounts: + - name: sendgrid-key + mountPath: /sendgrid + +Config +------ + +In the ``flyte-core`` values YAML, the top-level ``notifications`` config should be +placed under ``workflow_notifications``. + +.. code-block:: yaml + + workflow_notifications: + enabled: true + config: + notifications: + type: gcp + gcp: + projectId: "{{ YOUR PROJECT ID }}" + publisher: + topicName: "{{ YOUR PUB/SUB TOPIC NAME }}" + processor: + queueName: "{{ YOUR PUB/SUB SUBSCRIBER NAME }}" + emailer: + emailServerConfig: + serviceName: sendgrid + apiKeyFilePath: /sendgrid/key + subject: "Flyte execution \"{{ name }}\" has {{ phase }} in \"{{ project }}\"." 
+ sender: "{{ YOUR SENDGRID SENDER EMAIL }}" + body: View details at https://{{ YOUR FLYTE HOST }}/console/projects/{{ project }}/domains/{{ domain }}/executions/{{ name }} From aff319b223c21b160e4e6a72ac46b93dd5512098 Mon Sep 17 00:00:00 2001 From: ShengYu Date: Mon, 12 Aug 2024 14:12:02 +0800 Subject: [PATCH 19/65] Correct "sucessfile" to "successfile" (#5652) Signed-off-by: Bugra Gedik --- flytecopilot/cmd/sidecar_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flytecopilot/cmd/sidecar_test.go b/flytecopilot/cmd/sidecar_test.go index 6d261e2c48..a7cc1c964a 100644 --- a/flytecopilot/cmd/sidecar_test.go +++ b/flytecopilot/cmd/sidecar_test.go @@ -90,7 +90,7 @@ func TestUploadOptions_Upload(t *testing.T) { assert.NoError(t, ioutil.WriteFile(success, []byte("done"), os.ModePerm)) ok, err := containerwatcher.FileExists(success) assert.NoError(t, err) - assert.True(t, ok, "sucessfile not created") + assert.True(t, ok, "successfile not created") assert.NoError(t, uopts.Sidecar(ctx)) v, err := store.Head(ctx, "/output/errors.pb") assert.NoError(t, err) From 492952230f3ffcdaee4889ec8db84e92c7f29152 Mon Sep 17 00:00:00 2001 From: Katrina Rogan Date: Mon, 12 Aug 2024 18:10:16 +0200 Subject: [PATCH 20/65] Fix ordering for custom template values in cluster resource controller (#5648) Signed-off-by: Katrina Rogan Signed-off-by: Bugra Gedik --- flyteadmin/pkg/clusterresource/controller.go | 5 ++- .../pkg/clusterresource/controller_test.go | 32 +++++++++++++++++++ .../imagepullsecrets_templatized.yaml | 7 ++++ 3 files changed, 41 insertions(+), 3 deletions(-) create mode 100644 flyteadmin/pkg/clusterresource/testdata/imagepullsecrets_templatized.yaml diff --git a/flyteadmin/pkg/clusterresource/controller.go b/flyteadmin/pkg/clusterresource/controller.go index daad2600e8..6ea1731909 100644 --- a/flyteadmin/pkg/clusterresource/controller.go +++ b/flyteadmin/pkg/clusterresource/controller.go @@ -485,14 +485,13 @@ func (c *controller) createResourceFromTemplate(ctx context.Context, templateDir templateValues[fmt.Sprintf(templateVariableFormat, domainVariable)] = domain.Id var k8sManifest = string(template) - for templateKey, templateValue := range templateValues { + for templateKey, templateValue := range customTemplateValues { k8sManifest = strings.Replace(k8sManifest, templateKey, templateValue, replaceAllInstancesOfString) } // Replace remaining template variables from domain specific defaults. 
- for templateKey, templateValue := range customTemplateValues { + for templateKey, templateValue := range templateValues { k8sManifest = strings.Replace(k8sManifest, templateKey, templateValue, replaceAllInstancesOfString) } - return k8sManifest, nil } diff --git a/flyteadmin/pkg/clusterresource/controller_test.go b/flyteadmin/pkg/clusterresource/controller_test.go index dc3239cdc2..f6a966d6ef 100644 --- a/flyteadmin/pkg/clusterresource/controller_test.go +++ b/flyteadmin/pkg/clusterresource/controller_test.go @@ -293,6 +293,38 @@ kind: IAMServiceAccount metadata: name: my-project-dev-gsa namespace: my-project-dev +`, + wantErr: false, + }, + { + name: "test create resource from templatized imagepullsecrets.yaml", + args: args{ + ctx: context.Background(), + templateDir: "testdata", + templateFileName: "imagepullsecrets_templatized.yaml", + project: &admin.Project{ + Name: "my-project", + Id: "my-project", + }, + domain: &admin.Domain{ + Id: "dev", + Name: "dev", + }, + namespace: "my-project-dev", + templateValues: templateValuesType{ + "{{ imagePullSecretsName }}": "default", + }, + customTemplateValues: templateValuesType{ + "{{ imagePullSecretsName }}": "custom", + }, + }, + wantK8sManifest: `apiVersion: v1 +kind: ServiceAccount +metadata: + name: default + namespace: my-project-dev +imagePullSecrets: + - name: custom `, wantErr: false, }, diff --git a/flyteadmin/pkg/clusterresource/testdata/imagepullsecrets_templatized.yaml b/flyteadmin/pkg/clusterresource/testdata/imagepullsecrets_templatized.yaml new file mode 100644 index 0000000000..5c9d267382 --- /dev/null +++ b/flyteadmin/pkg/clusterresource/testdata/imagepullsecrets_templatized.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: default + namespace: {{ namespace }} +imagePullSecrets: + - name: {{ imagePullSecretsName }} From 5772d1f878506ecc2442801250c4fef81f2680bc Mon Sep 17 00:00:00 2001 From: Katrina Rogan Date: Mon, 12 Aug 2024 18:57:23 +0200 Subject: [PATCH 21/65] Don't error when attempting to trigger schedules for inactive projects (#5649) * Don't error when attempting to trigger schedules for inactive projects Signed-off-by: Katrina Rogan * regen Signed-off-by: Katrina Rogan --------- Signed-off-by: Katrina Rogan Signed-off-by: Bugra Gedik --- flyteadmin/pkg/errors/errors.go | 12 ++ flyteadmin/pkg/errors/errors_test.go | 12 ++ .../impl/validation/project_validator.go | 3 +- .../scheduler/executor/executor_impl.go | 19 +++ .../scheduler/executor/executor_impl_test.go | 12 ++ .../gen/pb-es/flyteidl/admin/project_pb.ts | 50 +++++++ .../gen/pb-go/flyteidl/admin/project.pb.go | 114 +++++++++++++--- flyteidl/gen/pb-js/flyteidl.d.ts | 58 ++++++++ flyteidl/gen/pb-js/flyteidl.js | 127 ++++++++++++++++++ .../pb_python/flyteidl/admin/project_pb2.py | 4 +- .../pb_python/flyteidl/admin/project_pb2.pyi | 8 ++ flyteidl/gen/pb_rust/flyteidl.admin.rs | 12 ++ flyteidl/protos/flyteidl/admin/project.proto | 12 ++ 13 files changed, 421 insertions(+), 22 deletions(-) diff --git a/flyteadmin/pkg/errors/errors.go b/flyteadmin/pkg/errors/errors.go index 51e5ede579..78727a7305 100644 --- a/flyteadmin/pkg/errors/errors.go +++ b/flyteadmin/pkg/errors/errors.go @@ -202,3 +202,15 @@ func IsDoesNotExistError(err error) bool { adminError, ok := err.(FlyteAdminError) return ok && adminError.Code() == codes.NotFound } + +func NewInactiveProjectError(ctx context.Context, id string) FlyteAdminError { + errMsg := fmt.Sprintf("project [%s] is not active", id) + statusErr, transformationErr := 
NewFlyteAdminError(codes.InvalidArgument, errMsg).WithDetails(&admin.InactiveProject{ + Id: id, + }) + if transformationErr != nil { + logger.Errorf(ctx, "failed to wrap grpc status in type 'Error': %v", transformationErr) + return NewFlyteAdminErrorf(codes.InvalidArgument, errMsg) + } + return statusErr +} diff --git a/flyteadmin/pkg/errors/errors_test.go b/flyteadmin/pkg/errors/errors_test.go index c126f96d6d..daaa060340 100644 --- a/flyteadmin/pkg/errors/errors_test.go +++ b/flyteadmin/pkg/errors/errors_test.go @@ -310,3 +310,15 @@ func TestIsNotDoesNotExistError(t *testing.T) { func TestIsNotDoesNotExistErrorBecauseOfNoneAdminError(t *testing.T) { assert.False(t, IsDoesNotExistError(errors.New("foo"))) } + +func TestNewInactiveProjectError(t *testing.T) { + err := NewInactiveProjectError(context.TODO(), identifier.GetProject()) + statusErr, ok := status.FromError(err) + + assert.True(t, ok) + + details, ok := statusErr.Details()[0].(*admin.InactiveProject) + + assert.True(t, ok) + assert.Equal(t, identifier.GetProject(), details.Id) +} diff --git a/flyteadmin/pkg/manager/impl/validation/project_validator.go b/flyteadmin/pkg/manager/impl/validation/project_validator.go index 8577c13e2b..8a76ce889d 100644 --- a/flyteadmin/pkg/manager/impl/validation/project_validator.go +++ b/flyteadmin/pkg/manager/impl/validation/project_validator.go @@ -71,8 +71,7 @@ func ValidateProjectAndDomain( projectID, domainID, err) } if *project.State != int32(admin.Project_ACTIVE) { - return errors.NewFlyteAdminErrorf(codes.InvalidArgument, - "project [%s] is not active", projectID) + return errors.NewInactiveProjectError(ctx, projectID) } var validDomain bool domains := config.GetDomainsConfig() diff --git a/flyteadmin/scheduler/executor/executor_impl.go b/flyteadmin/scheduler/executor/executor_impl.go index 30ab7f0677..dffb98e1b6 100644 --- a/flyteadmin/scheduler/executor/executor_impl.go +++ b/flyteadmin/scheduler/executor/executor_impl.go @@ -114,6 +114,10 @@ func (w *executor) Execute(ctx context.Context, scheduledTime time.Time, s model }, func() error { _, execErr := w.adminServiceClient.CreateExecution(context.Background(), executionRequest) + if isInactiveProjectError(execErr) { + logger.Debugf(ctx, "project %+v is inactive, ignoring schedule create failure for %+v", s.Project, s) + return nil + } return execErr }, ) @@ -144,3 +148,18 @@ func getExecutorMetrics(scope promutils.Scope) executorMetrics { "count of successful attempts to fire execution for a schedules"), } } + +func isInactiveProjectError(err error) bool { + statusErr, ok := status.FromError(err) + if !ok { + return false + } + if len(statusErr.Details()) > 0 { + for _, detail := range statusErr.Details() { + if _, ok := detail.(*admin.InactiveProject); ok { + return true + } + } + } + return false +} diff --git a/flyteadmin/scheduler/executor/executor_impl_test.go b/flyteadmin/scheduler/executor/executor_impl_test.go index e864d68d79..fc75367ca9 100644 --- a/flyteadmin/scheduler/executor/executor_impl_test.go +++ b/flyteadmin/scheduler/executor/executor_impl_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" "github.com/flyteorg/flyte/flyteadmin/pkg/errors" "github.com/flyteorg/flyte/flyteadmin/scheduler/repositories/models" @@ -98,3 +99,14 @@ func TestExecutorInactiveSchedule(t *testing.T) { err := executor.Execute(context.Background(), time.Now(), schedule) assert.Nil(t, err) } + +func TestIsInactiveProjectError(t 
*testing.T) { + statusErr := status.New(codes.InvalidArgument, "foo") + var transformationErr error + statusErr, transformationErr = statusErr.WithDetails(&admin.InactiveProject{ + Id: "project", + }) + assert.NoError(t, transformationErr) + + assert.True(t, isInactiveProjectError(statusErr.Err())) +} diff --git a/flyteidl/gen/pb-es/flyteidl/admin/project_pb.ts b/flyteidl/gen/pb-es/flyteidl/admin/project_pb.ts index 11f2726e08..a6fc913c03 100644 --- a/flyteidl/gen/pb-es/flyteidl/admin/project_pb.ts +++ b/flyteidl/gen/pb-es/flyteidl/admin/project_pb.ts @@ -540,3 +540,53 @@ export class ProjectGetRequest extends Message { } } +/** + * Error returned for inactive projects + * + * @generated from message flyteidl.admin.InactiveProject + */ +export class InactiveProject extends Message { + /** + * Indicates a unique project. + * +required + * + * @generated from field: string id = 1; + */ + id = ""; + + /** + * Optional, org key applied to the resource. + * + * @generated from field: string org = 2; + */ + org = ""; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "flyteidl.admin.InactiveProject"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "id", kind: "scalar", T: 9 /* ScalarType.STRING */ }, + { no: 2, name: "org", kind: "scalar", T: 9 /* ScalarType.STRING */ }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): InactiveProject { + return new InactiveProject().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): InactiveProject { + return new InactiveProject().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): InactiveProject { + return new InactiveProject().fromJsonString(jsonString, options); + } + + static equals(a: InactiveProject | PlainMessage | undefined, b: InactiveProject | PlainMessage | undefined): boolean { + return proto3.util.equals(InactiveProject, a, b); + } +} + diff --git a/flyteidl/gen/pb-go/flyteidl/admin/project.pb.go b/flyteidl/gen/pb-go/flyteidl/admin/project.pb.go index 243f46bf5d..d34451452b 100644 --- a/flyteidl/gen/pb-go/flyteidl/admin/project.pb.go +++ b/flyteidl/gen/pb-go/flyteidl/admin/project.pb.go @@ -661,6 +661,65 @@ func (x *ProjectGetRequest) GetOrg() string { return "" } +// Error returned for inactive projects +type InactiveProject struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Indicates a unique project. + // +required + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Optional, org key applied to the resource. 
+ Org string `protobuf:"bytes,2,opt,name=org,proto3" json:"org,omitempty"` +} + +func (x *InactiveProject) Reset() { + *x = InactiveProject{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl_admin_project_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InactiveProject) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InactiveProject) ProtoMessage() {} + +func (x *InactiveProject) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl_admin_project_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InactiveProject.ProtoReflect.Descriptor instead. +func (*InactiveProject) Descriptor() ([]byte, []int) { + return file_flyteidl_admin_project_proto_rawDescGZIP(), []int{10} +} + +func (x *InactiveProject) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *InactiveProject) GetOrg() string { + if x != nil { + return x.Org + } + return "" +} + var File_flyteidl_admin_project_proto protoreflect.FileDescriptor var file_flyteidl_admin_project_proto_rawDesc = []byte{ @@ -725,19 +784,23 @@ var file_flyteidl_admin_project_proto_rawDesc = []byte{ 0x73, 0x65, 0x22, 0x35, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6f, 0x72, 0x67, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6f, 0x72, 0x67, 0x42, 0xb8, 0x01, 0x0a, 0x12, 0x63, 0x6f, - 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x42, 0x0c, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, - 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x66, 0x6c, 0x79, 0x74, - 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x66, - 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xa2, 0x02, 0x03, - 0x46, 0x41, 0x58, 0xaa, 0x02, 0x0e, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x41, - 0x64, 0x6d, 0x69, 0x6e, 0xca, 0x02, 0x0e, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, - 0x41, 0x64, 0x6d, 0x69, 0x6e, 0xe2, 0x02, 0x1a, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, - 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0xea, 0x02, 0x0f, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x3a, 0x3a, 0x41, - 0x64, 0x6d, 0x69, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6f, 0x72, 0x67, 0x22, 0x33, 0x0a, 0x0f, 0x49, 0x6e, 0x61, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, + 0x6f, 0x72, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6f, 0x72, 0x67, 0x42, 0xb8, + 0x01, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x42, 0x0c, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, + 0x2d, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0xa2, 0x02, 0x03, 0x46, 0x41, 0x58, 0xaa, 0x02, 0x0e, 0x46, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0xca, 0x02, 0x0e, 0x46, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0xe2, 0x02, 0x1a, 0x46, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x47, 0x50, 0x42, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0f, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( @@ -753,7 +816,7 @@ func file_flyteidl_admin_project_proto_rawDescGZIP() []byte { } var file_flyteidl_admin_project_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_flyteidl_admin_project_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_flyteidl_admin_project_proto_msgTypes = make([]protoimpl.MessageInfo, 11) var file_flyteidl_admin_project_proto_goTypes = []interface{}{ (Project_ProjectState)(0), // 0: flyteidl.admin.Project.ProjectState (*GetDomainRequest)(nil), // 1: flyteidl.admin.GetDomainRequest @@ -766,16 +829,17 @@ var file_flyteidl_admin_project_proto_goTypes = []interface{}{ (*ProjectRegisterResponse)(nil), // 8: flyteidl.admin.ProjectRegisterResponse (*ProjectUpdateResponse)(nil), // 9: flyteidl.admin.ProjectUpdateResponse (*ProjectGetRequest)(nil), // 10: flyteidl.admin.ProjectGetRequest - (*Labels)(nil), // 11: flyteidl.admin.Labels - (*Sort)(nil), // 12: flyteidl.admin.Sort + (*InactiveProject)(nil), // 11: flyteidl.admin.InactiveProject + (*Labels)(nil), // 12: flyteidl.admin.Labels + (*Sort)(nil), // 13: flyteidl.admin.Sort } var file_flyteidl_admin_project_proto_depIdxs = []int32{ 2, // 0: flyteidl.admin.GetDomainsResponse.domains:type_name -> flyteidl.admin.Domain 2, // 1: flyteidl.admin.Project.domains:type_name -> flyteidl.admin.Domain - 11, // 2: flyteidl.admin.Project.labels:type_name -> flyteidl.admin.Labels + 12, // 2: flyteidl.admin.Project.labels:type_name -> flyteidl.admin.Labels 0, // 3: flyteidl.admin.Project.state:type_name -> flyteidl.admin.Project.ProjectState 4, // 4: flyteidl.admin.Projects.projects:type_name -> flyteidl.admin.Project - 12, // 5: flyteidl.admin.ProjectListRequest.sort_by:type_name -> flyteidl.admin.Sort + 13, // 5: flyteidl.admin.ProjectListRequest.sort_by:type_name -> flyteidl.admin.Sort 4, // 6: flyteidl.admin.ProjectRegisterRequest.project:type_name -> flyteidl.admin.Project 7, // [7:7] is the sub-list for method output_type 7, // [7:7] is the sub-list for method input_type @@ -911,6 +975,18 @@ func file_flyteidl_admin_project_proto_init() { return nil } } + file_flyteidl_admin_project_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InactiveProject); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -918,7 +994,7 @@ func file_flyteidl_admin_project_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_flyteidl_admin_project_proto_rawDesc, NumEnums: 1, - NumMessages: 10, + NumMessages: 11, 
NumExtensions: 0, NumServices: 0, }, diff --git a/flyteidl/gen/pb-js/flyteidl.d.ts b/flyteidl/gen/pb-js/flyteidl.d.ts index a1d9a34637..ada71f1f09 100644 --- a/flyteidl/gen/pb-js/flyteidl.d.ts +++ b/flyteidl/gen/pb-js/flyteidl.d.ts @@ -18184,6 +18184,64 @@ export namespace flyteidl { public static verify(message: { [k: string]: any }): (string|null); } + /** Properties of an InactiveProject. */ + interface IInactiveProject { + + /** InactiveProject id */ + id?: (string|null); + + /** InactiveProject org */ + org?: (string|null); + } + + /** Represents an InactiveProject. */ + class InactiveProject implements IInactiveProject { + + /** + * Constructs a new InactiveProject. + * @param [properties] Properties to set + */ + constructor(properties?: flyteidl.admin.IInactiveProject); + + /** InactiveProject id. */ + public id: string; + + /** InactiveProject org. */ + public org: string; + + /** + * Creates a new InactiveProject instance using the specified properties. + * @param [properties] Properties to set + * @returns InactiveProject instance + */ + public static create(properties?: flyteidl.admin.IInactiveProject): flyteidl.admin.InactiveProject; + + /** + * Encodes the specified InactiveProject message. Does not implicitly {@link flyteidl.admin.InactiveProject.verify|verify} messages. + * @param message InactiveProject message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: flyteidl.admin.IInactiveProject, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an InactiveProject message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns InactiveProject + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): flyteidl.admin.InactiveProject; + + /** + * Verifies an InactiveProject message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + } + /** Properties of a ProjectAttributes. */ interface IProjectAttributes { diff --git a/flyteidl/gen/pb-js/flyteidl.js b/flyteidl/gen/pb-js/flyteidl.js index 3402b1bdbb..8f446b4aba 100644 --- a/flyteidl/gen/pb-js/flyteidl.js +++ b/flyteidl/gen/pb-js/flyteidl.js @@ -43952,6 +43952,133 @@ return ProjectGetRequest; })(); + admin.InactiveProject = (function() { + + /** + * Properties of an InactiveProject. + * @memberof flyteidl.admin + * @interface IInactiveProject + * @property {string|null} [id] InactiveProject id + * @property {string|null} [org] InactiveProject org + */ + + /** + * Constructs a new InactiveProject. + * @memberof flyteidl.admin + * @classdesc Represents an InactiveProject. + * @implements IInactiveProject + * @constructor + * @param {flyteidl.admin.IInactiveProject=} [properties] Properties to set + */ + function InactiveProject(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * InactiveProject id. + * @member {string} id + * @memberof flyteidl.admin.InactiveProject + * @instance + */ + InactiveProject.prototype.id = ""; + + /** + * InactiveProject org. 
+ * @member {string} org + * @memberof flyteidl.admin.InactiveProject + * @instance + */ + InactiveProject.prototype.org = ""; + + /** + * Creates a new InactiveProject instance using the specified properties. + * @function create + * @memberof flyteidl.admin.InactiveProject + * @static + * @param {flyteidl.admin.IInactiveProject=} [properties] Properties to set + * @returns {flyteidl.admin.InactiveProject} InactiveProject instance + */ + InactiveProject.create = function create(properties) { + return new InactiveProject(properties); + }; + + /** + * Encodes the specified InactiveProject message. Does not implicitly {@link flyteidl.admin.InactiveProject.verify|verify} messages. + * @function encode + * @memberof flyteidl.admin.InactiveProject + * @static + * @param {flyteidl.admin.IInactiveProject} message InactiveProject message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + InactiveProject.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.id != null && message.hasOwnProperty("id")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.id); + if (message.org != null && message.hasOwnProperty("org")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.org); + return writer; + }; + + /** + * Decodes an InactiveProject message from the specified reader or buffer. + * @function decode + * @memberof flyteidl.admin.InactiveProject + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {flyteidl.admin.InactiveProject} InactiveProject + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + InactiveProject.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.flyteidl.admin.InactiveProject(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.org = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Verifies an InactiveProject message. 
+ * @function verify + * @memberof flyteidl.admin.InactiveProject + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + InactiveProject.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.id != null && message.hasOwnProperty("id")) + if (!$util.isString(message.id)) + return "id: string expected"; + if (message.org != null && message.hasOwnProperty("org")) + if (!$util.isString(message.org)) + return "org: string expected"; + return null; + }; + + return InactiveProject; + })(); + admin.ProjectAttributes = (function() { /** diff --git a/flyteidl/gen/pb_python/flyteidl/admin/project_pb2.py b/flyteidl/gen/pb_python/flyteidl/admin/project_pb2.py index 885ef84716..c04fdb67e1 100644 --- a/flyteidl/gen/pb_python/flyteidl/admin/project_pb2.py +++ b/flyteidl/gen/pb_python/flyteidl/admin/project_pb2.py @@ -14,7 +14,7 @@ from flyteidl.admin import common_pb2 as flyteidl_dot_admin_dot_common__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1c\x66lyteidl/admin/project.proto\x12\x0e\x66lyteidl.admin\x1a\x1b\x66lyteidl/admin/common.proto\"\x12\n\x10GetDomainRequest\",\n\x06\x44omain\x12\x0e\n\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n\x04name\x18\x02 \x01(\tR\x04name\"F\n\x12GetDomainsResponse\x12\x30\n\x07\x64omains\x18\x01 \x03(\x0b\x32\x16.flyteidl.admin.DomainR\x07\x64omains\"\xd4\x02\n\x07Project\x12\x0e\n\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n\x04name\x18\x02 \x01(\tR\x04name\x12\x30\n\x07\x64omains\x18\x03 \x03(\x0b\x32\x16.flyteidl.admin.DomainR\x07\x64omains\x12 \n\x0b\x64\x65scription\x18\x04 \x01(\tR\x0b\x64\x65scription\x12.\n\x06labels\x18\x05 \x01(\x0b\x32\x16.flyteidl.admin.LabelsR\x06labels\x12:\n\x05state\x18\x06 \x01(\x0e\x32$.flyteidl.admin.Project.ProjectStateR\x05state\x12\x10\n\x03org\x18\x07 \x01(\tR\x03org\"S\n\x0cProjectState\x12\n\n\x06\x41\x43TIVE\x10\x00\x12\x0c\n\x08\x41RCHIVED\x10\x01\x12\x14\n\x10SYSTEM_GENERATED\x10\x02\x12\x13\n\x0fSYSTEM_ARCHIVED\x10\x03\"U\n\x08Projects\x12\x33\n\x08projects\x18\x01 \x03(\x0b\x32\x17.flyteidl.admin.ProjectR\x08projects\x12\x14\n\x05token\x18\x02 \x01(\tR\x05token\"\x9b\x01\n\x12ProjectListRequest\x12\x14\n\x05limit\x18\x01 \x01(\rR\x05limit\x12\x14\n\x05token\x18\x02 \x01(\tR\x05token\x12\x18\n\x07\x66ilters\x18\x03 \x01(\tR\x07\x66ilters\x12-\n\x07sort_by\x18\x04 \x01(\x0b\x32\x14.flyteidl.admin.SortR\x06sortBy\x12\x10\n\x03org\x18\x05 \x01(\tR\x03org\"K\n\x16ProjectRegisterRequest\x12\x31\n\x07project\x18\x01 \x01(\x0b\x32\x17.flyteidl.admin.ProjectR\x07project\"\x19\n\x17ProjectRegisterResponse\"\x17\n\x15ProjectUpdateResponse\"5\n\x11ProjectGetRequest\x12\x0e\n\x02id\x18\x01 \x01(\tR\x02id\x12\x10\n\x03org\x18\x02 \x01(\tR\x03orgB\xb8\x01\n\x12\x63om.flyteidl.adminB\x0cProjectProtoP\x01Z;github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin\xa2\x02\x03\x46\x41X\xaa\x02\x0e\x46lyteidl.Admin\xca\x02\x0e\x46lyteidl\\Admin\xe2\x02\x1a\x46lyteidl\\Admin\\GPBMetadata\xea\x02\x0f\x46lyteidl::Adminb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1c\x66lyteidl/admin/project.proto\x12\x0e\x66lyteidl.admin\x1a\x1b\x66lyteidl/admin/common.proto\"\x12\n\x10GetDomainRequest\",\n\x06\x44omain\x12\x0e\n\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n\x04name\x18\x02 \x01(\tR\x04name\"F\n\x12GetDomainsResponse\x12\x30\n\x07\x64omains\x18\x01 
\x03(\x0b\x32\x16.flyteidl.admin.DomainR\x07\x64omains\"\xd4\x02\n\x07Project\x12\x0e\n\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n\x04name\x18\x02 \x01(\tR\x04name\x12\x30\n\x07\x64omains\x18\x03 \x03(\x0b\x32\x16.flyteidl.admin.DomainR\x07\x64omains\x12 \n\x0b\x64\x65scription\x18\x04 \x01(\tR\x0b\x64\x65scription\x12.\n\x06labels\x18\x05 \x01(\x0b\x32\x16.flyteidl.admin.LabelsR\x06labels\x12:\n\x05state\x18\x06 \x01(\x0e\x32$.flyteidl.admin.Project.ProjectStateR\x05state\x12\x10\n\x03org\x18\x07 \x01(\tR\x03org\"S\n\x0cProjectState\x12\n\n\x06\x41\x43TIVE\x10\x00\x12\x0c\n\x08\x41RCHIVED\x10\x01\x12\x14\n\x10SYSTEM_GENERATED\x10\x02\x12\x13\n\x0fSYSTEM_ARCHIVED\x10\x03\"U\n\x08Projects\x12\x33\n\x08projects\x18\x01 \x03(\x0b\x32\x17.flyteidl.admin.ProjectR\x08projects\x12\x14\n\x05token\x18\x02 \x01(\tR\x05token\"\x9b\x01\n\x12ProjectListRequest\x12\x14\n\x05limit\x18\x01 \x01(\rR\x05limit\x12\x14\n\x05token\x18\x02 \x01(\tR\x05token\x12\x18\n\x07\x66ilters\x18\x03 \x01(\tR\x07\x66ilters\x12-\n\x07sort_by\x18\x04 \x01(\x0b\x32\x14.flyteidl.admin.SortR\x06sortBy\x12\x10\n\x03org\x18\x05 \x01(\tR\x03org\"K\n\x16ProjectRegisterRequest\x12\x31\n\x07project\x18\x01 \x01(\x0b\x32\x17.flyteidl.admin.ProjectR\x07project\"\x19\n\x17ProjectRegisterResponse\"\x17\n\x15ProjectUpdateResponse\"5\n\x11ProjectGetRequest\x12\x0e\n\x02id\x18\x01 \x01(\tR\x02id\x12\x10\n\x03org\x18\x02 \x01(\tR\x03org\"3\n\x0fInactiveProject\x12\x0e\n\x02id\x18\x01 \x01(\tR\x02id\x12\x10\n\x03org\x18\x02 \x01(\tR\x03orgB\xb8\x01\n\x12\x63om.flyteidl.adminB\x0cProjectProtoP\x01Z;github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin\xa2\x02\x03\x46\x41X\xaa\x02\x0e\x46lyteidl.Admin\xca\x02\x0e\x46lyteidl\\Admin\xe2\x02\x1a\x46lyteidl\\Admin\\GPBMetadata\xea\x02\x0f\x46lyteidl::Adminb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -45,4 +45,6 @@ _globals['_PROJECTUPDATERESPONSE']._serialized_end=930 _globals['_PROJECTGETREQUEST']._serialized_start=932 _globals['_PROJECTGETREQUEST']._serialized_end=985 + _globals['_INACTIVEPROJECT']._serialized_start=987 + _globals['_INACTIVEPROJECT']._serialized_end=1038 # @@protoc_insertion_point(module_scope) diff --git a/flyteidl/gen/pb_python/flyteidl/admin/project_pb2.pyi b/flyteidl/gen/pb_python/flyteidl/admin/project_pb2.pyi index 12750a8959..c775c5aac8 100644 --- a/flyteidl/gen/pb_python/flyteidl/admin/project_pb2.pyi +++ b/flyteidl/gen/pb_python/flyteidl/admin/project_pb2.pyi @@ -96,3 +96,11 @@ class ProjectGetRequest(_message.Message): id: str org: str def __init__(self, id: _Optional[str] = ..., org: _Optional[str] = ...) -> None: ... + +class InactiveProject(_message.Message): + __slots__ = ["id", "org"] + ID_FIELD_NUMBER: _ClassVar[int] + ORG_FIELD_NUMBER: _ClassVar[int] + id: str + org: str + def __init__(self, id: _Optional[str] = ..., org: _Optional[str] = ...) -> None: ... diff --git a/flyteidl/gen/pb_rust/flyteidl.admin.rs b/flyteidl/gen/pb_rust/flyteidl.admin.rs index dcbf3b5df7..ca3270264b 100644 --- a/flyteidl/gen/pb_rust/flyteidl.admin.rs +++ b/flyteidl/gen/pb_rust/flyteidl.admin.rs @@ -2615,6 +2615,18 @@ pub struct ProjectGetRequest { #[prost(string, tag="2")] pub org: ::prost::alloc::string::String, } +/// Error returned for inactive projects +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct InactiveProject { + /// Indicates a unique project. 
+ /// +required + #[prost(string, tag="1")] + pub id: ::prost::alloc::string::String, + /// Optional, org key applied to the resource. + #[prost(string, tag="2")] + pub org: ::prost::alloc::string::String, +} /// Defines a set of custom matching attributes at the project level. /// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` #[allow(clippy::derive_partial_eq_without_eq)] diff --git a/flyteidl/protos/flyteidl/admin/project.proto b/flyteidl/protos/flyteidl/admin/project.proto index bbaccd70ff..8b994b7267 100644 --- a/flyteidl/protos/flyteidl/admin/project.proto +++ b/flyteidl/protos/flyteidl/admin/project.proto @@ -118,3 +118,15 @@ message ProjectGetRequest { // Optional, org key applied to the resource. string org = 2; } + + +// Error returned for inactive projects +message InactiveProject { + // Indicates a unique project. + // +required + string id = 1; + + // Optional, org key applied to the resource. + string org = 2; +} + From 2ddb4d28252d1f218a820b1d436e4a46effd3534 Mon Sep 17 00:00:00 2001 From: Bugra Gedik Date: Thu, 15 Aug 2024 19:40:26 +0000 Subject: [PATCH 22/65] fix tests Signed-off-by: Bugra Gedik --- flyteplugins/go/tasks/plugins/k8s/spark/spark_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flyteplugins/go/tasks/plugins/k8s/spark/spark_test.go b/flyteplugins/go/tasks/plugins/k8s/spark/spark_test.go index 7ea6c42be2..d657d4c273 100644 --- a/flyteplugins/go/tasks/plugins/k8s/spark/spark_test.go +++ b/flyteplugins/go/tasks/plugins/k8s/spark/spark_test.go @@ -853,7 +853,7 @@ func TestBuildResourcePodTemplate(t *testing.T) { assert.Equal(t, defaultConfig.DefaultEnvVars["foo"], findEnvVarByName(sparkApp.Spec.Driver.Env, "foo").Value) assert.Equal(t, defaultConfig.DefaultEnvVars["fooEnv"], findEnvVarByName(sparkApp.Spec.Driver.Env, "fooEnv").Value) assert.Equal(t, findEnvVarByName(dummyEnvVarsWithSecretRef, "SECRET"), findEnvVarByName(sparkApp.Spec.Driver.Env, "SECRET")) - assert.Equal(t, 9, len(sparkApp.Spec.Driver.Env)) + assert.Equal(t, 10, len(sparkApp.Spec.Driver.Env)) assert.Equal(t, testImage, *sparkApp.Spec.Driver.Image) assert.Equal(t, flytek8s.GetServiceAccountNameFromTaskExecutionMetadata(taskCtx.TaskExecutionMetadata()), *sparkApp.Spec.Driver.ServiceAccount) assert.Equal(t, defaultConfig.DefaultPodSecurityContext, sparkApp.Spec.Driver.SecurityContenxt) @@ -890,7 +890,7 @@ func TestBuildResourcePodTemplate(t *testing.T) { assert.Equal(t, defaultConfig.DefaultEnvVars["foo"], findEnvVarByName(sparkApp.Spec.Executor.Env, "foo").Value) assert.Equal(t, defaultConfig.DefaultEnvVars["fooEnv"], findEnvVarByName(sparkApp.Spec.Executor.Env, "fooEnv").Value) assert.Equal(t, findEnvVarByName(dummyEnvVarsWithSecretRef, "SECRET"), findEnvVarByName(sparkApp.Spec.Executor.Env, "SECRET")) - assert.Equal(t, 9, len(sparkApp.Spec.Executor.Env)) + assert.Equal(t, 10, len(sparkApp.Spec.Executor.Env)) assert.Equal(t, testImage, *sparkApp.Spec.Executor.Image) assert.Equal(t, defaultConfig.DefaultPodSecurityContext, sparkApp.Spec.Executor.SecurityContenxt) assert.Equal(t, defaultConfig.DefaultPodDNSConfig, sparkApp.Spec.Executor.DNSConfig) From 1b24ca2c73cc9932bdc37a5da150517759c3d17c Mon Sep 17 00:00:00 2001 From: Bugra Gedik Date: Tue, 20 Aug 2024 00:20:50 +0000 Subject: [PATCH 23/65] change to shorter names Signed-off-by: Bugra Gedik --- .../go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go index b0025fdddf..a280106ff4 100644 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go @@ -61,7 +61,7 @@ func GetExecutionEnvVars(id pluginsCore.TaskExecutionID, consoleURL string) []v1 Value: nodeExecutionID.Domain, }, { - Name: "FLYTE_INTERNAL_POD_NAME", + Name: "_F_PN", ValueFrom: &v1.EnvVarSource{ FieldRef: &v1.ObjectFieldSelector{ FieldPath: "metadata.name", From ede753656ca15adb26e6224dde437e978fcd3d53 Mon Sep 17 00:00:00 2001 From: Bugra Gedik Date: Tue, 20 Aug 2024 00:47:17 +0000 Subject: [PATCH 24/65] change to shorter names Signed-off-by: Bugra Gedik --- .../pluginmachinery/flytek8s/k8s_resource_adds.go | 1 + flytestdlib/storage/storage.go | 12 ++++++++++-- flytestdlib/storage/stow_store.go | 14 ++++++++++++++ 3 files changed, 25 insertions(+), 2 deletions(-) diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go index a280106ff4..5d145123de 100644 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go @@ -61,6 +61,7 @@ func GetExecutionEnvVars(id pluginsCore.TaskExecutionID, consoleURL string) []v1 Value: nodeExecutionID.Domain, }, { + # FLYTE_INTERNAL_POD_NAME Name: "_F_PN", ValueFrom: &v1.EnvVarSource{ FieldRef: &v1.ObjectFieldSelector{ diff --git a/flytestdlib/storage/storage.go b/flytestdlib/storage/storage.go index 3e84cb7acb..3706d97b8c 100644 --- a/flytestdlib/storage/storage.go +++ b/flytestdlib/storage/storage.go @@ -40,8 +40,13 @@ type Metadata interface { ContentMD5() string } -// DataStore is a simplified interface for accessing and storing data in one of the Cloud stores. -// Today we rely on Stow for multi-cloud support, but this interface abstracts that part +type Cursor interface { + IsStartCursor() bool + IsEndCursor() bool + MoveToStart() + MoveToEnd() +} + type DataStore struct { ComposedProtobufStore ReferenceConstructor @@ -78,6 +83,9 @@ type RawStore interface { // Head gets metadata about the reference. This should generally be a light weight operation. Head(ctx context.Context, reference DataReference) (Metadata, error) + // List gets a list of items given a prefix, using a paginated API + List(ctx context.Context, reference DataReference, maxItems int, cursor Cursor) ([]Metadata, Cursor, error) + // ReadRaw retrieves a byte array from the Blob store or an error ReadRaw(ctx context.Context, reference DataReference) (io.ReadCloser, error) diff --git a/flytestdlib/storage/stow_store.go b/flytestdlib/storage/stow_store.go index ce4a75a0a1..1507c24bd6 100644 --- a/flytestdlib/storage/stow_store.go +++ b/flytestdlib/storage/stow_store.go @@ -126,6 +126,16 @@ func (s StowMetadata) ContentMD5() string { return s.contentMD5 } +type StowCursor struct { + value string +} + +func (s StowCursor) IsStartCursor() bool + +IsEndCursor() bool +MoveToStart() +MoveToEnd() + // Implements DataStore to talk to stow location store. 
type StowStore struct { copyImpl @@ -251,6 +261,10 @@ func (s *StowStore) Head(ctx context.Context, reference DataReference) (Metadata return StowMetadata{exists: false}, errs.Wrapf(err, "path:%v", k) } +func (s *StowStore) List(ctx context.Context, reference DataReference, maxItems int, cursor Cursor) ([]Metadata, Cursor, error) { + // TODO +} + func (s *StowStore) ReadRaw(ctx context.Context, reference DataReference) (io.ReadCloser, error) { _, c, k, err := reference.Split() if err != nil { From 7a146490f3e2f122cc0cc2e1e8961c2dd43418d9 Mon Sep 17 00:00:00 2001 From: Bugra Gedik Date: Tue, 20 Aug 2024 05:21:04 +0000 Subject: [PATCH 25/65] change to shorter names Signed-off-by: Bugra Gedik --- flytestdlib/storage/storage.go | 10 ---------- flytestdlib/storage/stow_store.go | 10 ---------- 2 files changed, 20 deletions(-) diff --git a/flytestdlib/storage/storage.go b/flytestdlib/storage/storage.go index 3706d97b8c..f80dd49ab7 100644 --- a/flytestdlib/storage/storage.go +++ b/flytestdlib/storage/storage.go @@ -40,13 +40,6 @@ type Metadata interface { ContentMD5() string } -type Cursor interface { - IsStartCursor() bool - IsEndCursor() bool - MoveToStart() - MoveToEnd() -} - type DataStore struct { ComposedProtobufStore ReferenceConstructor @@ -83,9 +76,6 @@ type RawStore interface { // Head gets metadata about the reference. This should generally be a light weight operation. Head(ctx context.Context, reference DataReference) (Metadata, error) - // List gets a list of items given a prefix, using a paginated API - List(ctx context.Context, reference DataReference, maxItems int, cursor Cursor) ([]Metadata, Cursor, error) - // ReadRaw retrieves a byte array from the Blob store or an error ReadRaw(ctx context.Context, reference DataReference) (io.ReadCloser, error) diff --git a/flytestdlib/storage/stow_store.go b/flytestdlib/storage/stow_store.go index 1507c24bd6..219a566556 100644 --- a/flytestdlib/storage/stow_store.go +++ b/flytestdlib/storage/stow_store.go @@ -126,16 +126,6 @@ func (s StowMetadata) ContentMD5() string { return s.contentMD5 } -type StowCursor struct { - value string -} - -func (s StowCursor) IsStartCursor() bool - -IsEndCursor() bool -MoveToStart() -MoveToEnd() - // Implements DataStore to talk to stow location store. type StowStore struct { copyImpl From 783d75eae1968542baa2c00133d745177bc7863e Mon Sep 17 00:00:00 2001 From: Bugra Gedik Date: Tue, 20 Aug 2024 05:21:50 +0000 Subject: [PATCH 26/65] change to shorter names Signed-off-by: Bugra Gedik --- flytestdlib/storage/storage.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/flytestdlib/storage/storage.go b/flytestdlib/storage/storage.go index f80dd49ab7..3e84cb7acb 100644 --- a/flytestdlib/storage/storage.go +++ b/flytestdlib/storage/storage.go @@ -40,6 +40,8 @@ type Metadata interface { ContentMD5() string } +// DataStore is a simplified interface for accessing and storing data in one of the Cloud stores. 
+// Today we rely on Stow for multi-cloud support, but this interface abstracts that part type DataStore struct { ComposedProtobufStore ReferenceConstructor From c89950fe9fcb8ef791ef277ae92e02c99b4ac013 Mon Sep 17 00:00:00 2001 From: Bugra Gedik Date: Tue, 20 Aug 2024 05:22:16 +0000 Subject: [PATCH 27/65] change to shorter names Signed-off-by: Bugra Gedik --- flytestdlib/storage/stow_store.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/flytestdlib/storage/stow_store.go b/flytestdlib/storage/stow_store.go index 219a566556..ce4a75a0a1 100644 --- a/flytestdlib/storage/stow_store.go +++ b/flytestdlib/storage/stow_store.go @@ -251,10 +251,6 @@ func (s *StowStore) Head(ctx context.Context, reference DataReference) (Metadata return StowMetadata{exists: false}, errs.Wrapf(err, "path:%v", k) } -func (s *StowStore) List(ctx context.Context, reference DataReference, maxItems int, cursor Cursor) ([]Metadata, Cursor, error) { - // TODO -} - func (s *StowStore) ReadRaw(ctx context.Context, reference DataReference) (io.ReadCloser, error) { _, c, k, err := reference.Split() if err != nil { From 8215edc7f93e023ea7a8a5d37349b4247eb675ec Mon Sep 17 00:00:00 2001 From: Bugra Gedik Date: Tue, 20 Aug 2024 07:33:16 +0000 Subject: [PATCH 28/65] Add list operation to stow container Signed-off-by: Bugra Gedik --- flyteadmin/pkg/common/mocks/storage.go | 4 + flytepropeller/pkg/utils/failing_datastore.go | 4 + flytestdlib/storage/cached_rawstore_test.go | 4 + flytestdlib/storage/mem_store.go | 4 + flytestdlib/storage/storage.go | 37 +++++++ flytestdlib/storage/stow_store.go | 46 +++++++++ flytestdlib/storage/stow_store_test.go | 96 ++++++++++++++++++- 7 files changed, 193 insertions(+), 2 deletions(-) diff --git a/flyteadmin/pkg/common/mocks/storage.go b/flyteadmin/pkg/common/mocks/storage.go index 7e91bf0485..3af2507d1e 100644 --- a/flyteadmin/pkg/common/mocks/storage.go +++ b/flyteadmin/pkg/common/mocks/storage.go @@ -33,6 +33,10 @@ func (t *TestDataStore) Head(ctx context.Context, reference storage.DataReferenc return t.HeadCb(ctx, reference) } +func (s *TestDataStore) List(ctx context.Context, reference storage.DataReference, maxItems int, cursor storage.Cursor) ([]storage.DataReference, storage.Cursor, error) { + return nil, storage.NewCursorAtEnd(), fmt.Errorf("Not implemented yet") +} + func (t *TestDataStore) ReadProtobuf(ctx context.Context, reference storage.DataReference, msg proto.Message) error { return t.ReadProtobufCb(ctx, reference, msg) } diff --git a/flytepropeller/pkg/utils/failing_datastore.go b/flytepropeller/pkg/utils/failing_datastore.go index f3b65471c7..b5cf9ecdcb 100644 --- a/flytepropeller/pkg/utils/failing_datastore.go +++ b/flytepropeller/pkg/utils/failing_datastore.go @@ -27,6 +27,10 @@ func (FailingRawStore) Head(ctx context.Context, reference storage.DataReference return nil, fmt.Errorf("failed metadata fetch") } +func (s *FailingRawStore) List(ctx context.Context, reference storage.DataReference, maxItems int, cursor storage.Cursor) ([]storage.DataReference, storage.Cursor, error) { + return nil, storage.NewCursorAtEnd(), fmt.Errorf("Not implemented yet") +} + func (FailingRawStore) ReadRaw(ctx context.Context, reference storage.DataReference) (io.ReadCloser, error) { return nil, fmt.Errorf("failed read raw") } diff --git a/flytestdlib/storage/cached_rawstore_test.go b/flytestdlib/storage/cached_rawstore_test.go index b9751d7fa1..0e7facb0f6 100644 --- a/flytestdlib/storage/cached_rawstore_test.go +++ b/flytestdlib/storage/cached_rawstore_test.go @@ -73,6 +73,10 @@ func (d 
*dummyStore) Head(ctx context.Context, reference DataReference) (Metadat return d.HeadCb(ctx, reference) } +func (s *dummyStore) List(ctx context.Context, reference DataReference, maxItems int, cursor Cursor) ([]DataReference, Cursor, error) { + return nil, NewCursorAtEnd(), fmt.Errorf("Not implemented yet") +} + func (d *dummyStore) ReadRaw(ctx context.Context, reference DataReference) (io.ReadCloser, error) { return d.ReadRawCb(ctx, reference) } diff --git a/flytestdlib/storage/mem_store.go b/flytestdlib/storage/mem_store.go index a95a0a49ca..94083f6646 100644 --- a/flytestdlib/storage/mem_store.go +++ b/flytestdlib/storage/mem_store.go @@ -54,6 +54,10 @@ func (s *InMemoryStore) Head(ctx context.Context, reference DataReference) (Meta }, nil } +func (s *InMemoryStore) List(ctx context.Context, reference DataReference, maxItems int, cursor Cursor) ([]DataReference, Cursor, error) { + return nil, NewCursorAtEnd(), fmt.Errorf("Not implemented yet") +} + func (s *InMemoryStore) ReadRaw(ctx context.Context, reference DataReference) (io.ReadCloser, error) { if raw, found := s.cache[reference]; found { return ioutil.NopCloser(bytes.NewReader(raw)), nil diff --git a/flytestdlib/storage/storage.go b/flytestdlib/storage/storage.go index 3e84cb7acb..d138d2881f 100644 --- a/flytestdlib/storage/storage.go +++ b/flytestdlib/storage/storage.go @@ -40,6 +40,40 @@ type Metadata interface { ContentMD5() string } +type CursorPosition int + +const ( + StartCursorPos CursorPosition = 0 + EndCursorPos CursorPosition = 1 + CustomCursorPos CursorPosition = 2 +) + +type Cursor struct { + cursorPos CursorPosition + customState string +} + +func NewCursorFromCustomState(customState string) Cursor { + return Cursor{ + cursorPos: CustomCursorPos, + customState: customState, + } +} + +func NewCursorAtStart() Cursor { + return Cursor{ + cursorPos: StartCursorPos, + customState: "", + } +} + +func NewCursorAtEnd() Cursor { + return Cursor{ + cursorPos: EndCursorPos, + customState: "", + } +} + // DataStore is a simplified interface for accessing and storing data in one of the Cloud stores. // Today we rely on Stow for multi-cloud support, but this interface abstracts that part type DataStore struct { @@ -78,6 +112,9 @@ type RawStore interface { // Head gets metadata about the reference. This should generally be a light weight operation. 
Head(ctx context.Context, reference DataReference) (Metadata, error) + // List gets a list of items given a prefix, using a paginated API + List(ctx context.Context, reference DataReference, maxItems int, cursor Cursor) ([]DataReference, Cursor, error) + // ReadRaw retrieves a byte array from the Blob store or an error ReadRaw(ctx context.Context, reference DataReference) (io.ReadCloser, error) diff --git a/flytestdlib/storage/stow_store.go b/flytestdlib/storage/stow_store.go index ce4a75a0a1..8172018b4a 100644 --- a/flytestdlib/storage/stow_store.go +++ b/flytestdlib/storage/stow_store.go @@ -92,6 +92,9 @@ type stowMetrics struct { HeadFailure labeled.Counter HeadLatency labeled.StopWatch + ListFailure labeled.Counter + ListLatency labeled.StopWatch + ReadFailure labeled.Counter ReadOpenLatency labeled.StopWatch @@ -251,6 +254,46 @@ func (s *StowStore) Head(ctx context.Context, reference DataReference) (Metadata return StowMetadata{exists: false}, errs.Wrapf(err, "path:%v", k) } +func (s *StowStore) List(ctx context.Context, reference DataReference, maxItems int, cursor Cursor) ([]DataReference, Cursor, error) { + _, c, k, err := reference.Split() + if err != nil { + s.metrics.BadReference.Inc(ctx) + return nil, NewCursorAtEnd(), err + } + + container, err := s.getContainer(ctx, locationIDMain, c) + if err != nil { + return nil, NewCursorAtEnd(), err + } + + t := s.metrics.ListLatency.Start(ctx) + var stowCursor string + if cursor.cursorPos == StartCursorPos { + stowCursor = stow.CursorStart + } else if cursor.cursorPos == EndCursorPos { + return nil, NewCursorAtEnd(), fmt.Errorf("Cursor cannot be at end for the List call") + } else { + stowCursor = cursor.customState + } + items, stowCursor, err := container.Items(k, stowCursor, maxItems) + if err == nil { + results := make([]DataReference, len(items)) + for index, item := range items { + results[index] = DataReference(item.URL().String()) + } + if stow.IsCursorEnd(stowCursor) { + cursor = NewCursorAtEnd() + } else { + cursor = NewCursorFromCustomState(stowCursor) + } + t.Stop() + return results, cursor, nil + } + + incFailureCounterForError(ctx, s.metrics.ListFailure, err) + return nil, NewCursorAtEnd(), errs.Wrapf(err, "path:%v", k) +} + func (s *StowStore) ReadRaw(ctx context.Context, reference DataReference) (io.ReadCloser, error) { _, c, k, err := reference.Split() if err != nil { @@ -434,6 +477,9 @@ func newStowMetrics(scope promutils.Scope) *stowMetrics { HeadFailure: labeled.NewCounter("head_failure", "Indicates failure in HEAD for a given reference", scope, labeled.EmitUnlabeledMetric), HeadLatency: labeled.NewStopWatch("head", "Indicates time to fetch metadata using the Head API", time.Millisecond, scope, labeled.EmitUnlabeledMetric), + ListFailure: labeled.NewCounter("list_failure", "Indicates failure in item listing for a given reference", scope, labeled.EmitUnlabeledMetric), + ListLatency: labeled.NewStopWatch("list", "Indicates time to fetch item listing using the List API", time.Millisecond, scope, labeled.EmitUnlabeledMetric), + ReadFailure: labeled.NewCounter("read_failure", "Indicates failure in GET for a given reference", scope, labeled.EmitUnlabeledMetric, failureTypeOption), ReadOpenLatency: labeled.NewStopWatch("read_open", "Indicates time to first byte when reading", time.Millisecond, scope, labeled.EmitUnlabeledMetric), diff --git a/flytestdlib/storage/stow_store_test.go b/flytestdlib/storage/stow_store_test.go index 99678eb8ad..3223774184 100644 --- a/flytestdlib/storage/stow_store_test.go +++ 
b/flytestdlib/storage/stow_store_test.go @@ -10,6 +10,8 @@ import ( "net/url" "os" "path/filepath" + "sort" + "strconv" "testing" "time" @@ -73,8 +75,37 @@ func (m mockStowContainer) Item(id string) (stow.Item, error) { return nil, stow.ErrNotFound } -func (mockStowContainer) Items(prefix, cursor string, count int) ([]stow.Item, string, error) { - return []stow.Item{}, "", nil +func (m mockStowContainer) Items(prefix, cursor string, count int) ([]stow.Item, string, error) { + startIndex := 0 + if cursor != "" { + index, err := strconv.Atoi(cursor) + if err != nil { + return nil, "", fmt.Errorf("Invalid cursor '%s'", cursor) + } + startIndex = index + } + endIndexExc := min(len(m.items), startIndex+count) + + itemKeys := make([]string, len(m.items)) + index := 0 + for key := range m.items { + itemKeys[index] = key + index += 1 + } + sort.Strings(itemKeys) + + numItems := endIndexExc - startIndex + results := make([]stow.Item, numItems) + for index, itemKey := range itemKeys[startIndex:endIndexExc] { + results[index] = m.items[itemKey] + } + + if endIndexExc == len(m.items) { + cursor = "" + } else { + cursor = fmt.Sprintf("%d", endIndexExc) + } + return results, cursor, nil } func (m mockStowContainer) RemoveItem(id string) error { @@ -361,6 +392,67 @@ func TestStowStore_ReadRaw(t *testing.T) { }) } +func TestStowStore_List(t *testing.T) { + const container = "container" + t.Run("Listing", func(t *testing.T) { + ctx := context.Background() + fn := fQNFn["s3"] + s, err := NewStowRawStore(fn(container), &mockStowLoc{ + ContainerCb: func(id string) (stow.Container, error) { + if id == container { + return newMockStowContainer(container), nil + } + return nil, fmt.Errorf("container is not supported") + }, + CreateContainerCb: func(name string) (stow.Container, error) { + if name == container { + return newMockStowContainer(container), nil + } + return nil, fmt.Errorf("container is not supported") + }, + }, nil, false, metrics) + assert.NoError(t, err) + writeTestFile(ctx, t, s, "s3://container/a/1") + writeTestFile(ctx, t, s, "s3://container/a/2") + var maxResults = 10 + var dataReference DataReference = "s3://container/a" + items, cursor, err := s.List(ctx, dataReference, maxResults, NewCursorAtStart()) + assert.NoError(t, err) + assert.Equal(t, NewCursorAtEnd(), cursor) + assert.Equal(t, []DataReference{"a/1", "a/2"}, items) + }) + + t.Run("Listing with pagination", func(t *testing.T) { + ctx := context.Background() + fn := fQNFn["s3"] + s, err := NewStowRawStore(fn(container), &mockStowLoc{ + ContainerCb: func(id string) (stow.Container, error) { + if id == container { + return newMockStowContainer(container), nil + } + return nil, fmt.Errorf("container is not supported") + }, + CreateContainerCb: func(name string) (stow.Container, error) { + if name == container { + return newMockStowContainer(container), nil + } + return nil, fmt.Errorf("container is not supported") + }, + }, nil, false, metrics) + assert.NoError(t, err) + writeTestFile(ctx, t, s, "s3://container/a/1") + writeTestFile(ctx, t, s, "s3://container/a/2") + var maxResults = 1 + var dataReference DataReference = "s3://container/a" + items, cursor, err := s.List(ctx, dataReference, maxResults, NewCursorAtStart()) + assert.NoError(t, err) + assert.Equal(t, []DataReference{"a/1"}, items) + items, cursor, err = s.List(ctx, dataReference, maxResults, cursor) + assert.NoError(t, err) + assert.Equal(t, []DataReference{"a/2"}, items) + }) +} + func TestNewLocalStore(t *testing.T) { labeled.SetMetricKeys(contextutils.ProjectKey, 
contextutils.DomainKey, contextutils.WorkflowIDKey, contextutils.TaskIDKey) t.Run("Valid config", func(t *testing.T) { From a4f87b416496649fc63cc40494399966cde4108d Mon Sep 17 00:00:00 2001 From: Bugra Gedik Date: Tue, 20 Aug 2024 19:19:30 +0000 Subject: [PATCH 29/65] Minor fix --- .../go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go index 5d145123de..b77615120a 100644 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go @@ -61,7 +61,7 @@ func GetExecutionEnvVars(id pluginsCore.TaskExecutionID, consoleURL string) []v1 Value: nodeExecutionID.Domain, }, { - # FLYTE_INTERNAL_POD_NAME + // FLYTE_INTERNAL_POD_NAME Name: "_F_PN", ValueFrom: &v1.EnvVarSource{ FieldRef: &v1.ObjectFieldSelector{ From e333bb69da5c70a051ebf9f46bae8f2cc5f058da Mon Sep 17 00:00:00 2001 From: Bugra Gedik Date: Tue, 10 Sep 2024 22:43:16 +0000 Subject: [PATCH 30/65] renamings --- flytestdlib/storage/storage.go | 31 ++++++++++++++++--------------- flytestdlib/storage/stow_store.go | 8 ++++---- 2 files changed, 20 insertions(+), 19 deletions(-) diff --git a/flytestdlib/storage/storage.go b/flytestdlib/storage/storage.go index d138d2881f..52e6905513 100644 --- a/flytestdlib/storage/storage.go +++ b/flytestdlib/storage/storage.go @@ -40,37 +40,38 @@ type Metadata interface { ContentMD5() string } -type CursorPosition int +type CursorState int const ( - StartCursorPos CursorPosition = 0 - EndCursorPos CursorPosition = 1 - CustomCursorPos CursorPosition = 2 + // Enum representing state of the cursor + AtStartCursorState CursorState = 0 + AtEndCursorState CursorState = 1 + AtCustomPosCursorState CursorState = 2 ) type Cursor struct { - cursorPos CursorPosition - customState string + cursorState CursorState + customPosition string } -func NewCursorFromCustomState(customState string) Cursor { +func NewCursorAtStart() Cursor { return Cursor{ - cursorPos: CustomCursorPos, - customState: customState, + cursorState: AtStartCursorState, + customPosition: "", } } -func NewCursorAtStart() Cursor { +func NewCursorAtEnd() Cursor { return Cursor{ - cursorPos: StartCursorPos, - customState: "", + cursorState: AtEndCursorState, + customPosition: "", } } -func NewCursorAtEnd() Cursor { +func NewCursorFromCustomPosition(customPosition string) Cursor { return Cursor{ - cursorPos: EndCursorPos, - customState: "", + cursorState: AtCustomPosCursorState, + customPosition: customPosition, } } diff --git a/flytestdlib/storage/stow_store.go b/flytestdlib/storage/stow_store.go index 8172018b4a..6b731b9c86 100644 --- a/flytestdlib/storage/stow_store.go +++ b/flytestdlib/storage/stow_store.go @@ -268,12 +268,12 @@ func (s *StowStore) List(ctx context.Context, reference DataReference, maxItems t := s.metrics.ListLatency.Start(ctx) var stowCursor string - if cursor.cursorPos == StartCursorPos { + if cursor.cursorState == AtStartCursorState { stowCursor = stow.CursorStart - } else if cursor.cursorPos == EndCursorPos { + } else if cursor.cursorState == AtEndCursorState { return nil, NewCursorAtEnd(), fmt.Errorf("Cursor cannot be at end for the List call") } else { - stowCursor = cursor.customState + stowCursor = cursor.customPosition } items, stowCursor, err := container.Items(k, stowCursor, maxItems) if err == nil { @@ -284,7 +284,7 @@ func (s *StowStore) 
List(ctx context.Context, reference DataReference, maxItems if stow.IsCursorEnd(stowCursor) { cursor = NewCursorAtEnd() } else { - cursor = NewCursorFromCustomState(stowCursor) + cursor = NewCursorFromCustomPosition(stowCursor) } t.Stop() return results, cursor, nil From 786903c2b27526112ebe2801ea26320f90381abd Mon Sep 17 00:00:00 2001 From: Bugra Gedik Date: Wed, 11 Sep 2024 23:44:37 +0000 Subject: [PATCH 31/65] renamings --- flyteadmin/pkg/common/mocks/storage.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flyteadmin/pkg/common/mocks/storage.go b/flyteadmin/pkg/common/mocks/storage.go index 3af2507d1e..bf29eedd3e 100644 --- a/flyteadmin/pkg/common/mocks/storage.go +++ b/flyteadmin/pkg/common/mocks/storage.go @@ -33,7 +33,7 @@ func (t *TestDataStore) Head(ctx context.Context, reference storage.DataReferenc return t.HeadCb(ctx, reference) } -func (s *TestDataStore) List(ctx context.Context, reference storage.DataReference, maxItems int, cursor storage.Cursor) ([]storage.DataReference, storage.Cursor, error) { +func (t *TestDataStore) List(ctx context.Context, reference storage.DataReference, maxItems int, cursor storage.Cursor) ([]storage.DataReference, storage.Cursor, error) { return nil, storage.NewCursorAtEnd(), fmt.Errorf("Not implemented yet") } From acab62a1e709ff14ff7f09dc46504e38e48b64f5 Mon Sep 17 00:00:00 2001 From: Bugra Gedik Date: Thu, 12 Sep 2024 04:44:32 +0000 Subject: [PATCH 32/65] mockery --- .../storage/mocks/composed_protobuf_store.go | 48 +++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/flytestdlib/storage/mocks/composed_protobuf_store.go b/flytestdlib/storage/mocks/composed_protobuf_store.go index c9064c2ac5..49a0ee89dd 100644 --- a/flytestdlib/storage/mocks/composed_protobuf_store.go +++ b/flytestdlib/storage/mocks/composed_protobuf_store.go @@ -194,6 +194,54 @@ func (_m *ComposedProtobufStore) Head(ctx context.Context, reference storage.Dat return r0, r1 } +type ComposedProtobufStore_List struct { + *mock.Call +} + +func (_m ComposedProtobufStore_List) Return(_a0 []storage.DataReference, _a1 storage.Cursor, _a2 error) *ComposedProtobufStore_List { + return &ComposedProtobufStore_List{Call: _m.Call.Return(_a0, _a1, _a2)} +} + +func (_m *ComposedProtobufStore) OnList(ctx context.Context, reference storage.DataReference, maxItems int, cursor storage.Cursor) *ComposedProtobufStore_List { + c_call := _m.On("List", ctx, reference, maxItems, cursor) + return &ComposedProtobufStore_List{Call: c_call} +} + +func (_m *ComposedProtobufStore) OnListMatch(matchers ...interface{}) *ComposedProtobufStore_List { + c_call := _m.On("List", matchers...) 
+ return &ComposedProtobufStore_List{Call: c_call} +} + +// List provides a mock function with given fields: ctx, reference, maxItems, cursor +func (_m *ComposedProtobufStore) List(ctx context.Context, reference storage.DataReference, maxItems int, cursor storage.Cursor) ([]storage.DataReference, storage.Cursor, error) { + ret := _m.Called(ctx, reference, maxItems, cursor) + + var r0 []storage.DataReference + if rf, ok := ret.Get(0).(func(context.Context, storage.DataReference, int, storage.Cursor) []storage.DataReference); ok { + r0 = rf(ctx, reference, maxItems, cursor) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]storage.DataReference) + } + } + + var r1 storage.Cursor + if rf, ok := ret.Get(1).(func(context.Context, storage.DataReference, int, storage.Cursor) storage.Cursor); ok { + r1 = rf(ctx, reference, maxItems, cursor) + } else { + r1 = ret.Get(1).(storage.Cursor) + } + + var r2 error + if rf, ok := ret.Get(2).(func(context.Context, storage.DataReference, int, storage.Cursor) error); ok { + r2 = rf(ctx, reference, maxItems, cursor) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + type ComposedProtobufStore_ReadProtobuf struct { *mock.Call } From d02a58f6bb0b35a0cc8a301333584d180dd99987 Mon Sep 17 00:00:00 2001 From: Bugra Gedik Date: Thu, 12 Sep 2024 04:53:49 +0000 Subject: [PATCH 33/65] mockery Signed-off-by: Bugra Gedik --- flytepropeller/pkg/utils/failing_datastore.go | 2 +- flytestdlib/storage/cached_rawstore_test.go | 2 +- flytestdlib/storage/stow_store_test.go | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/flytepropeller/pkg/utils/failing_datastore.go b/flytepropeller/pkg/utils/failing_datastore.go index b5cf9ecdcb..7948a85b81 100644 --- a/flytepropeller/pkg/utils/failing_datastore.go +++ b/flytepropeller/pkg/utils/failing_datastore.go @@ -27,7 +27,7 @@ func (FailingRawStore) Head(ctx context.Context, reference storage.DataReference return nil, fmt.Errorf("failed metadata fetch") } -func (s *FailingRawStore) List(ctx context.Context, reference storage.DataReference, maxItems int, cursor storage.Cursor) ([]storage.DataReference, storage.Cursor, error) { +func (FailingRawStore) List(ctx context.Context, reference storage.DataReference, maxItems int, cursor storage.Cursor) ([]storage.DataReference, storage.Cursor, error) { return nil, storage.NewCursorAtEnd(), fmt.Errorf("Not implemented yet") } diff --git a/flytestdlib/storage/cached_rawstore_test.go b/flytestdlib/storage/cached_rawstore_test.go index 0e7facb0f6..9c304790cb 100644 --- a/flytestdlib/storage/cached_rawstore_test.go +++ b/flytestdlib/storage/cached_rawstore_test.go @@ -73,7 +73,7 @@ func (d *dummyStore) Head(ctx context.Context, reference DataReference) (Metadat return d.HeadCb(ctx, reference) } -func (s *dummyStore) List(ctx context.Context, reference DataReference, maxItems int, cursor Cursor) ([]DataReference, Cursor, error) { +func (d *dummyStore) List(ctx context.Context, reference DataReference, maxItems int, cursor Cursor) ([]DataReference, Cursor, error) { return nil, NewCursorAtEnd(), fmt.Errorf("Not implemented yet") } diff --git a/flytestdlib/storage/stow_store_test.go b/flytestdlib/storage/stow_store_test.go index 3223774184..4de273dd93 100644 --- a/flytestdlib/storage/stow_store_test.go +++ b/flytestdlib/storage/stow_store_test.go @@ -90,7 +90,7 @@ func (m mockStowContainer) Items(prefix, cursor string, count int) ([]stow.Item, index := 0 for key := range m.items { itemKeys[index] = key - index += 1 + index++ } sort.Strings(itemKeys) @@ -447,7 +447,7 
@@ func TestStowStore_List(t *testing.T) { items, cursor, err := s.List(ctx, dataReference, maxResults, NewCursorAtStart()) assert.NoError(t, err) assert.Equal(t, []DataReference{"a/1"}, items) - items, cursor, err = s.List(ctx, dataReference, maxResults, cursor) + items, _, err = s.List(ctx, dataReference, maxResults, cursor) assert.NoError(t, err) assert.Equal(t, []DataReference{"a/2"}, items) }) From 72095c00257b42303f4437e884ee1ad7b3519d35 Mon Sep 17 00:00:00 2001 From: Bugra Gedik Date: Tue, 10 Sep 2024 22:43:16 +0000 Subject: [PATCH 34/65] renamings Signed-off-by: Bugra Gedik --- flytestdlib/storage/storage.go | 31 ++++++++++++++++--------------- flytestdlib/storage/stow_store.go | 8 ++++---- 2 files changed, 20 insertions(+), 19 deletions(-) diff --git a/flytestdlib/storage/storage.go b/flytestdlib/storage/storage.go index d138d2881f..52e6905513 100644 --- a/flytestdlib/storage/storage.go +++ b/flytestdlib/storage/storage.go @@ -40,37 +40,38 @@ type Metadata interface { ContentMD5() string } -type CursorPosition int +type CursorState int const ( - StartCursorPos CursorPosition = 0 - EndCursorPos CursorPosition = 1 - CustomCursorPos CursorPosition = 2 + // Enum representing state of the cursor + AtStartCursorState CursorState = 0 + AtEndCursorState CursorState = 1 + AtCustomPosCursorState CursorState = 2 ) type Cursor struct { - cursorPos CursorPosition - customState string + cursorState CursorState + customPosition string } -func NewCursorFromCustomState(customState string) Cursor { +func NewCursorAtStart() Cursor { return Cursor{ - cursorPos: CustomCursorPos, - customState: customState, + cursorState: AtStartCursorState, + customPosition: "", } } -func NewCursorAtStart() Cursor { +func NewCursorAtEnd() Cursor { return Cursor{ - cursorPos: StartCursorPos, - customState: "", + cursorState: AtEndCursorState, + customPosition: "", } } -func NewCursorAtEnd() Cursor { +func NewCursorFromCustomPosition(customPosition string) Cursor { return Cursor{ - cursorPos: EndCursorPos, - customState: "", + cursorState: AtCustomPosCursorState, + customPosition: customPosition, } } diff --git a/flytestdlib/storage/stow_store.go b/flytestdlib/storage/stow_store.go index 8172018b4a..6b731b9c86 100644 --- a/flytestdlib/storage/stow_store.go +++ b/flytestdlib/storage/stow_store.go @@ -268,12 +268,12 @@ func (s *StowStore) List(ctx context.Context, reference DataReference, maxItems t := s.metrics.ListLatency.Start(ctx) var stowCursor string - if cursor.cursorPos == StartCursorPos { + if cursor.cursorState == AtStartCursorState { stowCursor = stow.CursorStart - } else if cursor.cursorPos == EndCursorPos { + } else if cursor.cursorState == AtEndCursorState { return nil, NewCursorAtEnd(), fmt.Errorf("Cursor cannot be at end for the List call") } else { - stowCursor = cursor.customState + stowCursor = cursor.customPosition } items, stowCursor, err := container.Items(k, stowCursor, maxItems) if err == nil { @@ -284,7 +284,7 @@ func (s *StowStore) List(ctx context.Context, reference DataReference, maxItems if stow.IsCursorEnd(stowCursor) { cursor = NewCursorAtEnd() } else { - cursor = NewCursorFromCustomState(stowCursor) + cursor = NewCursorFromCustomPosition(stowCursor) } t.Stop() return results, cursor, nil From 77f47d14b53edf6a982f7cb025bd2a78556c9384 Mon Sep 17 00:00:00 2001 From: Bugra Gedik Date: Wed, 11 Sep 2024 23:44:37 +0000 Subject: [PATCH 35/65] renamings Signed-off-by: Bugra Gedik --- flyteadmin/pkg/common/mocks/storage.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/flyteadmin/pkg/common/mocks/storage.go b/flyteadmin/pkg/common/mocks/storage.go index 3af2507d1e..bf29eedd3e 100644 --- a/flyteadmin/pkg/common/mocks/storage.go +++ b/flyteadmin/pkg/common/mocks/storage.go @@ -33,7 +33,7 @@ func (t *TestDataStore) Head(ctx context.Context, reference storage.DataReferenc return t.HeadCb(ctx, reference) } -func (s *TestDataStore) List(ctx context.Context, reference storage.DataReference, maxItems int, cursor storage.Cursor) ([]storage.DataReference, storage.Cursor, error) { +func (t *TestDataStore) List(ctx context.Context, reference storage.DataReference, maxItems int, cursor storage.Cursor) ([]storage.DataReference, storage.Cursor, error) { return nil, storage.NewCursorAtEnd(), fmt.Errorf("Not implemented yet") } From 49a77606b8569d3921ad325c65d6424159214281 Mon Sep 17 00:00:00 2001 From: Paul Dittamo <37558497+pvditt@users.noreply.github.com> Date: Wed, 21 Aug 2024 09:08:09 -0700 Subject: [PATCH 36/65] [Bug] Update resource failures w/ Finalizers set (#423) (#5673) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Overview when [informer cache has stale values](https://github.com/unionai/flyte/blob/1e82352dd95f89630e333fe6105d5fdb5487a24e/flytepropeller/pkg/controller/nodes/task/k8s/plugin_manager.go#L478), we cannot update the k8s resource when [clearing finalizers](https://github.com/unionai/flyte/blob/1e82352dd95f89630e333fe6105d5fdb5487a24e/flytepropeller/pkg/controller/nodes/task/k8s/plugin_manager.go#L450) and get `Error: Operation cannot be fulfilled on pods.` The current implementation bubbles up the error resulting in a system retry. By the next loop, the informer cache is up to date and the update is able to be applied. However, in an ArrayNode with many subnodes getting executed in parallel, the execution can easily run out of retries. This update adds a basic retry with exponential backoff to wait for the informer cache to get up to date. ## Test Plan Ran in dogfood-gcp - https://buildkite.com/unionai/managed-cluster-staging-sync/builds/4622 + manually updated configmap to enabled finalizers - Run without change (https://dogfood-gcp.cloud-staging.union.ai/console/projects/flytesnacks/domains/development/executions/fd16ac81fd7b5480fb6f/nodes) - Run with change (https://dogfood-gcp.cloud-staging.union.ai/console/projects/flytesnacks/domains/development/executions/f016a3be7fa304db5a77/nodeId/n0/nodes) confirmed in logs that conflict errors: ``` {"json":{"exec_id":"f016a3be7fa304db5a77","node":"n0/n42","ns":"development","res_ver":"146129599","routine":"worker-66","src":"plugin_manager.go:455","wf":"flytesnacks:development:tests.flytekit.integration.map_task_issue.wf8"},"level":"warning","msg":"Failed to clear finalizers for Resource with name: development/f016a3be7fa304db5a77-n0-0-n42-0. Error: Operation cannot be fulfilled on pods \"f016a3be7fa304db5a77-n0-0-n42-0\": the object has been modified; please apply your changes to the latest version and try again","ts":"2024-08-17T02:02:48Z"} ``` did not bubble up + confirmed finalizers were removed: ``` ➜ ~ k get pods -n development f016a3be7fa304db5a77-n0-0-n42-0 -o json | grep -i final INFO[0000] [0] Couldn't find a config file []. Relying on env vars and pflags. ➜ ~ ``` ## Rollout Plan (if applicable) - revert changes to customer's config that disabled finalizers ## Upstream Changes Should this change be upstreamed to OSS (flyteorg/flyte)? If not, please uncheck this box, which is used for auditing. 
Note, it is the responsibility of each developer to actually upstream their changes. See [this guide](https://unionai.atlassian.net/wiki/spaces/ENG/pages/447610883/Flyte+-+Union+Cloud+Development+Runbook/#When-are-versions-updated%3F). - [x] To be upstreamed to OSS ## Issue fixes: https://linear.app/unionai/issue/COR-1558/investigate-why-finalizers-consume-system-retries-in-map-tasks ## Checklist * [ ] Added tests * [x] Ran a deploy dry run and shared the terraform plan * [ ] Added logging and metrics * [ ] Updated [dashboards](https://unionai.grafana.net/dashboards) and [alerts](https://unionai.grafana.net/alerting/list) * [ ] Updated documentation Signed-off-by: Paul Dittamo Signed-off-by: Bugra Gedik --- .../pluginmachinery/flytek8s/config/config.go | 8 ++ .../flytek8s/config/k8spluginconfig_flags.go | 2 + .../config/k8spluginconfig_flags_test.go | 28 +++++++ .../nodes/task/k8s/plugin_manager.go | 79 +++++++++++++------ 4 files changed, 92 insertions(+), 25 deletions(-) diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/config.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/config.go index 109ef06ba1..eb19015586 100644 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/config.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/config.go @@ -64,6 +64,8 @@ var ( DefaultPodTemplateResync: config2.Duration{ Duration: 30 * time.Second, }, + UpdateBaseBackoffDuration: 10, + UpdateBackoffRetries: 5, } // K8sPluginConfigSection provides a singular top level config section for all plugins. @@ -206,6 +208,12 @@ type K8sPluginConfig struct { // SendObjectEvents indicates whether to send k8s object events in TaskExecutionEvent updates (similar to kubectl get events). SendObjectEvents bool `json:"send-object-events" pflag:",If true, will send k8s object events in TaskExecutionEvent updates."` + + // Initial delay in exponential backoff when updating a resource in milliseconds. + UpdateBaseBackoffDuration int `json:"update-base-backoff-duration" pflag:",Initial delay in exponential backoff when updating a resource in milliseconds."` + + // Number of retries for exponential backoff when updating a resource. + UpdateBackoffRetries int `json:"update-backoff-retries" pflag:",Number of retries for exponential backoff when updating a resource."` } // FlyteCoPilotConfig specifies configuration for the Flyte CoPilot system. 
FlyteCoPilot, allows running flytekit-less containers diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags.go index 7a3f1c951e..4652d0bfd4 100755 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags.go @@ -67,5 +67,7 @@ func (cfg K8sPluginConfig) GetPFlagSet(prefix string) *pflag.FlagSet { cmdFlags.String(fmt.Sprintf("%v%v", prefix, "default-pod-template-name"), defaultK8sConfig.DefaultPodTemplateName, "Name of the PodTemplate to use as the base for all k8s pods created by FlytePropeller.") cmdFlags.String(fmt.Sprintf("%v%v", prefix, "default-pod-template-resync"), defaultK8sConfig.DefaultPodTemplateResync.String(), "Frequency of resyncing default pod templates") cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "send-object-events"), defaultK8sConfig.SendObjectEvents, "If true, will send k8s object events in TaskExecutionEvent updates.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "update-base-backoff-duration"), defaultK8sConfig.UpdateBaseBackoffDuration, "Initial delay in exponential backoff when updating a resource in milliseconds.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "update-backoff-retries"), defaultK8sConfig.UpdateBackoffRetries, "Number of retries for exponential backoff when updating a resource.") return cmdFlags } diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags_test.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags_test.go index 4d5918a3b5..cc46ffa466 100755 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags_test.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags_test.go @@ -337,4 +337,32 @@ func TestK8sPluginConfig_SetFlags(t *testing.T) { } }) }) + t.Run("Test_update-base-backoff-duration", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("update-base-backoff-duration", testValue) + if vInt, err := cmdFlags.GetInt("update-base-backoff-duration"); err == nil { + testDecodeJson_K8sPluginConfig(t, fmt.Sprintf("%v", vInt), &actual.UpdateBaseBackoffDuration) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_update-backoff-retries", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("update-backoff-retries", testValue) + if vInt, err := cmdFlags.GetInt("update-backoff-retries"); err == nil { + testDecodeJson_K8sPluginConfig(t, fmt.Sprintf("%v", vInt), &actual.UpdateBackoffRetries) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) } diff --git a/flytepropeller/pkg/controller/nodes/task/k8s/plugin_manager.go b/flytepropeller/pkg/controller/nodes/task/k8s/plugin_manager.go index f9c3806ee6..42d3ad9b85 100644 --- a/flytepropeller/pkg/controller/nodes/task/k8s/plugin_manager.go +++ b/flytepropeller/pkg/controller/nodes/task/k8s/plugin_manager.go @@ -13,6 +13,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" k8stypes "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/util/workqueue" @@ -92,9 +93,11 @@ type PluginManager struct { kubeClient pluginsCore.KubeClient metrics PluginMetrics // Per namespace-resource - backOffController *backoff.Controller - 
resourceLevelMonitor *ResourceLevelMonitor - eventWatcher EventWatcher + backOffController *backoff.Controller + resourceLevelMonitor *ResourceLevelMonitor + eventWatcher EventWatcher + updateBaseBackoffDuration int + updateBackoffRetries int } func (e *PluginManager) addObjectMetadata(taskCtx pluginsCore.TaskExecutionMetadata, o client.Object, cfg *config.K8sPluginConfig) { @@ -463,25 +466,48 @@ func (e *PluginManager) Finalize(ctx context.Context, tCtx pluginsCore.TaskExecu } nsName = k8stypes.NamespacedName{Namespace: o.GetNamespace(), Name: o.GetName()} + retryBackoff := wait.Backoff{ + Duration: time.Duration(e.updateBaseBackoffDuration) * time.Millisecond, + Factor: 2.0, + Jitter: 0.1, + Steps: e.updateBackoffRetries, + } + // Attempt to cleanup finalizers so that the object may be deleted/garbage collected. We try to clear them for all // objects, regardless of whether or not InjectFinalizer is configured to handle all cases where InjectFinalizer is // enabled/disabled during object execution. - if err := e.kubeClient.GetClient().Get(ctx, nsName, o); err != nil { - if isK8sObjectNotExists(err) { - return nil + var lastErr error + _ = wait.ExponentialBackoff(retryBackoff, func() (bool, error) { + lastErr = nil + if err := e.kubeClient.GetClient().Get(ctx, nsName, o); err != nil { + if isK8sObjectNotExists(err) { + return true, nil + } + lastErr = err + // This happens sometimes because a node gets removed and K8s deletes the pod. This will result in a + // Pod does not exist error. This should be retried using the retry policy + logger.Warningf(ctx, "Failed in finalizing get Resource with name: %v. Error: %v", nsName, err) + return true, err } - // This happens sometimes because a node gets removed and K8s deletes the pod. This will result in a - // Pod does not exist error. This should be retried using the retry policy - logger.Warningf(ctx, "Failed in finalizing get Resource with name: %v. Error: %v", nsName, err) - return err - } - // This must happen after sending admin event. It's safe against partial failures because if the event failed, we will - // simply retry in the next round. If the event succeeded but this failed, we will try again the next round to send - // the same event (idempotent) and then come here again... - err = e.clearFinalizers(ctx, o) - if err != nil { - errs.Append(err) + // This must happen after sending admin event. It's safe against partial failures because if the event failed, we will + // simply retry in the next round. If the event succeeded but this failed, we will try again the next round to send + // the same event (idempotent) and then come here again... + if err := e.clearFinalizers(ctx, o); err != nil { + lastErr = err + // retry is if there is a conflict in case the informer cache is out of sync + if k8serrors.IsConflict(err) { + logger.Warningf(ctx, "Failed to clear finalizers for Resource with name: %v. Error: %v. Retrying..", nsName, err) + return false, nil + } + logger.Warningf(ctx, "Failed to clear finalizers for Resource with name: %v. Error: %v", nsName, err) + return true, err + } + return true, nil + }) + + if lastErr != nil { + errs.Append(lastErr) } // If we should delete the resource when finalize is called, do a best effort delete. 
@@ -630,8 +656,9 @@ func NewPluginManager(ctx context.Context, iCtx pluginsCore.SetupContext, entry return nil, err } + k8sConfig := config.GetK8sPluginConfig() var eventWatcher EventWatcher - if config.GetK8sPluginConfig().SendObjectEvents { + if k8sConfig.SendObjectEvents { eventWatcher, err = NewEventWatcher(ctx, gvk, kubeClientset) if err != nil { return nil, err @@ -645,13 +672,15 @@ func NewPluginManager(ctx context.Context, iCtx pluginsCore.SetupContext, entry rm.RunCollectorOnce(ctx) return &PluginManager{ - id: entry.ID, - plugin: entry.Plugin, - resourceToWatch: entry.ResourceToWatch, - metrics: newPluginMetrics(metricsScope), - kubeClient: kubeClient, - resourceLevelMonitor: rm, - eventWatcher: eventWatcher, + id: entry.ID, + plugin: entry.Plugin, + resourceToWatch: entry.ResourceToWatch, + metrics: newPluginMetrics(metricsScope), + kubeClient: kubeClient, + resourceLevelMonitor: rm, + eventWatcher: eventWatcher, + updateBaseBackoffDuration: k8sConfig.UpdateBaseBackoffDuration, + updateBackoffRetries: k8sConfig.UpdateBackoffRetries, }, nil } From 0272b860ad02e6125abbedcb89444239f5e6aea0 Mon Sep 17 00:00:00 2001 From: Paul Dittamo <37558497+pvditt@users.noreply.github.com> Date: Wed, 21 Aug 2024 14:29:37 -0700 Subject: [PATCH 37/65] [BUG] array node eventing bump version (#5680) * [BUG] add retries to handle array node eventing race condition (#421) If there is an error updating a [FlyteWorkflow CRD](https://github.com/unionai/flyte/blob/6a7207c5345604a28a9d4e3699becff767f520f5/flytepropeller/pkg/controller/handler.go#L378), then the propeller streak ends without the CRD getting updated and the in-memory copy of the FlyteWorkflow is not utilized on the next loop. [TaskPhaseVersion](https://github.com/flyteorg/flyte/blob/37b4e13ac4a3594ac63b7a35058f4b2220e51282/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/node_status.go#L239) is stored in the FlyteWorkflow. This is incremented when there is an update to node/subnode state to ensure that events are unique. If the events stay in the same state and have the same TaskPhaseVersion, then they [get short-circuited and don't get emitted to admin](https://github.com/flyteorg/flyte/blob/37b4e13ac4a3594ac63b7a35058f4b2220e51282/flytepropeller/events/admin_eventsink.go#L59) or will get returned as an [AlreadyExists error](https://github.com/flyteorg/flyte/blob/37b4e13ac4a3594ac63b7a35058f4b2220e51282/flyteadmin/pkg/manager/impl/task_execution_manager.go#L172) and get [handled in propeller to not bubble up in an error](https://github.com/flyteorg/flyte/blob/37b4e13ac4a3594ac63b7a35058f4b2220e51282/flytepropeller/pkg/controller/nodes/node_exec_context.go#L38). We can run into issues with ArrayNode eventing when: - array node handler increments task phase version from "0" to "1" - admin event sink emits event with version "1" - the propeller controller is not able to update the FlyteWorkflow CRD, so the ArrayNodeStatus indicates taskPhaseVersion is still 0 - next loop, array node handler increments task phase version from "0" to "1" - admin event sink prevents the event from getting emitted as an event with the same ID has already been received. No error is bubbled up. This means we lose subnode state until there is an event that contains an update to that subnode. If the lost state is the subnode reaching a terminal state, then the subnode state (from admin/UI) is "stuck" in a non-terminal state. I confirmed this to be an issue in the load-test-cluster. 
Whenever, there was an [error syncing the FlyteWorkflow](https://github.com/flyteorg/flyte/blob/37b4e13ac4a3594ac63b7a35058f4b2220e51282/flytepropeller/pkg/controller/workers.go#L91), the next round of eventing in ArrayNode would fail unless the ArrayNode phase changed. - added unit test - tested locally in sandbox - test in dogfood - https://buildkite.com/unionai/managed-cluster-staging-sync/builds/4398#01914a1a-f6d6-42a5-b41b-7b6807f27370 - should be fine to rollout to prod Should this change be upstreamed to OSS (flyteorg/flyte)? If not, please uncheck this box, which is used for auditing. Note, it is the responsibility of each developer to actually upstream their changes. See [this guide](https://unionai.atlassian.net/wiki/spaces/ENG/pages/447610883/Flyte+-+Union+Cloud+Development+Runbook/#When-are-versions-updated%3F). - [x] To be upstreamed to OSS fixes: https://linear.app/unionai/issue/COR-1534/bug-arraynode-shows-non-complete-jobs-in-ui-when-the-job-is-actually * [x] Added tests * [x] Ran a deploy dry run and shared the terraform plan * [ ] Added logging and metrics * [ ] Updated [dashboards](https://unionai.grafana.net/dashboards) and [alerts](https://unionai.grafana.net/alerting/list) * [ ] Updated documentation Signed-off-by: Paul Dittamo * handle already exists error on array node abort (#427) * handle already exists error on array node abort Signed-off-by: Paul Dittamo * update comment Signed-off-by: Paul Dittamo --------- Signed-off-by: Paul Dittamo * [BUG] set cause for already exists EventError (#432) * set cause for already exists EventError Signed-off-by: Paul Dittamo * add nil check event error Signed-off-by: Paul Dittamo * lint Signed-off-by: Paul Dittamo --------- Signed-off-by: Paul Dittamo --------- Signed-off-by: Paul Dittamo Signed-off-by: Bugra Gedik --- flytepropeller/events/admin_eventsink.go | 6 +- flytepropeller/events/admin_eventsink_test.go | 9 +- flytepropeller/events/errors/errors.go | 6 +- .../pkg/controller/config/config.go | 1 + .../pkg/controller/config/config_flags.go | 1 + .../controller/config/config_flags_test.go | 14 ++ .../pkg/controller/nodes/array/handler.go | 51 +++++- .../controller/nodes/array/handler_test.go | 163 ++++++++++++++++-- .../pkg/controller/nodes/node_exec_context.go | 3 + 9 files changed, 225 insertions(+), 29 deletions(-) diff --git a/flytepropeller/events/admin_eventsink.go b/flytepropeller/events/admin_eventsink.go index cb4b88a69a..3da6cca421 100644 --- a/flytepropeller/events/admin_eventsink.go +++ b/flytepropeller/events/admin_eventsink.go @@ -57,7 +57,11 @@ func (s *adminEventSink) Sink(ctx context.Context, message proto.Message) error if s.filter.Contains(ctx, id) { logger.Debugf(ctx, "event '%s' has already been sent", string(id)) - return nil + return &errors.EventError{ + Code: errors.AlreadyExists, + Cause: fmt.Errorf("event has already been sent"), + Message: "Event Already Exists", + } } // Validate submission with rate limiter and send admin event diff --git a/flytepropeller/events/admin_eventsink_test.go b/flytepropeller/events/admin_eventsink_test.go index c13b7ad47f..510371d056 100644 --- a/flytepropeller/events/admin_eventsink_test.go +++ b/flytepropeller/events/admin_eventsink_test.go @@ -184,13 +184,16 @@ func TestAdminFilterContains(t *testing.T) { filter.OnContainsMatch(mock.Anything, mock.Anything).Return(true) wfErr := adminEventSink.Sink(ctx, wfEvent) - assert.NoError(t, wfErr) + assert.Error(t, wfErr) + assert.True(t, errors.IsAlreadyExists(wfErr)) nodeErr := adminEventSink.Sink(ctx, nodeEvent) - 
assert.NoError(t, nodeErr) + assert.Error(t, nodeErr) + assert.True(t, errors.IsAlreadyExists(nodeErr)) taskErr := adminEventSink.Sink(ctx, taskEvent) - assert.NoError(t, taskErr) + assert.Error(t, taskErr) + assert.True(t, errors.IsAlreadyExists(taskErr)) } func TestIDFromMessage(t *testing.T) { diff --git a/flytepropeller/events/errors/errors.go b/flytepropeller/events/errors/errors.go index 879b8b07d7..2d3e02e0df 100644 --- a/flytepropeller/events/errors/errors.go +++ b/flytepropeller/events/errors/errors.go @@ -33,7 +33,11 @@ type EventError struct { } func (r EventError) Error() string { - return fmt.Sprintf("%s: %s, caused by [%s]", r.Code, r.Message, r.Cause.Error()) + var cause string + if r.Cause != nil { + cause = r.Cause.Error() + } + return fmt.Sprintf("%s: %s, caused by [%s]", r.Code, r.Message, cause) } func (r *EventError) Is(target error) bool { diff --git a/flytepropeller/pkg/controller/config/config.go b/flytepropeller/pkg/controller/config/config.go index 419386eddd..f058212322 100644 --- a/flytepropeller/pkg/controller/config/config.go +++ b/flytepropeller/pkg/controller/config/config.go @@ -259,6 +259,7 @@ const ( type EventConfig struct { RawOutputPolicy RawOutputPolicy `json:"raw-output-policy" pflag:",How output data should be passed along in execution events."` FallbackToOutputReference bool `json:"fallback-to-output-reference" pflag:",Whether output data should be sent by reference when it is too large to be sent inline in execution events."` + ErrorOnAlreadyExists bool `json:"error-on-already-exists" pflag:",Whether to return an error when an event already exists."` } // ParallelismBehavior defines how ArrayNode should handle subNode parallelism by default diff --git a/flytepropeller/pkg/controller/config/config_flags.go b/flytepropeller/pkg/controller/config/config_flags.go index ea0b428c2f..d2dc0971ff 100755 --- a/flytepropeller/pkg/controller/config/config_flags.go +++ b/flytepropeller/pkg/controller/config/config_flags.go @@ -100,6 +100,7 @@ func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "max-streak-length"), defaultConfig.MaxStreakLength, "Maximum number of consecutive rounds that one propeller worker can use for one workflow - >1 => turbo-mode is enabled.") cmdFlags.String(fmt.Sprintf("%v%v", prefix, "event-config.raw-output-policy"), defaultConfig.EventConfig.RawOutputPolicy, "How output data should be passed along in execution events.") cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "event-config.fallback-to-output-reference"), defaultConfig.EventConfig.FallbackToOutputReference, "Whether output data should be sent by reference when it is too large to be sent inline in execution events.") + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "event-config.error-on-already-exists"), defaultConfig.EventConfig.ErrorOnAlreadyExists, "Whether to return an error when an event already exists.") cmdFlags.StringSlice(fmt.Sprintf("%v%v", prefix, "include-shard-key-label"), defaultConfig.IncludeShardKeyLabel, "Include the specified shard key label in the k8s FlyteWorkflow CRD label selector") cmdFlags.StringSlice(fmt.Sprintf("%v%v", prefix, "exclude-shard-key-label"), defaultConfig.ExcludeShardKeyLabel, "Exclude the specified shard key label from the k8s FlyteWorkflow CRD label selector") cmdFlags.StringSlice(fmt.Sprintf("%v%v", prefix, "include-project-label"), defaultConfig.IncludeProjectLabel, "Include the specified project label in the k8s FlyteWorkflow CRD label selector") diff --git 
a/flytepropeller/pkg/controller/config/config_flags_test.go b/flytepropeller/pkg/controller/config/config_flags_test.go index bce7238f60..66a14381af 100755 --- a/flytepropeller/pkg/controller/config/config_flags_test.go +++ b/flytepropeller/pkg/controller/config/config_flags_test.go @@ -799,6 +799,20 @@ func TestConfig_SetFlags(t *testing.T) { } }) }) + t.Run("Test_event-config.error-on-already-exists", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("event-config.error-on-already-exists", testValue) + if vBool, err := cmdFlags.GetBool("event-config.error-on-already-exists"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.EventConfig.ErrorOnAlreadyExists) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) t.Run("Test_include-shard-key-label", func(t *testing.T) { t.Run("Override", func(t *testing.T) { diff --git a/flytepropeller/pkg/controller/nodes/array/handler.go b/flytepropeller/pkg/controller/nodes/array/handler.go index 315041cb51..a101ed5a30 100644 --- a/flytepropeller/pkg/controller/nodes/array/handler.go +++ b/flytepropeller/pkg/controller/nodes/array/handler.go @@ -11,6 +11,7 @@ import ( "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/ioutils" "github.com/flyteorg/flyte/flyteplugins/go/tasks/plugins/array/errorcollector" "github.com/flyteorg/flyte/flytepropeller/events" + eventsErr "github.com/flyteorg/flyte/flytepropeller/events/errors" "github.com/flyteorg/flyte/flytepropeller/pkg/apis/flyteworkflow/v1alpha1" "github.com/flyteorg/flyte/flytepropeller/pkg/compiler/validators" "github.com/flyteorg/flyte/flytepropeller/pkg/controller/config" @@ -21,6 +22,7 @@ import ( "github.com/flyteorg/flyte/flytepropeller/pkg/controller/nodes/interfaces" "github.com/flyteorg/flyte/flytepropeller/pkg/controller/nodes/task/k8s" "github.com/flyteorg/flyte/flytestdlib/bitarray" + stdConfig "github.com/flyteorg/flyte/flytestdlib/config" "github.com/flyteorg/flyte/flytestdlib/logger" "github.com/flyteorg/flyte/flytestdlib/promutils" "github.com/flyteorg/flyte/flytestdlib/storage" @@ -112,6 +114,10 @@ func (a *arrayNodeHandler) Abort(ctx context.Context, nCtx interfaces.NodeExecut // update state for subNodes if err := eventRecorder.finalize(ctx, nCtx, idlcore.TaskExecution_ABORTED, 0, a.eventConfig); err != nil { + // a task event with abort phase is already emitted when handling ArrayNodePhaseFailing + if eventsErr.IsAlreadyExists(err) { + return nil + } logger.Errorf(ctx, "ArrayNode event recording failed: [%s]", err.Error()) return err } @@ -579,12 +585,35 @@ func (a *arrayNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecu // increment taskPhaseVersion if we detect any changes in subNode state. 
if incrementTaskPhaseVersion { - arrayNodeState.TaskPhaseVersion = arrayNodeState.TaskPhaseVersion + 1 + arrayNodeState.TaskPhaseVersion++ } - if err := eventRecorder.finalize(ctx, nCtx, taskPhase, arrayNodeState.TaskPhaseVersion, a.eventConfig); err != nil { - logger.Errorf(ctx, "ArrayNode event recording failed: [%s]", err.Error()) - return handler.UnknownTransition, err + const maxRetries = 3 + retries := 0 + for retries <= maxRetries { + err := eventRecorder.finalize(ctx, nCtx, taskPhase, arrayNodeState.TaskPhaseVersion, a.eventConfig) + + if err == nil { + break + } + + // Handle potential race condition if FlyteWorkflow CRD fails to get synced + if eventsErr.IsAlreadyExists(err) { + if !incrementTaskPhaseVersion { + break + } + logger.Warnf(ctx, "Event version already exists, bumping version and retrying (%d/%d): [%s]", retries+1, maxRetries, err.Error()) + arrayNodeState.TaskPhaseVersion++ + } else { + logger.Errorf(ctx, "ArrayNode event recording failed: [%s]", err.Error()) + return handler.UnknownTransition, err + } + + retries++ + if retries > maxRetries { + logger.Errorf(ctx, "ArrayNode event recording failed after %d retries: [%s]", maxRetries, err.Error()) + return handler.UnknownTransition, err + } } // if the ArrayNode phase has changed we need to reset the taskPhaseVersion to 0 @@ -632,9 +661,21 @@ func New(nodeExecutor interfaces.Node, eventConfig *config.EventConfig, scope pr return nil, err } + eventConfigCopy, err := stdConfig.DeepCopyConfig(eventConfig) + if err != nil { + return nil, err + } + + deepCopiedEventConfig, ok := eventConfigCopy.(*config.EventConfig) + if !ok { + return nil, fmt.Errorf("deep copy error: expected *config.EventConfig, but got %T", eventConfigCopy) + } + + deepCopiedEventConfig.ErrorOnAlreadyExists = true + arrayScope := scope.NewSubScope("array") return &arrayNodeHandler{ - eventConfig: eventConfig, + eventConfig: deepCopiedEventConfig, gatherOutputsRequestChannel: make(chan *gatherOutputsRequest), metrics: newMetrics(arrayScope), nodeExecutionRequestChannel: make(chan *nodeExecutionRequest), diff --git a/flytepropeller/pkg/controller/nodes/array/handler_test.go b/flytepropeller/pkg/controller/nodes/array/handler_test.go index ee1fc5b80b..d27b412c1f 100644 --- a/flytepropeller/pkg/controller/nodes/array/handler_test.go +++ b/flytepropeller/pkg/controller/nodes/array/handler_test.go @@ -12,7 +12,8 @@ import ( idlcore "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/event" "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/core" - pluginmocks "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/io/mocks" + pluginiomocks "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/io/mocks" + eventsErr "github.com/flyteorg/flyte/flytepropeller/events/errors" eventmocks "github.com/flyteorg/flyte/flytepropeller/events/mocks" "github.com/flyteorg/flyte/flytepropeller/pkg/apis/flyteworkflow/v1alpha1" "github.com/flyteorg/flyte/flytepropeller/pkg/controller/config" @@ -50,7 +51,7 @@ func createArrayNodeHandler(ctx context.Context, t *testing.T, nodeHandler inter // mock components adminClient := launchplan.NewFailFastLaunchPlanExecutor() enqueueWorkflowFunc := func(workflowID v1alpha1.WorkflowID) {} - eventConfig := &config.EventConfig{} + eventConfig := &config.EventConfig{ErrorOnAlreadyExists: true} mockEventSink := eventmocks.NewMockEventSink() mockHandlerFactory := &mocks.HandlerFactory{} 
mockHandlerFactory.OnGetHandlerMatch(mock.Anything).Return(nodeHandler, nil) @@ -135,7 +136,7 @@ func createNodeExecutionContext(dataStore *storage.DataStore, eventRecorder inte nCtx.OnEventsRecorder().Return(eventRecorder) // InputReader - inputFilePaths := &pluginmocks.InputFilePaths{} + inputFilePaths := &pluginiomocks.InputFilePaths{} inputFilePaths.OnGetInputPath().Return(storage.DataReference("s3://bucket/input")) nCtx.OnInputReader().Return( newStaticInputReader( @@ -459,6 +460,24 @@ func uint32Ptr(v uint32) *uint32 { return &v } +type fakeEventRecorder struct { + taskErr error + phaseVersionFailures uint32 + recordTaskEventCallCount int +} + +func (f *fakeEventRecorder) RecordNodeEvent(ctx context.Context, event *event.NodeExecutionEvent, eventConfig *config.EventConfig) error { + return nil +} + +func (f *fakeEventRecorder) RecordTaskEvent(ctx context.Context, event *event.TaskExecutionEvent, eventConfig *config.EventConfig) error { + f.recordTaskEventCallCount++ + if f.phaseVersionFailures == 0 || event.PhaseVersion < f.phaseVersionFailures { + return f.taskErr + } + return nil +} + func TestHandleArrayNodePhaseExecuting(t *testing.T) { ctx := context.Background() @@ -492,11 +511,18 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { subNodeTaskPhases []core.Phase subNodeTransitions []handler.Transition expectedArrayNodePhase v1alpha1.ArrayNodePhase + expectedArrayNodeSubPhases []v1alpha1.NodePhase expectedTransitionPhase handler.EPhase expectedExternalResourcePhases []idlcore.TaskExecution_Phase currentWfParallelism uint32 maxWfParallelism uint32 incrementParallelismCount uint32 + useFakeEventRecorder bool + eventRecorderFailures uint32 + eventRecorderError error + expectedTaskPhaseVersion uint32 + expectHandleError bool + expectedEventingCalls int }{ { name: "StartAllSubNodes", @@ -514,6 +540,7 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), }, expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedTaskPhaseVersion: 1, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING, idlcore.TaskExecution_RUNNING}, incrementParallelismCount: 1, @@ -533,6 +560,7 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), }, expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedTaskPhaseVersion: 1, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING}, incrementParallelismCount: 1, @@ -553,6 +581,7 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), }, expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedTaskPhaseVersion: 1, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING, idlcore.TaskExecution_RUNNING}, currentWfParallelism: 0, @@ -573,6 +602,7 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), }, expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedTaskPhaseVersion: 1, expectedTransitionPhase: handler.EPhaseRunning, 
expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING}, currentWfParallelism: workflowMaxParallelism - 1, @@ -591,6 +621,7 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { }, subNodeTransitions: []handler.Transition{}, expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedTaskPhaseVersion: 0, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{}, currentWfParallelism: workflowMaxParallelism, @@ -612,6 +643,7 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), }, expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedTaskPhaseVersion: 1, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING, idlcore.TaskExecution_RUNNING}, incrementParallelismCount: 1, @@ -632,6 +664,7 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoSuccess(&handler.ExecutionInfo{})), }, expectedArrayNodePhase: v1alpha1.ArrayNodePhaseSucceeding, + expectedTaskPhaseVersion: 0, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_SUCCEEDED, idlcore.TaskExecution_SUCCEEDED}, }, @@ -652,6 +685,7 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoFailure(0, "", "", &handler.ExecutionInfo{})), }, expectedArrayNodePhase: v1alpha1.ArrayNodePhaseSucceeding, + expectedTaskPhaseVersion: 0, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_SUCCEEDED, idlcore.TaskExecution_FAILED}, }, @@ -671,9 +705,78 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoSuccess(&handler.ExecutionInfo{})), }, expectedArrayNodePhase: v1alpha1.ArrayNodePhaseFailing, + expectedTaskPhaseVersion: 0, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_FAILED, idlcore.TaskExecution_SUCCEEDED}, }, + { + name: "EventingAlreadyExists_EventuallySucceeds", + parallelism: uint32Ptr(0), + subNodePhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseQueued, + v1alpha1.NodePhaseQueued, + }, + subNodeTaskPhases: []core.Phase{ + core.PhaseRunning, + core.PhaseRunning, + }, + subNodeTransitions: []handler.Transition{ + handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), + handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), + }, + expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedTaskPhaseVersion: 2, + expectedTransitionPhase: handler.EPhaseRunning, + expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING, idlcore.TaskExecution_RUNNING}, + useFakeEventRecorder: true, + eventRecorderFailures: 2, + eventRecorderError: &eventsErr.EventError{Code: eventsErr.AlreadyExists, Cause: fmt.Errorf("err")}, + incrementParallelismCount: 1, + expectedEventingCalls: 2, + }, + { + name: "EventingAlreadyExists_EventuallyFails", + parallelism: uint32Ptr(0), + subNodePhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseQueued, + 
v1alpha1.NodePhaseQueued, + }, + subNodeTaskPhases: []core.Phase{ + core.PhaseRunning, + core.PhaseRunning, + }, + subNodeTransitions: []handler.Transition{ + handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), + handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), + }, + expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING, idlcore.TaskExecution_RUNNING}, + useFakeEventRecorder: true, + eventRecorderFailures: 5, + eventRecorderError: &eventsErr.EventError{Code: eventsErr.AlreadyExists, Cause: fmt.Errorf("err")}, + expectHandleError: true, + expectedEventingCalls: 4, + }, + { + name: "EventingFails", + parallelism: uint32Ptr(0), + subNodePhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseQueued, + v1alpha1.NodePhaseQueued, + }, + subNodeTaskPhases: []core.Phase{ + core.PhaseRunning, + core.PhaseRunning, + }, + subNodeTransitions: []handler.Transition{ + handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), + handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), + }, + expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING, idlcore.TaskExecution_RUNNING}, + useFakeEventRecorder: true, + eventRecorderError: fmt.Errorf("err"), + expectHandleError: true, + expectedEventingCalls: 1, + }, } for _, test := range tests { @@ -684,6 +787,15 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { }, scope) assert.NoError(t, err) + var eventRecorder interfaces.EventRecorder + if test.useFakeEventRecorder { + eventRecorder = &fakeEventRecorder{ + phaseVersionFailures: test.eventRecorderFailures, + taskErr: test.eventRecorderError, + } + } else { + eventRecorder = newBufferedEventRecorder() + } // initialize ArrayNodeState arrayNodeState := &handler.ArrayNodeState{ Phase: v1alpha1.ArrayNodePhaseExecuting, @@ -705,18 +817,12 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { for i, nodePhase := range test.subNodePhases { arrayNodeState.SubNodePhases.SetItem(i, bitarray.Item(nodePhase)) } - for i, taskPhase := range test.subNodeTaskPhases { - arrayNodeState.SubNodeTaskPhases.SetItem(i, bitarray.Item(taskPhase)) - } - - // create NodeExecutionContext - eventRecorder := newBufferedEventRecorder() nodeSpec := arrayNodeSpec nodeSpec.ArrayNode.Parallelism = test.parallelism nodeSpec.ArrayNode.MinSuccessRatio = test.minSuccessRatio - nCtx := createNodeExecutionContext(dataStore, eventRecorder, nil, literalMap, &arrayNodeSpec, arrayNodeState, test.currentWfParallelism, workflowMaxParallelism) + nCtx := createNodeExecutionContext(dataStore, eventRecorder, nil, literalMap, &nodeSpec, arrayNodeState, test.currentWfParallelism, workflowMaxParallelism) // initialize ArrayNodeHandler nodeHandler := &mocks.NodeHandler{} @@ -745,22 +851,41 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { // evaluate node transition, err := arrayNodeHandler.Handle(ctx, nCtx) - assert.NoError(t, err) + + fakeEventRecorder, ok := eventRecorder.(*fakeEventRecorder) + if ok { + assert.Equal(t, test.expectedEventingCalls, fakeEventRecorder.recordTaskEventCallCount) + } + + if !test.expectHandleError { + assert.NoError(t, err) + } else { + assert.Error(t, err) + return + } // validate results assert.Equal(t, test.expectedArrayNodePhase, arrayNodeState.Phase) assert.Equal(t, test.expectedTransitionPhase, transition.Info().GetPhase()) + 
assert.Equal(t, test.expectedTaskPhaseVersion, arrayNodeState.TaskPhaseVersion) - if len(test.expectedExternalResourcePhases) > 0 { - assert.Equal(t, 1, len(eventRecorder.taskExecutionEvents)) + for i, expectedPhase := range test.expectedArrayNodeSubPhases { + assert.Equal(t, expectedPhase, v1alpha1.NodePhase(arrayNodeState.SubNodePhases.GetItem(i))) + } - externalResources := eventRecorder.taskExecutionEvents[0].Metadata.GetExternalResources() - assert.Equal(t, len(test.expectedExternalResourcePhases), len(externalResources)) - for i, expectedPhase := range test.expectedExternalResourcePhases { - assert.Equal(t, expectedPhase, externalResources[i].Phase) + bufferedEventRecorder, ok := eventRecorder.(*bufferedEventRecorder) + if ok { + if len(test.expectedExternalResourcePhases) > 0 { + assert.Equal(t, 1, len(bufferedEventRecorder.taskExecutionEvents)) + + externalResources := bufferedEventRecorder.taskExecutionEvents[0].Metadata.GetExternalResources() + assert.Equal(t, len(test.expectedExternalResourcePhases), len(externalResources)) + for i, expectedPhase := range test.expectedExternalResourcePhases { + assert.Equal(t, expectedPhase, externalResources[i].Phase) + } + } else { + assert.Equal(t, 0, len(bufferedEventRecorder.taskExecutionEvents)) } - } else { - assert.Equal(t, 0, len(eventRecorder.taskExecutionEvents)) } nCtx.ExecutionContext().(*execmocks.ExecutionContext).AssertNumberOfCalls(t, "IncrementParallelism", int(test.incrementParallelismCount)) diff --git a/flytepropeller/pkg/controller/nodes/node_exec_context.go b/flytepropeller/pkg/controller/nodes/node_exec_context.go index a579b241f3..7de31100c6 100644 --- a/flytepropeller/pkg/controller/nodes/node_exec_context.go +++ b/flytepropeller/pkg/controller/nodes/node_exec_context.go @@ -36,6 +36,9 @@ type eventRecorder struct { func (e eventRecorder) RecordTaskEvent(ctx context.Context, ev *event.TaskExecutionEvent, eventConfig *config.EventConfig) error { if err := e.taskEventRecorder.RecordTaskEvent(ctx, ev, eventConfig); err != nil { if eventsErr.IsAlreadyExists(err) { + if eventConfig.ErrorOnAlreadyExists { + return err + } logger.Warningf(ctx, "Failed to record taskEvent, error [%s]. Trying to record state: %s. Ignoring this error!", err.Error(), ev.Phase) return nil } else if eventsErr.IsEventAlreadyInTerminalStateError(err) { From 2fd51ebc670934bd26c0283c5c963574dbc7e737 Mon Sep 17 00:00:00 2001 From: ddl-rliu <140021987+ddl-rliu@users.noreply.github.com> Date: Thu, 22 Aug 2024 23:53:33 -0700 Subject: [PATCH 38/65] Add custominfo to agents (#5604) Signed-off-by: ddl-rliu Signed-off-by: Bugra Gedik --- .../go/tasks/plugins/webapi/agent/plugin.go | 32 +++++++++++-------- .../tasks/plugins/webapi/agent/plugin_test.go | 16 +++++++--- 2 files changed, 30 insertions(+), 18 deletions(-) diff --git a/flyteplugins/go/tasks/plugins/webapi/agent/plugin.go b/flyteplugins/go/tasks/plugins/webapi/agent/plugin.go index 20a65ccba1..a7b2a3d1d4 100644 --- a/flyteplugins/go/tasks/plugins/webapi/agent/plugin.go +++ b/flyteplugins/go/tasks/plugins/webapi/agent/plugin.go @@ -8,6 +8,7 @@ import ( "time" "golang.org/x/exp/maps" + "google.golang.org/protobuf/types/known/structpb" "k8s.io/apimachinery/pkg/util/wait" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" @@ -39,10 +40,11 @@ type Plugin struct { type ResourceWrapper struct { Phase flyteIdl.TaskExecution_Phase // Deprecated: Please Use Phase instead. 
- State admin.State - Outputs *flyteIdl.LiteralMap - Message string - LogLinks []*flyteIdl.TaskLog + State admin.State + Outputs *flyteIdl.LiteralMap + Message string + LogLinks []*flyteIdl.TaskLog + CustomInfo *structpb.Struct } // IsTerminal is used to avoid making network calls to the agent service if the resource is already in a terminal state. @@ -192,10 +194,11 @@ func (p *Plugin) ExecuteTaskSync( } return nil, ResourceWrapper{ - Phase: resource.Phase, - Outputs: resource.Outputs, - Message: resource.Message, - LogLinks: resource.LogLinks, + Phase: resource.Phase, + Outputs: resource.Outputs, + Message: resource.Message, + LogLinks: resource.LogLinks, + CustomInfo: resource.CustomInfo, }, err } @@ -221,11 +224,12 @@ func (p *Plugin) Get(ctx context.Context, taskCtx webapi.GetContext) (latest web } return ResourceWrapper{ - Phase: res.Resource.Phase, - State: res.Resource.State, - Outputs: res.Resource.Outputs, - Message: res.Resource.Message, - LogLinks: res.Resource.LogLinks, + Phase: res.Resource.Phase, + State: res.Resource.State, + Outputs: res.Resource.Outputs, + Message: res.Resource.Message, + LogLinks: res.Resource.LogLinks, + CustomInfo: res.Resource.CustomInfo, }, nil } @@ -254,7 +258,7 @@ func (p *Plugin) Delete(ctx context.Context, taskCtx webapi.DeleteContext) error func (p *Plugin) Status(ctx context.Context, taskCtx webapi.StatusContext) (phase core.PhaseInfo, err error) { resource := taskCtx.Resource().(ResourceWrapper) - taskInfo := &core.TaskInfo{Logs: resource.LogLinks} + taskInfo := &core.TaskInfo{Logs: resource.LogLinks, CustomInfo: resource.CustomInfo} switch resource.Phase { case flyteIdl.TaskExecution_QUEUED: diff --git a/flyteplugins/go/tasks/plugins/webapi/agent/plugin_test.go b/flyteplugins/go/tasks/plugins/webapi/agent/plugin_test.go index 3db1c464b6..9e8c97903e 100644 --- a/flyteplugins/go/tasks/plugins/webapi/agent/plugin_test.go +++ b/flyteplugins/go/tasks/plugins/webapi/agent/plugin_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "golang.org/x/exp/maps" + "google.golang.org/protobuf/types/known/structpb" agentMocks "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" @@ -114,17 +115,24 @@ func TestPlugin(t *testing.T) { }) t.Run("test RUNNING Status", func(t *testing.T) { + simpleStruct := structpb.Struct{ + Fields: map[string]*structpb.Value{ + "foo": {Kind: &structpb.Value_StringValue{StringValue: "foo"}}, + }, + } taskContext := new(webapiPlugin.StatusContext) taskContext.On("Resource").Return(ResourceWrapper{ - State: admin.State_RUNNING, - Outputs: nil, - Message: "Job is running", - LogLinks: []*flyteIdlCore.TaskLog{{Uri: "http://localhost:3000/log", Name: "Log Link"}}, + State: admin.State_RUNNING, + Outputs: nil, + Message: "Job is running", + LogLinks: []*flyteIdlCore.TaskLog{{Uri: "http://localhost:3000/log", Name: "Log Link"}}, + CustomInfo: &simpleStruct, }) phase, err := plugin.Status(context.Background(), taskContext) assert.NoError(t, err) assert.Equal(t, pluginsCore.PhaseRunning, phase.Phase()) + assert.Equal(t, &simpleStruct, phase.Info().CustomInfo) }) t.Run("test PERMANENT_FAILURE Status", func(t *testing.T) { From 85f7d3d84fcdfd3820921d9773140e9f74e6bab1 Mon Sep 17 00:00:00 2001 From: Paul Dittamo <37558497+pvditt@users.noreply.github.com> Date: Fri, 23 Aug 2024 12:07:05 -0700 Subject: [PATCH 39/65] [BUG] use deep copy of bit arrays when getting array node state (#5681) * [BUG] add retries to handle array 
node eventing race condition (#421) If there is an error updating a [FlyteWorkflow CRD](https://github.com/unionai/flyte/blob/6a7207c5345604a28a9d4e3699becff767f520f5/flytepropeller/pkg/controller/handler.go#L378), then the propeller streak ends without the CRD getting updated and the in-memory copy of the FlyteWorkflow is not utilized on the next loop. [TaskPhaseVersion](https://github.com/flyteorg/flyte/blob/37b4e13ac4a3594ac63b7a35058f4b2220e51282/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/node_status.go#L239) is stored in the FlyteWorkflow. This is incremented when there is an update to node/subnode state to ensure that events are unique. If the events stay in the same state and have the same TaskPhaseVersion, then they [get short-circuited and don't get emitted to admin](https://github.com/flyteorg/flyte/blob/37b4e13ac4a3594ac63b7a35058f4b2220e51282/flytepropeller/events/admin_eventsink.go#L59) or will get returned as an [AlreadyExists error](https://github.com/flyteorg/flyte/blob/37b4e13ac4a3594ac63b7a35058f4b2220e51282/flyteadmin/pkg/manager/impl/task_execution_manager.go#L172) and get [handled in propeller to not bubble up in an error](https://github.com/flyteorg/flyte/blob/37b4e13ac4a3594ac63b7a35058f4b2220e51282/flytepropeller/pkg/controller/nodes/node_exec_context.go#L38). We can run into issues with ArrayNode eventing when: - array node handler increments task phase version from "0" to "1" - admin event sink emits event with version "1" - the propeller controller is not able to update the FlyteWorkflow CRD, so the ArrayNodeStatus indicates taskPhaseVersion is still 0 - next loop, array node handler increments task phase version from "0" to "1" - admin event sink prevents the event from getting emitted as an event with the same ID has already been received. No error is bubbled up. This means we lose subnode state until there is an event that contains an update to that subnode. If the lost state is the subnode reaching a terminal state, then the subnode state (from admin/UI) is "stuck" in a non-terminal state. I confirmed this to be an issue in the load-test-cluster. Whenever, there was an [error syncing the FlyteWorkflow](https://github.com/flyteorg/flyte/blob/37b4e13ac4a3594ac63b7a35058f4b2220e51282/flytepropeller/pkg/controller/workers.go#L91), the next round of eventing in ArrayNode would fail unless the ArrayNode phase changed. - added unit test - tested locally in sandbox - test in dogfood - https://buildkite.com/unionai/managed-cluster-staging-sync/builds/4398#01914a1a-f6d6-42a5-b41b-7b6807f27370 - should be fine to rollout to prod Should this change be upstreamed to OSS (flyteorg/flyte)? If not, please uncheck this box, which is used for auditing. Note, it is the responsibility of each developer to actually upstream their changes. See [this guide](https://unionai.atlassian.net/wiki/spaces/ENG/pages/447610883/Flyte+-+Union+Cloud+Development+Runbook/#When-are-versions-updated%3F). 
- [x] To be upstreamed to OSS fixes: https://linear.app/unionai/issue/COR-1534/bug-arraynode-shows-non-complete-jobs-in-ui-when-the-job-is-actually * [x] Added tests * [x] Ran a deploy dry run and shared the terraform plan * [ ] Added logging and metrics * [ ] Updated [dashboards](https://unionai.grafana.net/dashboards) and [alerts](https://unionai.grafana.net/alerting/list) * [ ] Updated documentation Signed-off-by: Paul Dittamo * handle already exists error on array node abort (#427) * handle already exists error on array node abort Signed-off-by: Paul Dittamo * update comment Signed-off-by: Paul Dittamo --------- Signed-off-by: Paul Dittamo * [BUG] set cause for already exists EventError (#432) * set cause for already exists EventError Signed-off-by: Paul Dittamo * add nil check event error Signed-off-by: Paul Dittamo * lint Signed-off-by: Paul Dittamo --------- Signed-off-by: Paul Dittamo * add deep copy for array node status Signed-off-by: Paul Dittamo * add deep copy for array node status Signed-off-by: Paul Dittamo * use deep copy of bit arrays when getting array node state Signed-off-by: Paul Dittamo * Revert "add deep copy for array node status" This reverts commit dde75951d87cb497e358a5bd0ff27f05078f5b72. Signed-off-by: Paul Dittamo * ignore ErrorOnAlreadyExists when marshalling event config Signed-off-by: Paul Dittamo --------- Signed-off-by: Paul Dittamo Signed-off-by: Bugra Gedik --- .../flyteworkflow/v1alpha1/node_status.go | 21 ++++++ .../v1alpha1/zz_generated.deepcopy.go | 4 ++ .../pkg/controller/config/config.go | 3 +- .../pkg/controller/config/config_flags.go | 2 +- .../controller/config/config_flags_test.go | 6 +- .../controller/nodes/array/handler_test.go | 70 ++++++++++++++++--- .../controller/nodes/node_state_manager.go | 24 +++++-- 7 files changed, 110 insertions(+), 20 deletions(-) diff --git a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/node_status.go b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/node_status.go index aab034224d..218b045588 100644 --- a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/node_status.go +++ b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/node_status.go @@ -316,6 +316,27 @@ func (in *ArrayNodeStatus) SetTaskPhaseVersion(taskPhaseVersion uint32) { } } +func (in *ArrayNodeStatus) DeepCopyInto(out *ArrayNodeStatus) { + *out = *in + out.MutableStruct = in.MutableStruct + + if in.ExecutionError != nil { + in, out := &in.ExecutionError, &out.ExecutionError + *out = new(core.ExecutionError) + *out = *in + } +} + +func (in *ArrayNodeStatus) DeepCopy() *ArrayNodeStatus { + if in == nil { + return nil + } + + out := &ArrayNodeStatus{} + in.DeepCopyInto(out) + return out +} + type NodeStatus struct { MutableStruct Phase NodePhase `json:"phase,omitempty"` diff --git a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/zz_generated.deepcopy.go b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/zz_generated.deepcopy.go index febbca733c..95cac582b8 100644 --- a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/zz_generated.deepcopy.go +++ b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/zz_generated.deepcopy.go @@ -548,6 +548,10 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { *out = new(DynamicNodeStatus) (*in).DeepCopyInto(*out) } + if in.ArrayNodeStatus != nil { + in, out := &in.ArrayNodeStatus, &out.ArrayNodeStatus + *out = (*in).DeepCopy() + } if in.Error != nil { in, out := &in.Error, &out.Error *out = (*in).DeepCopy() diff --git a/flytepropeller/pkg/controller/config/config.go b/flytepropeller/pkg/controller/config/config.go index 
f058212322..a0217e186a 100644 --- a/flytepropeller/pkg/controller/config/config.go +++ b/flytepropeller/pkg/controller/config/config.go @@ -259,7 +259,8 @@ const ( type EventConfig struct { RawOutputPolicy RawOutputPolicy `json:"raw-output-policy" pflag:",How output data should be passed along in execution events."` FallbackToOutputReference bool `json:"fallback-to-output-reference" pflag:",Whether output data should be sent by reference when it is too large to be sent inline in execution events."` - ErrorOnAlreadyExists bool `json:"error-on-already-exists" pflag:",Whether to return an error when an event already exists."` + // only meant to be overridden for certain node types that have different eventing behavior such as ArrayNode + ErrorOnAlreadyExists bool `json:"-"` } // ParallelismBehavior defines how ArrayNode should handle subNode parallelism by default diff --git a/flytepropeller/pkg/controller/config/config_flags.go b/flytepropeller/pkg/controller/config/config_flags.go index d2dc0971ff..858fc8a8ba 100755 --- a/flytepropeller/pkg/controller/config/config_flags.go +++ b/flytepropeller/pkg/controller/config/config_flags.go @@ -100,7 +100,7 @@ func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "max-streak-length"), defaultConfig.MaxStreakLength, "Maximum number of consecutive rounds that one propeller worker can use for one workflow - >1 => turbo-mode is enabled.") cmdFlags.String(fmt.Sprintf("%v%v", prefix, "event-config.raw-output-policy"), defaultConfig.EventConfig.RawOutputPolicy, "How output data should be passed along in execution events.") cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "event-config.fallback-to-output-reference"), defaultConfig.EventConfig.FallbackToOutputReference, "Whether output data should be sent by reference when it is too large to be sent inline in execution events.") - cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "event-config.error-on-already-exists"), defaultConfig.EventConfig.ErrorOnAlreadyExists, "Whether to return an error when an event already exists.") + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "event-config.-"), defaultConfig.EventConfig.ErrorOnAlreadyExists, "") cmdFlags.StringSlice(fmt.Sprintf("%v%v", prefix, "include-shard-key-label"), defaultConfig.IncludeShardKeyLabel, "Include the specified shard key label in the k8s FlyteWorkflow CRD label selector") cmdFlags.StringSlice(fmt.Sprintf("%v%v", prefix, "exclude-shard-key-label"), defaultConfig.ExcludeShardKeyLabel, "Exclude the specified shard key label from the k8s FlyteWorkflow CRD label selector") cmdFlags.StringSlice(fmt.Sprintf("%v%v", prefix, "include-project-label"), defaultConfig.IncludeProjectLabel, "Include the specified project label in the k8s FlyteWorkflow CRD label selector") diff --git a/flytepropeller/pkg/controller/config/config_flags_test.go b/flytepropeller/pkg/controller/config/config_flags_test.go index 66a14381af..27e7b76efa 100755 --- a/flytepropeller/pkg/controller/config/config_flags_test.go +++ b/flytepropeller/pkg/controller/config/config_flags_test.go @@ -799,13 +799,13 @@ func TestConfig_SetFlags(t *testing.T) { } }) }) - t.Run("Test_event-config.error-on-already-exists", func(t *testing.T) { + t.Run("Test_event-config.-", func(t *testing.T) { t.Run("Override", func(t *testing.T) { testValue := "1" - cmdFlags.Set("event-config.error-on-already-exists", testValue) - if vBool, err := cmdFlags.GetBool("event-config.error-on-already-exists"); err == nil { + cmdFlags.Set("event-config.-", testValue) + if vBool, err := 
cmdFlags.GetBool("event-config.-"); err == nil { testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.EventConfig.ErrorOnAlreadyExists) } else { diff --git a/flytepropeller/pkg/controller/nodes/array/handler_test.go b/flytepropeller/pkg/controller/nodes/array/handler_test.go index d27b412c1f..648d70e36c 100644 --- a/flytepropeller/pkg/controller/nodes/array/handler_test.go +++ b/flytepropeller/pkg/controller/nodes/array/handler_test.go @@ -539,7 +539,11 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), }, - expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedArrayNodeSubPhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseRunning, + v1alpha1.NodePhaseRunning, + }, expectedTaskPhaseVersion: 1, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING, idlcore.TaskExecution_RUNNING}, @@ -559,7 +563,11 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { subNodeTransitions: []handler.Transition{ handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), }, - expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedArrayNodeSubPhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseRunning, + v1alpha1.NodePhaseQueued, + }, expectedTaskPhaseVersion: 1, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING}, @@ -580,7 +588,11 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), }, - expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedArrayNodeSubPhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseRunning, + v1alpha1.NodePhaseRunning, + }, expectedTaskPhaseVersion: 1, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING, idlcore.TaskExecution_RUNNING}, @@ -601,7 +613,11 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { subNodeTransitions: []handler.Transition{ handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), }, - expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedArrayNodeSubPhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseRunning, + v1alpha1.NodePhaseQueued, + }, expectedTaskPhaseVersion: 1, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING}, @@ -619,8 +635,12 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { core.PhaseUndefined, core.PhaseUndefined, }, - subNodeTransitions: []handler.Transition{}, - expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + subNodeTransitions: []handler.Transition{}, + expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedArrayNodeSubPhases: 
[]v1alpha1.NodePhase{ + v1alpha1.NodePhaseQueued, + v1alpha1.NodePhaseQueued, + }, expectedTaskPhaseVersion: 0, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{}, @@ -642,7 +662,11 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), }, - expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedArrayNodeSubPhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseRunning, + v1alpha1.NodePhaseRunning, + }, expectedTaskPhaseVersion: 1, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING, idlcore.TaskExecution_RUNNING}, @@ -663,7 +687,11 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoSuccess(&handler.ExecutionInfo{})), handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoSuccess(&handler.ExecutionInfo{})), }, - expectedArrayNodePhase: v1alpha1.ArrayNodePhaseSucceeding, + expectedArrayNodePhase: v1alpha1.ArrayNodePhaseSucceeding, + expectedArrayNodeSubPhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseSucceeded, + v1alpha1.NodePhaseSucceeded, + }, expectedTaskPhaseVersion: 0, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_SUCCEEDED, idlcore.TaskExecution_SUCCEEDED}, @@ -684,7 +712,11 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoSuccess(&handler.ExecutionInfo{})), handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoFailure(0, "", "", &handler.ExecutionInfo{})), }, - expectedArrayNodePhase: v1alpha1.ArrayNodePhaseSucceeding, + expectedArrayNodePhase: v1alpha1.ArrayNodePhaseSucceeding, + expectedArrayNodeSubPhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseSucceeded, + v1alpha1.NodePhaseFailed, + }, expectedTaskPhaseVersion: 0, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_SUCCEEDED, idlcore.TaskExecution_FAILED}, @@ -704,7 +736,11 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoFailure(0, "", "", &handler.ExecutionInfo{})), handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoSuccess(&handler.ExecutionInfo{})), }, - expectedArrayNodePhase: v1alpha1.ArrayNodePhaseFailing, + expectedArrayNodePhase: v1alpha1.ArrayNodePhaseFailing, + expectedArrayNodeSubPhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseFailed, + v1alpha1.NodePhaseSucceeded, + }, expectedTaskPhaseVersion: 0, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_FAILED, idlcore.TaskExecution_SUCCEEDED}, @@ -724,7 +760,11 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), }, - expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + 
expectedArrayNodePhase: v1alpha1.ArrayNodePhaseExecuting, + expectedArrayNodeSubPhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseRunning, + v1alpha1.NodePhaseRunning, + }, expectedTaskPhaseVersion: 2, expectedTransitionPhase: handler.EPhaseRunning, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING, idlcore.TaskExecution_RUNNING}, @@ -749,6 +789,10 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), }, + expectedArrayNodeSubPhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseQueued, + v1alpha1.NodePhaseQueued, + }, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING, idlcore.TaskExecution_RUNNING}, useFakeEventRecorder: true, eventRecorderFailures: 5, @@ -771,6 +815,10 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), handler.DoTransition(handler.TransitionTypeEphemeral, handler.PhaseInfoRunning(&handler.ExecutionInfo{})), }, + expectedArrayNodeSubPhases: []v1alpha1.NodePhase{ + v1alpha1.NodePhaseQueued, + v1alpha1.NodePhaseQueued, + }, expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING, idlcore.TaskExecution_RUNNING}, useFakeEventRecorder: true, eventRecorderError: fmt.Errorf("err"), diff --git a/flytepropeller/pkg/controller/nodes/node_state_manager.go b/flytepropeller/pkg/controller/nodes/node_state_manager.go index 91cf1f2679..a9ead9afc3 100644 --- a/flytepropeller/pkg/controller/nodes/node_state_manager.go +++ b/flytepropeller/pkg/controller/nodes/node_state_manager.go @@ -160,11 +160,27 @@ func (n nodeStateManager) GetArrayNodeState() handler.ArrayNodeState { if an != nil { as.Phase = an.GetArrayNodePhase() as.Error = an.GetExecutionError() - as.SubNodePhases = an.GetSubNodePhases() - as.SubNodeTaskPhases = an.GetSubNodeTaskPhases() - as.SubNodeRetryAttempts = an.GetSubNodeRetryAttempts() - as.SubNodeSystemFailures = an.GetSubNodeSystemFailures() as.TaskPhaseVersion = an.GetTaskPhaseVersion() + + subNodePhases := an.GetSubNodePhases() + if subNodePhasesCopy := subNodePhases.DeepCopy(); subNodePhasesCopy != nil { + as.SubNodePhases = *subNodePhasesCopy + } + + subNodeTaskPhases := an.GetSubNodeTaskPhases() + if subNodeTaskPhasesCopy := subNodeTaskPhases.DeepCopy(); subNodeTaskPhasesCopy != nil { + as.SubNodeTaskPhases = *subNodeTaskPhasesCopy + } + + subNodeRetryAttempts := an.GetSubNodeRetryAttempts() + if subNodeRetryAttemptsCopy := subNodeRetryAttempts.DeepCopy(); subNodeRetryAttemptsCopy != nil { + as.SubNodeRetryAttempts = *subNodeRetryAttemptsCopy + } + + subNodeSystemFailures := an.GetSubNodeSystemFailures() + if subNodeSystemFailuresCopy := subNodeSystemFailures.DeepCopy(); subNodeSystemFailuresCopy != nil { + as.SubNodeSystemFailures = *subNodeSystemFailuresCopy + } } return as } From f02a8c79ae8b3c4c477eb22aea75d2ac35d356bb Mon Sep 17 00:00:00 2001 From: Eduardo Apolinario <653394+eapolinario@users.noreply.github.com> Date: Fri, 23 Aug 2024 16:33:07 -0400 Subject: [PATCH 40/65] More concise definition of launchplan (#5682) * More concise definition of launchplan Signed-off-by: Eduardo Apolinario <653394+eapolinario@users.noreply.github.com> * Update docs/flyte_fundamentals/tasks_workflows_and_launch_plans.md Co-authored-by: 
Nikki Everett Signed-off-by: Eduardo Apolinario <653394+eapolinario@users.noreply.github.com> * Update docs/flyte_fundamentals/tasks_workflows_and_launch_plans.md Co-authored-by: Nikki Everett Signed-off-by: Eduardo Apolinario <653394+eapolinario@users.noreply.github.com> --------- Signed-off-by: Eduardo Apolinario <653394+eapolinario@users.noreply.github.com> Co-authored-by: Nikki Everett Signed-off-by: Bugra Gedik --- .../tasks_workflows_and_launch_plans.md | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/docs/flyte_fundamentals/tasks_workflows_and_launch_plans.md b/docs/flyte_fundamentals/tasks_workflows_and_launch_plans.md index c8ca7f2071..f66988343a 100644 --- a/docs/flyte_fundamentals/tasks_workflows_and_launch_plans.md +++ b/docs/flyte_fundamentals/tasks_workflows_and_launch_plans.md @@ -263,15 +263,11 @@ Learn more about chaining flyte entities in the {ref}`User Guide Date: Fri, 23 Aug 2024 15:24:02 -0700 Subject: [PATCH 41/65] Auth/prevent lookup per call (#5686) * save values Signed-off-by: Yee Hing Tong * move things up Signed-off-by: Yee Hing Tong * tests Signed-off-by: Yee Hing Tong * unit test Signed-off-by: Yee Hing Tong * imports for client test Signed-off-by: Yee Hing Tong * more test Signed-off-by: Yee Hing Tong * don't test admin connection Signed-off-by: Yee Hing Tong * disable client for config Signed-off-by: Yee Hing Tong * make generate Signed-off-by: Yee Hing Tong * hide behind a once Signed-off-by: Yee Hing Tong * typo Signed-off-by: Yee Hing Tong * reset client builder test Signed-off-by: Yee Hing Tong * reset client test Signed-off-by: Yee Hing Tong * revert propeller Signed-off-by: Yee Hing Tong * delay invocation even further Signed-off-by: Yee Hing Tong --------- Signed-off-by: Yee Hing Tong Signed-off-by: Bugra Gedik --- flytectl/cmd/configuration/configuration.go | 10 +- flytectl/cmd/core/cmd_test.go | 4 +- flyteidl/clients/go/admin/auth_interceptor.go | 108 +++++++++++++----- .../clients/go/admin/auth_interceptor_test.go | 88 ++++++++++++-- flyteidl/clients/go/admin/client.go | 3 +- flyteidl/gen/pb_rust/datacatalog.rs | 9 +- flyteidl/gen/pb_rust/flyteidl.admin.rs | 49 ++++---- flyteidl/gen/pb_rust/flyteidl.cacheservice.rs | 7 +- flyteidl/gen/pb_rust/flyteidl.core.rs | 41 +++---- flyteidl/gen/pb_rust/flyteidl.event.rs | 1 + .../gen/pb_rust/flyteidl.plugins.kubeflow.rs | 3 +- flyteidl/gen/pb_rust/flyteidl.plugins.rs | 11 +- flyteidl/gen/pb_rust/flyteidl.service.rs | 9 +- 13 files changed, 238 insertions(+), 105 deletions(-) diff --git a/flytectl/cmd/configuration/configuration.go b/flytectl/cmd/configuration/configuration.go index ecbedba721..fa9d87a00a 100644 --- a/flytectl/cmd/configuration/configuration.go +++ b/flytectl/cmd/configuration/configuration.go @@ -63,9 +63,13 @@ func CreateConfigCommand() *cobra.Command { configCmd := viper.GetConfigCommand() getResourcesFuncs := map[string]cmdcore.CommandEntry{ - "init": {CmdFunc: configInitFunc, Aliases: []string{""}, ProjectDomainNotRequired: true, - Short: initCmdShort, - Long: initCmdLong, PFlagProvider: initConfig.DefaultConfig}, + "init": { + CmdFunc: configInitFunc, + Aliases: []string{""}, + ProjectDomainNotRequired: true, + DisableFlyteClient: true, + Short: initCmdShort, + Long: initCmdLong, PFlagProvider: initConfig.DefaultConfig}, } configCmd.Flags().BoolVar(&initConfig.DefaultConfig.Force, "force", false, "Force to overwrite the default config file without confirmation") diff --git a/flytectl/cmd/core/cmd_test.go b/flytectl/cmd/core/cmd_test.go index 
e3a1843105..3f5b3b19a5 100644 --- a/flytectl/cmd/core/cmd_test.go +++ b/flytectl/cmd/core/cmd_test.go @@ -21,7 +21,7 @@ func TestGenerateCommandFunc(t *testing.T) { adminCfg.Endpoint = config.URL{URL: url.URL{Host: "dummyHost"}} adminCfg.AuthType = admin.AuthTypePkce rootCmd := &cobra.Command{} - cmdEntry := CommandEntry{CmdFunc: testCommandFunc, ProjectDomainNotRequired: true} + cmdEntry := CommandEntry{CmdFunc: testCommandFunc, ProjectDomainNotRequired: true, DisableFlyteClient: true} fn := generateCommandFunc(cmdEntry) assert.Nil(t, fn(rootCmd, []string{})) }) @@ -30,7 +30,7 @@ func TestGenerateCommandFunc(t *testing.T) { adminCfg := admin.GetConfig(context.Background()) adminCfg.Endpoint = config.URL{URL: url.URL{Host: ""}} rootCmd := &cobra.Command{} - cmdEntry := CommandEntry{CmdFunc: testCommandFunc, ProjectDomainNotRequired: true} + cmdEntry := CommandEntry{CmdFunc: testCommandFunc, ProjectDomainNotRequired: true, DisableFlyteClient: true} fn := generateCommandFunc(cmdEntry) assert.Nil(t, fn(rootCmd, []string{})) }) diff --git a/flyteidl/clients/go/admin/auth_interceptor.go b/flyteidl/clients/go/admin/auth_interceptor.go index 4cebf6440f..5d3d9fd92f 100644 --- a/flyteidl/clients/go/admin/auth_interceptor.go +++ b/flyteidl/clients/go/admin/auth_interceptor.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "net/http" + "sync" "golang.org/x/oauth2" "google.golang.org/grpc" @@ -20,33 +21,10 @@ const ProxyAuthorizationHeader = "proxy-authorization" // MaterializeCredentials will attempt to build a TokenSource given the anonymously available information exposed by the server. // Once established, it'll invoke PerRPCCredentialsFuture.Store() on perRPCCredentials to populate it with the appropriate values. -func MaterializeCredentials(ctx context.Context, cfg *Config, tokenCache cache.TokenCache, - perRPCCredentials *PerRPCCredentialsFuture, proxyCredentialsFuture *PerRPCCredentialsFuture) error { - authMetadataClient, err := InitializeAuthMetadataClient(ctx, cfg, proxyCredentialsFuture) - if err != nil { - return fmt.Errorf("failed to initialized Auth Metadata Client. Error: %w", err) - } - - tokenSourceProvider, err := NewTokenSourceProvider(ctx, cfg, tokenCache, authMetadataClient) - if err != nil { - return fmt.Errorf("failed to initialized token source provider. Err: %w", err) - } - - authorizationMetadataKey := cfg.AuthorizationHeader - if len(authorizationMetadataKey) == 0 { - clientMetadata, err := authMetadataClient.GetPublicClientConfig(ctx, &service.PublicClientAuthConfigRequest{}) - if err != nil { - return fmt.Errorf("failed to fetch client metadata. Error: %v", err) - } - authorizationMetadataKey = clientMetadata.AuthorizationMetadataKey - } - - tokenSource, err := tokenSourceProvider.GetTokenSource(ctx) - if err != nil { - return fmt.Errorf("failed to get token source. Error: %w", err) - } +func MaterializeCredentials(tokenSource oauth2.TokenSource, cfg *Config, authorizationMetadataKey string, + perRPCCredentials *PerRPCCredentialsFuture) error { - _, err = tokenSource.Token() + _, err := tokenSource.Token() if err != nil { return fmt.Errorf("failed to issue token. 
Error: %w", err) } @@ -127,6 +105,60 @@ func setHTTPClientContext(ctx context.Context, cfg *Config, proxyCredentialsFutu return context.WithValue(ctx, oauth2.HTTPClient, httpClient) } +type OauthMetadataProvider struct { + authorizationMetadataKey string + tokenSource oauth2.TokenSource + once sync.Once +} + +func (o *OauthMetadataProvider) getTokenSourceAndMetadata(cfg *Config, tokenCache cache.TokenCache, proxyCredentialsFuture *PerRPCCredentialsFuture) error { + ctx := context.Background() + + authMetadataClient, err := InitializeAuthMetadataClient(ctx, cfg, proxyCredentialsFuture) + if err != nil { + return fmt.Errorf("failed to initialized Auth Metadata Client. Error: %w", err) + } + + tokenSourceProvider, err := NewTokenSourceProvider(ctx, cfg, tokenCache, authMetadataClient) + if err != nil { + return fmt.Errorf("failed to initialize token source provider. Err: %w", err) + } + + authorizationMetadataKey := cfg.AuthorizationHeader + if len(authorizationMetadataKey) == 0 { + clientMetadata, err := authMetadataClient.GetPublicClientConfig(ctx, &service.PublicClientAuthConfigRequest{}) + if err != nil { + return fmt.Errorf("failed to fetch client metadata. Error: %v", err) + } + authorizationMetadataKey = clientMetadata.AuthorizationMetadataKey + } + + tokenSource, err := tokenSourceProvider.GetTokenSource(ctx) + if err != nil { + return fmt.Errorf("failed to get token source. Error: %w", err) + } + + o.authorizationMetadataKey = authorizationMetadataKey + o.tokenSource = tokenSource + + return nil +} + +func (o *OauthMetadataProvider) GetOauthMetadata(cfg *Config, tokenCache cache.TokenCache, proxyCredentialsFuture *PerRPCCredentialsFuture) error { + // Ensure loadTokenRelated() is only executed once + var err error + o.once.Do(func() { + err = o.getTokenSourceAndMetadata(cfg, tokenCache, proxyCredentialsFuture) + if err != nil { + logger.Errorf(context.Background(), "Failed to load token related config. Error: %v", err) + } + }) + if err != nil { + return err + } + return nil +} + // NewAuthInterceptor creates a new grpc.UnaryClientInterceptor that forwards the grpc call and inspects the error. // It will first invoke the grpc pipeline (to proceed with the request) with no modifications. It's expected for the grpc // pipeline to already have a grpc.WithPerRPCCredentials() DialOption. If the perRPCCredentials has already been initialized, @@ -138,13 +170,26 @@ func setHTTPClientContext(ctx context.Context, cfg *Config, proxyCredentialsFutu // a token source has been created, it'll invoke the grpc pipeline again, this time the grpc.PerRPCCredentials should // be able to find and acquire a valid AccessToken to annotate the request with. func NewAuthInterceptor(cfg *Config, tokenCache cache.TokenCache, credentialsFuture *PerRPCCredentialsFuture, proxyCredentialsFuture *PerRPCCredentialsFuture) grpc.UnaryClientInterceptor { + + oauthMetadataProvider := OauthMetadataProvider{ + once: sync.Once{}, + } + return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + ctx = setHTTPClientContext(ctx, cfg, proxyCredentialsFuture) // If there is already a token in the cache (e.g. key-ring), we should use it immediately... 
t, _ := tokenCache.GetToken() if t != nil { - err := MaterializeCredentials(ctx, cfg, tokenCache, credentialsFuture, proxyCredentialsFuture) + err := oauthMetadataProvider.GetOauthMetadata(cfg, tokenCache, proxyCredentialsFuture) + if err != nil { + return err + } + authorizationMetadataKey := oauthMetadataProvider.authorizationMetadataKey + tokenSource := oauthMetadataProvider.tokenSource + + err = MaterializeCredentials(tokenSource, cfg, authorizationMetadataKey, credentialsFuture) if err != nil { return fmt.Errorf("failed to materialize credentials. Error: %v", err) } @@ -157,6 +202,13 @@ func NewAuthInterceptor(cfg *Config, tokenCache cache.TokenCache, credentialsFut if st, ok := status.FromError(err); ok { // If the error we receive from executing the request expects if shouldAttemptToAuthenticate(st.Code()) { + err := oauthMetadataProvider.GetOauthMetadata(cfg, tokenCache, proxyCredentialsFuture) + if err != nil { + return err + } + authorizationMetadataKey := oauthMetadataProvider.authorizationMetadataKey + tokenSource := oauthMetadataProvider.tokenSource + err = func() error { if !tokenCache.TryLock() { tokenCache.CondWait() @@ -171,7 +223,7 @@ func NewAuthInterceptor(cfg *Config, tokenCache cache.TokenCache, credentialsFut } logger.Debugf(ctx, "Request failed due to [%v]. Attempting to establish an authenticated connection and trying again.", st.Code()) - newErr := MaterializeCredentials(ctx, cfg, tokenCache, credentialsFuture, proxyCredentialsFuture) + newErr := MaterializeCredentials(tokenSource, cfg, authorizationMetadataKey, credentialsFuture) if newErr != nil { errString := fmt.Sprintf("authentication error! Original Error: %v, Auth Error: %v", err, newErr) logger.Errorf(ctx, errString) diff --git a/flyteidl/clients/go/admin/auth_interceptor_test.go b/flyteidl/clients/go/admin/auth_interceptor_test.go index 10c96625b7..0f47e97b9c 100644 --- a/flyteidl/clients/go/admin/auth_interceptor_test.go +++ b/flyteidl/clients/go/admin/auth_interceptor_test.go @@ -24,6 +24,7 @@ import ( "github.com/flyteorg/flyte/flyteidl/clients/go/admin/cache/mocks" adminMocks "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" "github.com/flyteorg/flyte/flytestdlib/config" "github.com/flyteorg/flyte/flytestdlib/logger" @@ -141,11 +142,34 @@ func Test_newAuthInterceptor(t *testing.T) { err := json.Unmarshal(plan, &tokenData) assert.NoError(t, err) t.Run("Other Error", func(t *testing.T) { + ctx := context.Background() + httpPort := rand.IntnRange(10000, 60000) + grpcPort := rand.IntnRange(10000, 60000) + m := &adminMocks.AuthMetadataServiceServer{} + m.OnGetOAuth2MetadataMatch(mock.Anything, mock.Anything).Return(&service.OAuth2MetadataResponse{ + AuthorizationEndpoint: fmt.Sprintf("http://localhost:%d/oauth2/authorize", httpPort), + TokenEndpoint: fmt.Sprintf("http://localhost:%d/oauth2/token", httpPort), + JwksUri: fmt.Sprintf("http://localhost:%d/oauth2/jwks", httpPort), + }, nil) + + m.OnGetPublicClientConfigMatch(mock.Anything, mock.Anything).Return(&service.PublicClientAuthConfigResponse{ + Scopes: []string{"all"}, + }, nil) + + s := newAuthMetadataServer(t, grpcPort, httpPort, m) + assert.NoError(t, s.Start(ctx)) + defer s.Close() + u, err := url.Parse(fmt.Sprintf("dns:///localhost:%d", grpcPort)) + assert.NoError(t, err) f := NewPerRPCCredentialsFuture() p := NewPerRPCCredentialsFuture() mockTokenCache := &mocks.TokenCache{} mockTokenCache.OnGetTokenMatch().Return(&tokenData, nil) - interceptor := 
NewAuthInterceptor(&Config{}, mockTokenCache, f, p) + mockTokenCache.OnSaveTokenMatch(mock.Anything).Return(nil) + interceptor := NewAuthInterceptor(&Config{ + Endpoint: config.URL{URL: *u}, + UseInsecureConnection: true, + }, mockTokenCache, f, p) otherError := func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, opts ...grpc.CallOption) error { return status.New(codes.Canceled, "").Err() } @@ -209,6 +233,14 @@ func Test_newAuthInterceptor(t *testing.T) { httpPort := rand.IntnRange(10000, 60000) grpcPort := rand.IntnRange(10000, 60000) m := &adminMocks.AuthMetadataServiceServer{} + m.OnGetOAuth2MetadataMatch(mock.Anything, mock.Anything).Return(&service.OAuth2MetadataResponse{ + AuthorizationEndpoint: fmt.Sprintf("http://localhost:%d/oauth2/authorize", httpPort), + TokenEndpoint: fmt.Sprintf("http://localhost:%d/oauth2/token", httpPort), + JwksUri: fmt.Sprintf("http://localhost:%d/oauth2/jwks", httpPort), + }, nil) + m.OnGetPublicClientConfigMatch(mock.Anything, mock.Anything).Return(&service.PublicClientAuthConfigResponse{ + Scopes: []string{"all"}, + }, nil) s := newAuthMetadataServer(t, grpcPort, httpPort, m) ctx := context.Background() assert.NoError(t, s.Start(ctx)) @@ -283,12 +315,13 @@ func Test_newAuthInterceptor(t *testing.T) { }) } -func TestMaterializeCredentials(t *testing.T) { +func TestNewAuthInterceptorAndMaterialize(t *testing.T) { t.Run("No oauth2 metadata endpoint or Public client config lookup", func(t *testing.T) { httpPort := rand.IntnRange(10000, 60000) grpcPort := rand.IntnRange(10000, 60000) + fakeToken := &oauth2.Token{} c := &mocks.TokenCache{} - c.OnGetTokenMatch().Return(nil, nil) + c.OnGetTokenMatch().Return(fakeToken, nil) c.OnSaveTokenMatch(mock.Anything).Return(nil) m := &adminMocks.AuthMetadataServiceServer{} m.OnGetOAuth2MetadataMatch(mock.Anything, mock.Anything).Return(nil, errors.New("unexpected call to get oauth2 metadata")) @@ -304,7 +337,7 @@ func TestMaterializeCredentials(t *testing.T) { f := NewPerRPCCredentialsFuture() p := NewPerRPCCredentialsFuture() - err = MaterializeCredentials(ctx, &Config{ + cfg := &Config{ Endpoint: config.URL{URL: *u}, UseInsecureConnection: true, AuthType: AuthTypeClientSecret, @@ -312,14 +345,22 @@ func TestMaterializeCredentials(t *testing.T) { Scopes: []string{"all"}, Audience: "http://localhost:30081", AuthorizationHeader: "authorization", - }, c, f, p) + } + + intercept := NewAuthInterceptor(cfg, c, f, p) + // Invoke Materialize inside the intercept + err = intercept(ctx, "GET", nil, nil, nil, func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, opts ...grpc.CallOption) error { + return nil + }) assert.NoError(t, err) }) + t.Run("Failed to fetch client metadata", func(t *testing.T) { httpPort := rand.IntnRange(10000, 60000) grpcPort := rand.IntnRange(10000, 60000) c := &mocks.TokenCache{} - c.OnGetTokenMatch().Return(nil, nil) + fakeToken := &oauth2.Token{} + c.OnGetTokenMatch().Return(fakeToken, nil) c.OnSaveTokenMatch(mock.Anything).Return(nil) m := &adminMocks.AuthMetadataServiceServer{} m.OnGetOAuth2MetadataMatch(mock.Anything, mock.Anything).Return(nil, errors.New("unexpected call to get oauth2 metadata")) @@ -333,17 +374,44 @@ func TestMaterializeCredentials(t *testing.T) { u, err := url.Parse(fmt.Sprintf("dns:///localhost:%d", grpcPort)) assert.NoError(t, err) + cfg := &Config{ + Endpoint: config.URL{URL: *u}, + UseInsecureConnection: true, + AuthType: AuthTypeClientSecret, + TokenURL: fmt.Sprintf("http://localhost:%d/api/v1/token", 
httpPort), + Scopes: []string{"all"}, + } f := NewPerRPCCredentialsFuture() p := NewPerRPCCredentialsFuture() + intercept := NewAuthInterceptor(cfg, c, f, p) + err = intercept(ctx, "GET", nil, nil, nil, func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, opts ...grpc.CallOption) error { + return nil + }) + assert.EqualError(t, err, "failed to fetch client metadata. Error: rpc error: code = Unknown desc = expected err") + }) +} + +func TestSimpleMaterializeCredentials(t *testing.T) { + t.Run("simple materialize", func(t *testing.T) { + httpPort := rand.IntnRange(10000, 60000) + grpcPort := rand.IntnRange(10000, 60000) + u, err := url.Parse(fmt.Sprintf("dns:///localhost:%d", grpcPort)) + assert.NoError(t, err) + + f := NewPerRPCCredentialsFuture() - err = MaterializeCredentials(ctx, &Config{ + dummySource := DummyTestTokenSource{} + + err = MaterializeCredentials(dummySource, &Config{ Endpoint: config.URL{URL: *u}, UseInsecureConnection: true, AuthType: AuthTypeClientSecret, - TokenURL: fmt.Sprintf("http://localhost:%d/api/v1/token", httpPort), + TokenURL: fmt.Sprintf("http://localhost:%d/oauth2/token", httpPort), Scopes: []string{"all"}, - }, c, f, p) - assert.EqualError(t, err, "failed to fetch client metadata. Error: rpc error: code = Unknown desc = expected err") + Audience: "http://localhost:30081", + AuthorizationHeader: "authorization", + }, "authorization", f) + assert.NoError(t, err) }) } diff --git a/flyteidl/clients/go/admin/client.go b/flyteidl/clients/go/admin/client.go index 9758bd9dec..757f25b160 100644 --- a/flyteidl/clients/go/admin/client.go +++ b/flyteidl/clients/go/admin/client.go @@ -179,8 +179,9 @@ func initializeClients(ctx context.Context, cfg *Config, tokenCache cache.TokenC credentialsFuture := NewPerRPCCredentialsFuture() proxyCredentialsFuture := NewPerRPCCredentialsFuture() + authInterceptor := NewAuthInterceptor(cfg, tokenCache, credentialsFuture, proxyCredentialsFuture) opts = append(opts, - grpc.WithChainUnaryInterceptor(NewAuthInterceptor(cfg, tokenCache, credentialsFuture, proxyCredentialsFuture)), + grpc.WithChainUnaryInterceptor(authInterceptor), grpc.WithPerRPCCredentials(credentialsFuture)) if cfg.DefaultServiceConfig != "" { diff --git a/flyteidl/gen/pb_rust/datacatalog.rs b/flyteidl/gen/pb_rust/datacatalog.rs index f181704954..b49cab340c 100644 --- a/flyteidl/gen/pb_rust/datacatalog.rs +++ b/flyteidl/gen/pb_rust/datacatalog.rs @@ -1,4 +1,5 @@ // @generated +// This file is @generated by prost-build. /// /// Request message for creating a Dataset. #[allow(clippy::derive_partial_eq_without_eq)] @@ -10,7 +11,7 @@ pub struct CreateDatasetRequest { /// /// Response message for creating a Dataset #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct CreateDatasetResponse { } /// @@ -74,7 +75,7 @@ pub struct CreateArtifactRequest { /// /// Response message for creating an Artifact. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct CreateArtifactResponse { } /// @@ -88,7 +89,7 @@ pub struct AddTagRequest { /// /// Response message for tagging an Artifact. 
#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct AddTagResponse { } /// List the artifacts that belong to the Dataset, optionally filtered using filtered expression. @@ -245,7 +246,7 @@ pub struct ReleaseReservationRequest { } /// Response to release reservation #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ReleaseReservationResponse { } /// diff --git a/flyteidl/gen/pb_rust/flyteidl.admin.rs b/flyteidl/gen/pb_rust/flyteidl.admin.rs index ca3270264b..30f39ab45d 100644 --- a/flyteidl/gen/pb_rust/flyteidl.admin.rs +++ b/flyteidl/gen/pb_rust/flyteidl.admin.rs @@ -1,4 +1,5 @@ // @generated +// This file is @generated by prost-build. /// Represents a subset of runtime task execution metadata that are relevant to external plugins. /// /// ID of the task execution @@ -194,7 +195,7 @@ pub struct DeleteTaskRequest { } /// Response to delete a task. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct DeleteTaskResponse { } /// A message containing the agent metadata. @@ -246,7 +247,7 @@ pub struct GetAgentResponse { } /// A request to list all agents. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ListAgentsRequest { } /// A response containing a list of agents. @@ -608,7 +609,7 @@ pub struct NamedEntityUpdateRequest { } /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct NamedEntityUpdateResponse { } /// Shared request structure to fetch a single resource. @@ -1003,7 +1004,7 @@ pub struct WorkflowExecutionEventRequest { } /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct WorkflowExecutionEventResponse { } /// Request to send a notification that a node execution event has occurred. @@ -1019,7 +1020,7 @@ pub struct NodeExecutionEventRequest { } /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct NodeExecutionEventResponse { } /// Request to send a notification that a task execution event has occurred. @@ -1035,7 +1036,7 @@ pub struct TaskExecutionEventRequest { } /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct TaskExecutionEventResponse { } /// Defines a set of overridable task resource attributes set during task registration. @@ -1717,7 +1718,7 @@ pub struct ExecutionTerminateRequest { } /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ExecutionTerminateResponse { } /// Request structure to fetch inputs, output and other data produced by an execution. 
@@ -1774,7 +1775,7 @@ pub struct ExecutionStateChangeDetails { pub principal: ::prost::alloc::string::String, } #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ExecutionUpdateResponse { } /// WorkflowExecutionGetMetricsRequest represents a request to retrieve metrics for the specified workflow execution. @@ -1828,7 +1829,7 @@ impl ExecutionState { } /// Option for schedules run at a certain frequency e.g. every 2 minutes. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct FixedRate { #[prost(uint32, tag="1")] pub value: u32, @@ -1919,7 +1920,7 @@ pub struct LaunchPlanCreateRequest { } /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct LaunchPlanCreateResponse { } /// A LaunchPlan provides the capability to templatize workflow executions. @@ -2084,7 +2085,7 @@ pub struct LaunchPlanUpdateRequest { } /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct LaunchPlanUpdateResponse { } /// Represents a request struct for finding an active launch plan for a given NamedEntityIdentifier @@ -2460,7 +2461,7 @@ pub struct EmailMessage { } /// Empty request for GetDomain #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct GetDomainRequest { } /// Namespace within a project commonly used to differentiate between different service instances. @@ -2596,12 +2597,12 @@ pub struct ProjectRegisterRequest { } /// Purposefully empty, may be updated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ProjectRegisterResponse { } /// Purposefully empty, may be updated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ProjectUpdateResponse { } #[allow(clippy::derive_partial_eq_without_eq)] @@ -2652,7 +2653,7 @@ pub struct ProjectAttributesUpdateRequest { } /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ProjectAttributesUpdateResponse { } /// Request to get an individual project level attribute override. @@ -2699,7 +2700,7 @@ pub struct ProjectAttributesDeleteRequest { } /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ProjectAttributesDeleteResponse { } /// Defines a set of custom matching attributes which defines resource defaults for a project and domain. @@ -2730,7 +2731,7 @@ pub struct ProjectDomainAttributesUpdateRequest { } /// Purposefully empty, may be populated in the future. 
#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ProjectDomainAttributesUpdateResponse { } /// Request to get an individual project domain attribute override. @@ -2785,7 +2786,7 @@ pub struct ProjectDomainAttributesDeleteRequest { } /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ProjectDomainAttributesDeleteResponse { } /// SignalGetOrCreateRequest represents a request structure to retrieve or create a signal. @@ -2857,7 +2858,7 @@ pub struct SignalSetRequest { /// /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct SignalSetResponse { } /// Signal encapsulates a unique identifier, associated metadata, and a value for a single Flyte @@ -2895,7 +2896,7 @@ pub struct TaskCreateRequest { /// /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct TaskCreateResponse { } /// Flyte workflows are composed of many ordered tasks. That is small, reusable, self-contained logical blocks @@ -3155,7 +3156,7 @@ pub struct Version { } /// Empty request for GetVersion #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct GetVersionRequest { } /// Represents a request structure to create a revision of a workflow. @@ -3174,7 +3175,7 @@ pub struct WorkflowCreateRequest { } /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct WorkflowCreateResponse { } /// Represents the workflow structure stored in the Admin @@ -3295,7 +3296,7 @@ pub struct WorkflowAttributesUpdateRequest { } /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct WorkflowAttributesUpdateResponse { } /// Request to get an individual workflow attribute override. @@ -3357,7 +3358,7 @@ pub struct WorkflowAttributesDeleteRequest { } /// Purposefully empty, may be populated in the future. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct WorkflowAttributesDeleteResponse { } // @@protoc_insertion_point(module) diff --git a/flyteidl/gen/pb_rust/flyteidl.cacheservice.rs b/flyteidl/gen/pb_rust/flyteidl.cacheservice.rs index ff3264c633..d63e4d31bf 100644 --- a/flyteidl/gen/pb_rust/flyteidl.cacheservice.rs +++ b/flyteidl/gen/pb_rust/flyteidl.cacheservice.rs @@ -1,4 +1,5 @@ // @generated +// This file is @generated by prost-build. 
/// /// Additional metadata as key-value pairs #[allow(clippy::derive_partial_eq_without_eq)] @@ -88,7 +89,7 @@ pub struct PutCacheRequest { /// /// Empty, success indicated by no errors #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct PutCacheResponse { } /// @@ -105,7 +106,7 @@ pub struct DeleteCacheRequest { /// /// Empty, success indicated by no errors #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct DeleteCacheResponse { } /// A reservation including owner, heartbeat interval, expiration timestamp, and various metadata. @@ -166,7 +167,7 @@ pub struct ReleaseReservationRequest { /// /// Empty, success indicated by no errors #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ReleaseReservationResponse { } include!("flyteidl.cacheservice.tonic.rs"); diff --git a/flyteidl/gen/pb_rust/flyteidl.core.rs b/flyteidl/gen/pb_rust/flyteidl.core.rs index 0876c70d6f..f2b73c9b11 100644 --- a/flyteidl/gen/pb_rust/flyteidl.core.rs +++ b/flyteidl/gen/pb_rust/flyteidl.core.rs @@ -1,4 +1,5 @@ // @generated +// This file is @generated by prost-build. /// Defines schema columns and types to strongly type-validate schemas interoperability. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -375,7 +376,7 @@ pub mod primitive { /// Used to denote a nil/null/None assignment to a scalar value. The underlying LiteralType for Void is intentionally /// undefined since it can be assigned to a scalar of any LiteralType. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Void { } /// Refers to an offloaded set of files. It encapsulates the type of the store and a unique uri for where the data is. @@ -600,7 +601,7 @@ pub struct KeyValuePair { } /// Retry strategy associated with an executable unit. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct RetryStrategy { /// Number of retries. Retries will be consumed when the job fails with a recoverable error. /// The number of retries must be less than or equals to 10. @@ -770,7 +771,7 @@ pub struct InputBindingData { pub var: ::prost::alloc::string::String, } #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct RuntimeBinding { } #[allow(clippy::derive_partial_eq_without_eq)] @@ -1390,7 +1391,7 @@ pub mod task_metadata { /// Identify whether task is interruptible #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] +#[derive(Clone, Copy, PartialEq, ::prost::Oneof)] pub enum InterruptibleValue { #[prost(bool, tag="8")] Interruptible(bool), @@ -1460,7 +1461,7 @@ pub mod task_template { /// Defines port properties for a container. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ContainerPort { /// Number of port to expose on the pod's IP address. /// This must be a valid port number, 0 < x < 65536. 
@@ -1551,7 +1552,7 @@ pub mod container { } /// Strategy to use when dealing with Blob, Schema, or multipart blob data (large datasets) #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct IoStrategy { /// Mode to use to manage downloads #[prost(enumeration="io_strategy::DownloadMode", tag="1")] @@ -1926,7 +1927,7 @@ pub mod conjunction_expression { } /// Indicates various phases of Workflow Execution #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct WorkflowExecution { } /// Nested message and enum types in `WorkflowExecution`. @@ -1984,7 +1985,7 @@ pub mod workflow_execution { } /// Indicates various phases of Node Execution that only include the time spent to run the nodes/workflows #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct NodeExecution { } /// Nested message and enum types in `NodeExecution`. @@ -2046,7 +2047,7 @@ pub mod node_execution { /// Phases that task plugins can go through. Not all phases may be applicable to a specific plugin task, /// but this is the cumulative list that customers may want to know about for their task. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct TaskExecution { } /// Nested message and enum types in `TaskExecution`. @@ -2200,7 +2201,7 @@ pub mod task_log { } /// Represents customized execution run-time attributes. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct QualityOfServiceSpec { /// Indicates how much queueing delay an execution can tolerate. #[prost(message, optional, tag="1")] @@ -2208,7 +2209,7 @@ pub struct QualityOfServiceSpec { } /// Indicates the priority of an execution. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct QualityOfService { #[prost(oneof="quality_of_service::Designation", tags="1, 2")] pub designation: ::core::option::Option, @@ -2249,7 +2250,7 @@ pub mod quality_of_service { } } #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] +#[derive(Clone, Copy, PartialEq, ::prost::Oneof)] pub enum Designation { #[prost(enumeration="Tier", tag="1")] Tier(i32), @@ -2369,7 +2370,7 @@ pub struct SignalCondition { } /// SleepCondition represents a dependency on waiting for the specified duration. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct SleepCondition { /// The overall duration for this sleep. #[prost(message, optional, tag="1")] @@ -2448,7 +2449,7 @@ pub mod array_node { } } #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] +#[derive(Clone, Copy, PartialEq, ::prost::Oneof)] pub enum ParallelismOption { /// parallelism defines the minimum number of instances to bring up concurrently at any given /// point. 
Note that this is an optimistic restriction and that, due to network partitioning or @@ -2458,7 +2459,7 @@ pub mod array_node { Parallelism(u32), } #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] +#[derive(Clone, Copy, PartialEq, ::prost::Oneof)] pub enum SuccessCriteria { /// min_successes is an absolute number of the minimum number of successful completions of /// sub-nodes. As soon as this criteria is met, the ArrayNode will be marked as successful @@ -2502,14 +2503,14 @@ pub struct NodeMetadata { pub mod node_metadata { /// Identify whether node is interruptible #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] +#[derive(Clone, Copy, PartialEq, ::prost::Oneof)] pub enum InterruptibleValue { #[prost(bool, tag="6")] Interruptible(bool), } /// Identify whether a node should have it's outputs cached. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] +#[derive(Clone, Copy, PartialEq, ::prost::Oneof)] pub enum CacheableValue { #[prost(bool, tag="7")] Cacheable(bool), @@ -2523,7 +2524,7 @@ pub mod node_metadata { } /// Identify whether caching operations involving this node should be serialized. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] +#[derive(Clone, Copy, PartialEq, ::prost::Oneof)] pub enum CacheSerializableValue { #[prost(bool, tag="9")] CacheSerializable(bool), @@ -2653,7 +2654,7 @@ pub mod workflow_metadata { /// If you are adding a setting that applies to both the Workflow itself, and everything underneath it, it should be /// added to both this object and the WorkflowMetadata object above. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct WorkflowMetadataDefaults { /// Whether child nodes of the workflow are interruptible. #[prost(bool, tag="1")] @@ -2884,7 +2885,7 @@ pub mod catalog_metadata { } } #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct CatalogReservation { } /// Nested message and enum types in `CatalogReservation`. diff --git a/flyteidl/gen/pb_rust/flyteidl.event.rs b/flyteidl/gen/pb_rust/flyteidl.event.rs index 281ee07daa..80a8a11442 100644 --- a/flyteidl/gen/pb_rust/flyteidl.event.rs +++ b/flyteidl/gen/pb_rust/flyteidl.event.rs @@ -1,4 +1,5 @@ // @generated +// This file is @generated by prost-build. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct WorkflowExecutionEvent { diff --git a/flyteidl/gen/pb_rust/flyteidl.plugins.kubeflow.rs b/flyteidl/gen/pb_rust/flyteidl.plugins.kubeflow.rs index 9eebb7bc9e..b2a4d69f57 100644 --- a/flyteidl/gen/pb_rust/flyteidl.plugins.kubeflow.rs +++ b/flyteidl/gen/pb_rust/flyteidl.plugins.kubeflow.rs @@ -1,6 +1,7 @@ // @generated +// This file is @generated by prost-build. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct RunPolicy { /// Defines the policy to kill pods after the job completes. Default to None. 
#[prost(enumeration="CleanPodPolicy", tag="1")] diff --git a/flyteidl/gen/pb_rust/flyteidl.plugins.rs b/flyteidl/gen/pb_rust/flyteidl.plugins.rs index 28c2f77e97..0252c9d882 100644 --- a/flyteidl/gen/pb_rust/flyteidl.plugins.rs +++ b/flyteidl/gen/pb_rust/flyteidl.plugins.rs @@ -1,8 +1,9 @@ // @generated +// This file is @generated by prost-build. /// Describes a job that can process independent pieces of data concurrently. Multiple copies of the runnable component /// will be executed concurrently. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ArrayJob { /// Defines the maximum number of instances to bring up concurrently at any given point. Note that this is an /// optimistic restriction and that, due to network partitioning or other failures, the actual number of currently @@ -20,7 +21,7 @@ pub struct ArrayJob { /// Nested message and enum types in `ArrayJob`. pub mod array_job { #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] +#[derive(Clone, Copy, PartialEq, ::prost::Oneof)] pub enum SuccessCriteria { /// An absolute number of the minimum number of successful completions of subtasks. As soon as this criteria is met, /// the array job will be marked as successful and outputs will be computed. This has to be a non-negative number if @@ -120,7 +121,7 @@ pub struct DaskWorkerGroup { /// MPI operator proposal /// Custom proto for plugin that enables distributed training using #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct DistributedMpiTrainingTask { /// number of worker spawned in the cluster for this job #[prost(int32, tag="1")] @@ -277,7 +278,7 @@ pub struct WorkerGroupSpec { pub ray_start_params: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, } #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct SparkApplication { } /// Nested message and enum types in `SparkApplication`. @@ -347,7 +348,7 @@ pub struct SparkJob { } /// Custom proto for plugin that enables distributed training using #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct DistributedTensorflowTrainingTask { /// number of worker replicas spawned in the cluster for this job #[prost(int32, tag="1")] diff --git a/flyteidl/gen/pb_rust/flyteidl.service.rs b/flyteidl/gen/pb_rust/flyteidl.service.rs index 2fb065da4e..8c5a33de9a 100644 --- a/flyteidl/gen/pb_rust/flyteidl.service.rs +++ b/flyteidl/gen/pb_rust/flyteidl.service.rs @@ -1,6 +1,7 @@ // @generated +// This file is @generated by prost-build. 
#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct OAuth2MetadataRequest { } /// OAuth2MetadataResponse defines an RFC-Compliant response for /.well-known/oauth-authorization-server metadata @@ -44,7 +45,7 @@ pub struct OAuth2MetadataResponse { pub device_authorization_endpoint: ::prost::alloc::string::String, } #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct PublicClientAuthConfigRequest { } /// FlyteClientResponse encapsulates public information that flyte clients (CLIs... etc.) can use to authenticate users. @@ -335,7 +336,7 @@ pub struct TaskDeleteRequest { } /// Response to delete a task. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct TaskDeleteResponse { } /// The state of the execution is used to control its visibility in the UI/CLI. @@ -375,7 +376,7 @@ impl State { } } #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct UserInfoRequest { } /// See the OpenID Connect spec at for more information. From a86feb002792538d8b82830e22d53cb5ae352816 Mon Sep 17 00:00:00 2001 From: Flyte Bot Date: Fri, 23 Aug 2024 16:07:24 -0700 Subject: [PATCH 42/65] Update Flyte components - v1.13.1-rc1 (#5691) * Update Flyte Components Signed-off-by: Flyte-Bot * Add changelog and bump version in conf.py Signed-off-by: Eduardo Apolinario --------- Signed-off-by: Flyte-Bot Signed-off-by: Eduardo Apolinario Co-authored-by: eapolinario Signed-off-by: Bugra Gedik --- CHANGELOG/CHANGELOG-v1.13.1-rc1.md | 35 ++++++++++++++++++ charts/flyte-binary/README.md | 2 +- charts/flyte-binary/values.yaml | 2 +- charts/flyte-core/README.md | 12 +++---- charts/flyte-core/values.yaml | 10 +++--- charts/flyte/README.md | 16 ++++----- charts/flyte/values.yaml | 10 +++--- charts/flyteagent/README.md | 2 +- charts/flyteagent/values.yaml | 2 +- .../agent/flyte_agent_helm_generated.yaml | 2 +- .../flyte_aws_scheduler_helm_generated.yaml | 30 ++++++++-------- .../flyte_helm_controlplane_generated.yaml | 20 +++++------ .../eks/flyte_helm_dataplane_generated.yaml | 14 ++++---- deployment/eks/flyte_helm_generated.yaml | 34 +++++++++--------- .../flyte_helm_controlplane_generated.yaml | 20 +++++------ .../gcp/flyte_helm_dataplane_generated.yaml | 14 ++++---- deployment/gcp/flyte_helm_generated.yaml | 34 +++++++++--------- .../flyte_sandbox_binary_helm_generated.yaml | 4 +-- deployment/sandbox/flyte_helm_generated.yaml | 34 +++++++++--------- .../manifests/complete-agent.yaml | 10 +++--- .../sandbox-bundled/manifests/complete.yaml | 8 ++--- docker/sandbox-bundled/manifests/dev.yaml | 4 +-- docs/conf.py | 2 +- .../generated/flyteadmin_config.rst | 36 +++++++++++++++++++ .../generated/flytepropeller_config.rst | 36 +++++++++++++++++++ .../generated/scheduler_config.rst | 36 +++++++++++++++++++ 26 files changed, 286 insertions(+), 143 deletions(-) create mode 100644 CHANGELOG/CHANGELOG-v1.13.1-rc1.md diff --git a/CHANGELOG/CHANGELOG-v1.13.1-rc1.md b/CHANGELOG/CHANGELOG-v1.13.1-rc1.md new file mode 100644 index 0000000000..b9b3099f39 --- /dev/null +++ b/CHANGELOG/CHANGELOG-v1.13.1-rc1.md @@ -0,0 +1,35 @@ +# Flyte v1.13.0-rc1 Release Notes + +## What's Changed +* Add CustomHeaderMatcher to pass additional headers by 
@andrewwdye in https://github.com/flyteorg/flyte/pull/5563 +* Turn flyteidl and flytectl releases into manual gh workflows by @eapolinario in https://github.com/flyteorg/flyte/pull/5635 +* docs: fix typo by @cratiu222 in https://github.com/flyteorg/flyte/pull/5643 +* Use enable_deck=True in docs by @thomasjpfan in https://github.com/flyteorg/flyte/pull/5645 +* Fix flyteidl release checkout all tags by @eapolinario in https://github.com/flyteorg/flyte/pull/5646 +* Install pyarrow in sandbox functional tests by @eapolinario in https://github.com/flyteorg/flyte/pull/5647 +* docs: add documentation for configuring notifications in GCP by @desihsu in https://github.com/flyteorg/flyte/pull/5545 +* Correct "sucessfile" to "successfile" by @shengyu7697 in https://github.com/flyteorg/flyte/pull/5652 +* Fix ordering for custom template values in cluster resource controller by @katrogan in https://github.com/flyteorg/flyte/pull/5648 +* Don't error when attempting to trigger schedules for inactive projects by @katrogan in https://github.com/flyteorg/flyte/pull/5649 +* Update Flyte components - v1.13.1-rc0 by @flyte-bot in https://github.com/flyteorg/flyte/pull/5656 +* Add offloaded path to literal by @katrogan in https://github.com/flyteorg/flyte/pull/5660 +* Improve error messaging for invalid arguments by @pingsutw in https://github.com/flyteorg/flyte/pull/5658 +* DOC-462 Update "Try Flyte in the browser" text by @neverett in https://github.com/flyteorg/flyte/pull/5654 +* DOC-533 Remove outdated duplicate notification config content by @neverett in https://github.com/flyteorg/flyte/pull/5672 +* Validate labels before creating flyte CRD by @pingsutw in https://github.com/flyteorg/flyte/pull/5671 +* Add FLYTE_INTERNAL_POD_NAME environment variable that holds the pod name by @bgedik in https://github.com/flyteorg/flyte/pull/5616 +* Upstream Using InMemory token cache for admin clientset in propeller by @pvditt in https://github.com/flyteorg/flyte/pull/5621 +* [Bug] Update resource failures w/ Finalizers set (#423) by @pvditt in https://github.com/flyteorg/flyte/pull/5673 +* [BUG] array node eventing bump version by @pvditt in https://github.com/flyteorg/flyte/pull/5680 +* Add custominfo to agents by @ddl-rliu in https://github.com/flyteorg/flyte/pull/5604 +* [BUG] use deep copy of bit arrays when getting array node state by @pvditt in https://github.com/flyteorg/flyte/pull/5681 +* More concise definition of launchplan by @eapolinario in https://github.com/flyteorg/flyte/pull/5682 +* Auth/prevent lookup per call by @wild-endeavor in https://github.com/flyteorg/flyte/pull/5686 + +## New Contributors +* @cratiu222 made their first contribution in https://github.com/flyteorg/flyte/pull/5643 +* @desihsu made their first contribution in https://github.com/flyteorg/flyte/pull/5545 +* @shengyu7697 made their first contribution in https://github.com/flyteorg/flyte/pull/5652 +* @bgedik made their first contribution in https://github.com/flyteorg/flyte/pull/5616 + +**Full Changelog**: https://github.com/flyteorg/flyte/compare/flytectl/v0.9.1...v1.13.1-rc1 diff --git a/charts/flyte-binary/README.md b/charts/flyte-binary/README.md index 64feee5d89..350391fd53 100644 --- a/charts/flyte-binary/README.md +++ b/charts/flyte-binary/README.md @@ -42,7 +42,7 @@ Chart for basic single Flyte executable deployment | configuration.auth.oidc.clientId | string | `""` | | | configuration.auth.oidc.clientSecret | string | `""` | | | configuration.co-pilot.image.repository | string | `"cr.flyte.org/flyteorg/flytecopilot"` | | -| 
configuration.co-pilot.image.tag | string | `"v1.13.1-rc0"` | | +| configuration.co-pilot.image.tag | string | `"v1.13.1-rc1"` | | | configuration.database.dbname | string | `"flyte"` | | | configuration.database.host | string | `"127.0.0.1"` | | | configuration.database.options | string | `"sslmode=disable"` | | diff --git a/charts/flyte-binary/values.yaml b/charts/flyte-binary/values.yaml index b3ea6877f7..8821126da2 100644 --- a/charts/flyte-binary/values.yaml +++ b/charts/flyte-binary/values.yaml @@ -159,7 +159,7 @@ configuration: # repository CoPilot sidecar image repository repository: cr.flyte.org/flyteorg/flytecopilot # FLYTECOPILOT_IMAGE # tag CoPilot sidecar image tag - tag: v1.13.1-rc0 # FLYTECOPILOT_TAG + tag: v1.13.1-rc1 # FLYTECOPILOT_TAG # agentService Flyte Agent configuration agentService: defaultAgent: diff --git a/charts/flyte-core/README.md b/charts/flyte-core/README.md index cf4f511a2e..14b938d2cf 100644 --- a/charts/flyte-core/README.md +++ b/charts/flyte-core/README.md @@ -95,8 +95,8 @@ helm install gateway bitnami/contour -n flyte | configmap.clusters.clusterConfigs | list | `[]` | | | configmap.clusters.labelClusterMap | object | `{}` | | | configmap.console | object | `{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"}` | Configuration for Flyte console UI | -| configmap.copilot | object | `{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0","name":"flyte-copilot-","start-timeout":"30s"}}}}` | Copilot configuration | -| configmap.copilot.plugins.k8s.co-pilot | object | `{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0","name":"flyte-copilot-","start-timeout":"30s"}` | Structure documented [here](https://pkg.go.dev/github.com/lyft/flyteplugins@v0.5.28/go/tasks/pluginmachinery/flytek8s/config#FlyteCoPilotConfig) | +| configmap.copilot | object | `{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1","name":"flyte-copilot-","start-timeout":"30s"}}}}` | Copilot configuration | +| configmap.copilot.plugins.k8s.co-pilot | object | `{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1","name":"flyte-copilot-","start-timeout":"30s"}` | Structure documented [here](https://pkg.go.dev/github.com/lyft/flyteplugins@v0.5.28/go/tasks/pluginmachinery/flytek8s/config#FlyteCoPilotConfig) | | configmap.core | object | `{"manager":{"pod-application":"flytepropeller","pod-template-container-name":"flytepropeller","pod-template-name":"flytepropeller-template"},"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}}` | Core propeller configuration | | configmap.core.manager | object | `{"pod-application":"flytepropeller","pod-template-container-name":"flytepropeller","pod-template-name":"flytepropeller-template"}` | follows the structure specified 
[here](https://pkg.go.dev/github.com/flyteorg/flytepropeller/manager/config#Config). | | configmap.core.propeller | object | `{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"}` | follows the structure specified [here](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/config). | @@ -130,7 +130,7 @@ helm install gateway bitnami/contour -n flyte | datacatalog.extraArgs | object | `{}` | Appends extra command line arguments to the main command | | datacatalog.image.pullPolicy | string | `"IfNotPresent"` | Docker image pull policy | | datacatalog.image.repository | string | `"cr.flyte.org/flyteorg/datacatalog"` | Docker image for Datacatalog deployment | -| datacatalog.image.tag | string | `"v1.13.1-rc0"` | Docker image tag | +| datacatalog.image.tag | string | `"v1.13.1-rc1"` | Docker image tag | | datacatalog.nodeSelector | object | `{}` | nodeSelector for Datacatalog deployment | | datacatalog.podAnnotations | object | `{}` | Annotations for Datacatalog pods | | datacatalog.podEnv | object | `{}` | Additional Datacatalog container environment variables | @@ -166,7 +166,7 @@ helm install gateway bitnami/contour -n flyte | flyteadmin.extraArgs | object | `{}` | Appends extra command line arguments to the serve command | | flyteadmin.image.pullPolicy | string | `"IfNotPresent"` | | | flyteadmin.image.repository | string | `"cr.flyte.org/flyteorg/flyteadmin"` | Docker image for Flyteadmin deployment | -| flyteadmin.image.tag | string | `"v1.13.1-rc0"` | | +| flyteadmin.image.tag | string | `"v1.13.1-rc1"` | | | flyteadmin.initialProjects | list | `["flytesnacks","flytetester","flyteexamples"]` | Initial projects to create | | flyteadmin.nodeSelector | object | `{}` | nodeSelector for Flyteadmin deployment | | flyteadmin.podAnnotations | object | `{}` | Annotations for Flyteadmin pods | @@ -238,7 +238,7 @@ helm install gateway bitnami/contour -n flyte | flytepropeller.extraArgs | object | `{}` | Appends extra command line arguments to the main command | | flytepropeller.image.pullPolicy | string | `"IfNotPresent"` | | | flytepropeller.image.repository | string | `"cr.flyte.org/flyteorg/flytepropeller"` | Docker image for Flytepropeller deployment | -| flytepropeller.image.tag | string | `"v1.13.1-rc0"` | | +| flytepropeller.image.tag | string | `"v1.13.1-rc1"` | | | flytepropeller.manager | bool | `false` | | | flytepropeller.nodeSelector | object | `{}` | nodeSelector for Flytepropeller deployment | | flytepropeller.podAnnotations | object | `{}` | Annotations for Flytepropeller pods | @@ -270,7 +270,7 @@ helm install gateway bitnami/contour -n flyte | flytescheduler.configPath | string | `"/etc/flyte/config/*.yaml"` | Default regex string for searching configuration files | | flytescheduler.image.pullPolicy | string | `"IfNotPresent"` | Docker image pull policy | | flytescheduler.image.repository | string | `"cr.flyte.org/flyteorg/flytescheduler"` | Docker image for 
Flytescheduler deployment | -| flytescheduler.image.tag | string | `"v1.13.1-rc0"` | Docker image tag | +| flytescheduler.image.tag | string | `"v1.13.1-rc1"` | Docker image tag | | flytescheduler.nodeSelector | object | `{}` | nodeSelector for Flytescheduler deployment | | flytescheduler.podAnnotations | object | `{}` | Annotations for Flytescheduler pods | | flytescheduler.podEnv | object | `{}` | Additional Flytescheduler container environment variables | diff --git a/charts/flyte-core/values.yaml b/charts/flyte-core/values.yaml index 2c9e02427e..4462372d95 100755 --- a/charts/flyte-core/values.yaml +++ b/charts/flyte-core/values.yaml @@ -16,7 +16,7 @@ flyteadmin: image: # -- Docker image for Flyteadmin deployment repository: cr.flyte.org/flyteorg/flyteadmin # FLYTEADMIN_IMAGE - tag: v1.13.1-rc0 # FLYTEADMIN_TAG + tag: v1.13.1-rc1 # FLYTEADMIN_TAG pullPolicy: IfNotPresent # -- Additional flyteadmin container environment variables # @@ -144,7 +144,7 @@ flytescheduler: # -- Docker image for Flytescheduler deployment repository: cr.flyte.org/flyteorg/flytescheduler # FLYTESCHEDULER_IMAGE # -- Docker image tag - tag: v1.13.1-rc0 # FLYTESCHEDULER_TAG + tag: v1.13.1-rc1 # FLYTESCHEDULER_TAG # -- Docker image pull policy pullPolicy: IfNotPresent # -- Default resources requests and limits for Flytescheduler deployment @@ -210,7 +210,7 @@ datacatalog: # -- Docker image for Datacatalog deployment repository: cr.flyte.org/flyteorg/datacatalog # DATACATALOG_IMAGE # -- Docker image tag - tag: v1.13.1-rc0 # DATACATALOG_TAG + tag: v1.13.1-rc1 # DATACATALOG_TAG # -- Docker image pull policy pullPolicy: IfNotPresent # -- Default resources requests and limits for Datacatalog deployment @@ -309,7 +309,7 @@ flytepropeller: image: # -- Docker image for Flytepropeller deployment repository: cr.flyte.org/flyteorg/flytepropeller # FLYTEPROPELLER_IMAGE - tag: v1.13.1-rc0 # FLYTEPROPELLER_TAG + tag: v1.13.1-rc1 # FLYTEPROPELLER_TAG pullPolicy: IfNotPresent # -- Default resources requests and limits for Flytepropeller deployment resources: @@ -801,7 +801,7 @@ configmap: # -- Structure documented [here](https://pkg.go.dev/github.com/lyft/flyteplugins@v0.5.28/go/tasks/pluginmachinery/flytek8s/config#FlyteCoPilotConfig) co-pilot: name: flyte-copilot- - image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0 # FLYTECOPILOT_IMAGE + image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1 # FLYTECOPILOT_IMAGE start-timeout: 30s # -- Core propeller configuration diff --git a/charts/flyte/README.md b/charts/flyte/README.md index 321b9e22a3..4a3a911d00 100644 --- a/charts/flyte/README.md +++ b/charts/flyte/README.md @@ -71,7 +71,7 @@ helm upgrade -f values-sandbox.yaml flyte . 
| contour.tolerations | list | `[]` | tolerations for Contour deployment | | daskoperator | object | `{"enabled":false}` | Optional: Dask Plugin using the Dask Operator | | daskoperator.enabled | bool | `false` | - enable or disable the dask operator deployment installation | -| flyte | object | `{"cluster_resource_manager":{"config":{"cluster_resources":{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refresh":"5m","refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}},"enabled":true,"service_account_name":"flyteadmin","templates":[{"key":"aa_namespace","value":"apiVersion: v1\nkind: Namespace\nmetadata:\n name: {{ namespace }}\nspec:\n finalizers:\n - kubernetes\n"},{"key":"ab_project_resource_quota","value":"apiVersion: v1\nkind: ResourceQuota\nmetadata:\n name: project-quota\n namespace: {{ namespace }}\nspec:\n hard:\n limits.cpu: {{ projectQuotaCpu }}\n limits.memory: {{ projectQuotaMemory }}\n"}]},"common":{"databaseSecret":{"name":"","secretManifest":{}},"flyteNamespaceTemplate":{"enabled":false},"ingress":{"albSSLRedirect":false,"annotations":{"nginx.ingress.kubernetes.io/app-root":"/console"},"enabled":true,"host":"","separateGrpcIngress":false,"separateGrpcIngressAnnotations":{"nginx.ingress.kubernetes.io/backend-protocol":"GRPC"},"tls":{"enabled":false},"webpackHMR":true}},"configmap":{"adminServer":{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}},"catalog":{"catalog-cache":{"endpoint":"datacatalog:89","insecure":true,"type":"datacatalog"}},"console":{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"},"copilot":{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0","name":"flyte-copilot-","start-timeout":"30s"}}}},"core":{"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}},"datacatalogServer
":{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}},"domain":{"domains":[{"id":"development","name":"development"},{"id":"staging","name":"staging"},{"id":"production","name":"production"}]},"enabled_plugins":{"tasks":{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sensor":"agent-service","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}}},"k8s":{"plugins":{"k8s":{"default-cpus":"100m","default-env-from-configmaps":[],"default-env-from-secrets":[],"default-env-vars":[{"FLYTE_AWS_ENDPOINT":"http://minio.flyte:9000"},{"FLYTE_AWS_ACCESS_KEY_ID":"minio"},{"FLYTE_AWS_SECRET_ACCESS_KEY":"miniostorage"}],"default-memory":"200Mi"}}},"logger":{"logger":{"level":5,"show-source":true}},"remoteData":{"remoteData":{"region":"us-east-1","scheme":"local","signedUrls":{"durationMinutes":3}}},"resource_manager":{"propeller":{"resourcemanager":{"redis":null,"type":"noop"}}},"task_logs":{"plugins":{"logs":{"cloudwatch-enabled":false,"kubernetes-enabled":true,"kubernetes-template-uri":"http://localhost:30082/#/log/{{ \"{{\" }} .namespace {{ \"}}\" }}/{{ \"{{\" }} .podName {{ \"}}\" }}/pod?namespace={{ \"{{\" }} .namespace {{ \"}}\" }}"}}},"task_resource_defaults":{"task_resources":{"defaults":{"cpu":"100m","memory":"200Mi","storage":"5Mi"},"limits":{"cpu":2,"gpu":1,"memory":"1Gi","storage":"20Mi"}}}},"datacatalog":{"affinity":{},"configPath":"/etc/datacatalog/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/datacatalog","tag":"v1.13.1-rc0"},"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"500m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"type":"NodePort"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"db":{"admin":{"database":{"dbname":"flyteadmin","host":"postgres","port":5432,"username":"postgres"}},"datacatalog":{"database":{"dbname":"datacatalog","host":"postgres","port":5432,"username":"postgres"}}},"deployRedoc":true,"flyteadmin":{"additionalVolumeMounts":[],"additionalVolumes":[],"affinity":{},"configPath":"/etc/flyte/config/*.yaml","env":[],"image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flyteadmin","tag":"v1.13.1-rc0"},"initialProjects":["flytesnacks","flytetester","flyteexamples"],"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"secrets":{},"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"loadBalancerSourceRanges":[],"type":"ClusterIP"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"flyteconsole":{"affinity":{},"ga":{"enabled":true,"tracking_id":"G-0QW4DJWJ20"},"image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flyteconsole","tag":"v1.17.1"},"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"500m","memory":"275Mi"},"requests":{"cpu":"10m","memory":"250Mi"}},"service":{"annotations":{},"type":"ClusterIP"},"tolerations":[]},"flytepropeller":{"affinity":{},"cacheSizeMbs":0,"configPath":"/etc/flyte/config/*.yaml",
"image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flytepropeller","tag":"v1.13.1-rc0"},"manager":false,"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"200m","ephemeral-storage":"100Mi","memory":"200Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"flytescheduler":{"affinity":{},"configPath":"/etc/flyte/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flytescheduler","tag":"v1.13.1-rc0"},"nodeSelector":{},"podAnnotations":{},"resources":{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"secrets":{},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"storage":{"bucketName":"my-s3-bucket","custom":{},"gcs":null,"s3":{"region":"us-east-1"},"type":"sandbox"},"webhook":{"enabled":true,"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"type":"ClusterIP"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]}},"workflow_notifications":{"config":{},"enabled":false},"workflow_scheduler":{"enabled":true,"type":"native"}}` | ------------------------------------------------------------------- Core System settings This section consists of Core components of Flyte and their deployment settings. This includes FlyteAdmin service, Datacatalog, FlytePropeller and Flyteconsole | +| flyte | object | `{"cluster_resource_manager":{"config":{"cluster_resources":{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refresh":"5m","refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}},"enabled":true,"service_account_name":"flyteadmin","templates":[{"key":"aa_namespace","value":"apiVersion: v1\nkind: Namespace\nmetadata:\n name: {{ namespace }}\nspec:\n finalizers:\n - kubernetes\n"},{"key":"ab_project_resource_quota","value":"apiVersion: v1\nkind: ResourceQuota\nmetadata:\n name: project-quota\n namespace: {{ namespace }}\nspec:\n hard:\n limits.cpu: {{ projectQuotaCpu }}\n limits.memory: {{ projectQuotaMemory 
}}\n"}]},"common":{"databaseSecret":{"name":"","secretManifest":{}},"flyteNamespaceTemplate":{"enabled":false},"ingress":{"albSSLRedirect":false,"annotations":{"nginx.ingress.kubernetes.io/app-root":"/console"},"enabled":true,"host":"","separateGrpcIngress":false,"separateGrpcIngressAnnotations":{"nginx.ingress.kubernetes.io/backend-protocol":"GRPC"},"tls":{"enabled":false},"webpackHMR":true}},"configmap":{"adminServer":{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}},"catalog":{"catalog-cache":{"endpoint":"datacatalog:89","insecure":true,"type":"datacatalog"}},"console":{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"},"copilot":{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1","name":"flyte-copilot-","start-timeout":"30s"}}}},"core":{"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}},"datacatalogServer":{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}},"domain":{"domains":[{"id":"development","name":"development"},{"id":"staging","name":"staging"},{"id":"production","name":"production"}]},"enabled_plugins":{"tasks":{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sensor":"agent-service","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}}},"k8s":{"plugins":{"k8s":{"default-cpus":"100m","default-env-from-configmaps":[],"default-env-from-secrets":[],"default-env-vars":[{"FLYTE_AWS_ENDPOINT":"http://minio.flyte:9000"},{"FLYTE_AWS_ACCESS_KEY_ID":"minio"},{"FLYTE_AWS_SECRET_ACCESS_KEY":"miniostorage"}],"default-memory":"200Mi"}}},"logger":{"logger":{"level":5,"show-source":true}},"remoteData":{"remoteData":{"region":"us-east-1","scheme":"local","signedUrls":{"durationMinutes":3}}},"resource_manager":{"propeller":{"resourcemanager":{"redis":null,"type":"noop"}}},"task_logs":{"plugins":{"logs":{"cloudwatch-enabled":false,"kubernetes-enabled":true,"kubernetes-template-uri"
:"http://localhost:30082/#/log/{{ \"{{\" }} .namespace {{ \"}}\" }}/{{ \"{{\" }} .podName {{ \"}}\" }}/pod?namespace={{ \"{{\" }} .namespace {{ \"}}\" }}"}}},"task_resource_defaults":{"task_resources":{"defaults":{"cpu":"100m","memory":"200Mi","storage":"5Mi"},"limits":{"cpu":2,"gpu":1,"memory":"1Gi","storage":"20Mi"}}}},"datacatalog":{"affinity":{},"configPath":"/etc/datacatalog/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/datacatalog","tag":"v1.13.1-rc1"},"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"500m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"type":"NodePort"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"db":{"admin":{"database":{"dbname":"flyteadmin","host":"postgres","port":5432,"username":"postgres"}},"datacatalog":{"database":{"dbname":"datacatalog","host":"postgres","port":5432,"username":"postgres"}}},"deployRedoc":true,"flyteadmin":{"additionalVolumeMounts":[],"additionalVolumes":[],"affinity":{},"configPath":"/etc/flyte/config/*.yaml","env":[],"image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flyteadmin","tag":"v1.13.1-rc1"},"initialProjects":["flytesnacks","flytetester","flyteexamples"],"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"secrets":{},"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"loadBalancerSourceRanges":[],"type":"ClusterIP"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"flyteconsole":{"affinity":{},"ga":{"enabled":true,"tracking_id":"G-0QW4DJWJ20"},"image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flyteconsole","tag":"v1.17.1"},"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"500m","memory":"275Mi"},"requests":{"cpu":"10m","memory":"250Mi"}},"service":{"annotations":{},"type":"ClusterIP"},"tolerations":[]},"flytepropeller":{"affinity":{},"cacheSizeMbs":0,"configPath":"/etc/flyte/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flytepropeller","tag":"v1.13.1-rc1"},"manager":false,"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"200m","ephemeral-storage":"100Mi","memory":"200Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"flytescheduler":{"affinity":{},"configPath":"/etc/flyte/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flytescheduler","tag":"v1.13.1-rc1"},"nodeSelector":{},"podAnnotations":{},"resources":{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"secrets":{},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"storage":{"bucketName":"my-s3-bucket","custom":{},"gcs":null,"s3":{"region":"us-east-1"},"type":"sandbox"},"webhook":{"enabled":true,"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"type":"ClusterIP"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]}},"workflo
w_notifications":{"config":{},"enabled":false},"workflow_scheduler":{"enabled":true,"type":"native"}}` | ------------------------------------------------------------------- Core System settings This section consists of Core components of Flyte and their deployment settings. This includes FlyteAdmin service, Datacatalog, FlytePropeller and Flyteconsole | | flyte.cluster_resource_manager | object | `{"config":{"cluster_resources":{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refresh":"5m","refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}},"enabled":true,"service_account_name":"flyteadmin","templates":[{"key":"aa_namespace","value":"apiVersion: v1\nkind: Namespace\nmetadata:\n name: {{ namespace }}\nspec:\n finalizers:\n - kubernetes\n"},{"key":"ab_project_resource_quota","value":"apiVersion: v1\nkind: ResourceQuota\nmetadata:\n name: project-quota\n namespace: {{ namespace }}\nspec:\n hard:\n limits.cpu: {{ projectQuotaCpu }}\n limits.memory: {{ projectQuotaMemory }}\n"}]}` | Configuration for the Cluster resource manager component. This is an optional component, that enables automatic cluster configuration. This is useful to set default quotas, manage namespaces etc that map to a project/domain | | flyte.cluster_resource_manager.config.cluster_resources | object | `{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refresh":"5m","refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}` | ClusterResource parameters Refer to the [structure](https://pkg.go.dev/github.com/lyft/flyteadmin@v0.3.37/pkg/runtime/interfaces#ClusterResourceConfig) to customize. | | flyte.cluster_resource_manager.config.cluster_resources.standaloneDeployment | bool | `false` | Starts the cluster resource manager in standalone mode with requisite auth credentials to call flyteadmin service endpoints | @@ -91,15 +91,15 @@ helm upgrade -f values-sandbox.yaml flyte . | flyte.common.ingress.separateGrpcIngressAnnotations | object | `{"nginx.ingress.kubernetes.io/backend-protocol":"GRPC"}` | - Extra Ingress annotations applied only to the GRPC ingress. Only makes sense if `separateGrpcIngress` is enabled. | | flyte.common.ingress.tls | object | `{"enabled":false}` | - TLS Settings | | flyte.common.ingress.webpackHMR | bool | `true` | - Enable or disable HMR route to flyteconsole. This is useful only for frontend development. 
| -| flyte.configmap | object | `{"adminServer":{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}},"catalog":{"catalog-cache":{"endpoint":"datacatalog:89","insecure":true,"type":"datacatalog"}},"console":{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"},"copilot":{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0","name":"flyte-copilot-","start-timeout":"30s"}}}},"core":{"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}},"datacatalogServer":{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}},"domain":{"domains":[{"id":"development","name":"development"},{"id":"staging","name":"staging"},{"id":"production","name":"production"}]},"enabled_plugins":{"tasks":{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sensor":"agent-service","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}}},"k8s":{"plugins":{"k8s":{"default-cpus":"100m","default-env-from-configmaps":[],"default-env-from-secrets":[],"default-env-vars":[{"FLYTE_AWS_ENDPOINT":"http://minio.flyte:9000"},{"FLYTE_AWS_ACCESS_KEY_ID":"minio"},{"FLYTE_AWS_SECRET_ACCESS_KEY":"miniostorage"}],"default-memory":"200Mi"}}},"logger":{"logger":{"level":5,"show-source":true}},"remoteData":{"remoteData":{"region":"us-east-1","scheme":"local","signedUrls":{"durationMinutes":3}}},"resource_manager":{"propeller":{"resourcemanager":{"redis":null,"type":"noop"}}},"task_logs":{"plugins":{"logs":{"cloudwatch-enabled":false,"kubernetes-enabled":true,"kubernetes-template-uri":"http://localhost:30082/#/log/{{ \"{{\" }} .namespace {{ \"}}\" }}/{{ \"{{\" }} .podName {{ \"}}\" }}/pod?namespace={{ \"{{\" }} .namespace {{ \"}}\" }}"}}},"task_resource_defaults":{"task_resources":{"defaults":{"cpu":"100m","memory":"200Mi","storage":"5Mi"},"limits":{"cpu":2,"gpu":1,"memory":"1Gi","storage":"20Mi"}}}}` | 
----------------------------------------------------------------- CONFIGMAPS SETTINGS | +| flyte.configmap | object | `{"adminServer":{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}},"catalog":{"catalog-cache":{"endpoint":"datacatalog:89","insecure":true,"type":"datacatalog"}},"console":{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"},"copilot":{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1","name":"flyte-copilot-","start-timeout":"30s"}}}},"core":{"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}},"datacatalogServer":{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}},"domain":{"domains":[{"id":"development","name":"development"},{"id":"staging","name":"staging"},{"id":"production","name":"production"}]},"enabled_plugins":{"tasks":{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sensor":"agent-service","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}}},"k8s":{"plugins":{"k8s":{"default-cpus":"100m","default-env-from-configmaps":[],"default-env-from-secrets":[],"default-env-vars":[{"FLYTE_AWS_ENDPOINT":"http://minio.flyte:9000"},{"FLYTE_AWS_ACCESS_KEY_ID":"minio"},{"FLYTE_AWS_SECRET_ACCESS_KEY":"miniostorage"}],"default-memory":"200Mi"}}},"logger":{"logger":{"level":5,"show-source":true}},"remoteData":{"remoteData":{"region":"us-east-1","scheme":"local","signedUrls":{"durationMinutes":3}}},"resource_manager":{"propeller":{"resourcemanager":{"redis":null,"type":"noop"}}},"task_logs":{"plugins":{"logs":{"cloudwatch-enabled":false,"kubernetes-enabled":true,"kubernetes-template-uri":"http://localhost:30082/#/log/{{ \"{{\" }} .namespace {{ \"}}\" }}/{{ \"{{\" }} .podName {{ \"}}\" }}/pod?namespace={{ \"{{\" }} .namespace {{ \"}}\" 
}}"}}},"task_resource_defaults":{"task_resources":{"defaults":{"cpu":"100m","memory":"200Mi","storage":"5Mi"},"limits":{"cpu":2,"gpu":1,"memory":"1Gi","storage":"20Mi"}}}}` | ----------------------------------------------------------------- CONFIGMAPS SETTINGS | | flyte.configmap.adminServer | object | `{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}}` | FlyteAdmin server configuration | | flyte.configmap.adminServer.auth | object | `{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}}` | Authentication configuration | | flyte.configmap.adminServer.server.security.secure | bool | `false` | Controls whether to serve requests over SSL/TLS. | | flyte.configmap.adminServer.server.security.useAuth | bool | `false` | Controls whether to enforce authentication. Follow the guide in https://docs.flyte.org/ on how to setup authentication. 
| | flyte.configmap.catalog | object | `{"catalog-cache":{"endpoint":"datacatalog:89","insecure":true,"type":"datacatalog"}}` | Catalog Client configuration [structure](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/nodes/task/catalog#Config) Additional advanced Catalog configuration [here](https://pkg.go.dev/github.com/lyft/flyteplugins/go/tasks/pluginmachinery/catalog#Config) | | flyte.configmap.console | object | `{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"}` | Configuration for Flyte console UI | -| flyte.configmap.copilot | object | `{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0","name":"flyte-copilot-","start-timeout":"30s"}}}}` | Copilot configuration | -| flyte.configmap.copilot.plugins.k8s.co-pilot | object | `{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0","name":"flyte-copilot-","start-timeout":"30s"}` | Structure documented [here](https://pkg.go.dev/github.com/lyft/flyteplugins@v0.5.28/go/tasks/pluginmachinery/flytek8s/config#FlyteCoPilotConfig) | +| flyte.configmap.copilot | object | `{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1","name":"flyte-copilot-","start-timeout":"30s"}}}}` | Copilot configuration | +| flyte.configmap.copilot.plugins.k8s.co-pilot | object | `{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1","name":"flyte-copilot-","start-timeout":"30s"}` | Structure documented [here](https://pkg.go.dev/github.com/lyft/flyteplugins@v0.5.28/go/tasks/pluginmachinery/flytek8s/config#FlyteCoPilotConfig) | | flyte.configmap.core | object | `{"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}}` | Core propeller configuration | | flyte.configmap.core.propeller | object | `{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"}` | follows the structure specified [here](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/config). | | flyte.configmap.datacatalogServer | object | `{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}}` | Datacatalog server config | @@ -120,7 +120,7 @@ helm upgrade -f values-sandbox.yaml flyte . 
| flyte.datacatalog.configPath | string | `"/etc/datacatalog/config/*.yaml"` | Default regex string for searching configuration files | | flyte.datacatalog.image.pullPolicy | string | `"IfNotPresent"` | Docker image pull policy | | flyte.datacatalog.image.repository | string | `"cr.flyte.org/flyteorg/datacatalog"` | Docker image for Datacatalog deployment | -| flyte.datacatalog.image.tag | string | `"v1.13.1-rc0"` | Docker image tag | +| flyte.datacatalog.image.tag | string | `"v1.13.1-rc1"` | Docker image tag | | flyte.datacatalog.nodeSelector | object | `{}` | nodeSelector for Datacatalog deployment | | flyte.datacatalog.podAnnotations | object | `{}` | Annotations for Datacatalog pods | | flyte.datacatalog.replicaCount | int | `1` | Replicas count for Datacatalog deployment | @@ -136,7 +136,7 @@ helm upgrade -f values-sandbox.yaml flyte . | flyte.flyteadmin.env | list | `[]` | Additional flyteadmin container environment variables e.g. SendGrid's API key - name: SENDGRID_API_KEY value: "" e.g. secret environment variable (you can combine it with .additionalVolumes): - name: SENDGRID_API_KEY valueFrom: secretKeyRef: name: sendgrid-secret key: api_key | | flyte.flyteadmin.image.pullPolicy | string | `"IfNotPresent"` | Docker image pull policy | | flyte.flyteadmin.image.repository | string | `"cr.flyte.org/flyteorg/flyteadmin"` | Docker image for Flyteadmin deployment | -| flyte.flyteadmin.image.tag | string | `"v1.13.1-rc0"` | Docker image tag | +| flyte.flyteadmin.image.tag | string | `"v1.13.1-rc1"` | Docker image tag | | flyte.flyteadmin.initialProjects | list | `["flytesnacks","flytetester","flyteexamples"]` | Initial projects to create | | flyte.flyteadmin.nodeSelector | object | `{}` | nodeSelector for Flyteadmin deployment | | flyte.flyteadmin.podAnnotations | object | `{}` | Annotations for Flyteadmin pods | @@ -162,7 +162,7 @@ helm upgrade -f values-sandbox.yaml flyte . | flyte.flytepropeller.configPath | string | `"/etc/flyte/config/*.yaml"` | Default regex string for searching configuration files | | flyte.flytepropeller.image.pullPolicy | string | `"IfNotPresent"` | Docker image pull policy | | flyte.flytepropeller.image.repository | string | `"cr.flyte.org/flyteorg/flytepropeller"` | Docker image for Flytepropeller deployment | -| flyte.flytepropeller.image.tag | string | `"v1.13.1-rc0"` | Docker image tag | +| flyte.flytepropeller.image.tag | string | `"v1.13.1-rc1"` | Docker image tag | | flyte.flytepropeller.nodeSelector | object | `{}` | nodeSelector for Flytepropeller deployment | | flyte.flytepropeller.podAnnotations | object | `{}` | Annotations for Flytepropeller pods | | flyte.flytepropeller.replicaCount | int | `1` | Replicas count for Flytepropeller deployment | @@ -176,7 +176,7 @@ helm upgrade -f values-sandbox.yaml flyte . 
| flyte.flytescheduler.configPath | string | `"/etc/flyte/config/*.yaml"` | Default regex string for searching configuration files | | flyte.flytescheduler.image.pullPolicy | string | `"IfNotPresent"` | Docker image pull policy | | flyte.flytescheduler.image.repository | string | `"cr.flyte.org/flyteorg/flytescheduler"` | Docker image for Flytescheduler deployment | -| flyte.flytescheduler.image.tag | string | `"v1.13.1-rc0"` | Docker image tag | +| flyte.flytescheduler.image.tag | string | `"v1.13.1-rc1"` | Docker image tag | | flyte.flytescheduler.nodeSelector | object | `{}` | nodeSelector for Flytescheduler deployment | | flyte.flytescheduler.podAnnotations | object | `{}` | Annotations for Flytescheduler pods | | flyte.flytescheduler.resources | object | `{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}}` | Default resources requests and limits for Flytescheduler deployment | diff --git a/charts/flyte/values.yaml b/charts/flyte/values.yaml index acd3df9050..6444f2d334 100755 --- a/charts/flyte/values.yaml +++ b/charts/flyte/values.yaml @@ -15,7 +15,7 @@ flyte: # -- Docker image for Flyteadmin deployment repository: cr.flyte.org/flyteorg/flyteadmin # FLYTEADMIN_IMAGE # -- Docker image tag - tag: v1.13.1-rc0 # FLYTEADMIN_TAG + tag: v1.13.1-rc1 # FLYTEADMIN_TAG # -- Docker image pull policy pullPolicy: IfNotPresent # -- Additional flyteadmin container environment variables @@ -83,7 +83,7 @@ flyte: # -- Docker image for Flytescheduler deployment repository: cr.flyte.org/flyteorg/flytescheduler # FLYTESCHEDULER_IMAGE # -- Docker image tag - tag: v1.13.1-rc0 # FLYTESCHEDULER_TAG + tag: v1.13.1-rc1 # FLYTESCHEDULER_TAG # -- Docker image pull policy pullPolicy: IfNotPresent # -- Default resources requests and limits for Flytescheduler deployment @@ -128,7 +128,7 @@ flyte: # -- Docker image for Datacatalog deployment repository: cr.flyte.org/flyteorg/datacatalog # DATACATALOG_IMAGE # -- Docker image tag - tag: v1.13.1-rc0 # DATACATALOG_TAG + tag: v1.13.1-rc1 # DATACATALOG_TAG # -- Docker image pull policy pullPolicy: IfNotPresent # -- Default resources requests and limits for Datacatalog deployment @@ -177,7 +177,7 @@ flyte: # -- Docker image for Flytepropeller deployment repository: cr.flyte.org/flyteorg/flytepropeller # FLYTEPROPELLER_IMAGE # -- Docker image tag - tag: v1.13.1-rc0 # FLYTEPROPELLER_TAG + tag: v1.13.1-rc1 # FLYTEPROPELLER_TAG # -- Docker image pull policy pullPolicy: IfNotPresent # -- Default resources requests and limits for Flytepropeller deployment @@ -471,7 +471,7 @@ flyte: # -- Structure documented [here](https://pkg.go.dev/github.com/lyft/flyteplugins@v0.5.28/go/tasks/pluginmachinery/flytek8s/config#FlyteCoPilotConfig) co-pilot: name: flyte-copilot- - image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0 # FLYTECOPILOT_IMAGE + image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1 # FLYTECOPILOT_IMAGE start-timeout: 30s # -- Core propeller configuration diff --git a/charts/flyteagent/README.md b/charts/flyteagent/README.md index e6851b5758..6bd7b056c4 100644 --- a/charts/flyteagent/README.md +++ b/charts/flyteagent/README.md @@ -20,7 +20,7 @@ A Helm chart for Flyte agent | fullnameOverride | string | `""` | | | image.pullPolicy | string | `"IfNotPresent"` | Docker image pull policy | | image.repository | string | `"cr.flyte.org/flyteorg/flyteagent"` | Docker image for flyteagent deployment | -| image.tag | string | `"1.13.3"` | Docker image tag | +| image.tag | string | `"1.13.4"` | 
Docker image tag | | nameOverride | string | `""` | | | nodeSelector | object | `{}` | nodeSelector for flyteagent deployment | | podAnnotations | object | `{}` | Annotations for flyteagent pods | diff --git a/charts/flyteagent/values.yaml b/charts/flyteagent/values.yaml index ce23995df0..845248af90 100755 --- a/charts/flyteagent/values.yaml +++ b/charts/flyteagent/values.yaml @@ -23,7 +23,7 @@ image: # -- Docker image for flyteagent deployment repository: cr.flyte.org/flyteorg/flyteagent # FLYTEAGENT_IMAGE # -- Docker image tag - tag: 1.13.3 # FLYTEAGENT_TAG + tag: 1.13.4 # FLYTEAGENT_TAG # -- Docker image pull policy pullPolicy: IfNotPresent ports: diff --git a/deployment/agent/flyte_agent_helm_generated.yaml b/deployment/agent/flyte_agent_helm_generated.yaml index c8244070d5..53fe9016c2 100644 --- a/deployment/agent/flyte_agent_helm_generated.yaml +++ b/deployment/agent/flyte_agent_helm_generated.yaml @@ -79,7 +79,7 @@ spec: - pyflyte - serve - agent - image: "cr.flyte.org/flyteorg/flyteagent:1.13.3" + image: "cr.flyte.org/flyteorg/flyteagent:1.13.4" imagePullPolicy: "IfNotPresent" name: flyteagent volumeMounts: diff --git a/deployment/eks/flyte_aws_scheduler_helm_generated.yaml b/deployment/eks/flyte_aws_scheduler_helm_generated.yaml index 7e2f9fa395..2468bf049c 100644 --- a/deployment/eks/flyte_aws_scheduler_helm_generated.yaml +++ b/deployment/eks/flyte_aws_scheduler_helm_generated.yaml @@ -430,7 +430,7 @@ data: plugins: k8s: co-pilot: - image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0 + image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1 name: flyte-copilot- start-timeout: 30s core.yaml: | @@ -876,7 +876,7 @@ spec: - /etc/flyte/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: run-migrations securityContext: @@ -897,7 +897,7 @@ spec: - flytesnacks - flytetester - flyteexamples - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: seed-projects securityContext: @@ -915,7 +915,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - sync - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources securityContext: @@ -932,7 +932,7 @@ spec: - mountPath: /etc/secrets/ name: admin-secrets - name: generate-secrets - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: ["/bin/sh", "-c"] args: @@ -959,7 +959,7 @@ spec: - --config - /etc/flyte/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flyteadmin ports: @@ -1066,7 +1066,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources volumeMounts: @@ -1196,7 +1196,7 @@ spec: - /etc/datacatalog/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: run-migrations volumeMounts: @@ -1214,7 +1214,7 @@ spec: - --config - /etc/datacatalog/config/*.yaml - serve - image: 
"cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: datacatalog ports: @@ -1277,7 +1277,7 @@ spec: template: metadata: annotations: - configChecksum: "de81ec89079e3abbbc351f1e9dd5f918ac37e7a302dfe32a4ce4da1083980cd" + configChecksum: "ea7955d463445fa62b4096a37caba7b9049bbbaf909a75ccb91c8bfbc35178b" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -1305,7 +1305,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: @@ -1359,9 +1359,9 @@ spec: labels: app: flyte-pod-webhook app.kubernetes.io/name: flyte-pod-webhook - app.kubernetes.io/version: v1.13.1-rc0 + app.kubernetes.io/version: v1.13.1-rc1 annotations: - configChecksum: "de81ec89079e3abbbc351f1e9dd5f918ac37e7a302dfe32a4ce4da1083980cd" + configChecksum: "ea7955d463445fa62b4096a37caba7b9049bbbaf909a75ccb91c8bfbc35178b" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: @@ -1375,7 +1375,7 @@ spec: serviceAccountName: flyte-pod-webhook initContainers: - name: generate-secrets - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -1402,7 +1402,7 @@ spec: mountPath: /etc/flyte/config containers: - name: webhook - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: - flytepropeller diff --git a/deployment/eks/flyte_helm_controlplane_generated.yaml b/deployment/eks/flyte_helm_controlplane_generated.yaml index b6fd465b34..4f98e96224 100644 --- a/deployment/eks/flyte_helm_controlplane_generated.yaml +++ b/deployment/eks/flyte_helm_controlplane_generated.yaml @@ -581,7 +581,7 @@ spec: - /etc/flyte/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: run-migrations securityContext: @@ -602,7 +602,7 @@ spec: - flytesnacks - flytetester - flyteexamples - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: seed-projects securityContext: @@ -620,7 +620,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - sync - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources securityContext: @@ -637,7 +637,7 @@ spec: - mountPath: /etc/secrets/ name: admin-secrets - name: generate-secrets - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: ["/bin/sh", "-c"] args: @@ -664,7 +664,7 @@ spec: - --config - /etc/flyte/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flyteadmin ports: @@ -771,7 +771,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources volumeMounts: @@ -901,7 +901,7 @@ spec: - 
/etc/datacatalog/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: run-migrations volumeMounts: @@ -919,7 +919,7 @@ spec: - --config - /etc/datacatalog/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: datacatalog ports: @@ -1002,7 +1002,7 @@ spec: - precheck - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytescheduler-check securityContext: @@ -1022,7 +1022,7 @@ spec: - run - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytescheduler ports: diff --git a/deployment/eks/flyte_helm_dataplane_generated.yaml b/deployment/eks/flyte_helm_dataplane_generated.yaml index 421c16dae3..d4780f1f25 100644 --- a/deployment/eks/flyte_helm_dataplane_generated.yaml +++ b/deployment/eks/flyte_helm_dataplane_generated.yaml @@ -94,7 +94,7 @@ data: plugins: k8s: co-pilot: - image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0 + image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1 name: flyte-copilot- start-timeout: 30s core.yaml: | @@ -428,7 +428,7 @@ spec: template: metadata: annotations: - configChecksum: "de81ec89079e3abbbc351f1e9dd5f918ac37e7a302dfe32a4ce4da1083980cd" + configChecksum: "ea7955d463445fa62b4096a37caba7b9049bbbaf909a75ccb91c8bfbc35178b" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -456,7 +456,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: @@ -510,9 +510,9 @@ spec: labels: app: flyte-pod-webhook app.kubernetes.io/name: flyte-pod-webhook - app.kubernetes.io/version: v1.13.1-rc0 + app.kubernetes.io/version: v1.13.1-rc1 annotations: - configChecksum: "de81ec89079e3abbbc351f1e9dd5f918ac37e7a302dfe32a4ce4da1083980cd" + configChecksum: "ea7955d463445fa62b4096a37caba7b9049bbbaf909a75ccb91c8bfbc35178b" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: @@ -526,7 +526,7 @@ spec: serviceAccountName: flyte-pod-webhook initContainers: - name: generate-secrets - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -553,7 +553,7 @@ spec: mountPath: /etc/flyte/config containers: - name: webhook - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: - flytepropeller diff --git a/deployment/eks/flyte_helm_generated.yaml b/deployment/eks/flyte_helm_generated.yaml index 1e9d9a5bf1..db89ef2cf0 100644 --- a/deployment/eks/flyte_helm_generated.yaml +++ b/deployment/eks/flyte_helm_generated.yaml @@ -461,7 +461,7 @@ data: plugins: k8s: co-pilot: - image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0 + image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1 name: flyte-copilot- start-timeout: 30s core.yaml: | @@ -907,7 +907,7 @@ spec: - /etc/flyte/config/*.yaml - migrate - run - image: 
"cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: run-migrations securityContext: @@ -928,7 +928,7 @@ spec: - flytesnacks - flytetester - flyteexamples - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: seed-projects securityContext: @@ -946,7 +946,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - sync - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources securityContext: @@ -963,7 +963,7 @@ spec: - mountPath: /etc/secrets/ name: admin-secrets - name: generate-secrets - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: ["/bin/sh", "-c"] args: @@ -990,7 +990,7 @@ spec: - --config - /etc/flyte/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flyteadmin ports: @@ -1097,7 +1097,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources volumeMounts: @@ -1227,7 +1227,7 @@ spec: - /etc/datacatalog/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: run-migrations volumeMounts: @@ -1245,7 +1245,7 @@ spec: - --config - /etc/datacatalog/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: datacatalog ports: @@ -1328,7 +1328,7 @@ spec: - precheck - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytescheduler-check securityContext: @@ -1348,7 +1348,7 @@ spec: - run - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytescheduler ports: @@ -1407,7 +1407,7 @@ spec: template: metadata: annotations: - configChecksum: "de81ec89079e3abbbc351f1e9dd5f918ac37e7a302dfe32a4ce4da1083980cd" + configChecksum: "ea7955d463445fa62b4096a37caba7b9049bbbaf909a75ccb91c8bfbc35178b" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -1435,7 +1435,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: @@ -1489,9 +1489,9 @@ spec: labels: app: flyte-pod-webhook app.kubernetes.io/name: flyte-pod-webhook - app.kubernetes.io/version: v1.13.1-rc0 + app.kubernetes.io/version: v1.13.1-rc1 annotations: - configChecksum: "de81ec89079e3abbbc351f1e9dd5f918ac37e7a302dfe32a4ce4da1083980cd" + configChecksum: "ea7955d463445fa62b4096a37caba7b9049bbbaf909a75ccb91c8bfbc35178b" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: @@ -1505,7 +1505,7 @@ spec: serviceAccountName: 
flyte-pod-webhook initContainers: - name: generate-secrets - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -1532,7 +1532,7 @@ spec: mountPath: /etc/flyte/config containers: - name: webhook - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: - flytepropeller diff --git a/deployment/gcp/flyte_helm_controlplane_generated.yaml b/deployment/gcp/flyte_helm_controlplane_generated.yaml index 7d999c54fa..aa84954510 100644 --- a/deployment/gcp/flyte_helm_controlplane_generated.yaml +++ b/deployment/gcp/flyte_helm_controlplane_generated.yaml @@ -596,7 +596,7 @@ spec: - /etc/flyte/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: run-migrations securityContext: @@ -617,7 +617,7 @@ spec: - flytesnacks - flytetester - flyteexamples - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: seed-projects securityContext: @@ -635,7 +635,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - sync - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources securityContext: @@ -652,7 +652,7 @@ spec: - mountPath: /etc/secrets/ name: admin-secrets - name: generate-secrets - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: ["/bin/sh", "-c"] args: @@ -679,7 +679,7 @@ spec: - --config - /etc/flyte/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flyteadmin ports: @@ -786,7 +786,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources volumeMounts: @@ -916,7 +916,7 @@ spec: - /etc/datacatalog/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: run-migrations volumeMounts: @@ -934,7 +934,7 @@ spec: - --config - /etc/datacatalog/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: datacatalog ports: @@ -1017,7 +1017,7 @@ spec: - precheck - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytescheduler-check securityContext: @@ -1037,7 +1037,7 @@ spec: - run - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytescheduler ports: diff --git a/deployment/gcp/flyte_helm_dataplane_generated.yaml b/deployment/gcp/flyte_helm_dataplane_generated.yaml index a189c612a2..3d8f70b15b 100644 --- 
a/deployment/gcp/flyte_helm_dataplane_generated.yaml +++ b/deployment/gcp/flyte_helm_dataplane_generated.yaml @@ -94,7 +94,7 @@ data: plugins: k8s: co-pilot: - image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0 + image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1 name: flyte-copilot- start-timeout: 30s core.yaml: | @@ -436,7 +436,7 @@ spec: template: metadata: annotations: - configChecksum: "407e317f4b09b0311b506d7bf60e23bd6e98acd501f0301c2d78d71ea108983" + configChecksum: "f652cb79ec4760da1c900d5bf0c530a8d05b34380fc1be967080d35945bc3b1" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -463,7 +463,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: @@ -517,9 +517,9 @@ spec: labels: app: flyte-pod-webhook app.kubernetes.io/name: flyte-pod-webhook - app.kubernetes.io/version: v1.13.1-rc0 + app.kubernetes.io/version: v1.13.1-rc1 annotations: - configChecksum: "407e317f4b09b0311b506d7bf60e23bd6e98acd501f0301c2d78d71ea108983" + configChecksum: "f652cb79ec4760da1c900d5bf0c530a8d05b34380fc1be967080d35945bc3b1" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: @@ -533,7 +533,7 @@ spec: serviceAccountName: flyte-pod-webhook initContainers: - name: generate-secrets - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -560,7 +560,7 @@ spec: mountPath: /etc/flyte/config containers: - name: webhook - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: - flytepropeller diff --git a/deployment/gcp/flyte_helm_generated.yaml b/deployment/gcp/flyte_helm_generated.yaml index c51e6972ce..9f501cf916 100644 --- a/deployment/gcp/flyte_helm_generated.yaml +++ b/deployment/gcp/flyte_helm_generated.yaml @@ -474,7 +474,7 @@ data: plugins: k8s: co-pilot: - image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0 + image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1 name: flyte-copilot- start-timeout: 30s core.yaml: | @@ -930,7 +930,7 @@ spec: - /etc/flyte/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: run-migrations securityContext: @@ -951,7 +951,7 @@ spec: - flytesnacks - flytetester - flyteexamples - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: seed-projects securityContext: @@ -969,7 +969,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - sync - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources securityContext: @@ -986,7 +986,7 @@ spec: - mountPath: /etc/secrets/ name: admin-secrets - name: generate-secrets - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: ["/bin/sh", "-c"] args: @@ -1013,7 +1013,7 @@ spec: - --config - /etc/flyte/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" 
name: flyteadmin ports: @@ -1120,7 +1120,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources volumeMounts: @@ -1250,7 +1250,7 @@ spec: - /etc/datacatalog/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: run-migrations volumeMounts: @@ -1268,7 +1268,7 @@ spec: - --config - /etc/datacatalog/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: datacatalog ports: @@ -1351,7 +1351,7 @@ spec: - precheck - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytescheduler-check securityContext: @@ -1371,7 +1371,7 @@ spec: - run - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytescheduler ports: @@ -1430,7 +1430,7 @@ spec: template: metadata: annotations: - configChecksum: "407e317f4b09b0311b506d7bf60e23bd6e98acd501f0301c2d78d71ea108983" + configChecksum: "f652cb79ec4760da1c900d5bf0c530a8d05b34380fc1be967080d35945bc3b1" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -1457,7 +1457,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: @@ -1511,9 +1511,9 @@ spec: labels: app: flyte-pod-webhook app.kubernetes.io/name: flyte-pod-webhook - app.kubernetes.io/version: v1.13.1-rc0 + app.kubernetes.io/version: v1.13.1-rc1 annotations: - configChecksum: "407e317f4b09b0311b506d7bf60e23bd6e98acd501f0301c2d78d71ea108983" + configChecksum: "f652cb79ec4760da1c900d5bf0c530a8d05b34380fc1be967080d35945bc3b1" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: @@ -1527,7 +1527,7 @@ spec: serviceAccountName: flyte-pod-webhook initContainers: - name: generate-secrets - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -1554,7 +1554,7 @@ spec: mountPath: /etc/flyte/config containers: - name: webhook - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: - flytepropeller diff --git a/deployment/sandbox-binary/flyte_sandbox_binary_helm_generated.yaml b/deployment/sandbox-binary/flyte_sandbox_binary_helm_generated.yaml index 74a37957ea..ef73b3f145 100644 --- a/deployment/sandbox-binary/flyte_sandbox_binary_helm_generated.yaml +++ b/deployment/sandbox-binary/flyte_sandbox_binary_helm_generated.yaml @@ -116,7 +116,7 @@ data: stackdriver-enabled: false k8s: co-pilot: - image: "cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1" k8s-array: logs: config: @@ -359,7 +359,7 @@ spec: app.kubernetes.io/instance: flyte app.kubernetes.io/component: flyte-binary annotations: - checksum/configuration: 
58c26a7a95c9edce075726e132dac345f0aafb69dea2b21f6445dc2615ee61fe + checksum/configuration: 2761c022d974c8ebb20ba44c0363cd80e46e2afd24ea916ebfe6b0f242f0418e checksum/configuration-secret: d5d93f4e67780b21593dc3799f0f6682aab0765e708e4020939975d14d44f929 checksum/cluster-resource-templates: 7dfa59f3d447e9c099b8f8ffad3af466fecbc9cf9f8c97295d9634254a55d4ae spec: diff --git a/deployment/sandbox/flyte_helm_generated.yaml b/deployment/sandbox/flyte_helm_generated.yaml index 28ab47df17..f1f71817b6 100644 --- a/deployment/sandbox/flyte_helm_generated.yaml +++ b/deployment/sandbox/flyte_helm_generated.yaml @@ -586,7 +586,7 @@ data: plugins: k8s: co-pilot: - image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0 + image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1 name: flyte-copilot- start-timeout: 30s core.yaml: | @@ -6714,7 +6714,7 @@ spec: - /etc/flyte/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: run-migrations securityContext: @@ -6734,7 +6734,7 @@ spec: - flytesnacks - flytetester - flyteexamples - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: seed-projects securityContext: @@ -6751,7 +6751,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - sync - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources securityContext: @@ -6767,7 +6767,7 @@ spec: - mountPath: /etc/secrets/ name: admin-secrets - name: generate-secrets - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: ["/bin/sh", "-c"] args: @@ -6794,7 +6794,7 @@ spec: - --config - /etc/flyte/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flyteadmin ports: @@ -6891,7 +6891,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources volumeMounts: @@ -7016,7 +7016,7 @@ spec: - /etc/datacatalog/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: run-migrations volumeMounts: @@ -7033,7 +7033,7 @@ spec: - --config - /etc/datacatalog/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: datacatalog ports: @@ -7106,7 +7106,7 @@ spec: - precheck - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytescheduler-check securityContext: @@ -7125,7 +7125,7 @@ spec: - run - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytescheduler ports: @@ -7181,7 +7181,7 @@ spec: template: metadata: annotations: - configChecksum: 
"87f8dd83145c058839fbf440c688d131d5917282ae935b2fe02147df47ef3a7" + configChecksum: "b6a325d0de65783cfab97909bc7202fef4c3efc85edd2b95b5076e3681938f1" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -7208,7 +7208,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: @@ -7255,9 +7255,9 @@ spec: labels: app: flyte-pod-webhook app.kubernetes.io/name: flyte-pod-webhook - app.kubernetes.io/version: v1.13.1-rc0 + app.kubernetes.io/version: v1.13.1-rc1 annotations: - configChecksum: "87f8dd83145c058839fbf440c688d131d5917282ae935b2fe02147df47ef3a7" + configChecksum: "b6a325d0de65783cfab97909bc7202fef4c3efc85edd2b95b5076e3681938f1" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: @@ -7271,7 +7271,7 @@ spec: serviceAccountName: flyte-pod-webhook initContainers: - name: generate-secrets - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -7298,7 +7298,7 @@ spec: mountPath: /etc/flyte/config containers: - name: webhook - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" imagePullPolicy: "IfNotPresent" command: - flytepropeller diff --git a/docker/sandbox-bundled/manifests/complete-agent.yaml b/docker/sandbox-bundled/manifests/complete-agent.yaml index 9e10ae09fb..3ba7075df8 100644 --- a/docker/sandbox-bundled/manifests/complete-agent.yaml +++ b/docker/sandbox-bundled/manifests/complete-agent.yaml @@ -469,7 +469,7 @@ data: stackdriver-enabled: false k8s: co-pilot: - image: "cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1" k8s-array: logs: config: @@ -817,7 +817,7 @@ type: Opaque --- apiVersion: v1 data: - haSharedSecret: cEJlTDl0bXN6NVE4ZEdFag== + haSharedSecret: dkJZdnpKQ0FYZkhWano2eg== proxyPassword: "" proxyUsername: "" kind: Secret @@ -1248,7 +1248,7 @@ spec: metadata: annotations: checksum/cluster-resource-templates: 6fd9b172465e3089fcc59f738b92b8dc4d8939360c19de8ee65f68b0e7422035 - checksum/configuration: 9207564b9b5f0358f7b8507232200ac759f58ae16af8561f72a4488274629eaf + checksum/configuration: 12c484f191527a693debafaa71bfcd04dbda7bfc87c83e385ea6d5c13188401f checksum/configuration-secret: 09216ffaa3d29e14f88b1f30af580d02a2a5e014de4d750b7f275cc07ed4e914 labels: app.kubernetes.io/component: flyte-binary @@ -1414,7 +1414,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: ad16b8a2ae1014673d354d27f8f9e4307e588f439534e5894ecb7b0e4c8c7692 + checksum/secret: 4c91f059d15ecfed81f3906fb24896c41fde9103a61ace577327d080409423da labels: app: docker-registry release: flyte-sandbox @@ -1757,7 +1757,7 @@ spec: value: minio - name: FLYTE_AWS_SECRET_ACCESS_KEY value: miniostorage - image: cr.flyte.org/flyteorg/flyteagent:1.13.3 + image: cr.flyte.org/flyteorg/flyteagent:1.13.4 imagePullPolicy: IfNotPresent name: flyteagent ports: diff --git a/docker/sandbox-bundled/manifests/complete.yaml b/docker/sandbox-bundled/manifests/complete.yaml index ea327339fb..5c470d20cf 100644 --- a/docker/sandbox-bundled/manifests/complete.yaml +++ b/docker/sandbox-bundled/manifests/complete.yaml @@ -458,7 +458,7 @@ data: stackdriver-enabled: false k8s: co-pilot: - image: 
"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc0" + image: "cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1" k8s-array: logs: config: @@ -797,7 +797,7 @@ type: Opaque --- apiVersion: v1 data: - haSharedSecret: UFppYnRpOVNHMVdlZkp0TA== + haSharedSecret: WGJQZFpzb2ZDSkU5dmJReQ== proxyPassword: "" proxyUsername: "" kind: Secret @@ -1195,7 +1195,7 @@ spec: metadata: annotations: checksum/cluster-resource-templates: 6fd9b172465e3089fcc59f738b92b8dc4d8939360c19de8ee65f68b0e7422035 - checksum/configuration: 6bc1ee22a1eb899398b82b56862cfb1aa09ed96f467d4eae11f2738c284115c2 + checksum/configuration: 0c0c4c2401e4d6362921a86660489536d0db8e4e66ae09e429adc54665c68021 checksum/configuration-secret: 09216ffaa3d29e14f88b1f30af580d02a2a5e014de4d750b7f275cc07ed4e914 labels: app.kubernetes.io/component: flyte-binary @@ -1361,7 +1361,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: bfe262b4ef6c387db539e0d2b93d9557907a4a4b5aef3cec954b1ce593d364d9 + checksum/secret: 20145f8b7e37f104163904f86eeb0a46444c157de19f8e675128b04d16598ee4 labels: app: docker-registry release: flyte-sandbox diff --git a/docker/sandbox-bundled/manifests/dev.yaml b/docker/sandbox-bundled/manifests/dev.yaml index 1f55acef66..787be05725 100644 --- a/docker/sandbox-bundled/manifests/dev.yaml +++ b/docker/sandbox-bundled/manifests/dev.yaml @@ -499,7 +499,7 @@ metadata: --- apiVersion: v1 data: - haSharedSecret: MEV1QmRqTlVpVHljaU9FeQ== + haSharedSecret: R2RwSGJNOERJN2NSWXNQNg== proxyPassword: "" proxyUsername: "" kind: Secret @@ -934,7 +934,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: 9ac72b0cb595456c3e96447f44a0377762ab17d663e80e47079203fcbd518a34 + checksum/secret: a110328cf7fce9dfe57fe25438d4902fc3cc661346782bb261c0b6b80fb783d1 labels: app: docker-registry release: flyte-sandbox diff --git a/docs/conf.py b/docs/conf.py index 35b21a09ca..992b62f91f 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -36,7 +36,7 @@ # The short X.Y version version = "" # The full version, including alpha/beta/rc tags -release = "1.13.1-rc0" +release = "1.13.1-rc1" # -- General configuration --------------------------------------------------- diff --git a/docs/deployment/configuration/generated/flyteadmin_config.rst b/docs/deployment/configuration/generated/flyteadmin_config.rst index 162cbc4d1d..0912738015 100644 --- a/docs/deployment/configuration/generated/flyteadmin_config.rst +++ b/docs/deployment/configuration/generated/flyteadmin_config.rst @@ -2756,6 +2756,8 @@ k8s (`config.K8sPluginConfig`_) resource-tolerations: null scheduler-name: "" send-object-events: false + update-backoff-retries: 5 + update-base-backoff-duration: 10 catalog.Config @@ -3228,6 +3230,30 @@ If true, will send k8s object events in TaskExecutionEvent updates. "false" +update-base-backoff-duration (int) +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" + +Initial delay in exponential backoff when updating a resource in milliseconds. + +**Default Value**: + +.. code-block:: yaml + + "10" + + +update-backoff-retries (int) +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" + +Number of retries for exponential backoff when updating a resource. + +**Default Value**: + +.. 
code-block:: yaml + + "5" + + config.FlyteCoPilotConfig ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -4058,6 +4084,16 @@ Whether output data should be sent by reference when it is too large to be sent "false" +ErrorOnAlreadyExists (bool) +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" + +**Default Value**: + +.. code-block:: yaml + + "false" + + config.KubeClientConfig ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/docs/deployment/configuration/generated/flytepropeller_config.rst b/docs/deployment/configuration/generated/flytepropeller_config.rst index a09a4b1e91..be6f7ee7f0 100644 --- a/docs/deployment/configuration/generated/flytepropeller_config.rst +++ b/docs/deployment/configuration/generated/flytepropeller_config.rst @@ -1211,6 +1211,8 @@ k8s (`config.K8sPluginConfig`_) resource-tolerations: null scheduler-name: "" send-object-events: false + update-backoff-retries: 5 + update-base-backoff-duration: 10 k8s-array (`k8s.Config`_) @@ -2712,6 +2714,30 @@ If true, will send k8s object events in TaskExecutionEvent updates. "false" +update-base-backoff-duration (int) +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" + +Initial delay in exponential backoff when updating a resource in milliseconds. + +**Default Value**: + +.. code-block:: yaml + + "10" + + +update-backoff-retries (int) +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" + +Number of retries for exponential backoff when updating a resource. + +**Default Value**: + +.. code-block:: yaml + + "5" + + config.FlyteCoPilotConfig ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -4896,6 +4922,16 @@ Whether output data should be sent by reference when it is too large to be sent "false" +ErrorOnAlreadyExists (bool) +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" + +**Default Value**: + +.. code-block:: yaml + + "false" + + config.KubeClientConfig ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/docs/deployment/configuration/generated/scheduler_config.rst b/docs/deployment/configuration/generated/scheduler_config.rst index 923e3db898..98ff1ee343 100644 --- a/docs/deployment/configuration/generated/scheduler_config.rst +++ b/docs/deployment/configuration/generated/scheduler_config.rst @@ -2756,6 +2756,8 @@ k8s (`config.K8sPluginConfig`_) resource-tolerations: null scheduler-name: "" send-object-events: false + update-backoff-retries: 5 + update-base-backoff-duration: 10 catalog.Config @@ -3228,6 +3230,30 @@ If true, will send k8s object events in TaskExecutionEvent updates. "false" +update-base-backoff-duration (int) +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" + +Initial delay in exponential backoff when updating a resource in milliseconds. + +**Default Value**: + +.. 
code-block:: yaml + + "10" + + +update-backoff-retries (int) +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" + +Number of retries for exponential backoff when updating a resource. + +**Default Value**: + +.. code-block:: yaml + + "5" + + config.FlyteCoPilotConfig ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -4058,6 +4084,16 @@ Whether output data should be sent by reference when it is too large to be sent "false" +ErrorOnAlreadyExists (bool) +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" + +**Default Value**: + +.. code-block:: yaml + + "false" + + config.KubeClientConfig ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ From 3b690d206155fa059f31e0d963ba9b120a25104a Mon Sep 17 00:00:00 2001 From: Yee Hing Tong Date: Sat, 24 Aug 2024 10:24:13 -0700 Subject: [PATCH 43/65] [flytectl] DataConfig missing from TaskSpec (#5692) Signed-off-by: Yee Hing Tong Signed-off-by: Bugra Gedik --- flytectl/cmd/register/register_util.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/flytectl/cmd/register/register_util.go b/flytectl/cmd/register/register_util.go index 0bf5a23d49..b7b419e611 100644 --- a/flytectl/cmd/register/register_util.go +++ b/flytectl/cmd/register/register_util.go @@ -339,8 +339,9 @@ func hydrateTaskSpec(task *admin.TaskSpec, sourceUploadedLocation storage.DataRe } task.Template.Target = &core.TaskTemplate_K8SPod{ K8SPod: &core.K8SPod{ - Metadata: task.Template.GetK8SPod().Metadata, - PodSpec: podSpecStruct, + Metadata: task.Template.GetK8SPod().Metadata, + PodSpec: podSpecStruct, + DataConfig: task.Template.GetK8SPod().DataConfig, }, } } From e0ae3ca5f51427503ab9cc7c6538d871332de7cd Mon Sep 17 00:00:00 2001 From: Flyte Bot Date: Mon, 26 Aug 2024 17:56:56 -0700 Subject: [PATCH 44/65] Update Flyte components - v1.13.1 (#5696) * Update Flyte Components Signed-off-by: Flyte-Bot * Bump version in conf.py and add changelog Signed-off-by: Eduardo Apolinario --------- Signed-off-by: Flyte-Bot Signed-off-by: Eduardo Apolinario Co-authored-by: eapolinario Signed-off-by: Bugra Gedik --- CHANGELOG/CHANGELOG-v1.13.1.md | 81 +++++++++++++++++++ charts/flyte-binary/README.md | 2 +- charts/flyte-binary/values.yaml | 2 +- charts/flyte-core/README.md | 12 +-- charts/flyte-core/values.yaml | 10 +-- charts/flyte/README.md | 16 ++-- charts/flyte/values.yaml | 10 +-- .../flyte_aws_scheduler_helm_generated.yaml | 30 +++---- .../flyte_helm_controlplane_generated.yaml | 20 ++--- .../eks/flyte_helm_dataplane_generated.yaml | 14 ++-- deployment/eks/flyte_helm_generated.yaml | 34 ++++---- .../flyte_helm_controlplane_generated.yaml | 20 ++--- .../gcp/flyte_helm_dataplane_generated.yaml | 14 ++-- deployment/gcp/flyte_helm_generated.yaml | 34 ++++---- .../flyte_sandbox_binary_helm_generated.yaml | 4 +- deployment/sandbox/flyte_helm_generated.yaml | 34 ++++---- .../manifests/complete-agent.yaml | 8 +- .../sandbox-bundled/manifests/complete.yaml | 8 +- docker/sandbox-bundled/manifests/dev.yaml | 4 +- docs/conf.py | 2 +- 20 files changed, 220 insertions(+), 139 deletions(-) create mode 100644 CHANGELOG/CHANGELOG-v1.13.1.md diff --git a/CHANGELOG/CHANGELOG-v1.13.1.md b/CHANGELOG/CHANGELOG-v1.13.1.md new file mode 100644 index 0000000000..e84f8184b5 --- /dev/null +++ b/CHANGELOG/CHANGELOG-v1.13.1.md @@ 
-0,0 +1,81 @@ +# Flyte 1.13.1 Release Notes + +## What's Changed +* chore: update runllm widget configuration by @agiron123 in https://github.com/flyteorg/flyte/pull/5530 +* Use jsonpb AllowUnknownFields everywhere by @andrewwdye in https://github.com/flyteorg/flyte/pull/5521 +* [flyteadmin] Use WithContext in all DB calls by @Sovietaced in https://github.com/flyteorg/flyte/pull/5538 +* add new project state: SYSTEM_ARCHIVED by @troychiu in https://github.com/flyteorg/flyte/pull/5544 +* Pryce/doc 434 clarify how code is pushed into a given image during pyflyte by @pryce-turner in https://github.com/flyteorg/flyte/pull/5548 +* Increase more memory limits in flyteagent by @Future-Outlier in https://github.com/flyteorg/flyte/pull/5550 +* Updated map task information to indicate array node is now the default and optional return type by @pryce-turner in https://github.com/flyteorg/flyte/pull/5561 +* reverted mockery-v2 on ExecutionContext by @hamersaw in https://github.com/flyteorg/flyte/pull/5562 +* Fix issues with helm chart release process by @Sovietaced in https://github.com/flyteorg/flyte/pull/5560 +* Add FlyteDirectory to file_types.rst template by @ppiegaze in https://github.com/flyteorg/flyte/pull/5564 +* Fully populate Abort task event fields by @va6996 in https://github.com/flyteorg/flyte/pull/5551 +* [fix] Add blob typechecker by @ddl-rliu in https://github.com/flyteorg/flyte/pull/5519 +* Refactor echo plugin by @pingsutw in https://github.com/flyteorg/flyte/pull/5565 +* [Bug] fix ArrayNode state's TaskPhase reset by @pvditt in https://github.com/flyteorg/flyte/pull/5451 +* Remove confusing prometheus configuration options in helm charts by @Sovietaced in https://github.com/flyteorg/flyte/pull/5549 +* [Housekeeping] Bump Go version to 1.22 by @lowc1012 in https://github.com/flyteorg/flyte/pull/5032 +* Fix typos by @omahs in https://github.com/flyteorg/flyte/pull/5571 +* Respect original task definition retry strategy for single task executions by @katrogan in https://github.com/flyteorg/flyte/pull/5577 +* Clarify the support for the Java/Scala SDK in the docs by @eapolinario in https://github.com/flyteorg/flyte/pull/5582 +* Fix spelling issues by @nnsW3 in https://github.com/flyteorg/flyte/pull/5580 +* Give flyte binary cluster role permission to create service accounts by @shreyas44 in https://github.com/flyteorg/flyte/pull/5579 +* Simplify single task retry strategy check by @eapolinario in https://github.com/flyteorg/flyte/pull/5584 +* Fix failures in `generate_helm` CI check by @eapolinario in https://github.com/flyteorg/flyte/pull/5587 +* Update GPU docs by @davidmirror-ops in https://github.com/flyteorg/flyte/pull/5515 +* Update azblob 1.1.0 -> 1.4.0 / azcore 1.7.2 -> 1.13.0 by @ddl-ebrown in https://github.com/flyteorg/flyte/pull/5590 +* Bump github.com/go-jose/go-jose/v3 from 3.0.0 to 3.0.3 in /flyteadmin by @dependabot in https://github.com/flyteorg/flyte/pull/5591 +* add execution mode to ArrayNode proto by @pvditt in https://github.com/flyteorg/flyte/pull/5512 +* Fix incorrect YAML for unpartitoned GPU by @davidmirror-ops in https://github.com/flyteorg/flyte/pull/5595 +* Another YAML fix by @davidmirror-ops in https://github.com/flyteorg/flyte/pull/5596 +* DOC-431 Document pyflyte option --overwrite-cache by @ppiegaze in https://github.com/flyteorg/flyte/pull/5567 +* Upgrade docker dependency to address vulnerability by @katrogan in https://github.com/flyteorg/flyte/pull/5614 +* Support offloading workflow CRD inputs by @katrogan in 
https://github.com/flyteorg/flyte/pull/5609 +* [flyteadmin] Refactor panic recovery into middleware by @Sovietaced in https://github.com/flyteorg/flyte/pull/5546 +* Snowflake agent Doc by @Future-Outlier in https://github.com/flyteorg/flyte/pull/5620 +* [flytepropeller][compiler] Error Handling when Type is not found by @Future-Outlier in https://github.com/flyteorg/flyte/pull/5612 +* Fix nil pointer when task plugin load returns error by @Sovietaced in https://github.com/flyteorg/flyte/pull/5622 +* Log stack trace when refresh cache sync recovers from panic by @Sovietaced in https://github.com/flyteorg/flyte/pull/5623 +* [Doc] Fix snowflake agent secret documentation error by @Future-Outlier in https://github.com/flyteorg/flyte/pull/5626 +* [Doc] Explain how Agent Secret Works by @Future-Outlier in https://github.com/flyteorg/flyte/pull/5625 +* Fix typo in execution manager by @ddl-rliu in https://github.com/flyteorg/flyte/pull/5619 +* Amend Admin to use grpc message size by @wild-endeavor in https://github.com/flyteorg/flyte/pull/5628 +* [Docs] document the process of setting ttl for a ray cluster by @pingsutw in https://github.com/flyteorg/flyte/pull/5636 +* Add CustomHeaderMatcher to pass additional headers by @andrewwdye in https://github.com/flyteorg/flyte/pull/5563 +* Turn flyteidl and flytectl releases into manual gh workflows by @eapolinario in https://github.com/flyteorg/flyte/pull/5635 +* docs: fix typo by @cratiu222 in https://github.com/flyteorg/flyte/pull/5643 +* Use enable_deck=True in docs by @thomasjpfan in https://github.com/flyteorg/flyte/pull/5645 +* Fix flyteidl release checkout all tags by @eapolinario in https://github.com/flyteorg/flyte/pull/5646 +* Install pyarrow in sandbox functional tests by @eapolinario in https://github.com/flyteorg/flyte/pull/5647 +* docs: add documentation for configuring notifications in GCP by @desihsu in https://github.com/flyteorg/flyte/pull/5545 +* Correct "sucessfile" to "successfile" by @shengyu7697 in https://github.com/flyteorg/flyte/pull/5652 +* Fix ordering for custom template values in cluster resource controller by @katrogan in https://github.com/flyteorg/flyte/pull/5648 +* Don't error when attempting to trigger schedules for inactive projects by @katrogan in https://github.com/flyteorg/flyte/pull/5649 +* Update Flyte components - v1.13.1-rc0 by @flyte-bot in https://github.com/flyteorg/flyte/pull/5656 +* Add offloaded path to literal by @katrogan in https://github.com/flyteorg/flyte/pull/5660 +* Improve error messaging for invalid arguments by @pingsutw in https://github.com/flyteorg/flyte/pull/5658 +* DOC-462 Update "Try Flyte in the browser" text by @neverett in https://github.com/flyteorg/flyte/pull/5654 +* DOC-533 Remove outdated duplicate notification config content by @neverett in https://github.com/flyteorg/flyte/pull/5672 +* Validate labels before creating flyte CRD by @pingsutw in https://github.com/flyteorg/flyte/pull/5671 +* Add FLYTE_INTERNAL_POD_NAME environment variable that holds the pod name by @bgedik in https://github.com/flyteorg/flyte/pull/5616 +* Upstream Using InMemory token cache for admin clientset in propeller by @pvditt in https://github.com/flyteorg/flyte/pull/5621 +* [Bug] Update resource failures w/ Finalizers set (#423) by @pvditt in https://github.com/flyteorg/flyte/pull/5673 +* [BUG] array node eventing bump version by @pvditt in https://github.com/flyteorg/flyte/pull/5680 +* Add custominfo to agents by @ddl-rliu in https://github.com/flyteorg/flyte/pull/5604 +* [BUG] use deep copy of bit arrays 
when getting array node state by @pvditt in https://github.com/flyteorg/flyte/pull/5681 +* More concise definition of launchplan by @eapolinario in https://github.com/flyteorg/flyte/pull/5682 +* Auth/prevent lookup per call by @wild-endeavor in https://github.com/flyteorg/flyte/pull/5686 +* Update Flyte components - v1.13.1-rc1 by @flyte-bot in https://github.com/flyteorg/flyte/pull/5691 +* [flytectl] DataConfig missing from TaskSpec by @wild-endeavor in https://github.com/flyteorg/flyte/pull/5692 + +## New Contributors +* @omahs made their first contribution in https://github.com/flyteorg/flyte/pull/5571 +* @nnsW3 made their first contribution in https://github.com/flyteorg/flyte/pull/5580 +* @shreyas44 made their first contribution in https://github.com/flyteorg/flyte/pull/5579 +* @cratiu222 made their first contribution in https://github.com/flyteorg/flyte/pull/5643 +* @desihsu made their first contribution in https://github.com/flyteorg/flyte/pull/5545 +* @shengyu7697 made their first contribution in https://github.com/flyteorg/flyte/pull/5652 +* @bgedik made their first contribution in https://github.com/flyteorg/flyte/pull/5616 +**Full Changelog**: https://github.com/flyteorg/flyte/compare/v1.13.0...v1.13.1 diff --git a/charts/flyte-binary/README.md b/charts/flyte-binary/README.md index 350391fd53..6bcac8f45e 100644 --- a/charts/flyte-binary/README.md +++ b/charts/flyte-binary/README.md @@ -42,7 +42,7 @@ Chart for basic single Flyte executable deployment | configuration.auth.oidc.clientId | string | `""` | | | configuration.auth.oidc.clientSecret | string | `""` | | | configuration.co-pilot.image.repository | string | `"cr.flyte.org/flyteorg/flytecopilot"` | | -| configuration.co-pilot.image.tag | string | `"v1.13.1-rc1"` | | +| configuration.co-pilot.image.tag | string | `"v1.13.1"` | | | configuration.database.dbname | string | `"flyte"` | | | configuration.database.host | string | `"127.0.0.1"` | | | configuration.database.options | string | `"sslmode=disable"` | | diff --git a/charts/flyte-binary/values.yaml b/charts/flyte-binary/values.yaml index 8821126da2..f70b5024d1 100644 --- a/charts/flyte-binary/values.yaml +++ b/charts/flyte-binary/values.yaml @@ -159,7 +159,7 @@ configuration: # repository CoPilot sidecar image repository repository: cr.flyte.org/flyteorg/flytecopilot # FLYTECOPILOT_IMAGE # tag CoPilot sidecar image tag - tag: v1.13.1-rc1 # FLYTECOPILOT_TAG + tag: v1.13.1 # FLYTECOPILOT_TAG # agentService Flyte Agent configuration agentService: defaultAgent: diff --git a/charts/flyte-core/README.md b/charts/flyte-core/README.md index 14b938d2cf..ef9814c8da 100644 --- a/charts/flyte-core/README.md +++ b/charts/flyte-core/README.md @@ -95,8 +95,8 @@ helm install gateway bitnami/contour -n flyte | configmap.clusters.clusterConfigs | list | `[]` | | | configmap.clusters.labelClusterMap | object | `{}` | | | configmap.console | object | `{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"}` | Configuration for Flyte console UI | -| configmap.copilot | object | `{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1","name":"flyte-copilot-","start-timeout":"30s"}}}}` | Copilot configuration | -| configmap.copilot.plugins.k8s.co-pilot | object | `{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1","name":"flyte-copilot-","start-timeout":"30s"}` | Structure documented [here](https://pkg.go.dev/github.com/lyft/flyteplugins@v0.5.28/go/tasks/pluginmachinery/flytek8s/config#FlyteCoPilotConfig) | +| configmap.copilot | object | 
`{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1","name":"flyte-copilot-","start-timeout":"30s"}}}}` | Copilot configuration | +| configmap.copilot.plugins.k8s.co-pilot | object | `{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1","name":"flyte-copilot-","start-timeout":"30s"}` | Structure documented [here](https://pkg.go.dev/github.com/lyft/flyteplugins@v0.5.28/go/tasks/pluginmachinery/flytek8s/config#FlyteCoPilotConfig) | | configmap.core | object | `{"manager":{"pod-application":"flytepropeller","pod-template-container-name":"flytepropeller","pod-template-name":"flytepropeller-template"},"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}}` | Core propeller configuration | | configmap.core.manager | object | `{"pod-application":"flytepropeller","pod-template-container-name":"flytepropeller","pod-template-name":"flytepropeller-template"}` | follows the structure specified [here](https://pkg.go.dev/github.com/flyteorg/flytepropeller/manager/config#Config). | | configmap.core.propeller | object | `{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"}` | follows the structure specified [here](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/config). 
| @@ -130,7 +130,7 @@ helm install gateway bitnami/contour -n flyte | datacatalog.extraArgs | object | `{}` | Appends extra command line arguments to the main command | | datacatalog.image.pullPolicy | string | `"IfNotPresent"` | Docker image pull policy | | datacatalog.image.repository | string | `"cr.flyte.org/flyteorg/datacatalog"` | Docker image for Datacatalog deployment | -| datacatalog.image.tag | string | `"v1.13.1-rc1"` | Docker image tag | +| datacatalog.image.tag | string | `"v1.13.1"` | Docker image tag | | datacatalog.nodeSelector | object | `{}` | nodeSelector for Datacatalog deployment | | datacatalog.podAnnotations | object | `{}` | Annotations for Datacatalog pods | | datacatalog.podEnv | object | `{}` | Additional Datacatalog container environment variables | @@ -166,7 +166,7 @@ helm install gateway bitnami/contour -n flyte | flyteadmin.extraArgs | object | `{}` | Appends extra command line arguments to the serve command | | flyteadmin.image.pullPolicy | string | `"IfNotPresent"` | | | flyteadmin.image.repository | string | `"cr.flyte.org/flyteorg/flyteadmin"` | Docker image for Flyteadmin deployment | -| flyteadmin.image.tag | string | `"v1.13.1-rc1"` | | +| flyteadmin.image.tag | string | `"v1.13.1"` | | | flyteadmin.initialProjects | list | `["flytesnacks","flytetester","flyteexamples"]` | Initial projects to create | | flyteadmin.nodeSelector | object | `{}` | nodeSelector for Flyteadmin deployment | | flyteadmin.podAnnotations | object | `{}` | Annotations for Flyteadmin pods | @@ -238,7 +238,7 @@ helm install gateway bitnami/contour -n flyte | flytepropeller.extraArgs | object | `{}` | Appends extra command line arguments to the main command | | flytepropeller.image.pullPolicy | string | `"IfNotPresent"` | | | flytepropeller.image.repository | string | `"cr.flyte.org/flyteorg/flytepropeller"` | Docker image for Flytepropeller deployment | -| flytepropeller.image.tag | string | `"v1.13.1-rc1"` | | +| flytepropeller.image.tag | string | `"v1.13.1"` | | | flytepropeller.manager | bool | `false` | | | flytepropeller.nodeSelector | object | `{}` | nodeSelector for Flytepropeller deployment | | flytepropeller.podAnnotations | object | `{}` | Annotations for Flytepropeller pods | @@ -270,7 +270,7 @@ helm install gateway bitnami/contour -n flyte | flytescheduler.configPath | string | `"/etc/flyte/config/*.yaml"` | Default regex string for searching configuration files | | flytescheduler.image.pullPolicy | string | `"IfNotPresent"` | Docker image pull policy | | flytescheduler.image.repository | string | `"cr.flyte.org/flyteorg/flytescheduler"` | Docker image for Flytescheduler deployment | -| flytescheduler.image.tag | string | `"v1.13.1-rc1"` | Docker image tag | +| flytescheduler.image.tag | string | `"v1.13.1"` | Docker image tag | | flytescheduler.nodeSelector | object | `{}` | nodeSelector for Flytescheduler deployment | | flytescheduler.podAnnotations | object | `{}` | Annotations for Flytescheduler pods | | flytescheduler.podEnv | object | `{}` | Additional Flytescheduler container environment variables | diff --git a/charts/flyte-core/values.yaml b/charts/flyte-core/values.yaml index 4462372d95..7023dea3f0 100755 --- a/charts/flyte-core/values.yaml +++ b/charts/flyte-core/values.yaml @@ -16,7 +16,7 @@ flyteadmin: image: # -- Docker image for Flyteadmin deployment repository: cr.flyte.org/flyteorg/flyteadmin # FLYTEADMIN_IMAGE - tag: v1.13.1-rc1 # FLYTEADMIN_TAG + tag: v1.13.1 # FLYTEADMIN_TAG pullPolicy: IfNotPresent # -- Additional flyteadmin container environment 
variables # @@ -144,7 +144,7 @@ flytescheduler: # -- Docker image for Flytescheduler deployment repository: cr.flyte.org/flyteorg/flytescheduler # FLYTESCHEDULER_IMAGE # -- Docker image tag - tag: v1.13.1-rc1 # FLYTESCHEDULER_TAG + tag: v1.13.1 # FLYTESCHEDULER_TAG # -- Docker image pull policy pullPolicy: IfNotPresent # -- Default resources requests and limits for Flytescheduler deployment @@ -210,7 +210,7 @@ datacatalog: # -- Docker image for Datacatalog deployment repository: cr.flyte.org/flyteorg/datacatalog # DATACATALOG_IMAGE # -- Docker image tag - tag: v1.13.1-rc1 # DATACATALOG_TAG + tag: v1.13.1 # DATACATALOG_TAG # -- Docker image pull policy pullPolicy: IfNotPresent # -- Default resources requests and limits for Datacatalog deployment @@ -309,7 +309,7 @@ flytepropeller: image: # -- Docker image for Flytepropeller deployment repository: cr.flyte.org/flyteorg/flytepropeller # FLYTEPROPELLER_IMAGE - tag: v1.13.1-rc1 # FLYTEPROPELLER_TAG + tag: v1.13.1 # FLYTEPROPELLER_TAG pullPolicy: IfNotPresent # -- Default resources requests and limits for Flytepropeller deployment resources: @@ -801,7 +801,7 @@ configmap: # -- Structure documented [here](https://pkg.go.dev/github.com/lyft/flyteplugins@v0.5.28/go/tasks/pluginmachinery/flytek8s/config#FlyteCoPilotConfig) co-pilot: name: flyte-copilot- - image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1 # FLYTECOPILOT_IMAGE + image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1 # FLYTECOPILOT_IMAGE start-timeout: 30s # -- Core propeller configuration diff --git a/charts/flyte/README.md b/charts/flyte/README.md index 4a3a911d00..5ae694e411 100644 --- a/charts/flyte/README.md +++ b/charts/flyte/README.md @@ -71,7 +71,7 @@ helm upgrade -f values-sandbox.yaml flyte . | contour.tolerations | list | `[]` | tolerations for Contour deployment | | daskoperator | object | `{"enabled":false}` | Optional: Dask Plugin using the Dask Operator | | daskoperator.enabled | bool | `false` | - enable or disable the dask operator deployment installation | -| flyte | object | `{"cluster_resource_manager":{"config":{"cluster_resources":{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refresh":"5m","refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}},"enabled":true,"service_account_name":"flyteadmin","templates":[{"key":"aa_namespace","value":"apiVersion: v1\nkind: Namespace\nmetadata:\n name: {{ namespace }}\nspec:\n finalizers:\n - kubernetes\n"},{"key":"ab_project_resource_quota","value":"apiVersion: v1\nkind: ResourceQuota\nmetadata:\n name: project-quota\n namespace: {{ namespace }}\nspec:\n hard:\n limits.cpu: {{ projectQuotaCpu }}\n limits.memory: {{ projectQuotaMemory 
}}\n"}]},"common":{"databaseSecret":{"name":"","secretManifest":{}},"flyteNamespaceTemplate":{"enabled":false},"ingress":{"albSSLRedirect":false,"annotations":{"nginx.ingress.kubernetes.io/app-root":"/console"},"enabled":true,"host":"","separateGrpcIngress":false,"separateGrpcIngressAnnotations":{"nginx.ingress.kubernetes.io/backend-protocol":"GRPC"},"tls":{"enabled":false},"webpackHMR":true}},"configmap":{"adminServer":{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}},"catalog":{"catalog-cache":{"endpoint":"datacatalog:89","insecure":true,"type":"datacatalog"}},"console":{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"},"copilot":{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1","name":"flyte-copilot-","start-timeout":"30s"}}}},"core":{"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}},"datacatalogServer":{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}},"domain":{"domains":[{"id":"development","name":"development"},{"id":"staging","name":"staging"},{"id":"production","name":"production"}]},"enabled_plugins":{"tasks":{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sensor":"agent-service","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}}},"k8s":{"plugins":{"k8s":{"default-cpus":"100m","default-env-from-configmaps":[],"default-env-from-secrets":[],"default-env-vars":[{"FLYTE_AWS_ENDPOINT":"http://minio.flyte:9000"},{"FLYTE_AWS_ACCESS_KEY_ID":"minio"},{"FLYTE_AWS_SECRET_ACCESS_KEY":"miniostorage"}],"default-memory":"200Mi"}}},"logger":{"logger":{"level":5,"show-source":true}},"remoteData":{"remoteData":{"region":"us-east-1","scheme":"local","signedUrls":{"durationMinutes":3}}},"resource_manager":{"propeller":{"resourcemanager":{"redis":null,"type":"noop"}}},"task_logs":{"plugins":{"logs":{"cloudwatch-enabled":false,"kubernetes-enabled":true,"kubernetes-template-uri"
:"http://localhost:30082/#/log/{{ \"{{\" }} .namespace {{ \"}}\" }}/{{ \"{{\" }} .podName {{ \"}}\" }}/pod?namespace={{ \"{{\" }} .namespace {{ \"}}\" }}"}}},"task_resource_defaults":{"task_resources":{"defaults":{"cpu":"100m","memory":"200Mi","storage":"5Mi"},"limits":{"cpu":2,"gpu":1,"memory":"1Gi","storage":"20Mi"}}}},"datacatalog":{"affinity":{},"configPath":"/etc/datacatalog/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/datacatalog","tag":"v1.13.1-rc1"},"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"500m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"type":"NodePort"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"db":{"admin":{"database":{"dbname":"flyteadmin","host":"postgres","port":5432,"username":"postgres"}},"datacatalog":{"database":{"dbname":"datacatalog","host":"postgres","port":5432,"username":"postgres"}}},"deployRedoc":true,"flyteadmin":{"additionalVolumeMounts":[],"additionalVolumes":[],"affinity":{},"configPath":"/etc/flyte/config/*.yaml","env":[],"image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flyteadmin","tag":"v1.13.1-rc1"},"initialProjects":["flytesnacks","flytetester","flyteexamples"],"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"secrets":{},"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"loadBalancerSourceRanges":[],"type":"ClusterIP"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"flyteconsole":{"affinity":{},"ga":{"enabled":true,"tracking_id":"G-0QW4DJWJ20"},"image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flyteconsole","tag":"v1.17.1"},"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"500m","memory":"275Mi"},"requests":{"cpu":"10m","memory":"250Mi"}},"service":{"annotations":{},"type":"ClusterIP"},"tolerations":[]},"flytepropeller":{"affinity":{},"cacheSizeMbs":0,"configPath":"/etc/flyte/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flytepropeller","tag":"v1.13.1-rc1"},"manager":false,"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"200m","ephemeral-storage":"100Mi","memory":"200Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"flytescheduler":{"affinity":{},"configPath":"/etc/flyte/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flytescheduler","tag":"v1.13.1-rc1"},"nodeSelector":{},"podAnnotations":{},"resources":{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"secrets":{},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"storage":{"bucketName":"my-s3-bucket","custom":{},"gcs":null,"s3":{"region":"us-east-1"},"type":"sandbox"},"webhook":{"enabled":true,"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"type":"ClusterIP"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]}},"workflo
w_notifications":{"config":{},"enabled":false},"workflow_scheduler":{"enabled":true,"type":"native"}}` | ------------------------------------------------------------------- Core System settings This section consists of Core components of Flyte and their deployment settings. This includes FlyteAdmin service, Datacatalog, FlytePropeller and Flyteconsole | +| flyte | object | `{"cluster_resource_manager":{"config":{"cluster_resources":{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refresh":"5m","refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}},"enabled":true,"service_account_name":"flyteadmin","templates":[{"key":"aa_namespace","value":"apiVersion: v1\nkind: Namespace\nmetadata:\n name: {{ namespace }}\nspec:\n finalizers:\n - kubernetes\n"},{"key":"ab_project_resource_quota","value":"apiVersion: v1\nkind: ResourceQuota\nmetadata:\n name: project-quota\n namespace: {{ namespace }}\nspec:\n hard:\n limits.cpu: {{ projectQuotaCpu }}\n limits.memory: {{ projectQuotaMemory }}\n"}]},"common":{"databaseSecret":{"name":"","secretManifest":{}},"flyteNamespaceTemplate":{"enabled":false},"ingress":{"albSSLRedirect":false,"annotations":{"nginx.ingress.kubernetes.io/app-root":"/console"},"enabled":true,"host":"","separateGrpcIngress":false,"separateGrpcIngressAnnotations":{"nginx.ingress.kubernetes.io/backend-protocol":"GRPC"},"tls":{"enabled":false},"webpackHMR":true}},"configmap":{"adminServer":{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}},"catalog":{"catalog-cache":{"endpoint":"datacatalog:89","insecure":true,"type":"datacatalog"}},"console":{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"},"copilot":{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1","name":"flyte-copilot-","start-timeout":"30s"}}}},"core":{"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":
"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}},"datacatalogServer":{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}},"domain":{"domains":[{"id":"development","name":"development"},{"id":"staging","name":"staging"},{"id":"production","name":"production"}]},"enabled_plugins":{"tasks":{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sensor":"agent-service","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}}},"k8s":{"plugins":{"k8s":{"default-cpus":"100m","default-env-from-configmaps":[],"default-env-from-secrets":[],"default-env-vars":[{"FLYTE_AWS_ENDPOINT":"http://minio.flyte:9000"},{"FLYTE_AWS_ACCESS_KEY_ID":"minio"},{"FLYTE_AWS_SECRET_ACCESS_KEY":"miniostorage"}],"default-memory":"200Mi"}}},"logger":{"logger":{"level":5,"show-source":true}},"remoteData":{"remoteData":{"region":"us-east-1","scheme":"local","signedUrls":{"durationMinutes":3}}},"resource_manager":{"propeller":{"resourcemanager":{"redis":null,"type":"noop"}}},"task_logs":{"plugins":{"logs":{"cloudwatch-enabled":false,"kubernetes-enabled":true,"kubernetes-template-uri":"http://localhost:30082/#/log/{{ \"{{\" }} .namespace {{ \"}}\" }}/{{ \"{{\" }} .podName {{ \"}}\" }}/pod?namespace={{ \"{{\" }} .namespace {{ \"}}\" }}"}}},"task_resource_defaults":{"task_resources":{"defaults":{"cpu":"100m","memory":"200Mi","storage":"5Mi"},"limits":{"cpu":2,"gpu":1,"memory":"1Gi","storage":"20Mi"}}}},"datacatalog":{"affinity":{},"configPath":"/etc/datacatalog/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/datacatalog","tag":"v1.13.1"},"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"500m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"type":"NodePort"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"db":{"admin":{"database":{"dbname":"flyteadmin","host":"postgres","port":5432,"username":"postgres"}},"datacatalog":{"database":{"dbname":"datacatalog","host":"postgres","port":5432,"username":"postgres"}}},"deployRedoc":true,"flyteadmin":{"additionalVolumeMounts":[],"additionalVolumes":[],"affinity":{},"configPath":"/etc/flyte/config/*.yaml","env":[],"image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flyteadmin","tag":"v1.13.1"},"initialProjects":["flytesnacks","flytetester","flyteexamples"],"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"secrets":{},"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"loadBalancerSourceRanges":[],"type":"ClusterIP"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"flyteconsole":{"affinity":{},"ga":{"enabled":true,"tracking_id":"G-0QW4DJWJ20"},"image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flyteconsole","tag":"v1.17.1"},"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"500m","memory":"275Mi"},"requests":{"cpu":"10m","memory":"250Mi"}},"service":{"annotations":{},"type":"ClusterIP"},"tolerations":[]},"flytepropeller":{"aff
inity":{},"cacheSizeMbs":0,"configPath":"/etc/flyte/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flytepropeller","tag":"v1.13.1"},"manager":false,"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"200m","ephemeral-storage":"100Mi","memory":"200Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"flytescheduler":{"affinity":{},"configPath":"/etc/flyte/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flytescheduler","tag":"v1.13.1"},"nodeSelector":{},"podAnnotations":{},"resources":{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"secrets":{},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"storage":{"bucketName":"my-s3-bucket","custom":{},"gcs":null,"s3":{"region":"us-east-1"},"type":"sandbox"},"webhook":{"enabled":true,"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"type":"ClusterIP"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]}},"workflow_notifications":{"config":{},"enabled":false},"workflow_scheduler":{"enabled":true,"type":"native"}}` | ------------------------------------------------------------------- Core System settings This section consists of Core components of Flyte and their deployment settings. This includes FlyteAdmin service, Datacatalog, FlytePropeller and Flyteconsole | | flyte.cluster_resource_manager | object | `{"config":{"cluster_resources":{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refresh":"5m","refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}},"enabled":true,"service_account_name":"flyteadmin","templates":[{"key":"aa_namespace","value":"apiVersion: v1\nkind: Namespace\nmetadata:\n name: {{ namespace }}\nspec:\n finalizers:\n - kubernetes\n"},{"key":"ab_project_resource_quota","value":"apiVersion: v1\nkind: ResourceQuota\nmetadata:\n name: project-quota\n namespace: {{ namespace }}\nspec:\n hard:\n limits.cpu: {{ projectQuotaCpu }}\n limits.memory: {{ projectQuotaMemory }}\n"}]}` | Configuration for the Cluster resource manager component. This is an optional component, that enables automatic cluster configuration. This is useful to set default quotas, manage namespaces etc that map to a project/domain | | flyte.cluster_resource_manager.config.cluster_resources | object | `{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refresh":"5m","refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}` | ClusterResource parameters Refer to the [structure](https://pkg.go.dev/github.com/lyft/flyteadmin@v0.3.37/pkg/runtime/interfaces#ClusterResourceConfig) to customize. 
| | flyte.cluster_resource_manager.config.cluster_resources.standaloneDeployment | bool | `false` | Starts the cluster resource manager in standalone mode with requisite auth credentials to call flyteadmin service endpoints | @@ -91,15 +91,15 @@ helm upgrade -f values-sandbox.yaml flyte . | flyte.common.ingress.separateGrpcIngressAnnotations | object | `{"nginx.ingress.kubernetes.io/backend-protocol":"GRPC"}` | - Extra Ingress annotations applied only to the GRPC ingress. Only makes sense if `separateGrpcIngress` is enabled. | | flyte.common.ingress.tls | object | `{"enabled":false}` | - TLS Settings | | flyte.common.ingress.webpackHMR | bool | `true` | - Enable or disable HMR route to flyteconsole. This is useful only for frontend development. | -| flyte.configmap | object | `{"adminServer":{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}},"catalog":{"catalog-cache":{"endpoint":"datacatalog:89","insecure":true,"type":"datacatalog"}},"console":{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"},"copilot":{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1","name":"flyte-copilot-","start-timeout":"30s"}}}},"core":{"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}},"datacatalogServer":{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}},"domain":{"domains":[{"id":"development","name":"development"},{"id":"staging","name":"staging"},{"id":"production","name":"production"}]},"enabled_plugins":{"tasks":{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sensor":"agent-service","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}}},"k8s":{"plugins":{"k8s":{"default-cpus":"100m","default-env-from-configmaps":[],"default-env-from-secrets":[],"default-env-vars":[{"FLYTE_AWS_ENDPOINT":"http://minio.flyte:9000"},{"FLYTE_AWS_ACCESS_KEY_ID":"minio"},{"FLYTE_AWS_SECRET_ACCESS_KEY":"miniostorag
e"}],"default-memory":"200Mi"}}},"logger":{"logger":{"level":5,"show-source":true}},"remoteData":{"remoteData":{"region":"us-east-1","scheme":"local","signedUrls":{"durationMinutes":3}}},"resource_manager":{"propeller":{"resourcemanager":{"redis":null,"type":"noop"}}},"task_logs":{"plugins":{"logs":{"cloudwatch-enabled":false,"kubernetes-enabled":true,"kubernetes-template-uri":"http://localhost:30082/#/log/{{ \"{{\" }} .namespace {{ \"}}\" }}/{{ \"{{\" }} .podName {{ \"}}\" }}/pod?namespace={{ \"{{\" }} .namespace {{ \"}}\" }}"}}},"task_resource_defaults":{"task_resources":{"defaults":{"cpu":"100m","memory":"200Mi","storage":"5Mi"},"limits":{"cpu":2,"gpu":1,"memory":"1Gi","storage":"20Mi"}}}}` | ----------------------------------------------------------------- CONFIGMAPS SETTINGS | +| flyte.configmap | object | `{"adminServer":{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}},"catalog":{"catalog-cache":{"endpoint":"datacatalog:89","insecure":true,"type":"datacatalog"}},"console":{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"},"copilot":{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1","name":"flyte-copilot-","start-timeout":"30s"}}}},"core":{"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}},"datacatalogServer":{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}},"domain":{"domains":[{"id":"development","name":"development"},{"id":"staging","name":"staging"},{"id":"production","name":"production"}]},"enabled_plugins":{"tasks":{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sensor":"agent-service","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}}},"k8s":{"plugins":{"k8s":{"default-cpus":"100m","default-env-from-configmaps":[],"default-env-from-secrets":[],"default-env-vars":[{"FLYTE_AWS_ENDPOINT":"http://minio.flyte:9000"},{"FLYTE_AWS_ACCESS_KEY_ID":"minio"},{"FLYTE_AWS
_SECRET_ACCESS_KEY":"miniostorage"}],"default-memory":"200Mi"}}},"logger":{"logger":{"level":5,"show-source":true}},"remoteData":{"remoteData":{"region":"us-east-1","scheme":"local","signedUrls":{"durationMinutes":3}}},"resource_manager":{"propeller":{"resourcemanager":{"redis":null,"type":"noop"}}},"task_logs":{"plugins":{"logs":{"cloudwatch-enabled":false,"kubernetes-enabled":true,"kubernetes-template-uri":"http://localhost:30082/#/log/{{ \"{{\" }} .namespace {{ \"}}\" }}/{{ \"{{\" }} .podName {{ \"}}\" }}/pod?namespace={{ \"{{\" }} .namespace {{ \"}}\" }}"}}},"task_resource_defaults":{"task_resources":{"defaults":{"cpu":"100m","memory":"200Mi","storage":"5Mi"},"limits":{"cpu":2,"gpu":1,"memory":"1Gi","storage":"20Mi"}}}}` | ----------------------------------------------------------------- CONFIGMAPS SETTINGS | | flyte.configmap.adminServer | object | `{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}}` | FlyteAdmin server configuration | | flyte.configmap.adminServer.auth | object | `{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}}` | Authentication configuration | | flyte.configmap.adminServer.server.security.secure | bool | `false` | Controls whether to serve requests over SSL/TLS. | | flyte.configmap.adminServer.server.security.useAuth | bool | `false` | Controls whether to enforce authentication. Follow the guide in https://docs.flyte.org/ on how to setup authentication. 
| | flyte.configmap.catalog | object | `{"catalog-cache":{"endpoint":"datacatalog:89","insecure":true,"type":"datacatalog"}}` | Catalog Client configuration [structure](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/nodes/task/catalog#Config) Additional advanced Catalog configuration [here](https://pkg.go.dev/github.com/lyft/flyteplugins/go/tasks/pluginmachinery/catalog#Config) | | flyte.configmap.console | object | `{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"}` | Configuration for Flyte console UI | -| flyte.configmap.copilot | object | `{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1","name":"flyte-copilot-","start-timeout":"30s"}}}}` | Copilot configuration | -| flyte.configmap.copilot.plugins.k8s.co-pilot | object | `{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1","name":"flyte-copilot-","start-timeout":"30s"}` | Structure documented [here](https://pkg.go.dev/github.com/lyft/flyteplugins@v0.5.28/go/tasks/pluginmachinery/flytek8s/config#FlyteCoPilotConfig) | +| flyte.configmap.copilot | object | `{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1","name":"flyte-copilot-","start-timeout":"30s"}}}}` | Copilot configuration | +| flyte.configmap.copilot.plugins.k8s.co-pilot | object | `{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1","name":"flyte-copilot-","start-timeout":"30s"}` | Structure documented [here](https://pkg.go.dev/github.com/lyft/flyteplugins@v0.5.28/go/tasks/pluginmachinery/flytek8s/config#FlyteCoPilotConfig) | | flyte.configmap.core | object | `{"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}}` | Core propeller configuration | | flyte.configmap.core.propeller | object | `{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"}` | follows the structure specified [here](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/config). | | flyte.configmap.datacatalogServer | object | `{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}}` | Datacatalog server config | @@ -120,7 +120,7 @@ helm upgrade -f values-sandbox.yaml flyte . 
| flyte.datacatalog.configPath | string | `"/etc/datacatalog/config/*.yaml"` | Default regex string for searching configuration files | | flyte.datacatalog.image.pullPolicy | string | `"IfNotPresent"` | Docker image pull policy | | flyte.datacatalog.image.repository | string | `"cr.flyte.org/flyteorg/datacatalog"` | Docker image for Datacatalog deployment | -| flyte.datacatalog.image.tag | string | `"v1.13.1-rc1"` | Docker image tag | +| flyte.datacatalog.image.tag | string | `"v1.13.1"` | Docker image tag | | flyte.datacatalog.nodeSelector | object | `{}` | nodeSelector for Datacatalog deployment | | flyte.datacatalog.podAnnotations | object | `{}` | Annotations for Datacatalog pods | | flyte.datacatalog.replicaCount | int | `1` | Replicas count for Datacatalog deployment | @@ -136,7 +136,7 @@ helm upgrade -f values-sandbox.yaml flyte . | flyte.flyteadmin.env | list | `[]` | Additional flyteadmin container environment variables e.g. SendGrid's API key - name: SENDGRID_API_KEY value: "" e.g. secret environment variable (you can combine it with .additionalVolumes): - name: SENDGRID_API_KEY valueFrom: secretKeyRef: name: sendgrid-secret key: api_key | | flyte.flyteadmin.image.pullPolicy | string | `"IfNotPresent"` | Docker image pull policy | | flyte.flyteadmin.image.repository | string | `"cr.flyte.org/flyteorg/flyteadmin"` | Docker image for Flyteadmin deployment | -| flyte.flyteadmin.image.tag | string | `"v1.13.1-rc1"` | Docker image tag | +| flyte.flyteadmin.image.tag | string | `"v1.13.1"` | Docker image tag | | flyte.flyteadmin.initialProjects | list | `["flytesnacks","flytetester","flyteexamples"]` | Initial projects to create | | flyte.flyteadmin.nodeSelector | object | `{}` | nodeSelector for Flyteadmin deployment | | flyte.flyteadmin.podAnnotations | object | `{}` | Annotations for Flyteadmin pods | @@ -162,7 +162,7 @@ helm upgrade -f values-sandbox.yaml flyte . | flyte.flytepropeller.configPath | string | `"/etc/flyte/config/*.yaml"` | Default regex string for searching configuration files | | flyte.flytepropeller.image.pullPolicy | string | `"IfNotPresent"` | Docker image pull policy | | flyte.flytepropeller.image.repository | string | `"cr.flyte.org/flyteorg/flytepropeller"` | Docker image for Flytepropeller deployment | -| flyte.flytepropeller.image.tag | string | `"v1.13.1-rc1"` | Docker image tag | +| flyte.flytepropeller.image.tag | string | `"v1.13.1"` | Docker image tag | | flyte.flytepropeller.nodeSelector | object | `{}` | nodeSelector for Flytepropeller deployment | | flyte.flytepropeller.podAnnotations | object | `{}` | Annotations for Flytepropeller pods | | flyte.flytepropeller.replicaCount | int | `1` | Replicas count for Flytepropeller deployment | @@ -176,7 +176,7 @@ helm upgrade -f values-sandbox.yaml flyte . 
| flyte.flytescheduler.configPath | string | `"/etc/flyte/config/*.yaml"` | Default regex string for searching configuration files | | flyte.flytescheduler.image.pullPolicy | string | `"IfNotPresent"` | Docker image pull policy | | flyte.flytescheduler.image.repository | string | `"cr.flyte.org/flyteorg/flytescheduler"` | Docker image for Flytescheduler deployment | -| flyte.flytescheduler.image.tag | string | `"v1.13.1-rc1"` | Docker image tag | +| flyte.flytescheduler.image.tag | string | `"v1.13.1"` | Docker image tag | | flyte.flytescheduler.nodeSelector | object | `{}` | nodeSelector for Flytescheduler deployment | | flyte.flytescheduler.podAnnotations | object | `{}` | Annotations for Flytescheduler pods | | flyte.flytescheduler.resources | object | `{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}}` | Default resources requests and limits for Flytescheduler deployment | diff --git a/charts/flyte/values.yaml b/charts/flyte/values.yaml index 6444f2d334..9081162782 100755 --- a/charts/flyte/values.yaml +++ b/charts/flyte/values.yaml @@ -15,7 +15,7 @@ flyte: # -- Docker image for Flyteadmin deployment repository: cr.flyte.org/flyteorg/flyteadmin # FLYTEADMIN_IMAGE # -- Docker image tag - tag: v1.13.1-rc1 # FLYTEADMIN_TAG + tag: v1.13.1 # FLYTEADMIN_TAG # -- Docker image pull policy pullPolicy: IfNotPresent # -- Additional flyteadmin container environment variables @@ -83,7 +83,7 @@ flyte: # -- Docker image for Flytescheduler deployment repository: cr.flyte.org/flyteorg/flytescheduler # FLYTESCHEDULER_IMAGE # -- Docker image tag - tag: v1.13.1-rc1 # FLYTESCHEDULER_TAG + tag: v1.13.1 # FLYTESCHEDULER_TAG # -- Docker image pull policy pullPolicy: IfNotPresent # -- Default resources requests and limits for Flytescheduler deployment @@ -128,7 +128,7 @@ flyte: # -- Docker image for Datacatalog deployment repository: cr.flyte.org/flyteorg/datacatalog # DATACATALOG_IMAGE # -- Docker image tag - tag: v1.13.1-rc1 # DATACATALOG_TAG + tag: v1.13.1 # DATACATALOG_TAG # -- Docker image pull policy pullPolicy: IfNotPresent # -- Default resources requests and limits for Datacatalog deployment @@ -177,7 +177,7 @@ flyte: # -- Docker image for Flytepropeller deployment repository: cr.flyte.org/flyteorg/flytepropeller # FLYTEPROPELLER_IMAGE # -- Docker image tag - tag: v1.13.1-rc1 # FLYTEPROPELLER_TAG + tag: v1.13.1 # FLYTEPROPELLER_TAG # -- Docker image pull policy pullPolicy: IfNotPresent # -- Default resources requests and limits for Flytepropeller deployment @@ -471,7 +471,7 @@ flyte: # -- Structure documented [here](https://pkg.go.dev/github.com/lyft/flyteplugins@v0.5.28/go/tasks/pluginmachinery/flytek8s/config#FlyteCoPilotConfig) co-pilot: name: flyte-copilot- - image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1 # FLYTECOPILOT_IMAGE + image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1 # FLYTECOPILOT_IMAGE start-timeout: 30s # -- Core propeller configuration diff --git a/deployment/eks/flyte_aws_scheduler_helm_generated.yaml b/deployment/eks/flyte_aws_scheduler_helm_generated.yaml index 2468bf049c..54dd0fa261 100644 --- a/deployment/eks/flyte_aws_scheduler_helm_generated.yaml +++ b/deployment/eks/flyte_aws_scheduler_helm_generated.yaml @@ -430,7 +430,7 @@ data: plugins: k8s: co-pilot: - image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1 + image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1 name: flyte-copilot- start-timeout: 30s core.yaml: | @@ -876,7 +876,7 @@ spec: - /etc/flyte/config/*.yaml - migrate - run - 
image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: run-migrations securityContext: @@ -897,7 +897,7 @@ spec: - flytesnacks - flytetester - flyteexamples - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: seed-projects securityContext: @@ -915,7 +915,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - sync - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources securityContext: @@ -932,7 +932,7 @@ spec: - mountPath: /etc/secrets/ name: admin-secrets - name: generate-secrets - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" command: ["/bin/sh", "-c"] args: @@ -959,7 +959,7 @@ spec: - --config - /etc/flyte/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: flyteadmin ports: @@ -1066,7 +1066,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources volumeMounts: @@ -1196,7 +1196,7 @@ spec: - /etc/datacatalog/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1" imagePullPolicy: "IfNotPresent" name: run-migrations volumeMounts: @@ -1214,7 +1214,7 @@ spec: - --config - /etc/datacatalog/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1" imagePullPolicy: "IfNotPresent" name: datacatalog ports: @@ -1277,7 +1277,7 @@ spec: template: metadata: annotations: - configChecksum: "ea7955d463445fa62b4096a37caba7b9049bbbaf909a75ccb91c8bfbc35178b" + configChecksum: "cf07450d68fd5fcd2055e08e48664a18edabf2e13904b93096f2f6aa7ea7f5c" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -1305,7 +1305,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: @@ -1359,9 +1359,9 @@ spec: labels: app: flyte-pod-webhook app.kubernetes.io/name: flyte-pod-webhook - app.kubernetes.io/version: v1.13.1-rc1 + app.kubernetes.io/version: v1.13.1 annotations: - configChecksum: "ea7955d463445fa62b4096a37caba7b9049bbbaf909a75ccb91c8bfbc35178b" + configChecksum: "cf07450d68fd5fcd2055e08e48664a18edabf2e13904b93096f2f6aa7ea7f5c" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: @@ -1375,7 +1375,7 @@ spec: serviceAccountName: flyte-pod-webhook initContainers: - name: generate-secrets - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -1402,7 +1402,7 @@ spec: mountPath: /etc/flyte/config containers: - name: webhook - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1" imagePullPolicy: "IfNotPresent" command: - flytepropeller diff --git a/deployment/eks/flyte_helm_controlplane_generated.yaml 
b/deployment/eks/flyte_helm_controlplane_generated.yaml index 4f98e96224..ddc2116a6d 100644 --- a/deployment/eks/flyte_helm_controlplane_generated.yaml +++ b/deployment/eks/flyte_helm_controlplane_generated.yaml @@ -581,7 +581,7 @@ spec: - /etc/flyte/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: run-migrations securityContext: @@ -602,7 +602,7 @@ spec: - flytesnacks - flytetester - flyteexamples - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: seed-projects securityContext: @@ -620,7 +620,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - sync - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources securityContext: @@ -637,7 +637,7 @@ spec: - mountPath: /etc/secrets/ name: admin-secrets - name: generate-secrets - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" command: ["/bin/sh", "-c"] args: @@ -664,7 +664,7 @@ spec: - --config - /etc/flyte/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: flyteadmin ports: @@ -771,7 +771,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources volumeMounts: @@ -901,7 +901,7 @@ spec: - /etc/datacatalog/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1" imagePullPolicy: "IfNotPresent" name: run-migrations volumeMounts: @@ -919,7 +919,7 @@ spec: - --config - /etc/datacatalog/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1" imagePullPolicy: "IfNotPresent" name: datacatalog ports: @@ -1002,7 +1002,7 @@ spec: - precheck - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1" imagePullPolicy: "IfNotPresent" name: flytescheduler-check securityContext: @@ -1022,7 +1022,7 @@ spec: - run - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1" imagePullPolicy: "IfNotPresent" name: flytescheduler ports: diff --git a/deployment/eks/flyte_helm_dataplane_generated.yaml b/deployment/eks/flyte_helm_dataplane_generated.yaml index d4780f1f25..da234512fb 100644 --- a/deployment/eks/flyte_helm_dataplane_generated.yaml +++ b/deployment/eks/flyte_helm_dataplane_generated.yaml @@ -94,7 +94,7 @@ data: plugins: k8s: co-pilot: - image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1 + image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1 name: flyte-copilot- start-timeout: 30s core.yaml: | @@ -428,7 +428,7 @@ spec: template: metadata: annotations: - configChecksum: "ea7955d463445fa62b4096a37caba7b9049bbbaf909a75ccb91c8bfbc35178b" + configChecksum: "cf07450d68fd5fcd2055e08e48664a18edabf2e13904b93096f2f6aa7ea7f5c" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -456,7 +456,7 @@ spec: 
valueFrom: fieldRef: fieldPath: metadata.namespace - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: @@ -510,9 +510,9 @@ spec: labels: app: flyte-pod-webhook app.kubernetes.io/name: flyte-pod-webhook - app.kubernetes.io/version: v1.13.1-rc1 + app.kubernetes.io/version: v1.13.1 annotations: - configChecksum: "ea7955d463445fa62b4096a37caba7b9049bbbaf909a75ccb91c8bfbc35178b" + configChecksum: "cf07450d68fd5fcd2055e08e48664a18edabf2e13904b93096f2f6aa7ea7f5c" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: @@ -526,7 +526,7 @@ spec: serviceAccountName: flyte-pod-webhook initContainers: - name: generate-secrets - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -553,7 +553,7 @@ spec: mountPath: /etc/flyte/config containers: - name: webhook - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1" imagePullPolicy: "IfNotPresent" command: - flytepropeller diff --git a/deployment/eks/flyte_helm_generated.yaml b/deployment/eks/flyte_helm_generated.yaml index db89ef2cf0..3a2aa378b2 100644 --- a/deployment/eks/flyte_helm_generated.yaml +++ b/deployment/eks/flyte_helm_generated.yaml @@ -461,7 +461,7 @@ data: plugins: k8s: co-pilot: - image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1 + image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1 name: flyte-copilot- start-timeout: 30s core.yaml: | @@ -907,7 +907,7 @@ spec: - /etc/flyte/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: run-migrations securityContext: @@ -928,7 +928,7 @@ spec: - flytesnacks - flytetester - flyteexamples - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: seed-projects securityContext: @@ -946,7 +946,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - sync - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources securityContext: @@ -963,7 +963,7 @@ spec: - mountPath: /etc/secrets/ name: admin-secrets - name: generate-secrets - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" command: ["/bin/sh", "-c"] args: @@ -990,7 +990,7 @@ spec: - --config - /etc/flyte/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: flyteadmin ports: @@ -1097,7 +1097,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources volumeMounts: @@ -1227,7 +1227,7 @@ spec: - /etc/datacatalog/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1" imagePullPolicy: "IfNotPresent" name: run-migrations volumeMounts: @@ -1245,7 +1245,7 @@ spec: - --config - /etc/datacatalog/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" + image: 
"cr.flyte.org/flyteorg/datacatalog:v1.13.1" imagePullPolicy: "IfNotPresent" name: datacatalog ports: @@ -1328,7 +1328,7 @@ spec: - precheck - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1" imagePullPolicy: "IfNotPresent" name: flytescheduler-check securityContext: @@ -1348,7 +1348,7 @@ spec: - run - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1" imagePullPolicy: "IfNotPresent" name: flytescheduler ports: @@ -1407,7 +1407,7 @@ spec: template: metadata: annotations: - configChecksum: "ea7955d463445fa62b4096a37caba7b9049bbbaf909a75ccb91c8bfbc35178b" + configChecksum: "cf07450d68fd5fcd2055e08e48664a18edabf2e13904b93096f2f6aa7ea7f5c" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -1435,7 +1435,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: @@ -1489,9 +1489,9 @@ spec: labels: app: flyte-pod-webhook app.kubernetes.io/name: flyte-pod-webhook - app.kubernetes.io/version: v1.13.1-rc1 + app.kubernetes.io/version: v1.13.1 annotations: - configChecksum: "ea7955d463445fa62b4096a37caba7b9049bbbaf909a75ccb91c8bfbc35178b" + configChecksum: "cf07450d68fd5fcd2055e08e48664a18edabf2e13904b93096f2f6aa7ea7f5c" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: @@ -1505,7 +1505,7 @@ spec: serviceAccountName: flyte-pod-webhook initContainers: - name: generate-secrets - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -1532,7 +1532,7 @@ spec: mountPath: /etc/flyte/config containers: - name: webhook - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1" imagePullPolicy: "IfNotPresent" command: - flytepropeller diff --git a/deployment/gcp/flyte_helm_controlplane_generated.yaml b/deployment/gcp/flyte_helm_controlplane_generated.yaml index aa84954510..43245474e9 100644 --- a/deployment/gcp/flyte_helm_controlplane_generated.yaml +++ b/deployment/gcp/flyte_helm_controlplane_generated.yaml @@ -596,7 +596,7 @@ spec: - /etc/flyte/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: run-migrations securityContext: @@ -617,7 +617,7 @@ spec: - flytesnacks - flytetester - flyteexamples - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: seed-projects securityContext: @@ -635,7 +635,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - sync - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources securityContext: @@ -652,7 +652,7 @@ spec: - mountPath: /etc/secrets/ name: admin-secrets - name: generate-secrets - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" command: ["/bin/sh", "-c"] args: @@ -679,7 +679,7 @@ spec: - --config - /etc/flyte/config/*.yaml - serve - image: 
"cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: flyteadmin ports: @@ -786,7 +786,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources volumeMounts: @@ -916,7 +916,7 @@ spec: - /etc/datacatalog/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1" imagePullPolicy: "IfNotPresent" name: run-migrations volumeMounts: @@ -934,7 +934,7 @@ spec: - --config - /etc/datacatalog/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1" imagePullPolicy: "IfNotPresent" name: datacatalog ports: @@ -1017,7 +1017,7 @@ spec: - precheck - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1" imagePullPolicy: "IfNotPresent" name: flytescheduler-check securityContext: @@ -1037,7 +1037,7 @@ spec: - run - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1" imagePullPolicy: "IfNotPresent" name: flytescheduler ports: diff --git a/deployment/gcp/flyte_helm_dataplane_generated.yaml b/deployment/gcp/flyte_helm_dataplane_generated.yaml index 3d8f70b15b..6cf03628f3 100644 --- a/deployment/gcp/flyte_helm_dataplane_generated.yaml +++ b/deployment/gcp/flyte_helm_dataplane_generated.yaml @@ -94,7 +94,7 @@ data: plugins: k8s: co-pilot: - image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1 + image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1 name: flyte-copilot- start-timeout: 30s core.yaml: | @@ -436,7 +436,7 @@ spec: template: metadata: annotations: - configChecksum: "f652cb79ec4760da1c900d5bf0c530a8d05b34380fc1be967080d35945bc3b1" + configChecksum: "3ceda510332c8f7f6cd2cdd75234ef366e2b31dfe0632a2a93cc25bc326cf28" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -463,7 +463,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: @@ -517,9 +517,9 @@ spec: labels: app: flyte-pod-webhook app.kubernetes.io/name: flyte-pod-webhook - app.kubernetes.io/version: v1.13.1-rc1 + app.kubernetes.io/version: v1.13.1 annotations: - configChecksum: "f652cb79ec4760da1c900d5bf0c530a8d05b34380fc1be967080d35945bc3b1" + configChecksum: "3ceda510332c8f7f6cd2cdd75234ef366e2b31dfe0632a2a93cc25bc326cf28" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: @@ -533,7 +533,7 @@ spec: serviceAccountName: flyte-pod-webhook initContainers: - name: generate-secrets - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -560,7 +560,7 @@ spec: mountPath: /etc/flyte/config containers: - name: webhook - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1" imagePullPolicy: "IfNotPresent" command: - flytepropeller diff --git a/deployment/gcp/flyte_helm_generated.yaml b/deployment/gcp/flyte_helm_generated.yaml index 9f501cf916..5992af2081 100644 --- 
a/deployment/gcp/flyte_helm_generated.yaml +++ b/deployment/gcp/flyte_helm_generated.yaml @@ -474,7 +474,7 @@ data: plugins: k8s: co-pilot: - image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1 + image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1 name: flyte-copilot- start-timeout: 30s core.yaml: | @@ -930,7 +930,7 @@ spec: - /etc/flyte/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: run-migrations securityContext: @@ -951,7 +951,7 @@ spec: - flytesnacks - flytetester - flyteexamples - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: seed-projects securityContext: @@ -969,7 +969,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - sync - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources securityContext: @@ -986,7 +986,7 @@ spec: - mountPath: /etc/secrets/ name: admin-secrets - name: generate-secrets - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" command: ["/bin/sh", "-c"] args: @@ -1013,7 +1013,7 @@ spec: - --config - /etc/flyte/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: flyteadmin ports: @@ -1120,7 +1120,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources volumeMounts: @@ -1250,7 +1250,7 @@ spec: - /etc/datacatalog/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1" imagePullPolicy: "IfNotPresent" name: run-migrations volumeMounts: @@ -1268,7 +1268,7 @@ spec: - --config - /etc/datacatalog/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1" imagePullPolicy: "IfNotPresent" name: datacatalog ports: @@ -1351,7 +1351,7 @@ spec: - precheck - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1" imagePullPolicy: "IfNotPresent" name: flytescheduler-check securityContext: @@ -1371,7 +1371,7 @@ spec: - run - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1" imagePullPolicy: "IfNotPresent" name: flytescheduler ports: @@ -1430,7 +1430,7 @@ spec: template: metadata: annotations: - configChecksum: "f652cb79ec4760da1c900d5bf0c530a8d05b34380fc1be967080d35945bc3b1" + configChecksum: "3ceda510332c8f7f6cd2cdd75234ef366e2b31dfe0632a2a93cc25bc326cf28" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -1457,7 +1457,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: @@ -1511,9 +1511,9 @@ spec: labels: app: flyte-pod-webhook app.kubernetes.io/name: flyte-pod-webhook - app.kubernetes.io/version: v1.13.1-rc1 + 
app.kubernetes.io/version: v1.13.1 annotations: - configChecksum: "f652cb79ec4760da1c900d5bf0c530a8d05b34380fc1be967080d35945bc3b1" + configChecksum: "3ceda510332c8f7f6cd2cdd75234ef366e2b31dfe0632a2a93cc25bc326cf28" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: @@ -1527,7 +1527,7 @@ spec: serviceAccountName: flyte-pod-webhook initContainers: - name: generate-secrets - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -1554,7 +1554,7 @@ spec: mountPath: /etc/flyte/config containers: - name: webhook - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1" imagePullPolicy: "IfNotPresent" command: - flytepropeller diff --git a/deployment/sandbox-binary/flyte_sandbox_binary_helm_generated.yaml b/deployment/sandbox-binary/flyte_sandbox_binary_helm_generated.yaml index ef73b3f145..e7a4425a5f 100644 --- a/deployment/sandbox-binary/flyte_sandbox_binary_helm_generated.yaml +++ b/deployment/sandbox-binary/flyte_sandbox_binary_helm_generated.yaml @@ -116,7 +116,7 @@ data: stackdriver-enabled: false k8s: co-pilot: - image: "cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytecopilot:v1.13.1" k8s-array: logs: config: @@ -359,7 +359,7 @@ spec: app.kubernetes.io/instance: flyte app.kubernetes.io/component: flyte-binary annotations: - checksum/configuration: 2761c022d974c8ebb20ba44c0363cd80e46e2afd24ea916ebfe6b0f242f0418e + checksum/configuration: f0ece2b70412090c94dd394c3b7c213edac6e7e6d53b0696ce1b1e748176bb45 checksum/configuration-secret: d5d93f4e67780b21593dc3799f0f6682aab0765e708e4020939975d14d44f929 checksum/cluster-resource-templates: 7dfa59f3d447e9c099b8f8ffad3af466fecbc9cf9f8c97295d9634254a55d4ae spec: diff --git a/deployment/sandbox/flyte_helm_generated.yaml b/deployment/sandbox/flyte_helm_generated.yaml index f1f71817b6..4bdc97df64 100644 --- a/deployment/sandbox/flyte_helm_generated.yaml +++ b/deployment/sandbox/flyte_helm_generated.yaml @@ -586,7 +586,7 @@ data: plugins: k8s: co-pilot: - image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1 + image: cr.flyte.org/flyteorg/flytecopilot:v1.13.1 name: flyte-copilot- start-timeout: 30s core.yaml: | @@ -6714,7 +6714,7 @@ spec: - /etc/flyte/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: run-migrations securityContext: @@ -6734,7 +6734,7 @@ spec: - flytesnacks - flytetester - flyteexamples - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: seed-projects securityContext: @@ -6751,7 +6751,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - sync - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources securityContext: @@ -6767,7 +6767,7 @@ spec: - mountPath: /etc/secrets/ name: admin-secrets - name: generate-secrets - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" command: ["/bin/sh", "-c"] args: @@ -6794,7 +6794,7 @@ spec: - --config - /etc/flyte/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: 
"IfNotPresent" name: flyteadmin ports: @@ -6891,7 +6891,7 @@ spec: - /etc/flyte/config/*.yaml - clusterresource - run - image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flyteadmin:v1.13.1" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources volumeMounts: @@ -7016,7 +7016,7 @@ spec: - /etc/datacatalog/config/*.yaml - migrate - run - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1" imagePullPolicy: "IfNotPresent" name: run-migrations volumeMounts: @@ -7033,7 +7033,7 @@ spec: - --config - /etc/datacatalog/config/*.yaml - serve - image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/datacatalog:v1.13.1" imagePullPolicy: "IfNotPresent" name: datacatalog ports: @@ -7106,7 +7106,7 @@ spec: - precheck - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1" imagePullPolicy: "IfNotPresent" name: flytescheduler-check securityContext: @@ -7125,7 +7125,7 @@ spec: - run - --config - /etc/flyte/config/*.yaml - image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytescheduler:v1.13.1" imagePullPolicy: "IfNotPresent" name: flytescheduler ports: @@ -7181,7 +7181,7 @@ spec: template: metadata: annotations: - configChecksum: "b6a325d0de65783cfab97909bc7202fef4c3efc85edd2b95b5076e3681938f1" + configChecksum: "b535173fc46161f52db8a753f6b82289cdf866def58cd388585d9a78c19363f" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -7208,7 +7208,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: @@ -7255,9 +7255,9 @@ spec: labels: app: flyte-pod-webhook app.kubernetes.io/name: flyte-pod-webhook - app.kubernetes.io/version: v1.13.1-rc1 + app.kubernetes.io/version: v1.13.1 annotations: - configChecksum: "b6a325d0de65783cfab97909bc7202fef4c3efc85edd2b95b5076e3681938f1" + configChecksum: "b535173fc46161f52db8a753f6b82289cdf866def58cd388585d9a78c19363f" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: @@ -7271,7 +7271,7 @@ spec: serviceAccountName: flyte-pod-webhook initContainers: - name: generate-secrets - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -7298,7 +7298,7 @@ spec: mountPath: /etc/flyte/config containers: - name: webhook - image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytepropeller:v1.13.1" imagePullPolicy: "IfNotPresent" command: - flytepropeller diff --git a/docker/sandbox-bundled/manifests/complete-agent.yaml b/docker/sandbox-bundled/manifests/complete-agent.yaml index 3ba7075df8..9b75793981 100644 --- a/docker/sandbox-bundled/manifests/complete-agent.yaml +++ b/docker/sandbox-bundled/manifests/complete-agent.yaml @@ -469,7 +469,7 @@ data: stackdriver-enabled: false k8s: co-pilot: - image: "cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytecopilot:v1.13.1" k8s-array: logs: config: @@ -817,7 +817,7 @@ type: Opaque --- apiVersion: v1 data: - haSharedSecret: dkJZdnpKQ0FYZkhWano2eg== + haSharedSecret: czRSTHlPalFoaXh2eG8ybQ== proxyPassword: "" proxyUsername: "" kind: Secret @@ -1248,7 +1248,7 @@ spec: metadata: 
annotations: checksum/cluster-resource-templates: 6fd9b172465e3089fcc59f738b92b8dc4d8939360c19de8ee65f68b0e7422035 - checksum/configuration: 12c484f191527a693debafaa71bfcd04dbda7bfc87c83e385ea6d5c13188401f + checksum/configuration: c947a76cd5e9d59db63c98247b8944d64351090c65c4741d0ac30b6d17bde944 checksum/configuration-secret: 09216ffaa3d29e14f88b1f30af580d02a2a5e014de4d750b7f275cc07ed4e914 labels: app.kubernetes.io/component: flyte-binary @@ -1414,7 +1414,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: 4c91f059d15ecfed81f3906fb24896c41fde9103a61ace577327d080409423da + checksum/secret: 26c674111980a6c187443e32ee29419ae3678da338eabcc88055efe5b975736a labels: app: docker-registry release: flyte-sandbox diff --git a/docker/sandbox-bundled/manifests/complete.yaml b/docker/sandbox-bundled/manifests/complete.yaml index 5c470d20cf..2841a1c9d8 100644 --- a/docker/sandbox-bundled/manifests/complete.yaml +++ b/docker/sandbox-bundled/manifests/complete.yaml @@ -458,7 +458,7 @@ data: stackdriver-enabled: false k8s: co-pilot: - image: "cr.flyte.org/flyteorg/flytecopilot:v1.13.1-rc1" + image: "cr.flyte.org/flyteorg/flytecopilot:v1.13.1" k8s-array: logs: config: @@ -797,7 +797,7 @@ type: Opaque --- apiVersion: v1 data: - haSharedSecret: WGJQZFpzb2ZDSkU5dmJReQ== + haSharedSecret: bVZ2dDVDNDBYYUZUWDhaOQ== proxyPassword: "" proxyUsername: "" kind: Secret @@ -1195,7 +1195,7 @@ spec: metadata: annotations: checksum/cluster-resource-templates: 6fd9b172465e3089fcc59f738b92b8dc4d8939360c19de8ee65f68b0e7422035 - checksum/configuration: 0c0c4c2401e4d6362921a86660489536d0db8e4e66ae09e429adc54665c68021 + checksum/configuration: dcece0d233748108b134ff58a6c1f2c5eafb960e009c8a55b90734ffc884a435 checksum/configuration-secret: 09216ffaa3d29e14f88b1f30af580d02a2a5e014de4d750b7f275cc07ed4e914 labels: app.kubernetes.io/component: flyte-binary @@ -1361,7 +1361,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: 20145f8b7e37f104163904f86eeb0a46444c157de19f8e675128b04d16598ee4 + checksum/secret: bd5c554c59981c4aeac7afc8c548d72de08d84f5a0c3b874d0122c423370f51a labels: app: docker-registry release: flyte-sandbox diff --git a/docker/sandbox-bundled/manifests/dev.yaml b/docker/sandbox-bundled/manifests/dev.yaml index 787be05725..c0d1d7d5b1 100644 --- a/docker/sandbox-bundled/manifests/dev.yaml +++ b/docker/sandbox-bundled/manifests/dev.yaml @@ -499,7 +499,7 @@ metadata: --- apiVersion: v1 data: - haSharedSecret: R2RwSGJNOERJN2NSWXNQNg== + haSharedSecret: Tmp2T0hScHN2NFJtWUxKVQ== proxyPassword: "" proxyUsername: "" kind: Secret @@ -934,7 +934,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: a110328cf7fce9dfe57fe25438d4902fc3cc661346782bb261c0b6b80fb783d1 + checksum/secret: 599e28a6ad1e5b63c91487bf3fea5d5b413b9ea50a0262e0ac6b00c65db92dc1 labels: app: docker-registry release: flyte-sandbox diff --git a/docs/conf.py b/docs/conf.py index 992b62f91f..24f6feb97e 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -36,7 +36,7 @@ # The short X.Y version version = "" # The full version, including alpha/beta/rc tags -release = "1.13.1-rc1" +release = "1.13.1" # -- General configuration --------------------------------------------------- From add4e4831630995369e2835b2b403ff1aada81d4 Mon Sep 17 00:00:00 2001 From: Kevin Su Date: Wed, 28 Aug 2024 10:09:57 -0700 Subject: [PATCH 45/65] Enable echo 
plugin by default (#5679) Signed-off-by: Kevin Su Signed-off-by: Bugra Gedik --- charts/flyte-binary/README.md | 6 +++--- charts/flyte-binary/eks-production.yaml | 1 + charts/flyte-binary/eks-starter.yaml | 1 + charts/flyte-binary/gke-starter.yaml | 1 + charts/flyte-binary/values.yaml | 1 + charts/flyte-core/README.md | 6 +++--- charts/flyte-core/values-eks.yaml | 1 + charts/flyte-core/values-gcp.yaml | 1 + ...lues-keycloak-idp-flyteclients-without-browser.yaml | 1 + charts/flyte-core/values.yaml | 1 + charts/flyte/README.md | 10 +++++----- charts/flyte/values.yaml | 1 + deployment/eks/flyte_aws_scheduler_helm_generated.yaml | 5 +++-- deployment/eks/flyte_helm_dataplane_generated.yaml | 5 +++-- deployment/eks/flyte_helm_generated.yaml | 5 +++-- deployment/gcp/flyte_helm_dataplane_generated.yaml | 5 +++-- deployment/gcp/flyte_helm_generated.yaml | 5 +++-- .../flyte_sandbox_binary_helm_generated.yaml | 3 ++- deployment/sandbox/flyte_helm_generated.yaml | 5 +++-- docker/sandbox-bundled/manifests/complete-agent.yaml | 7 ++++--- docker/sandbox-bundled/manifests/complete.yaml | 7 ++++--- docker/sandbox-bundled/manifests/dev.yaml | 4 ++-- flyte-single-binary-local.yaml | 1 + flytepropeller/propeller-config.yaml | 1 + 24 files changed, 52 insertions(+), 32 deletions(-) diff --git a/charts/flyte-binary/README.md b/charts/flyte-binary/README.md index 6bcac8f45e..cc39b38cdc 100644 --- a/charts/flyte-binary/README.md +++ b/charts/flyte-binary/README.md @@ -111,9 +111,9 @@ Chart for basic single Flyte executable deployment | deployment.waitForDB.image.pullPolicy | string | `"IfNotPresent"` | | | deployment.waitForDB.image.repository | string | `"postgres"` | | | deployment.waitForDB.image.tag | string | `"15-alpine"` | | -| enabled_plugins.tasks | object | `{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}}` | Tasks specific configuration [structure](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/nodes/task/config#GetConfig) | -| enabled_plugins.tasks.task-plugins | object | `{"default-for-task-types":{"container":"container","container_array":"k8s-array","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}` | Plugins configuration, [structure](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/nodes/task/config#TaskPluginConfig) | -| enabled_plugins.tasks.task-plugins.enabled-plugins | list | `["container","sidecar","k8s-array","agent-service"]` | [Enabled Plugins](https://pkg.go.dev/github.com/lyft/flyteplugins/go/tasks/config#Config). 
Enable sagemaker*, athena if you install the backend plugins | +| enabled_plugins.tasks | object | `{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service","echo"]}}` | Tasks specific configuration [structure](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/nodes/task/config#GetConfig) | +| enabled_plugins.tasks.task-plugins | object | `{"default-for-task-types":{"container":"container","container_array":"k8s-array","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service","echo"]}` | Plugins configuration, [structure](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/nodes/task/config#TaskPluginConfig) | +| enabled_plugins.tasks.task-plugins.enabled-plugins | list | `["container","sidecar","k8s-array","agent-service","echo"]` | [Enabled Plugins](https://pkg.go.dev/github.com/lyft/flyteplugins/go/tasks/config#Config). Enable sagemaker*, athena if you install the backend plugins | | flyte-core-components.admin.disableClusterResourceManager | bool | `false` | | | flyte-core-components.admin.disableScheduler | bool | `false` | | | flyte-core-components.admin.disabled | bool | `false` | | diff --git a/charts/flyte-binary/eks-production.yaml b/charts/flyte-binary/eks-production.yaml index 987269a5aa..11cd55f6cd 100644 --- a/charts/flyte-binary/eks-production.yaml +++ b/charts/flyte-binary/eks-production.yaml @@ -69,6 +69,7 @@ configuration: - sidecar - K8S-ARRAY #used for MapTasks - agent-service + - echo default-for-task-types: - container: container - container_array: K8S-ARRAY diff --git a/charts/flyte-binary/eks-starter.yaml b/charts/flyte-binary/eks-starter.yaml index 20257bf072..215cf5f2ba 100644 --- a/charts/flyte-binary/eks-starter.yaml +++ b/charts/flyte-binary/eks-starter.yaml @@ -69,6 +69,7 @@ configuration: - sidecar - K8S-ARRAY #used for MapTasks - agent-service + - echo default-for-task-types: - container: container - container_array: K8S-ARRAY diff --git a/charts/flyte-binary/gke-starter.yaml b/charts/flyte-binary/gke-starter.yaml index f9052a27fd..9ea53e080c 100644 --- a/charts/flyte-binary/gke-starter.yaml +++ b/charts/flyte-binary/gke-starter.yaml @@ -99,6 +99,7 @@ configuration: - container - sidecar - K8S-ARRAY #used for MapTasks + - echo default-for-task-types: - container: container - container_array: K8S-ARRAY diff --git a/charts/flyte-binary/values.yaml b/charts/flyte-binary/values.yaml index f70b5024d1..0f7261d86e 100644 --- a/charts/flyte-binary/values.yaml +++ b/charts/flyte-binary/values.yaml @@ -399,6 +399,7 @@ enabled_plugins: - sidecar - k8s-array - agent-service + - echo default-for-task-types: container: container sidecar: sidecar diff --git a/charts/flyte-core/README.md b/charts/flyte-core/README.md index ef9814c8da..673ba7b6ef 100644 --- a/charts/flyte-core/README.md +++ b/charts/flyte-core/README.md @@ -102,9 +102,9 @@ helm install gateway bitnami/contour -n flyte | configmap.core.propeller | object | 
`{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"}` | follows the structure specified [here](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/config). | | configmap.datacatalogServer | object | `{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"heartbeat-grace-period-multiplier":3,"max-reservation-heartbeat":"30s","metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}}` | Datacatalog server config | | configmap.domain | object | `{"domains":[{"id":"development","name":"development"},{"id":"staging","name":"staging"},{"id":"production","name":"production"}]}` | Domains configuration for Flyte projects. This enables the specified number of domains across all projects in Flyte. | -| configmap.enabled_plugins.tasks | object | `{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}}` | Tasks specific configuration [structure](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/nodes/task/config#GetConfig) | -| configmap.enabled_plugins.tasks.task-plugins | object | `{"default-for-task-types":{"container":"container","container_array":"k8s-array","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}` | Plugins configuration, [structure](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/nodes/task/config#TaskPluginConfig) | -| configmap.enabled_plugins.tasks.task-plugins.enabled-plugins | list | `["container","sidecar","k8s-array","agent-service"]` | [Enabled Plugins](https://pkg.go.dev/github.com/lyft/flyteplugins/go/tasks/config#Config). Enable sagemaker*, athena if you install the backend plugins | +| configmap.enabled_plugins.tasks | object | `{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service","echo"]}}` | Tasks specific configuration [structure](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/nodes/task/config#GetConfig) | +| configmap.enabled_plugins.tasks.task-plugins | object | `{"default-for-task-types":{"container":"container","container_array":"k8s-array","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service","echo"]}` | Plugins configuration, [structure](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/nodes/task/config#TaskPluginConfig) | +| configmap.enabled_plugins.tasks.task-plugins.enabled-plugins | list | `["container","sidecar","k8s-array","agent-service","echo"]` | [Enabled Plugins](https://pkg.go.dev/github.com/lyft/flyteplugins/go/tasks/config#Config). 
Enable sagemaker*, athena if you install the backend plugins | | configmap.k8s | object | `{"plugins":{"k8s":{"default-cpus":"100m","default-env-vars":[],"default-memory":"100Mi"}}}` | Kubernetes specific Flyte configuration | | configmap.k8s.plugins.k8s | object | `{"default-cpus":"100m","default-env-vars":[],"default-memory":"100Mi"}` | Configuration section for all K8s specific plugins [Configuration structure](https://pkg.go.dev/github.com/lyft/flyteplugins/go/tasks/pluginmachinery/flytek8s/config) | | configmap.remoteData.remoteData.region | string | `"us-east-1"` | | diff --git a/charts/flyte-core/values-eks.yaml b/charts/flyte-core/values-eks.yaml index 0ab0dfc150..5a1cc1b94d 100644 --- a/charts/flyte-core/values-eks.yaml +++ b/charts/flyte-core/values-eks.yaml @@ -282,6 +282,7 @@ configmap: - sidecar - k8s-array - agent-service + - echo # - sagemaker_hyperparameter_tuning # - sagemaker_custom_training # - sagemaker_training diff --git a/charts/flyte-core/values-gcp.yaml b/charts/flyte-core/values-gcp.yaml index b402924699..b6b0342ab3 100644 --- a/charts/flyte-core/values-gcp.yaml +++ b/charts/flyte-core/values-gcp.yaml @@ -297,6 +297,7 @@ configmap: - sidecar - k8s-array - agent-service + - echo default-for-task-types: container: container sidecar: sidecar diff --git a/charts/flyte-core/values-keycloak-idp-flyteclients-without-browser.yaml b/charts/flyte-core/values-keycloak-idp-flyteclients-without-browser.yaml index 22624d8775..961611b56c 100644 --- a/charts/flyte-core/values-keycloak-idp-flyteclients-without-browser.yaml +++ b/charts/flyte-core/values-keycloak-idp-flyteclients-without-browser.yaml @@ -674,6 +674,7 @@ configmap: - sidecar - k8s-array - agent-service + - echo default-for-task-types: container: container sidecar: sidecar diff --git a/charts/flyte-core/values.yaml b/charts/flyte-core/values.yaml index 7023dea3f0..93c0d9b389 100755 --- a/charts/flyte-core/values.yaml +++ b/charts/flyte-core/values.yaml @@ -863,6 +863,7 @@ configmap: - sidecar - k8s-array - agent-service + - echo default-for-task-types: container: container sidecar: sidecar diff --git a/charts/flyte/README.md b/charts/flyte/README.md index 5ae694e411..1c0d00661c 100644 --- a/charts/flyte/README.md +++ b/charts/flyte/README.md @@ -71,7 +71,7 @@ helm upgrade -f values-sandbox.yaml flyte . 
| contour.tolerations | list | `[]` | tolerations for Contour deployment | | daskoperator | object | `{"enabled":false}` | Optional: Dask Plugin using the Dask Operator | | daskoperator.enabled | bool | `false` | - enable or disable the dask operator deployment installation | -| flyte | object | `{"cluster_resource_manager":{"config":{"cluster_resources":{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refresh":"5m","refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}},"enabled":true,"service_account_name":"flyteadmin","templates":[{"key":"aa_namespace","value":"apiVersion: v1\nkind: Namespace\nmetadata:\n name: {{ namespace }}\nspec:\n finalizers:\n - kubernetes\n"},{"key":"ab_project_resource_quota","value":"apiVersion: v1\nkind: ResourceQuota\nmetadata:\n name: project-quota\n namespace: {{ namespace }}\nspec:\n hard:\n limits.cpu: {{ projectQuotaCpu }}\n limits.memory: {{ projectQuotaMemory }}\n"}]},"common":{"databaseSecret":{"name":"","secretManifest":{}},"flyteNamespaceTemplate":{"enabled":false},"ingress":{"albSSLRedirect":false,"annotations":{"nginx.ingress.kubernetes.io/app-root":"/console"},"enabled":true,"host":"","separateGrpcIngress":false,"separateGrpcIngressAnnotations":{"nginx.ingress.kubernetes.io/backend-protocol":"GRPC"},"tls":{"enabled":false},"webpackHMR":true}},"configmap":{"adminServer":{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}},"catalog":{"catalog-cache":{"endpoint":"datacatalog:89","insecure":true,"type":"datacatalog"}},"console":{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"},"copilot":{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1","name":"flyte-copilot-","start-timeout":"30s"}}}},"core":{"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}},"datacatalogServer":{"
application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}},"domain":{"domains":[{"id":"development","name":"development"},{"id":"staging","name":"staging"},{"id":"production","name":"production"}]},"enabled_plugins":{"tasks":{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sensor":"agent-service","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}}},"k8s":{"plugins":{"k8s":{"default-cpus":"100m","default-env-from-configmaps":[],"default-env-from-secrets":[],"default-env-vars":[{"FLYTE_AWS_ENDPOINT":"http://minio.flyte:9000"},{"FLYTE_AWS_ACCESS_KEY_ID":"minio"},{"FLYTE_AWS_SECRET_ACCESS_KEY":"miniostorage"}],"default-memory":"200Mi"}}},"logger":{"logger":{"level":5,"show-source":true}},"remoteData":{"remoteData":{"region":"us-east-1","scheme":"local","signedUrls":{"durationMinutes":3}}},"resource_manager":{"propeller":{"resourcemanager":{"redis":null,"type":"noop"}}},"task_logs":{"plugins":{"logs":{"cloudwatch-enabled":false,"kubernetes-enabled":true,"kubernetes-template-uri":"http://localhost:30082/#/log/{{ \"{{\" }} .namespace {{ \"}}\" }}/{{ \"{{\" }} .podName {{ \"}}\" }}/pod?namespace={{ \"{{\" }} .namespace {{ \"}}\" }}"}}},"task_resource_defaults":{"task_resources":{"defaults":{"cpu":"100m","memory":"200Mi","storage":"5Mi"},"limits":{"cpu":2,"gpu":1,"memory":"1Gi","storage":"20Mi"}}}},"datacatalog":{"affinity":{},"configPath":"/etc/datacatalog/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/datacatalog","tag":"v1.13.1"},"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"500m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"type":"NodePort"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"db":{"admin":{"database":{"dbname":"flyteadmin","host":"postgres","port":5432,"username":"postgres"}},"datacatalog":{"database":{"dbname":"datacatalog","host":"postgres","port":5432,"username":"postgres"}}},"deployRedoc":true,"flyteadmin":{"additionalVolumeMounts":[],"additionalVolumes":[],"affinity":{},"configPath":"/etc/flyte/config/*.yaml","env":[],"image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flyteadmin","tag":"v1.13.1"},"initialProjects":["flytesnacks","flytetester","flyteexamples"],"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"secrets":{},"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"loadBalancerSourceRanges":[],"type":"ClusterIP"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"flyteconsole":{"affinity":{},"ga":{"enabled":true,"tracking_id":"G-0QW4DJWJ20"},"image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flyteconsole","tag":"v1.17.1"},"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"500m","memory":"275Mi"},"requests":{"cpu":"10m","memory":"250Mi"}},"service":{"annotations":{},"type":"ClusterIP"},"tolerations":[]},"flytepropeller":{"affinity":{},"cacheSizeMbs":0,"configPath":"/etc/flyte/config/*.yaml","image":{"pu
llPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flytepropeller","tag":"v1.13.1"},"manager":false,"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"200m","ephemeral-storage":"100Mi","memory":"200Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"flytescheduler":{"affinity":{},"configPath":"/etc/flyte/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flytescheduler","tag":"v1.13.1"},"nodeSelector":{},"podAnnotations":{},"resources":{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"secrets":{},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"storage":{"bucketName":"my-s3-bucket","custom":{},"gcs":null,"s3":{"region":"us-east-1"},"type":"sandbox"},"webhook":{"enabled":true,"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"type":"ClusterIP"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]}},"workflow_notifications":{"config":{},"enabled":false},"workflow_scheduler":{"enabled":true,"type":"native"}}` | ------------------------------------------------------------------- Core System settings This section consists of Core components of Flyte and their deployment settings. This includes FlyteAdmin service, Datacatalog, FlytePropeller and Flyteconsole | +| flyte | object | `{"cluster_resource_manager":{"config":{"cluster_resources":{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refresh":"5m","refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}},"enabled":true,"service_account_name":"flyteadmin","templates":[{"key":"aa_namespace","value":"apiVersion: v1\nkind: Namespace\nmetadata:\n name: {{ namespace }}\nspec:\n finalizers:\n - kubernetes\n"},{"key":"ab_project_resource_quota","value":"apiVersion: v1\nkind: ResourceQuota\nmetadata:\n name: project-quota\n namespace: {{ namespace }}\nspec:\n hard:\n limits.cpu: {{ projectQuotaCpu }}\n limits.memory: {{ projectQuotaMemory 
}}\n"}]},"common":{"databaseSecret":{"name":"","secretManifest":{}},"flyteNamespaceTemplate":{"enabled":false},"ingress":{"albSSLRedirect":false,"annotations":{"nginx.ingress.kubernetes.io/app-root":"/console"},"enabled":true,"host":"","separateGrpcIngress":false,"separateGrpcIngressAnnotations":{"nginx.ingress.kubernetes.io/backend-protocol":"GRPC"},"tls":{"enabled":false},"webpackHMR":true}},"configmap":{"adminServer":{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}},"catalog":{"catalog-cache":{"endpoint":"datacatalog:89","insecure":true,"type":"datacatalog"}},"console":{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"},"copilot":{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1","name":"flyte-copilot-","start-timeout":"30s"}}}},"core":{"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}},"datacatalogServer":{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}},"domain":{"domains":[{"id":"development","name":"development"},{"id":"staging","name":"staging"},{"id":"production","name":"production"}]},"enabled_plugins":{"tasks":{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sensor":"agent-service","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service","echo"]}}},"k8s":{"plugins":{"k8s":{"default-cpus":"100m","default-env-from-configmaps":[],"default-env-from-secrets":[],"default-env-vars":[{"FLYTE_AWS_ENDPOINT":"http://minio.flyte:9000"},{"FLYTE_AWS_ACCESS_KEY_ID":"minio"},{"FLYTE_AWS_SECRET_ACCESS_KEY":"miniostorage"}],"default-memory":"200Mi"}}},"logger":{"logger":{"level":5,"show-source":true}},"remoteData":{"remoteData":{"region":"us-east-1","scheme":"local","signedUrls":{"durationMinutes":3}}},"resource_manager":{"propeller":{"resourcemanager":{"redis":null,"type":"noop"}}},"task_logs":{"plugins":{"logs":{"cloudwatch-enabled":false,"kubernetes-enabled":true,"kubernetes-template-u
ri":"http://localhost:30082/#/log/{{ \"{{\" }} .namespace {{ \"}}\" }}/{{ \"{{\" }} .podName {{ \"}}\" }}/pod?namespace={{ \"{{\" }} .namespace {{ \"}}\" }}"}}},"task_resource_defaults":{"task_resources":{"defaults":{"cpu":"100m","memory":"200Mi","storage":"5Mi"},"limits":{"cpu":2,"gpu":1,"memory":"1Gi","storage":"20Mi"}}}},"datacatalog":{"affinity":{},"configPath":"/etc/datacatalog/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/datacatalog","tag":"v1.13.1"},"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"500m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"type":"NodePort"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"db":{"admin":{"database":{"dbname":"flyteadmin","host":"postgres","port":5432,"username":"postgres"}},"datacatalog":{"database":{"dbname":"datacatalog","host":"postgres","port":5432,"username":"postgres"}}},"deployRedoc":true,"flyteadmin":{"additionalVolumeMounts":[],"additionalVolumes":[],"affinity":{},"configPath":"/etc/flyte/config/*.yaml","env":[],"image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flyteadmin","tag":"v1.13.1"},"initialProjects":["flytesnacks","flytetester","flyteexamples"],"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"secrets":{},"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"loadBalancerSourceRanges":[],"type":"ClusterIP"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"flyteconsole":{"affinity":{},"ga":{"enabled":true,"tracking_id":"G-0QW4DJWJ20"},"image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flyteconsole","tag":"v1.17.1"},"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"500m","memory":"275Mi"},"requests":{"cpu":"10m","memory":"250Mi"}},"service":{"annotations":{},"type":"ClusterIP"},"tolerations":[]},"flytepropeller":{"affinity":{},"cacheSizeMbs":0,"configPath":"/etc/flyte/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flytepropeller","tag":"v1.13.1"},"manager":false,"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"200m","ephemeral-storage":"100Mi","memory":"200Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"flytescheduler":{"affinity":{},"configPath":"/etc/flyte/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flytescheduler","tag":"v1.13.1"},"nodeSelector":{},"podAnnotations":{},"resources":{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"secrets":{},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"storage":{"bucketName":"my-s3-bucket","custom":{},"gcs":null,"s3":{"region":"us-east-1"},"type":"sandbox"},"webhook":{"enabled":true,"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"type":"ClusterIP"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]}},"workflow_notificatio
ns":{"config":{},"enabled":false},"workflow_scheduler":{"enabled":true,"type":"native"}}` | ------------------------------------------------------------------- Core System settings This section consists of Core components of Flyte and their deployment settings. This includes FlyteAdmin service, Datacatalog, FlytePropeller and Flyteconsole | | flyte.cluster_resource_manager | object | `{"config":{"cluster_resources":{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refresh":"5m","refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}},"enabled":true,"service_account_name":"flyteadmin","templates":[{"key":"aa_namespace","value":"apiVersion: v1\nkind: Namespace\nmetadata:\n name: {{ namespace }}\nspec:\n finalizers:\n - kubernetes\n"},{"key":"ab_project_resource_quota","value":"apiVersion: v1\nkind: ResourceQuota\nmetadata:\n name: project-quota\n namespace: {{ namespace }}\nspec:\n hard:\n limits.cpu: {{ projectQuotaCpu }}\n limits.memory: {{ projectQuotaMemory }}\n"}]}` | Configuration for the Cluster resource manager component. This is an optional component, that enables automatic cluster configuration. This is useful to set default quotas, manage namespaces etc that map to a project/domain | | flyte.cluster_resource_manager.config.cluster_resources | object | `{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refresh":"5m","refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}` | ClusterResource parameters Refer to the [structure](https://pkg.go.dev/github.com/lyft/flyteadmin@v0.3.37/pkg/runtime/interfaces#ClusterResourceConfig) to customize. | | flyte.cluster_resource_manager.config.cluster_resources.standaloneDeployment | bool | `false` | Starts the cluster resource manager in standalone mode with requisite auth credentials to call flyteadmin service endpoints | @@ -91,7 +91,7 @@ helm upgrade -f values-sandbox.yaml flyte . | flyte.common.ingress.separateGrpcIngressAnnotations | object | `{"nginx.ingress.kubernetes.io/backend-protocol":"GRPC"}` | - Extra Ingress annotations applied only to the GRPC ingress. Only makes sense if `separateGrpcIngress` is enabled. | | flyte.common.ingress.tls | object | `{"enabled":false}` | - TLS Settings | | flyte.common.ingress.webpackHMR | bool | `true` | - Enable or disable HMR route to flyteconsole. This is useful only for frontend development. 
| -| flyte.configmap | object | `{"adminServer":{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}},"catalog":{"catalog-cache":{"endpoint":"datacatalog:89","insecure":true,"type":"datacatalog"}},"console":{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"},"copilot":{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1","name":"flyte-copilot-","start-timeout":"30s"}}}},"core":{"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}},"datacatalogServer":{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}},"domain":{"domains":[{"id":"development","name":"development"},{"id":"staging","name":"staging"},{"id":"production","name":"production"}]},"enabled_plugins":{"tasks":{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sensor":"agent-service","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}}},"k8s":{"plugins":{"k8s":{"default-cpus":"100m","default-env-from-configmaps":[],"default-env-from-secrets":[],"default-env-vars":[{"FLYTE_AWS_ENDPOINT":"http://minio.flyte:9000"},{"FLYTE_AWS_ACCESS_KEY_ID":"minio"},{"FLYTE_AWS_SECRET_ACCESS_KEY":"miniostorage"}],"default-memory":"200Mi"}}},"logger":{"logger":{"level":5,"show-source":true}},"remoteData":{"remoteData":{"region":"us-east-1","scheme":"local","signedUrls":{"durationMinutes":3}}},"resource_manager":{"propeller":{"resourcemanager":{"redis":null,"type":"noop"}}},"task_logs":{"plugins":{"logs":{"cloudwatch-enabled":false,"kubernetes-enabled":true,"kubernetes-template-uri":"http://localhost:30082/#/log/{{ \"{{\" }} .namespace {{ \"}}\" }}/{{ \"{{\" }} .podName {{ \"}}\" }}/pod?namespace={{ \"{{\" }} .namespace {{ \"}}\" }}"}}},"task_resource_defaults":{"task_resources":{"defaults":{"cpu":"100m","memory":"200Mi","storage":"5Mi"},"limits":{"cpu":2,"gpu":1,"memory":"1Gi","storage":"20Mi"}}}}` | 
----------------------------------------------------------------- CONFIGMAPS SETTINGS | +| flyte.configmap | object | `{"adminServer":{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}},"catalog":{"catalog-cache":{"endpoint":"datacatalog:89","insecure":true,"type":"datacatalog"}},"console":{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"},"copilot":{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.1","name":"flyte-copilot-","start-timeout":"30s"}}}},"core":{"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}},"datacatalogServer":{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}},"domain":{"domains":[{"id":"development","name":"development"},{"id":"staging","name":"staging"},{"id":"production","name":"production"}]},"enabled_plugins":{"tasks":{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sensor":"agent-service","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service","echo"]}}},"k8s":{"plugins":{"k8s":{"default-cpus":"100m","default-env-from-configmaps":[],"default-env-from-secrets":[],"default-env-vars":[{"FLYTE_AWS_ENDPOINT":"http://minio.flyte:9000"},{"FLYTE_AWS_ACCESS_KEY_ID":"minio"},{"FLYTE_AWS_SECRET_ACCESS_KEY":"miniostorage"}],"default-memory":"200Mi"}}},"logger":{"logger":{"level":5,"show-source":true}},"remoteData":{"remoteData":{"region":"us-east-1","scheme":"local","signedUrls":{"durationMinutes":3}}},"resource_manager":{"propeller":{"resourcemanager":{"redis":null,"type":"noop"}}},"task_logs":{"plugins":{"logs":{"cloudwatch-enabled":false,"kubernetes-enabled":true,"kubernetes-template-uri":"http://localhost:30082/#/log/{{ \"{{\" }} .namespace {{ \"}}\" }}/{{ \"{{\" }} .podName {{ \"}}\" }}/pod?namespace={{ \"{{\" }} .namespace {{ \"}}\" 
}}"}}},"task_resource_defaults":{"task_resources":{"defaults":{"cpu":"100m","memory":"200Mi","storage":"5Mi"},"limits":{"cpu":2,"gpu":1,"memory":"1Gi","storage":"20Mi"}}}}` | ----------------------------------------------------------------- CONFIGMAPS SETTINGS | | flyte.configmap.adminServer | object | `{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}}` | FlyteAdmin server configuration | | flyte.configmap.adminServer.auth | object | `{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}}` | Authentication configuration | | flyte.configmap.adminServer.server.security.secure | bool | `false` | Controls whether to serve requests over SSL/TLS. | @@ -104,9 +104,9 @@ helm upgrade -f values-sandbox.yaml flyte . | flyte.configmap.core.propeller | object | `{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"}` | follows the structure specified [here](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/config). | | flyte.configmap.datacatalogServer | object | `{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}}` | Datacatalog server config | | flyte.configmap.domain | object | `{"domains":[{"id":"development","name":"development"},{"id":"staging","name":"staging"},{"id":"production","name":"production"}]}` | Domains configuration for Flyte projects. This enables the specified number of domains across all projects in Flyte. 
| -| flyte.configmap.enabled_plugins.tasks | object | `{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sensor":"agent-service","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}}` | Tasks specific configuration [structure](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/nodes/task/config#GetConfig) | -| flyte.configmap.enabled_plugins.tasks.task-plugins | object | `{"default-for-task-types":{"container":"container","container_array":"k8s-array","sensor":"agent-service","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}` | Plugins configuration, [structure](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/nodes/task/config#TaskPluginConfig) | -| flyte.configmap.enabled_plugins.tasks.task-plugins.enabled-plugins | list | `["container","sidecar","k8s-array","agent-service"]` | [Enabled Plugins](https://pkg.go.dev/github.com/lyft/flyteplugins/go/tasks/config#Config). Enable sagemaker*, athena if you install the backend plugins | +| flyte.configmap.enabled_plugins.tasks | object | `{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sensor":"agent-service","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service","echo"]}}` | Tasks specific configuration [structure](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/nodes/task/config#GetConfig) | +| flyte.configmap.enabled_plugins.tasks.task-plugins | object | `{"default-for-task-types":{"container":"container","container_array":"k8s-array","sensor":"agent-service","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service","echo"]}` | Plugins configuration, [structure](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/nodes/task/config#TaskPluginConfig) | +| flyte.configmap.enabled_plugins.tasks.task-plugins.enabled-plugins | list | `["container","sidecar","k8s-array","agent-service","echo"]` | [Enabled Plugins](https://pkg.go.dev/github.com/lyft/flyteplugins/go/tasks/config#Config). 
Enable sagemaker*, athena if you install the backend plugins | | flyte.configmap.k8s | object | `{"plugins":{"k8s":{"default-cpus":"100m","default-env-from-configmaps":[],"default-env-from-secrets":[],"default-env-vars":[{"FLYTE_AWS_ENDPOINT":"http://minio.flyte:9000"},{"FLYTE_AWS_ACCESS_KEY_ID":"minio"},{"FLYTE_AWS_SECRET_ACCESS_KEY":"miniostorage"}],"default-memory":"200Mi"}}}` | Kubernetes specific Flyte configuration | | flyte.configmap.k8s.plugins.k8s | object | `{"default-cpus":"100m","default-env-from-configmaps":[],"default-env-from-secrets":[],"default-env-vars":[{"FLYTE_AWS_ENDPOINT":"http://minio.flyte:9000"},{"FLYTE_AWS_ACCESS_KEY_ID":"minio"},{"FLYTE_AWS_SECRET_ACCESS_KEY":"miniostorage"}],"default-memory":"200Mi"}` | Configuration section for all K8s specific plugins [Configuration structure](https://pkg.go.dev/github.com/lyft/flyteplugins/go/tasks/pluginmachinery/flytek8s/config) | | flyte.configmap.logger | object | `{"logger":{"level":5,"show-source":true}}` | Logger configuration | diff --git a/charts/flyte/values.yaml b/charts/flyte/values.yaml index 9081162782..63c0a34c64 100755 --- a/charts/flyte/values.yaml +++ b/charts/flyte/values.yaml @@ -528,6 +528,7 @@ flyte: - sidecar - k8s-array - agent-service + - echo default-for-task-types: container: container sidecar: sidecar diff --git a/deployment/eks/flyte_aws_scheduler_helm_generated.yaml b/deployment/eks/flyte_aws_scheduler_helm_generated.yaml index 54dd0fa261..dc4ac5e800 100644 --- a/deployment/eks/flyte_aws_scheduler_helm_generated.yaml +++ b/deployment/eks/flyte_aws_scheduler_helm_generated.yaml @@ -491,6 +491,7 @@ data: - sidecar - k8s-array - agent-service + - echo k8s.yaml: | plugins: k8s: @@ -1277,7 +1278,7 @@ spec: template: metadata: annotations: - configChecksum: "cf07450d68fd5fcd2055e08e48664a18edabf2e13904b93096f2f6aa7ea7f5c" + configChecksum: "33bc4dd986fdb015ce49d998deedf122e119579ec09db311e67276230678a70" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -1361,7 +1362,7 @@ spec: app.kubernetes.io/name: flyte-pod-webhook app.kubernetes.io/version: v1.13.1 annotations: - configChecksum: "cf07450d68fd5fcd2055e08e48664a18edabf2e13904b93096f2f6aa7ea7f5c" + configChecksum: "33bc4dd986fdb015ce49d998deedf122e119579ec09db311e67276230678a70" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: diff --git a/deployment/eks/flyte_helm_dataplane_generated.yaml b/deployment/eks/flyte_helm_dataplane_generated.yaml index da234512fb..03640c4c05 100644 --- a/deployment/eks/flyte_helm_dataplane_generated.yaml +++ b/deployment/eks/flyte_helm_dataplane_generated.yaml @@ -155,6 +155,7 @@ data: - sidecar - k8s-array - agent-service + - echo k8s.yaml: | plugins: k8s: @@ -428,7 +429,7 @@ spec: template: metadata: annotations: - configChecksum: "cf07450d68fd5fcd2055e08e48664a18edabf2e13904b93096f2f6aa7ea7f5c" + configChecksum: "33bc4dd986fdb015ce49d998deedf122e119579ec09db311e67276230678a70" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -512,7 +513,7 @@ spec: app.kubernetes.io/name: flyte-pod-webhook app.kubernetes.io/version: v1.13.1 annotations: - configChecksum: "cf07450d68fd5fcd2055e08e48664a18edabf2e13904b93096f2f6aa7ea7f5c" + configChecksum: "33bc4dd986fdb015ce49d998deedf122e119579ec09db311e67276230678a70" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: diff --git a/deployment/eks/flyte_helm_generated.yaml b/deployment/eks/flyte_helm_generated.yaml index 3a2aa378b2..c2e861857c 100644 --- a/deployment/eks/flyte_helm_generated.yaml +++ 
b/deployment/eks/flyte_helm_generated.yaml @@ -522,6 +522,7 @@ data: - sidecar - k8s-array - agent-service + - echo k8s.yaml: | plugins: k8s: @@ -1407,7 +1408,7 @@ spec: template: metadata: annotations: - configChecksum: "cf07450d68fd5fcd2055e08e48664a18edabf2e13904b93096f2f6aa7ea7f5c" + configChecksum: "33bc4dd986fdb015ce49d998deedf122e119579ec09db311e67276230678a70" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -1491,7 +1492,7 @@ spec: app.kubernetes.io/name: flyte-pod-webhook app.kubernetes.io/version: v1.13.1 annotations: - configChecksum: "cf07450d68fd5fcd2055e08e48664a18edabf2e13904b93096f2f6aa7ea7f5c" + configChecksum: "33bc4dd986fdb015ce49d998deedf122e119579ec09db311e67276230678a70" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: diff --git a/deployment/gcp/flyte_helm_dataplane_generated.yaml b/deployment/gcp/flyte_helm_dataplane_generated.yaml index 6cf03628f3..be2feeb698 100644 --- a/deployment/gcp/flyte_helm_dataplane_generated.yaml +++ b/deployment/gcp/flyte_helm_dataplane_generated.yaml @@ -155,6 +155,7 @@ data: - sidecar - k8s-array - agent-service + - echo k8s.yaml: | plugins: k8s: @@ -436,7 +437,7 @@ spec: template: metadata: annotations: - configChecksum: "3ceda510332c8f7f6cd2cdd75234ef366e2b31dfe0632a2a93cc25bc326cf28" + configChecksum: "64a3f5e546eddd8126d03c005460b964f428a0da31c0bfac5f70c63fbf3d635" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -519,7 +520,7 @@ spec: app.kubernetes.io/name: flyte-pod-webhook app.kubernetes.io/version: v1.13.1 annotations: - configChecksum: "3ceda510332c8f7f6cd2cdd75234ef366e2b31dfe0632a2a93cc25bc326cf28" + configChecksum: "64a3f5e546eddd8126d03c005460b964f428a0da31c0bfac5f70c63fbf3d635" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: diff --git a/deployment/gcp/flyte_helm_generated.yaml b/deployment/gcp/flyte_helm_generated.yaml index 5992af2081..acd3985c6d 100644 --- a/deployment/gcp/flyte_helm_generated.yaml +++ b/deployment/gcp/flyte_helm_generated.yaml @@ -535,6 +535,7 @@ data: - sidecar - k8s-array - agent-service + - echo k8s.yaml: | plugins: k8s: @@ -1430,7 +1431,7 @@ spec: template: metadata: annotations: - configChecksum: "3ceda510332c8f7f6cd2cdd75234ef366e2b31dfe0632a2a93cc25bc326cf28" + configChecksum: "64a3f5e546eddd8126d03c005460b964f428a0da31c0bfac5f70c63fbf3d635" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -1513,7 +1514,7 @@ spec: app.kubernetes.io/name: flyte-pod-webhook app.kubernetes.io/version: v1.13.1 annotations: - configChecksum: "3ceda510332c8f7f6cd2cdd75234ef366e2b31dfe0632a2a93cc25bc326cf28" + configChecksum: "64a3f5e546eddd8126d03c005460b964f428a0da31c0bfac5f70c63fbf3d635" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: diff --git a/deployment/sandbox-binary/flyte_sandbox_binary_helm_generated.yaml b/deployment/sandbox-binary/flyte_sandbox_binary_helm_generated.yaml index e7a4425a5f..ebfd93f0f7 100644 --- a/deployment/sandbox-binary/flyte_sandbox_binary_helm_generated.yaml +++ b/deployment/sandbox-binary/flyte_sandbox_binary_helm_generated.yaml @@ -109,6 +109,7 @@ data: - sidecar - k8s-array - agent-service + - echo plugins: logs: kubernetes-enabled: false @@ -359,7 +360,7 @@ spec: app.kubernetes.io/instance: flyte app.kubernetes.io/component: flyte-binary annotations: - checksum/configuration: f0ece2b70412090c94dd394c3b7c213edac6e7e6d53b0696ce1b1e748176bb45 + checksum/configuration: 1dd761465a57869e165697d10e35a81beeb988253055d6a74ed80aa5e3e5e106 checksum/configuration-secret: 
d5d93f4e67780b21593dc3799f0f6682aab0765e708e4020939975d14d44f929 checksum/cluster-resource-templates: 7dfa59f3d447e9c099b8f8ffad3af466fecbc9cf9f8c97295d9634254a55d4ae spec: diff --git a/deployment/sandbox/flyte_helm_generated.yaml b/deployment/sandbox/flyte_helm_generated.yaml index 4bdc97df64..c68497cf1b 100644 --- a/deployment/sandbox/flyte_helm_generated.yaml +++ b/deployment/sandbox/flyte_helm_generated.yaml @@ -643,6 +643,7 @@ data: - sidecar - k8s-array - agent-service + - echo k8s.yaml: | plugins: k8s: @@ -7181,7 +7182,7 @@ spec: template: metadata: annotations: - configChecksum: "b535173fc46161f52db8a753f6b82289cdf866def58cd388585d9a78c19363f" + configChecksum: "84d449758d51dc641aff55ae07f4376a860b5038e8407cb9d2444c4f895d953" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -7257,7 +7258,7 @@ spec: app.kubernetes.io/name: flyte-pod-webhook app.kubernetes.io/version: v1.13.1 annotations: - configChecksum: "b535173fc46161f52db8a753f6b82289cdf866def58cd388585d9a78c19363f" + configChecksum: "84d449758d51dc641aff55ae07f4376a860b5038e8407cb9d2444c4f895d953" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: diff --git a/docker/sandbox-bundled/manifests/complete-agent.yaml b/docker/sandbox-bundled/manifests/complete-agent.yaml index 9b75793981..78a678ae34 100644 --- a/docker/sandbox-bundled/manifests/complete-agent.yaml +++ b/docker/sandbox-bundled/manifests/complete-agent.yaml @@ -461,6 +461,7 @@ data: - sidecar - k8s-array - agent-service + - echo plugins: logs: kubernetes-enabled: true @@ -817,7 +818,7 @@ type: Opaque --- apiVersion: v1 data: - haSharedSecret: czRSTHlPalFoaXh2eG8ybQ== + haSharedSecret: cWlOc1c1bnl5ZGI3YTlzSw== proxyPassword: "" proxyUsername: "" kind: Secret @@ -1248,7 +1249,7 @@ spec: metadata: annotations: checksum/cluster-resource-templates: 6fd9b172465e3089fcc59f738b92b8dc4d8939360c19de8ee65f68b0e7422035 - checksum/configuration: c947a76cd5e9d59db63c98247b8944d64351090c65c4741d0ac30b6d17bde944 + checksum/configuration: 6e8a4cc6177037f26cee65d09c37c010437ea3f0989a2a2dfef380fed9f468c2 checksum/configuration-secret: 09216ffaa3d29e14f88b1f30af580d02a2a5e014de4d750b7f275cc07ed4e914 labels: app.kubernetes.io/component: flyte-binary @@ -1414,7 +1415,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: 26c674111980a6c187443e32ee29419ae3678da338eabcc88055efe5b975736a + checksum/secret: 7f8247a0b84f43018fdf11a598132b8a67ed9fde6573ffce801b725a6f955012 labels: app: docker-registry release: flyte-sandbox diff --git a/docker/sandbox-bundled/manifests/complete.yaml b/docker/sandbox-bundled/manifests/complete.yaml index 2841a1c9d8..5d46b89edf 100644 --- a/docker/sandbox-bundled/manifests/complete.yaml +++ b/docker/sandbox-bundled/manifests/complete.yaml @@ -450,6 +450,7 @@ data: - sidecar - k8s-array - agent-service + - echo plugins: logs: kubernetes-enabled: true @@ -797,7 +798,7 @@ type: Opaque --- apiVersion: v1 data: - haSharedSecret: bVZ2dDVDNDBYYUZUWDhaOQ== + haSharedSecret: UUxqaW5SeGlBbFNlQzVoag== proxyPassword: "" proxyUsername: "" kind: Secret @@ -1195,7 +1196,7 @@ spec: metadata: annotations: checksum/cluster-resource-templates: 6fd9b172465e3089fcc59f738b92b8dc4d8939360c19de8ee65f68b0e7422035 - checksum/configuration: dcece0d233748108b134ff58a6c1f2c5eafb960e009c8a55b90734ffc884a435 + checksum/configuration: 967349c227efb6765bb7509d14dc7b0d62b07904a337dd70c8682d52d870590a checksum/configuration-secret: 
09216ffaa3d29e14f88b1f30af580d02a2a5e014de4d750b7f275cc07ed4e914 labels: app.kubernetes.io/component: flyte-binary @@ -1361,7 +1362,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: bd5c554c59981c4aeac7afc8c548d72de08d84f5a0c3b874d0122c423370f51a + checksum/secret: bea0c8f293b54e309a353e0e8563e709ad817d372d2b1dce1114188693aa3f12 labels: app: docker-registry release: flyte-sandbox diff --git a/docker/sandbox-bundled/manifests/dev.yaml b/docker/sandbox-bundled/manifests/dev.yaml index c0d1d7d5b1..917645af33 100644 --- a/docker/sandbox-bundled/manifests/dev.yaml +++ b/docker/sandbox-bundled/manifests/dev.yaml @@ -499,7 +499,7 @@ metadata: --- apiVersion: v1 data: - haSharedSecret: Tmp2T0hScHN2NFJtWUxKVQ== + haSharedSecret: ZmdJNWs5RUg4cWNVTVBzRw== proxyPassword: "" proxyUsername: "" kind: Secret @@ -934,7 +934,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: 599e28a6ad1e5b63c91487bf3fea5d5b413b9ea50a0262e0ac6b00c65db92dc1 + checksum/secret: a896f2c43dff6c05c154b51e4c9ec21c9e2f03ecaf4c1fed045d84523219cf63 labels: app: docker-registry release: flyte-sandbox diff --git a/flyte-single-binary-local.yaml b/flyte-single-binary-local.yaml index 0597009854..4cb63e8d4d 100644 --- a/flyte-single-binary-local.yaml +++ b/flyte-single-binary-local.yaml @@ -41,6 +41,7 @@ tasks: - container - sidecar - K8S-ARRAY + - echo default-for-task-types: - container: container - container_array: K8S-ARRAY diff --git a/flytepropeller/propeller-config.yaml b/flytepropeller/propeller-config.yaml index 84ce877d66..c62b9ae5ba 100644 --- a/flytepropeller/propeller-config.yaml +++ b/flytepropeller/propeller-config.yaml @@ -33,6 +33,7 @@ tasks: - container - sidecar - K8S-ARRAY + - echo # Uncomment to enable sagemaker plugin # - sagemaker_training # - sagemaker_hyperparameter_tuning From 34efd0b5dd02e748dba02c1e16c26ae8bf340e5a Mon Sep 17 00:00:00 2001 From: Eduardo Apolinario <653394+eapolinario@users.noreply.github.com> Date: Thu, 29 Aug 2024 16:01:17 -0400 Subject: [PATCH 46/65] Do not emit execution id label by default in single binary (#5704) * Do not emit execution id label by default in single binary Signed-off-by: Eduardo Apolinario * Update flytestdlib/contextutils/context.go Co-authored-by: Thomas J. Fan Signed-off-by: Eduardo Apolinario <653394+eapolinario@users.noreply.github.com> --------- Signed-off-by: Eduardo Apolinario Signed-off-by: Eduardo Apolinario <653394+eapolinario@users.noreply.github.com> Co-authored-by: Eduardo Apolinario Co-authored-by: Thomas J. 
Fan Signed-off-by: Bugra Gedik --- cmd/single/start.go | 2 +- flytestdlib/contextutils/context.go | 17 +++++++++-------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/cmd/single/start.go b/cmd/single/start.go index e60c0e565f..1683fad4e1 100644 --- a/cmd/single/start.go +++ b/cmd/single/start.go @@ -249,6 +249,6 @@ func init() { RootCmd.AddCommand(startCmd) // Set Keys labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey, - contextutils.ExecIDKey, contextutils.WorkflowIDKey, contextutils.NodeIDKey, contextutils.TaskIDKey, + contextutils.WorkflowIDKey, contextutils.NodeIDKey, contextutils.TaskIDKey, contextutils.TaskTypeKey, common.RuntimeTypeKey, common.RuntimeVersionKey, storage.FailureTypeLabel) } diff --git a/flytestdlib/contextutils/context.go b/flytestdlib/contextutils/context.go index 080b0a098c..504ce767e9 100644 --- a/flytestdlib/contextutils/context.go +++ b/flytestdlib/contextutils/context.go @@ -12,14 +12,15 @@ import ( type Key string const ( - AppNameKey Key = "app_name" - NamespaceKey Key = "ns" - TaskTypeKey Key = "tasktype" - ProjectKey Key = "project" - DomainKey Key = "domain" - WorkflowIDKey Key = "wf" - NodeIDKey Key = "node" - TaskIDKey Key = "task" + AppNameKey Key = "app_name" + NamespaceKey Key = "ns" + TaskTypeKey Key = "tasktype" + ProjectKey Key = "project" + DomainKey Key = "domain" + WorkflowIDKey Key = "wf" + NodeIDKey Key = "node" + TaskIDKey Key = "task" + // Adding the ExecIDKey label to a metric will cause higher cardinality. Use with caution. ExecIDKey Key = "exec_id" JobIDKey Key = "job_id" PhaseKey Key = "phase" From e126da19ac4a38086f572793225430d124dcc038 Mon Sep 17 00:00:00 2001 From: Prafulla Mahindrakar Date: Thu, 29 Aug 2024 17:52:40 -0700 Subject: [PATCH 47/65] Using new offloaded metadata literal message for literal offloading (#5705) Signed-off-by: Bugra Gedik --- .../gen/pb-es/flyteidl/core/literals_pb.ts | 83 ++- .../gen/pb-go/flyteidl/core/literals.pb.go | 489 +++++++++++------- .../cacheservice/cacheservice.swagger.json | 32 +- .../datacatalog/datacatalog.swagger.json | 32 +- .../flyteidl/service/admin.swagger.json | 32 +- .../flyteidl/service/agent.swagger.json | 32 +- .../flyteidl/service/dataproxy.swagger.json | 32 +- .../external_plugin_service.swagger.json | 32 +- .../flyteidl/service/signal.swagger.json | 32 +- flyteidl/gen/pb-js/flyteidl.d.ts | 84 ++- flyteidl/gen/pb-js/flyteidl.js | 198 +++++-- .../pb_python/flyteidl/core/literals_pb2.py | 54 +- .../pb_python/flyteidl/core/literals_pb2.pyi | 16 +- flyteidl/gen/pb_rust/flyteidl.core.rs | 26 +- flyteidl/protos/flyteidl/core/literals.proto | 19 +- 15 files changed, 831 insertions(+), 362 deletions(-) diff --git a/flyteidl/gen/pb-es/flyteidl/core/literals_pb.ts b/flyteidl/gen/pb-es/flyteidl/core/literals_pb.ts index 4dd6f2467e..95ebbd9de9 100644 --- a/flyteidl/gen/pb-es/flyteidl/core/literals_pb.ts +++ b/flyteidl/gen/pb-es/flyteidl/core/literals_pb.ts @@ -566,6 +566,15 @@ export class Literal extends Message { */ value: LiteralMap; case: "map"; + } | { + /** + * Offloaded literal metadata + * When you deserialize the offloaded metadata, it would be of Literal and its type would be defined by LiteralType stored in offloaded_metadata. 
+ * + * @generated from field: flyteidl.core.LiteralOffloadedMetadata offloaded_metadata = 8; + */ + value: LiteralOffloadedMetadata; + case: "offloadedMetadata"; } | { case: undefined; value?: undefined } = { case: undefined }; /** @@ -584,20 +593,6 @@ export class Literal extends Message { */ metadata: { [key: string]: string } = {}; - /** - * If this literal is offloaded, this field will contain metadata including the offload location. - * - * @generated from field: string uri = 6; - */ - uri = ""; - - /** - * Includes information about the size of the literal. - * - * @generated from field: uint64 size_bytes = 7; - */ - sizeBytes = protoInt64.zero; - constructor(data?: PartialMessage) { super(); proto3.util.initPartial(data, this); @@ -609,10 +604,9 @@ export class Literal extends Message { { no: 1, name: "scalar", kind: "message", T: Scalar, oneof: "value" }, { no: 2, name: "collection", kind: "message", T: LiteralCollection, oneof: "value" }, { no: 3, name: "map", kind: "message", T: LiteralMap, oneof: "value" }, + { no: 8, name: "offloaded_metadata", kind: "message", T: LiteralOffloadedMetadata, oneof: "value" }, { no: 4, name: "hash", kind: "scalar", T: 9 /* ScalarType.STRING */ }, { no: 5, name: "metadata", kind: "map", K: 9 /* ScalarType.STRING */, V: {kind: "scalar", T: 9 /* ScalarType.STRING */} }, - { no: 6, name: "uri", kind: "scalar", T: 9 /* ScalarType.STRING */ }, - { no: 7, name: "size_bytes", kind: "scalar", T: 4 /* ScalarType.UINT64 */ }, ]); static fromBinary(bytes: Uint8Array, options?: Partial): Literal { @@ -632,6 +626,63 @@ export class Literal extends Message { } } +/** + * A message that contains the metadata of the offloaded data. + * + * @generated from message flyteidl.core.LiteralOffloadedMetadata + */ +export class LiteralOffloadedMetadata extends Message { + /** + * The location of the offloaded core.Literal. + * + * @generated from field: string uri = 1; + */ + uri = ""; + + /** + * The size of the offloaded data. + * + * @generated from field: uint64 size_bytes = 2; + */ + sizeBytes = protoInt64.zero; + + /** + * The inferred literal type of the offloaded data. + * + * @generated from field: flyteidl.core.LiteralType inferred_type = 3; + */ + inferredType?: LiteralType; + + constructor(data?: PartialMessage) { + super(); + proto3.util.initPartial(data, this); + } + + static readonly runtime: typeof proto3 = proto3; + static readonly typeName = "flyteidl.core.LiteralOffloadedMetadata"; + static readonly fields: FieldList = proto3.util.newFieldList(() => [ + { no: 1, name: "uri", kind: "scalar", T: 9 /* ScalarType.STRING */ }, + { no: 2, name: "size_bytes", kind: "scalar", T: 4 /* ScalarType.UINT64 */ }, + { no: 3, name: "inferred_type", kind: "message", T: LiteralType }, + ]); + + static fromBinary(bytes: Uint8Array, options?: Partial): LiteralOffloadedMetadata { + return new LiteralOffloadedMetadata().fromBinary(bytes, options); + } + + static fromJson(jsonValue: JsonValue, options?: Partial): LiteralOffloadedMetadata { + return new LiteralOffloadedMetadata().fromJson(jsonValue, options); + } + + static fromJsonString(jsonString: string, options?: Partial): LiteralOffloadedMetadata { + return new LiteralOffloadedMetadata().fromJsonString(jsonString, options); + } + + static equals(a: LiteralOffloadedMetadata | PlainMessage | undefined, b: LiteralOffloadedMetadata | PlainMessage | undefined): boolean { + return proto3.util.equals(LiteralOffloadedMetadata, a, b); + } +} + /** * A collection of literals. 
This is a workaround since oneofs in proto messages cannot contain a repeated field. * diff --git a/flyteidl/gen/pb-go/flyteidl/core/literals.pb.go b/flyteidl/gen/pb-go/flyteidl/core/literals.pb.go index 897ffc0d72..3f6e223749 100644 --- a/flyteidl/gen/pb-go/flyteidl/core/literals.pb.go +++ b/flyteidl/gen/pb-go/flyteidl/core/literals.pb.go @@ -778,6 +778,7 @@ type Literal struct { // *Literal_Scalar // *Literal_Collection // *Literal_Map + // *Literal_OffloadedMetadata Value isLiteral_Value `protobuf_oneof:"value"` // A hash representing this literal. // This is used for caching purposes. For more details refer to RFC 1893 @@ -785,10 +786,6 @@ type Literal struct { Hash string `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` // Additional metadata for literals. Metadata map[string]string `protobuf:"bytes,5,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // If this literal is offloaded, this field will contain metadata including the offload location. - Uri string `protobuf:"bytes,6,opt,name=uri,proto3" json:"uri,omitempty"` - // Includes information about the size of the literal. - SizeBytes uint64 `protobuf:"varint,7,opt,name=size_bytes,json=sizeBytes,proto3" json:"size_bytes,omitempty"` } func (x *Literal) Reset() { @@ -851,6 +848,13 @@ func (x *Literal) GetMap() *LiteralMap { return nil } +func (x *Literal) GetOffloadedMetadata() *LiteralOffloadedMetadata { + if x, ok := x.GetValue().(*Literal_OffloadedMetadata); ok { + return x.OffloadedMetadata + } + return nil +} + func (x *Literal) GetHash() string { if x != nil { return x.Hash @@ -865,20 +869,6 @@ func (x *Literal) GetMetadata() map[string]string { return nil } -func (x *Literal) GetUri() string { - if x != nil { - return x.Uri - } - return "" -} - -func (x *Literal) GetSizeBytes() uint64 { - if x != nil { - return x.SizeBytes - } - return 0 -} - type isLiteral_Value interface { isLiteral_Value() } @@ -898,12 +888,87 @@ type Literal_Map struct { Map *LiteralMap `protobuf:"bytes,3,opt,name=map,proto3,oneof"` } +type Literal_OffloadedMetadata struct { + // Offloaded literal metadata + // When you deserialize the offloaded metadata, it would be of Literal and its type would be defined by LiteralType stored in offloaded_metadata. + OffloadedMetadata *LiteralOffloadedMetadata `protobuf:"bytes,8,opt,name=offloaded_metadata,json=offloadedMetadata,proto3,oneof"` +} + func (*Literal_Scalar) isLiteral_Value() {} func (*Literal_Collection) isLiteral_Value() {} func (*Literal_Map) isLiteral_Value() {} +func (*Literal_OffloadedMetadata) isLiteral_Value() {} + +// A message that contains the metadata of the offloaded data. +type LiteralOffloadedMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The location of the offloaded core.Literal. + Uri string `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"` + // The size of the offloaded data. + SizeBytes uint64 `protobuf:"varint,2,opt,name=size_bytes,json=sizeBytes,proto3" json:"size_bytes,omitempty"` + // The inferred literal type of the offloaded data. 
+ InferredType *LiteralType `protobuf:"bytes,3,opt,name=inferred_type,json=inferredType,proto3" json:"inferred_type,omitempty"` +} + +func (x *LiteralOffloadedMetadata) Reset() { + *x = LiteralOffloadedMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl_core_literals_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LiteralOffloadedMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LiteralOffloadedMetadata) ProtoMessage() {} + +func (x *LiteralOffloadedMetadata) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl_core_literals_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LiteralOffloadedMetadata.ProtoReflect.Descriptor instead. +func (*LiteralOffloadedMetadata) Descriptor() ([]byte, []int) { + return file_flyteidl_core_literals_proto_rawDescGZIP(), []int{11} +} + +func (x *LiteralOffloadedMetadata) GetUri() string { + if x != nil { + return x.Uri + } + return "" +} + +func (x *LiteralOffloadedMetadata) GetSizeBytes() uint64 { + if x != nil { + return x.SizeBytes + } + return 0 +} + +func (x *LiteralOffloadedMetadata) GetInferredType() *LiteralType { + if x != nil { + return x.InferredType + } + return nil +} + // A collection of literals. This is a workaround since oneofs in proto messages cannot contain a repeated field. type LiteralCollection struct { state protoimpl.MessageState @@ -916,7 +981,7 @@ type LiteralCollection struct { func (x *LiteralCollection) Reset() { *x = LiteralCollection{} if protoimpl.UnsafeEnabled { - mi := &file_flyteidl_core_literals_proto_msgTypes[11] + mi := &file_flyteidl_core_literals_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -929,7 +994,7 @@ func (x *LiteralCollection) String() string { func (*LiteralCollection) ProtoMessage() {} func (x *LiteralCollection) ProtoReflect() protoreflect.Message { - mi := &file_flyteidl_core_literals_proto_msgTypes[11] + mi := &file_flyteidl_core_literals_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -942,7 +1007,7 @@ func (x *LiteralCollection) ProtoReflect() protoreflect.Message { // Deprecated: Use LiteralCollection.ProtoReflect.Descriptor instead. 
func (*LiteralCollection) Descriptor() ([]byte, []int) { - return file_flyteidl_core_literals_proto_rawDescGZIP(), []int{11} + return file_flyteidl_core_literals_proto_rawDescGZIP(), []int{12} } func (x *LiteralCollection) GetLiterals() []*Literal { @@ -964,7 +1029,7 @@ type LiteralMap struct { func (x *LiteralMap) Reset() { *x = LiteralMap{} if protoimpl.UnsafeEnabled { - mi := &file_flyteidl_core_literals_proto_msgTypes[12] + mi := &file_flyteidl_core_literals_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -977,7 +1042,7 @@ func (x *LiteralMap) String() string { func (*LiteralMap) ProtoMessage() {} func (x *LiteralMap) ProtoReflect() protoreflect.Message { - mi := &file_flyteidl_core_literals_proto_msgTypes[12] + mi := &file_flyteidl_core_literals_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -990,7 +1055,7 @@ func (x *LiteralMap) ProtoReflect() protoreflect.Message { // Deprecated: Use LiteralMap.ProtoReflect.Descriptor instead. func (*LiteralMap) Descriptor() ([]byte, []int) { - return file_flyteidl_core_literals_proto_rawDescGZIP(), []int{12} + return file_flyteidl_core_literals_proto_rawDescGZIP(), []int{13} } func (x *LiteralMap) GetLiterals() map[string]*Literal { @@ -1012,7 +1077,7 @@ type BindingDataCollection struct { func (x *BindingDataCollection) Reset() { *x = BindingDataCollection{} if protoimpl.UnsafeEnabled { - mi := &file_flyteidl_core_literals_proto_msgTypes[13] + mi := &file_flyteidl_core_literals_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1025,7 +1090,7 @@ func (x *BindingDataCollection) String() string { func (*BindingDataCollection) ProtoMessage() {} func (x *BindingDataCollection) ProtoReflect() protoreflect.Message { - mi := &file_flyteidl_core_literals_proto_msgTypes[13] + mi := &file_flyteidl_core_literals_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1038,7 +1103,7 @@ func (x *BindingDataCollection) ProtoReflect() protoreflect.Message { // Deprecated: Use BindingDataCollection.ProtoReflect.Descriptor instead. func (*BindingDataCollection) Descriptor() ([]byte, []int) { - return file_flyteidl_core_literals_proto_rawDescGZIP(), []int{13} + return file_flyteidl_core_literals_proto_rawDescGZIP(), []int{14} } func (x *BindingDataCollection) GetBindings() []*BindingData { @@ -1060,7 +1125,7 @@ type BindingDataMap struct { func (x *BindingDataMap) Reset() { *x = BindingDataMap{} if protoimpl.UnsafeEnabled { - mi := &file_flyteidl_core_literals_proto_msgTypes[14] + mi := &file_flyteidl_core_literals_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1073,7 +1138,7 @@ func (x *BindingDataMap) String() string { func (*BindingDataMap) ProtoMessage() {} func (x *BindingDataMap) ProtoReflect() protoreflect.Message { - mi := &file_flyteidl_core_literals_proto_msgTypes[14] + mi := &file_flyteidl_core_literals_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1086,7 +1151,7 @@ func (x *BindingDataMap) ProtoReflect() protoreflect.Message { // Deprecated: Use BindingDataMap.ProtoReflect.Descriptor instead. 
func (*BindingDataMap) Descriptor() ([]byte, []int) { - return file_flyteidl_core_literals_proto_rawDescGZIP(), []int{14} + return file_flyteidl_core_literals_proto_rawDescGZIP(), []int{15} } func (x *BindingDataMap) GetBindings() map[string]*BindingData { @@ -1107,7 +1172,7 @@ type UnionInfo struct { func (x *UnionInfo) Reset() { *x = UnionInfo{} if protoimpl.UnsafeEnabled { - mi := &file_flyteidl_core_literals_proto_msgTypes[15] + mi := &file_flyteidl_core_literals_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1120,7 +1185,7 @@ func (x *UnionInfo) String() string { func (*UnionInfo) ProtoMessage() {} func (x *UnionInfo) ProtoReflect() protoreflect.Message { - mi := &file_flyteidl_core_literals_proto_msgTypes[15] + mi := &file_flyteidl_core_literals_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1133,7 +1198,7 @@ func (x *UnionInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use UnionInfo.ProtoReflect.Descriptor instead. func (*UnionInfo) Descriptor() ([]byte, []int) { - return file_flyteidl_core_literals_proto_rawDescGZIP(), []int{15} + return file_flyteidl_core_literals_proto_rawDescGZIP(), []int{16} } func (x *UnionInfo) GetTargetType() *LiteralType { @@ -1162,7 +1227,7 @@ type BindingData struct { func (x *BindingData) Reset() { *x = BindingData{} if protoimpl.UnsafeEnabled { - mi := &file_flyteidl_core_literals_proto_msgTypes[16] + mi := &file_flyteidl_core_literals_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1175,7 +1240,7 @@ func (x *BindingData) String() string { func (*BindingData) ProtoMessage() {} func (x *BindingData) ProtoReflect() protoreflect.Message { - mi := &file_flyteidl_core_literals_proto_msgTypes[16] + mi := &file_flyteidl_core_literals_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1188,7 +1253,7 @@ func (x *BindingData) ProtoReflect() protoreflect.Message { // Deprecated: Use BindingData.ProtoReflect.Descriptor instead. func (*BindingData) Descriptor() ([]byte, []int) { - return file_flyteidl_core_literals_proto_rawDescGZIP(), []int{16} + return file_flyteidl_core_literals_proto_rawDescGZIP(), []int{17} } func (m *BindingData) GetValue() isBindingData_Value { @@ -1281,7 +1346,7 @@ type Binding struct { func (x *Binding) Reset() { *x = Binding{} if protoimpl.UnsafeEnabled { - mi := &file_flyteidl_core_literals_proto_msgTypes[17] + mi := &file_flyteidl_core_literals_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1294,7 +1359,7 @@ func (x *Binding) String() string { func (*Binding) ProtoMessage() {} func (x *Binding) ProtoReflect() protoreflect.Message { - mi := &file_flyteidl_core_literals_proto_msgTypes[17] + mi := &file_flyteidl_core_literals_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1307,7 +1372,7 @@ func (x *Binding) ProtoReflect() protoreflect.Message { // Deprecated: Use Binding.ProtoReflect.Descriptor instead. 
func (*Binding) Descriptor() ([]byte, []int) { - return file_flyteidl_core_literals_proto_rawDescGZIP(), []int{17} + return file_flyteidl_core_literals_proto_rawDescGZIP(), []int{18} } func (x *Binding) GetVar() string { @@ -1339,7 +1404,7 @@ type KeyValuePair struct { func (x *KeyValuePair) Reset() { *x = KeyValuePair{} if protoimpl.UnsafeEnabled { - mi := &file_flyteidl_core_literals_proto_msgTypes[18] + mi := &file_flyteidl_core_literals_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1352,7 +1417,7 @@ func (x *KeyValuePair) String() string { func (*KeyValuePair) ProtoMessage() {} func (x *KeyValuePair) ProtoReflect() protoreflect.Message { - mi := &file_flyteidl_core_literals_proto_msgTypes[18] + mi := &file_flyteidl_core_literals_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1365,7 +1430,7 @@ func (x *KeyValuePair) ProtoReflect() protoreflect.Message { // Deprecated: Use KeyValuePair.ProtoReflect.Descriptor instead. func (*KeyValuePair) Descriptor() ([]byte, []int) { - return file_flyteidl_core_literals_proto_rawDescGZIP(), []int{18} + return file_flyteidl_core_literals_proto_rawDescGZIP(), []int{19} } func (x *KeyValuePair) GetKey() string { @@ -1396,7 +1461,7 @@ type RetryStrategy struct { func (x *RetryStrategy) Reset() { *x = RetryStrategy{} if protoimpl.UnsafeEnabled { - mi := &file_flyteidl_core_literals_proto_msgTypes[19] + mi := &file_flyteidl_core_literals_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1409,7 +1474,7 @@ func (x *RetryStrategy) String() string { func (*RetryStrategy) ProtoMessage() {} func (x *RetryStrategy) ProtoReflect() protoreflect.Message { - mi := &file_flyteidl_core_literals_proto_msgTypes[19] + mi := &file_flyteidl_core_literals_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1422,7 +1487,7 @@ func (x *RetryStrategy) ProtoReflect() protoreflect.Message { // Deprecated: Use RetryStrategy.ProtoReflect.Descriptor instead. 
func (*RetryStrategy) Descriptor() ([]byte, []int) { - return file_flyteidl_core_literals_proto_rawDescGZIP(), []int{19} + return file_flyteidl_core_literals_proto_rawDescGZIP(), []int{20} } func (x *RetryStrategy) GetRetries() uint32 { @@ -1531,7 +1596,7 @@ var file_flyteidl_core_literals_proto_rawDesc = []byte{ 0x74, 0x12, 0x2c, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x55, 0x6e, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x05, 0x75, 0x6e, 0x69, 0x6f, 0x6e, 0x42, - 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xfa, 0x02, 0x0a, 0x07, 0x4c, 0x69, 0x74, + 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xaf, 0x03, 0x0a, 0x07, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x63, 0x61, 0x6c, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x63, 0x61, 0x6c, 0x61, 0x72, 0x48, 0x00, 0x52, 0x06, 0x73, @@ -1542,98 +1607,110 @@ var file_flyteidl_core_literals_proto_rawDesc = []byte{ 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x03, 0x6d, 0x61, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, - 0x70, 0x48, 0x00, 0x52, 0x03, 0x6d, 0x61, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x40, 0x0a, 0x08, - 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, - 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, - 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x10, - 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x69, - 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x73, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x1a, - 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x07, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x47, 0x0a, 0x11, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, - 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x08, 0x6c, 0x69, - 0x74, 0x65, 0x72, 0x61, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, - 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, - 0x65, 0x72, 0x61, 0x6c, 0x52, 0x08, 0x6c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x73, 0x22, 0xa6, - 0x01, 0x0a, 0x0a, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x12, 0x43, 0x0a, - 0x08, 0x6c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 
0x6c, 0x4d, 0x61, 0x70, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, - 0x61, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6c, 0x69, 0x74, 0x65, 0x72, 0x61, - 0x6c, 0x73, 0x1a, 0x53, 0x0a, 0x0d, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4f, 0x0a, 0x15, 0x42, 0x69, 0x6e, 0x64, 0x69, - 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x36, 0x0a, 0x08, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x52, 0x08, - 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x22, 0xb2, 0x01, 0x0a, 0x0e, 0x42, 0x69, 0x6e, - 0x64, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x70, 0x12, 0x47, 0x0a, 0x08, 0x62, - 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, - 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x69, - 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x70, 0x2e, 0x42, 0x69, 0x6e, - 0x64, 0x69, 0x6e, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x62, 0x69, 0x6e, 0x64, - 0x69, 0x6e, 0x67, 0x73, 0x1a, 0x57, 0x0a, 0x0d, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x30, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x70, 0x48, 0x00, 0x52, 0x03, 0x6d, 0x61, 0x70, 0x12, 0x58, 0x0a, 0x12, 0x6f, 0x66, 0x66, 0x6c, + 0x6f, 0x61, 0x64, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4f, 0x66, 0x66, 0x6c, + 0x6f, 0x61, 0x64, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, + 0x11, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x40, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, + 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4a, 0x04, + 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x4c, + 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x64, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x7a, + 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x73, + 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x0d, 0x69, 0x6e, 0x66, 0x65, + 0x72, 0x72, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x69, 0x6e, 0x66, + 0x65, 0x72, 0x72, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x22, 0x47, 0x0a, 0x11, 0x4c, 0x69, 0x74, + 0x65, 0x72, 0x61, 0x6c, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x32, + 0x0a, 0x08, 0x6c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x52, 0x08, 0x6c, 0x69, 0x74, 0x65, 0x72, 0x61, + 0x6c, 0x73, 0x22, 0xa6, 0x01, 0x0a, 0x0a, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, + 0x70, 0x12, 0x43, 0x0a, 0x08, 0x6c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x2e, 0x4c, + 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6c, 0x69, + 0x74, 0x65, 0x72, 0x61, 0x6c, 0x73, 0x1a, 0x53, 0x0a, 0x0d, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, + 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4f, 0x0a, 0x15, 0x42, + 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x08, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x61, - 0x74, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x47, 0x0a, - 0x09, 0x55, 0x6e, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, - 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x22, 0xae, 0x02, 0x0a, 0x0b, 0x42, 0x69, 0x6e, 
0x64, 0x69, - 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x63, 0x61, 0x6c, 0x61, 0x72, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, - 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x63, 0x61, 0x6c, 0x61, 0x72, 0x48, 0x00, 0x52, - 0x06, 0x73, 0x63, 0x61, 0x6c, 0x61, 0x72, 0x12, 0x46, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x66, 0x6c, - 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x69, 0x6e, 0x64, - 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x3a, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, - 0x48, 0x00, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x03, 0x6d, - 0x61, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, - 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, - 0x44, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x03, 0x6d, 0x61, 0x70, 0x12, 0x2e, - 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, - 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x55, 0x6e, - 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x75, 0x6e, 0x69, 0x6f, 0x6e, 0x42, 0x07, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x51, 0x0a, 0x07, 0x42, 0x69, 0x6e, 0x64, 0x69, - 0x6e, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x76, 0x61, 0x72, 0x12, 0x34, 0x0a, 0x07, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, - 0x61, 0x52, 0x07, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x22, 0x36, 0x0a, 0x0c, 0x4b, 0x65, - 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x50, 0x61, 0x69, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x22, 0x29, 0x0a, 0x0d, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, 0x74, 0x72, 0x61, 0x74, - 0x65, 0x67, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x42, 0xb3, 0x01, - 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x42, 0x0d, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x73, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, - 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, - 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x63, 0x6f, 0x72, 0x65, - 0xa2, 0x02, 
0x03, 0x46, 0x43, 0x58, 0xaa, 0x02, 0x0d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, - 0x6c, 0x2e, 0x43, 0x6f, 0x72, 0x65, 0xca, 0x02, 0x0d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, - 0x6c, 0x5c, 0x43, 0x6f, 0x72, 0x65, 0xe2, 0x02, 0x19, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, - 0x6c, 0x5c, 0x43, 0x6f, 0x72, 0x65, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0xea, 0x02, 0x0e, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x3a, 0x3a, 0x43, - 0x6f, 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x61, 0x52, 0x08, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x22, 0xb2, 0x01, 0x0a, + 0x0e, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x70, 0x12, + 0x47, 0x0a, 0x08, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x70, + 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, + 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x1a, 0x57, 0x0a, 0x0d, 0x42, 0x69, 0x6e, 0x64, + 0x69, 0x6e, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x30, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x69, + 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x47, 0x0a, 0x09, 0x55, 0x6e, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3a, + 0x0a, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x22, 0xae, 0x02, 0x0a, 0x0b, 0x42, + 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x63, + 0x61, 0x6c, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x63, 0x61, 0x6c, 0x61, + 0x72, 0x48, 0x00, 0x52, 0x06, 0x73, 0x63, 0x61, 0x6c, 0x61, 0x72, 0x12, 0x46, 0x0a, 0x0a, 0x63, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x12, + 0x31, 0x0a, 0x03, 0x6d, 0x61, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 
0x65, 0x2e, 0x42, 0x69, 0x6e, + 0x64, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x03, 0x6d, + 0x61, 0x70, 0x12, 0x2e, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x18, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x55, 0x6e, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x75, 0x6e, 0x69, + 0x6f, 0x6e, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x51, 0x0a, 0x07, 0x42, + 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x76, 0x61, 0x72, 0x12, 0x34, 0x0a, 0x07, 0x62, 0x69, 0x6e, 0x64, + 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, + 0x67, 0x44, 0x61, 0x74, 0x61, 0x52, 0x07, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x22, 0x36, + 0x0a, 0x0c, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x50, 0x61, 0x69, 0x72, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x29, 0x0a, 0x0d, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, + 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, + 0x73, 0x42, 0xb3, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x42, 0x0d, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, + 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x67, 0x65, 0x6e, + 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, + 0x63, 0x6f, 0x72, 0x65, 0xa2, 0x02, 0x03, 0x46, 0x43, 0x58, 0xaa, 0x02, 0x0d, 0x46, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x43, 0x6f, 0x72, 0x65, 0xca, 0x02, 0x0d, 0x46, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x43, 0x6f, 0x72, 0x65, 0xe2, 0x02, 0x19, 0x46, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x43, 0x6f, 0x72, 0x65, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0e, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x3a, 0x3a, 0x43, 0x6f, 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1648,7 +1725,7 @@ func file_flyteidl_core_literals_proto_rawDescGZIP() []byte { return file_flyteidl_core_literals_proto_rawDescData } -var file_flyteidl_core_literals_proto_msgTypes = make([]protoimpl.MessageInfo, 23) +var file_flyteidl_core_literals_proto_msgTypes = make([]protoimpl.MessageInfo, 24) var file_flyteidl_core_literals_proto_goTypes = []interface{}{ (*Primitive)(nil), // 0: flyteidl.core.Primitive (*Void)(nil), // 1: flyteidl.core.Void @@ -1661,69 +1738,72 @@ var file_flyteidl_core_literals_proto_goTypes = []interface{}{ (*StructuredDataset)(nil), // 8: flyteidl.core.StructuredDataset (*Scalar)(nil), // 9: flyteidl.core.Scalar (*Literal)(nil), // 10: flyteidl.core.Literal - (*LiteralCollection)(nil), // 11: flyteidl.core.LiteralCollection - 
(*LiteralMap)(nil), // 12: flyteidl.core.LiteralMap - (*BindingDataCollection)(nil), // 13: flyteidl.core.BindingDataCollection - (*BindingDataMap)(nil), // 14: flyteidl.core.BindingDataMap - (*UnionInfo)(nil), // 15: flyteidl.core.UnionInfo - (*BindingData)(nil), // 16: flyteidl.core.BindingData - (*Binding)(nil), // 17: flyteidl.core.Binding - (*KeyValuePair)(nil), // 18: flyteidl.core.KeyValuePair - (*RetryStrategy)(nil), // 19: flyteidl.core.RetryStrategy - nil, // 20: flyteidl.core.Literal.MetadataEntry - nil, // 21: flyteidl.core.LiteralMap.LiteralsEntry - nil, // 22: flyteidl.core.BindingDataMap.BindingsEntry - (*timestamppb.Timestamp)(nil), // 23: google.protobuf.Timestamp - (*durationpb.Duration)(nil), // 24: google.protobuf.Duration - (*BlobType)(nil), // 25: flyteidl.core.BlobType - (*SchemaType)(nil), // 26: flyteidl.core.SchemaType - (*LiteralType)(nil), // 27: flyteidl.core.LiteralType - (*StructuredDatasetType)(nil), // 28: flyteidl.core.StructuredDatasetType - (*Error)(nil), // 29: flyteidl.core.Error - (*structpb.Struct)(nil), // 30: google.protobuf.Struct - (*OutputReference)(nil), // 31: flyteidl.core.OutputReference + (*LiteralOffloadedMetadata)(nil), // 11: flyteidl.core.LiteralOffloadedMetadata + (*LiteralCollection)(nil), // 12: flyteidl.core.LiteralCollection + (*LiteralMap)(nil), // 13: flyteidl.core.LiteralMap + (*BindingDataCollection)(nil), // 14: flyteidl.core.BindingDataCollection + (*BindingDataMap)(nil), // 15: flyteidl.core.BindingDataMap + (*UnionInfo)(nil), // 16: flyteidl.core.UnionInfo + (*BindingData)(nil), // 17: flyteidl.core.BindingData + (*Binding)(nil), // 18: flyteidl.core.Binding + (*KeyValuePair)(nil), // 19: flyteidl.core.KeyValuePair + (*RetryStrategy)(nil), // 20: flyteidl.core.RetryStrategy + nil, // 21: flyteidl.core.Literal.MetadataEntry + nil, // 22: flyteidl.core.LiteralMap.LiteralsEntry + nil, // 23: flyteidl.core.BindingDataMap.BindingsEntry + (*timestamppb.Timestamp)(nil), // 24: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 25: google.protobuf.Duration + (*BlobType)(nil), // 26: flyteidl.core.BlobType + (*SchemaType)(nil), // 27: flyteidl.core.SchemaType + (*LiteralType)(nil), // 28: flyteidl.core.LiteralType + (*StructuredDatasetType)(nil), // 29: flyteidl.core.StructuredDatasetType + (*Error)(nil), // 30: flyteidl.core.Error + (*structpb.Struct)(nil), // 31: google.protobuf.Struct + (*OutputReference)(nil), // 32: flyteidl.core.OutputReference } var file_flyteidl_core_literals_proto_depIdxs = []int32{ - 23, // 0: flyteidl.core.Primitive.datetime:type_name -> google.protobuf.Timestamp - 24, // 1: flyteidl.core.Primitive.duration:type_name -> google.protobuf.Duration + 24, // 0: flyteidl.core.Primitive.datetime:type_name -> google.protobuf.Timestamp + 25, // 1: flyteidl.core.Primitive.duration:type_name -> google.protobuf.Duration 3, // 2: flyteidl.core.Blob.metadata:type_name -> flyteidl.core.BlobMetadata - 25, // 3: flyteidl.core.BlobMetadata.type:type_name -> flyteidl.core.BlobType - 26, // 4: flyteidl.core.Schema.type:type_name -> flyteidl.core.SchemaType + 26, // 3: flyteidl.core.BlobMetadata.type:type_name -> flyteidl.core.BlobType + 27, // 4: flyteidl.core.Schema.type:type_name -> flyteidl.core.SchemaType 10, // 5: flyteidl.core.Union.value:type_name -> flyteidl.core.Literal - 27, // 6: flyteidl.core.Union.type:type_name -> flyteidl.core.LiteralType - 28, // 7: flyteidl.core.StructuredDatasetMetadata.structured_dataset_type:type_name -> flyteidl.core.StructuredDatasetType + 28, // 6: 
flyteidl.core.Union.type:type_name -> flyteidl.core.LiteralType + 29, // 7: flyteidl.core.StructuredDatasetMetadata.structured_dataset_type:type_name -> flyteidl.core.StructuredDatasetType 7, // 8: flyteidl.core.StructuredDataset.metadata:type_name -> flyteidl.core.StructuredDatasetMetadata 0, // 9: flyteidl.core.Scalar.primitive:type_name -> flyteidl.core.Primitive 2, // 10: flyteidl.core.Scalar.blob:type_name -> flyteidl.core.Blob 4, // 11: flyteidl.core.Scalar.binary:type_name -> flyteidl.core.Binary 5, // 12: flyteidl.core.Scalar.schema:type_name -> flyteidl.core.Schema 1, // 13: flyteidl.core.Scalar.none_type:type_name -> flyteidl.core.Void - 29, // 14: flyteidl.core.Scalar.error:type_name -> flyteidl.core.Error - 30, // 15: flyteidl.core.Scalar.generic:type_name -> google.protobuf.Struct + 30, // 14: flyteidl.core.Scalar.error:type_name -> flyteidl.core.Error + 31, // 15: flyteidl.core.Scalar.generic:type_name -> google.protobuf.Struct 8, // 16: flyteidl.core.Scalar.structured_dataset:type_name -> flyteidl.core.StructuredDataset 6, // 17: flyteidl.core.Scalar.union:type_name -> flyteidl.core.Union 9, // 18: flyteidl.core.Literal.scalar:type_name -> flyteidl.core.Scalar - 11, // 19: flyteidl.core.Literal.collection:type_name -> flyteidl.core.LiteralCollection - 12, // 20: flyteidl.core.Literal.map:type_name -> flyteidl.core.LiteralMap - 20, // 21: flyteidl.core.Literal.metadata:type_name -> flyteidl.core.Literal.MetadataEntry - 10, // 22: flyteidl.core.LiteralCollection.literals:type_name -> flyteidl.core.Literal - 21, // 23: flyteidl.core.LiteralMap.literals:type_name -> flyteidl.core.LiteralMap.LiteralsEntry - 16, // 24: flyteidl.core.BindingDataCollection.bindings:type_name -> flyteidl.core.BindingData - 22, // 25: flyteidl.core.BindingDataMap.bindings:type_name -> flyteidl.core.BindingDataMap.BindingsEntry - 27, // 26: flyteidl.core.UnionInfo.targetType:type_name -> flyteidl.core.LiteralType - 9, // 27: flyteidl.core.BindingData.scalar:type_name -> flyteidl.core.Scalar - 13, // 28: flyteidl.core.BindingData.collection:type_name -> flyteidl.core.BindingDataCollection - 31, // 29: flyteidl.core.BindingData.promise:type_name -> flyteidl.core.OutputReference - 14, // 30: flyteidl.core.BindingData.map:type_name -> flyteidl.core.BindingDataMap - 15, // 31: flyteidl.core.BindingData.union:type_name -> flyteidl.core.UnionInfo - 16, // 32: flyteidl.core.Binding.binding:type_name -> flyteidl.core.BindingData - 10, // 33: flyteidl.core.LiteralMap.LiteralsEntry.value:type_name -> flyteidl.core.Literal - 16, // 34: flyteidl.core.BindingDataMap.BindingsEntry.value:type_name -> flyteidl.core.BindingData - 35, // [35:35] is the sub-list for method output_type - 35, // [35:35] is the sub-list for method input_type - 35, // [35:35] is the sub-list for extension type_name - 35, // [35:35] is the sub-list for extension extendee - 0, // [0:35] is the sub-list for field type_name + 12, // 19: flyteidl.core.Literal.collection:type_name -> flyteidl.core.LiteralCollection + 13, // 20: flyteidl.core.Literal.map:type_name -> flyteidl.core.LiteralMap + 11, // 21: flyteidl.core.Literal.offloaded_metadata:type_name -> flyteidl.core.LiteralOffloadedMetadata + 21, // 22: flyteidl.core.Literal.metadata:type_name -> flyteidl.core.Literal.MetadataEntry + 28, // 23: flyteidl.core.LiteralOffloadedMetadata.inferred_type:type_name -> flyteidl.core.LiteralType + 10, // 24: flyteidl.core.LiteralCollection.literals:type_name -> flyteidl.core.Literal + 22, // 25: flyteidl.core.LiteralMap.literals:type_name -> 
flyteidl.core.LiteralMap.LiteralsEntry + 17, // 26: flyteidl.core.BindingDataCollection.bindings:type_name -> flyteidl.core.BindingData + 23, // 27: flyteidl.core.BindingDataMap.bindings:type_name -> flyteidl.core.BindingDataMap.BindingsEntry + 28, // 28: flyteidl.core.UnionInfo.targetType:type_name -> flyteidl.core.LiteralType + 9, // 29: flyteidl.core.BindingData.scalar:type_name -> flyteidl.core.Scalar + 14, // 30: flyteidl.core.BindingData.collection:type_name -> flyteidl.core.BindingDataCollection + 32, // 31: flyteidl.core.BindingData.promise:type_name -> flyteidl.core.OutputReference + 15, // 32: flyteidl.core.BindingData.map:type_name -> flyteidl.core.BindingDataMap + 16, // 33: flyteidl.core.BindingData.union:type_name -> flyteidl.core.UnionInfo + 17, // 34: flyteidl.core.Binding.binding:type_name -> flyteidl.core.BindingData + 10, // 35: flyteidl.core.LiteralMap.LiteralsEntry.value:type_name -> flyteidl.core.Literal + 17, // 36: flyteidl.core.BindingDataMap.BindingsEntry.value:type_name -> flyteidl.core.BindingData + 37, // [37:37] is the sub-list for method output_type + 37, // [37:37] is the sub-list for method input_type + 37, // [37:37] is the sub-list for extension type_name + 37, // [37:37] is the sub-list for extension extendee + 0, // [0:37] is the sub-list for field type_name } func init() { file_flyteidl_core_literals_proto_init() } @@ -1866,7 +1946,7 @@ func file_flyteidl_core_literals_proto_init() { } } file_flyteidl_core_literals_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LiteralCollection); i { + switch v := v.(*LiteralOffloadedMetadata); i { case 0: return &v.state case 1: @@ -1878,7 +1958,7 @@ func file_flyteidl_core_literals_proto_init() { } } file_flyteidl_core_literals_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LiteralMap); i { + switch v := v.(*LiteralCollection); i { case 0: return &v.state case 1: @@ -1890,7 +1970,7 @@ func file_flyteidl_core_literals_proto_init() { } } file_flyteidl_core_literals_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BindingDataCollection); i { + switch v := v.(*LiteralMap); i { case 0: return &v.state case 1: @@ -1902,7 +1982,7 @@ func file_flyteidl_core_literals_proto_init() { } } file_flyteidl_core_literals_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BindingDataMap); i { + switch v := v.(*BindingDataCollection); i { case 0: return &v.state case 1: @@ -1914,7 +1994,7 @@ func file_flyteidl_core_literals_proto_init() { } } file_flyteidl_core_literals_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UnionInfo); i { + switch v := v.(*BindingDataMap); i { case 0: return &v.state case 1: @@ -1926,7 +2006,7 @@ func file_flyteidl_core_literals_proto_init() { } } file_flyteidl_core_literals_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BindingData); i { + switch v := v.(*UnionInfo); i { case 0: return &v.state case 1: @@ -1938,7 +2018,7 @@ func file_flyteidl_core_literals_proto_init() { } } file_flyteidl_core_literals_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Binding); i { + switch v := v.(*BindingData); i { case 0: return &v.state case 1: @@ -1950,7 +2030,7 @@ func file_flyteidl_core_literals_proto_init() { } } file_flyteidl_core_literals_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*KeyValuePair); i { + switch v := v.(*Binding); i { case 0: return &v.state case 1: @@ -1962,6 +2042,18 @@ func file_flyteidl_core_literals_proto_init() { } } file_flyteidl_core_literals_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValuePair); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl_core_literals_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RetryStrategy); i { case 0: return &v.state @@ -1997,8 +2089,9 @@ func file_flyteidl_core_literals_proto_init() { (*Literal_Scalar)(nil), (*Literal_Collection)(nil), (*Literal_Map)(nil), + (*Literal_OffloadedMetadata)(nil), } - file_flyteidl_core_literals_proto_msgTypes[16].OneofWrappers = []interface{}{ + file_flyteidl_core_literals_proto_msgTypes[17].OneofWrappers = []interface{}{ (*BindingData_Scalar)(nil), (*BindingData_Collection)(nil), (*BindingData_Promise)(nil), @@ -2010,7 +2103,7 @@ func file_flyteidl_core_literals_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_flyteidl_core_literals_proto_rawDesc, NumEnums: 0, - NumMessages: 23, + NumMessages: 24, NumExtensions: 0, NumServices: 0, }, diff --git a/flyteidl/gen/pb-go/gateway/flyteidl/cacheservice/cacheservice.swagger.json b/flyteidl/gen/pb-go/gateway/flyteidl/cacheservice/cacheservice.swagger.json index c000257abc..c30c350754 100644 --- a/flyteidl/gen/pb-go/gateway/flyteidl/cacheservice/cacheservice.swagger.json +++ b/flyteidl/gen/pb-go/gateway/flyteidl/cacheservice/cacheservice.swagger.json @@ -217,6 +217,10 @@ "$ref": "#/definitions/coreLiteralMap", "description": "A map of strings to literals." }, + "offloaded_metadata": { + "$ref": "#/definitions/coreLiteralOffloadedMetadata", + "description": "Offloaded literal metadata\nWhen you deserialize the offloaded metadata, it would be of Literal and its type would be defined by LiteralType stored in offloaded_metadata." + }, "hash": { "type": "string", "title": "A hash representing this literal.\nThis is used for caching purposes. For more details refer to RFC 1893\n(https://github.com/flyteorg/flyte/blob/master/rfc/system/1893-caching-of-offloaded-objects.md)" @@ -227,15 +231,6 @@ "type": "string" }, "description": "Additional metadata for literals." - }, - "uri": { - "type": "string", - "description": "If this literal is offloaded, this field will contain metadata including the offload location." - }, - "size_bytes": { - "type": "string", - "format": "uint64", - "description": "Includes information about the size of the literal." } }, "description": "A simple value. This supports any level of nesting (e.g. array of array of array of Blobs) as well as simple primitives." @@ -265,6 +260,25 @@ }, "description": "A map of literals. This is a workaround since oneofs in proto messages cannot contain a repeated field." }, + "coreLiteralOffloadedMetadata": { + "type": "object", + "properties": { + "uri": { + "type": "string", + "description": "The location of the offloaded core.Literal." + }, + "size_bytes": { + "type": "string", + "format": "uint64", + "description": "The size of the offloaded data." + }, + "inferred_type": { + "$ref": "#/definitions/coreLiteralType", + "description": "The inferred literal type of the offloaded data." + } + }, + "description": "A message that contains the metadata of the offloaded data." 
+ }, "coreLiteralType": { "type": "object", "properties": { diff --git a/flyteidl/gen/pb-go/gateway/flyteidl/datacatalog/datacatalog.swagger.json b/flyteidl/gen/pb-go/gateway/flyteidl/datacatalog/datacatalog.swagger.json index 3173437543..dbfcc5b85e 100644 --- a/flyteidl/gen/pb-go/gateway/flyteidl/datacatalog/datacatalog.swagger.json +++ b/flyteidl/gen/pb-go/gateway/flyteidl/datacatalog/datacatalog.swagger.json @@ -161,6 +161,10 @@ "$ref": "#/definitions/coreLiteralMap", "description": "A map of strings to literals." }, + "offloaded_metadata": { + "$ref": "#/definitions/coreLiteralOffloadedMetadata", + "description": "Offloaded literal metadata\nWhen you deserialize the offloaded metadata, it would be of Literal and its type would be defined by LiteralType stored in offloaded_metadata." + }, "hash": { "type": "string", "title": "A hash representing this literal.\nThis is used for caching purposes. For more details refer to RFC 1893\n(https://github.com/flyteorg/flyte/blob/master/rfc/system/1893-caching-of-offloaded-objects.md)" @@ -171,15 +175,6 @@ "type": "string" }, "description": "Additional metadata for literals." - }, - "uri": { - "type": "string", - "description": "If this literal is offloaded, this field will contain metadata including the offload location." - }, - "size_bytes": { - "type": "string", - "format": "uint64", - "description": "Includes information about the size of the literal." } }, "description": "A simple value. This supports any level of nesting (e.g. array of array of array of Blobs) as well as simple primitives." @@ -209,6 +204,25 @@ }, "description": "A map of literals. This is a workaround since oneofs in proto messages cannot contain a repeated field." }, + "coreLiteralOffloadedMetadata": { + "type": "object", + "properties": { + "uri": { + "type": "string", + "description": "The location of the offloaded core.Literal." + }, + "size_bytes": { + "type": "string", + "format": "uint64", + "description": "The size of the offloaded data." + }, + "inferred_type": { + "$ref": "#/definitions/coreLiteralType", + "description": "The inferred literal type of the offloaded data." + } + }, + "description": "A message that contains the metadata of the offloaded data." + }, "coreLiteralType": { "type": "object", "properties": { diff --git a/flyteidl/gen/pb-go/gateway/flyteidl/service/admin.swagger.json b/flyteidl/gen/pb-go/gateway/flyteidl/service/admin.swagger.json index 6ebfd70f8d..ef81380d1e 100644 --- a/flyteidl/gen/pb-go/gateway/flyteidl/service/admin.swagger.json +++ b/flyteidl/gen/pb-go/gateway/flyteidl/service/admin.swagger.json @@ -7418,6 +7418,10 @@ "$ref": "#/definitions/coreLiteralMap", "description": "A map of strings to literals." }, + "offloaded_metadata": { + "$ref": "#/definitions/coreLiteralOffloadedMetadata", + "description": "Offloaded literal metadata\nWhen you deserialize the offloaded metadata, it would be of Literal and its type would be defined by LiteralType stored in offloaded_metadata." + }, "hash": { "type": "string", "title": "A hash representing this literal.\nThis is used for caching purposes. For more details refer to RFC 1893\n(https://github.com/flyteorg/flyte/blob/master/rfc/system/1893-caching-of-offloaded-objects.md)" @@ -7428,15 +7432,6 @@ "type": "string" }, "description": "Additional metadata for literals." - }, - "uri": { - "type": "string", - "description": "If this literal is offloaded, this field will contain metadata including the offload location." 
- }, - "size_bytes": { - "type": "string", - "format": "uint64", - "description": "Includes information about the size of the literal." } }, "description": "A simple value. This supports any level of nesting (e.g. array of array of array of Blobs) as well as simple primitives." @@ -7466,6 +7461,25 @@ }, "description": "A map of literals. This is a workaround since oneofs in proto messages cannot contain a repeated field." }, + "coreLiteralOffloadedMetadata": { + "type": "object", + "properties": { + "uri": { + "type": "string", + "description": "The location of the offloaded core.Literal." + }, + "size_bytes": { + "type": "string", + "format": "uint64", + "description": "The size of the offloaded data." + }, + "inferred_type": { + "$ref": "#/definitions/coreLiteralType", + "description": "The inferred literal type of the offloaded data." + } + }, + "description": "A message that contains the metadata of the offloaded data." + }, "coreLiteralType": { "type": "object", "properties": { diff --git a/flyteidl/gen/pb-go/gateway/flyteidl/service/agent.swagger.json b/flyteidl/gen/pb-go/gateway/flyteidl/service/agent.swagger.json index ef57647773..373b9c4c3d 100644 --- a/flyteidl/gen/pb-go/gateway/flyteidl/service/agent.swagger.json +++ b/flyteidl/gen/pb-go/gateway/flyteidl/service/agent.swagger.json @@ -1255,6 +1255,10 @@ "$ref": "#/definitions/coreLiteralMap", "description": "A map of strings to literals." }, + "offloaded_metadata": { + "$ref": "#/definitions/coreLiteralOffloadedMetadata", + "description": "Offloaded literal metadata\nWhen you deserialize the offloaded metadata, it would be of Literal and its type would be defined by LiteralType stored in offloaded_metadata." + }, "hash": { "type": "string", "title": "A hash representing this literal.\nThis is used for caching purposes. For more details refer to RFC 1893\n(https://github.com/flyteorg/flyte/blob/master/rfc/system/1893-caching-of-offloaded-objects.md)" @@ -1265,15 +1269,6 @@ "type": "string" }, "description": "Additional metadata for literals." - }, - "uri": { - "type": "string", - "description": "If this literal is offloaded, this field will contain metadata including the offload location." - }, - "size_bytes": { - "type": "string", - "format": "uint64", - "description": "Includes information about the size of the literal." } }, "description": "A simple value. This supports any level of nesting (e.g. array of array of array of Blobs) as well as simple primitives." @@ -1303,6 +1298,25 @@ }, "description": "A map of literals. This is a workaround since oneofs in proto messages cannot contain a repeated field." }, + "coreLiteralOffloadedMetadata": { + "type": "object", + "properties": { + "uri": { + "type": "string", + "description": "The location of the offloaded core.Literal." + }, + "size_bytes": { + "type": "string", + "format": "uint64", + "description": "The size of the offloaded data." + }, + "inferred_type": { + "$ref": "#/definitions/coreLiteralType", + "description": "The inferred literal type of the offloaded data." + } + }, + "description": "A message that contains the metadata of the offloaded data." 
+ }, "coreLiteralType": { "type": "object", "properties": { diff --git a/flyteidl/gen/pb-go/gateway/flyteidl/service/dataproxy.swagger.json b/flyteidl/gen/pb-go/gateway/flyteidl/service/dataproxy.swagger.json index ea24c34e3e..bff6ca737a 100644 --- a/flyteidl/gen/pb-go/gateway/flyteidl/service/dataproxy.swagger.json +++ b/flyteidl/gen/pb-go/gateway/flyteidl/service/dataproxy.swagger.json @@ -277,6 +277,10 @@ "$ref": "#/definitions/coreLiteralMap", "description": "A map of strings to literals." }, + "offloaded_metadata": { + "$ref": "#/definitions/coreLiteralOffloadedMetadata", + "description": "Offloaded literal metadata\nWhen you deserialize the offloaded metadata, it would be of Literal and its type would be defined by LiteralType stored in offloaded_metadata." + }, "hash": { "type": "string", "title": "A hash representing this literal.\nThis is used for caching purposes. For more details refer to RFC 1893\n(https://github.com/flyteorg/flyte/blob/master/rfc/system/1893-caching-of-offloaded-objects.md)" @@ -287,15 +291,6 @@ "type": "string" }, "description": "Additional metadata for literals." - }, - "uri": { - "type": "string", - "description": "If this literal is offloaded, this field will contain metadata including the offload location." - }, - "size_bytes": { - "type": "string", - "format": "uint64", - "description": "Includes information about the size of the literal." } }, "description": "A simple value. This supports any level of nesting (e.g. array of array of array of Blobs) as well as simple primitives." @@ -325,6 +320,25 @@ }, "description": "A map of literals. This is a workaround since oneofs in proto messages cannot contain a repeated field." }, + "coreLiteralOffloadedMetadata": { + "type": "object", + "properties": { + "uri": { + "type": "string", + "description": "The location of the offloaded core.Literal." + }, + "size_bytes": { + "type": "string", + "format": "uint64", + "description": "The size of the offloaded data." + }, + "inferred_type": { + "$ref": "#/definitions/coreLiteralType", + "description": "The inferred literal type of the offloaded data." + } + }, + "description": "A message that contains the metadata of the offloaded data." + }, "coreLiteralType": { "type": "object", "properties": { diff --git a/flyteidl/gen/pb-go/gateway/flyteidl/service/external_plugin_service.swagger.json b/flyteidl/gen/pb-go/gateway/flyteidl/service/external_plugin_service.swagger.json index 097504b251..029c42ffd3 100644 --- a/flyteidl/gen/pb-go/gateway/flyteidl/service/external_plugin_service.swagger.json +++ b/flyteidl/gen/pb-go/gateway/flyteidl/service/external_plugin_service.swagger.json @@ -571,6 +571,10 @@ "$ref": "#/definitions/coreLiteralMap", "description": "A map of strings to literals." }, + "offloaded_metadata": { + "$ref": "#/definitions/coreLiteralOffloadedMetadata", + "description": "Offloaded literal metadata\nWhen you deserialize the offloaded metadata, it would be of Literal and its type would be defined by LiteralType stored in offloaded_metadata." + }, "hash": { "type": "string", "title": "A hash representing this literal.\nThis is used for caching purposes. For more details refer to RFC 1893\n(https://github.com/flyteorg/flyte/blob/master/rfc/system/1893-caching-of-offloaded-objects.md)" @@ -581,15 +585,6 @@ "type": "string" }, "description": "Additional metadata for literals." - }, - "uri": { - "type": "string", - "description": "If this literal is offloaded, this field will contain metadata including the offload location." 
- }, - "size_bytes": { - "type": "string", - "format": "uint64", - "description": "Includes information about the size of the literal." } }, "description": "A simple value. This supports any level of nesting (e.g. array of array of array of Blobs) as well as simple primitives." @@ -619,6 +614,25 @@ }, "description": "A map of literals. This is a workaround since oneofs in proto messages cannot contain a repeated field." }, + "coreLiteralOffloadedMetadata": { + "type": "object", + "properties": { + "uri": { + "type": "string", + "description": "The location of the offloaded core.Literal." + }, + "size_bytes": { + "type": "string", + "format": "uint64", + "description": "The size of the offloaded data." + }, + "inferred_type": { + "$ref": "#/definitions/coreLiteralType", + "description": "The inferred literal type of the offloaded data." + } + }, + "description": "A message that contains the metadata of the offloaded data." + }, "coreLiteralType": { "type": "object", "properties": { diff --git a/flyteidl/gen/pb-go/gateway/flyteidl/service/signal.swagger.json b/flyteidl/gen/pb-go/gateway/flyteidl/service/signal.swagger.json index 6d0bd1f15a..d325ed4764 100644 --- a/flyteidl/gen/pb-go/gateway/flyteidl/service/signal.swagger.json +++ b/flyteidl/gen/pb-go/gateway/flyteidl/service/signal.swagger.json @@ -353,6 +353,10 @@ "$ref": "#/definitions/coreLiteralMap", "description": "A map of strings to literals." }, + "offloaded_metadata": { + "$ref": "#/definitions/coreLiteralOffloadedMetadata", + "description": "Offloaded literal metadata\nWhen you deserialize the offloaded metadata, it would be of Literal and its type would be defined by LiteralType stored in offloaded_metadata." + }, "hash": { "type": "string", "title": "A hash representing this literal.\nThis is used for caching purposes. For more details refer to RFC 1893\n(https://github.com/flyteorg/flyte/blob/master/rfc/system/1893-caching-of-offloaded-objects.md)" @@ -363,15 +367,6 @@ "type": "string" }, "description": "Additional metadata for literals." - }, - "uri": { - "type": "string", - "description": "If this literal is offloaded, this field will contain metadata including the offload location." - }, - "size_bytes": { - "type": "string", - "format": "uint64", - "description": "Includes information about the size of the literal." } }, "description": "A simple value. This supports any level of nesting (e.g. array of array of array of Blobs) as well as simple primitives." @@ -401,6 +396,25 @@ }, "description": "A map of literals. This is a workaround since oneofs in proto messages cannot contain a repeated field." }, + "coreLiteralOffloadedMetadata": { + "type": "object", + "properties": { + "uri": { + "type": "string", + "description": "The location of the offloaded core.Literal." + }, + "size_bytes": { + "type": "string", + "format": "uint64", + "description": "The size of the offloaded data." + }, + "inferred_type": { + "$ref": "#/definitions/coreLiteralType", + "description": "The inferred literal type of the offloaded data." + } + }, + "description": "A message that contains the metadata of the offloaded data." 
+ }, "coreLiteralType": { "type": "object", "properties": { diff --git a/flyteidl/gen/pb-js/flyteidl.d.ts b/flyteidl/gen/pb-js/flyteidl.d.ts index b0f382ee88..0ff2422577 100644 --- a/flyteidl/gen/pb-js/flyteidl.d.ts +++ b/flyteidl/gen/pb-js/flyteidl.d.ts @@ -3385,17 +3385,14 @@ export namespace flyteidl { /** Literal map */ map?: (flyteidl.core.ILiteralMap|null); + /** Literal offloadedMetadata */ + offloadedMetadata?: (flyteidl.core.ILiteralOffloadedMetadata|null); + /** Literal hash */ hash?: (string|null); /** Literal metadata */ metadata?: ({ [k: string]: string }|null); - - /** Literal uri */ - uri?: (string|null); - - /** Literal sizeBytes */ - sizeBytes?: (Long|null); } /** Represents a Literal. */ @@ -3416,20 +3413,17 @@ export namespace flyteidl { /** Literal map. */ public map?: (flyteidl.core.ILiteralMap|null); + /** Literal offloadedMetadata. */ + public offloadedMetadata?: (flyteidl.core.ILiteralOffloadedMetadata|null); + /** Literal hash. */ public hash: string; /** Literal metadata. */ public metadata: { [k: string]: string }; - /** Literal uri. */ - public uri: string; - - /** Literal sizeBytes. */ - public sizeBytes: Long; - /** Literal value. */ - public value?: ("scalar"|"collection"|"map"); + public value?: ("scalar"|"collection"|"map"|"offloadedMetadata"); /** * Creates a new Literal instance using the specified properties. @@ -3464,6 +3458,70 @@ export namespace flyteidl { public static verify(message: { [k: string]: any }): (string|null); } + /** Properties of a LiteralOffloadedMetadata. */ + interface ILiteralOffloadedMetadata { + + /** LiteralOffloadedMetadata uri */ + uri?: (string|null); + + /** LiteralOffloadedMetadata sizeBytes */ + sizeBytes?: (Long|null); + + /** LiteralOffloadedMetadata inferredType */ + inferredType?: (flyteidl.core.ILiteralType|null); + } + + /** Represents a LiteralOffloadedMetadata. */ + class LiteralOffloadedMetadata implements ILiteralOffloadedMetadata { + + /** + * Constructs a new LiteralOffloadedMetadata. + * @param [properties] Properties to set + */ + constructor(properties?: flyteidl.core.ILiteralOffloadedMetadata); + + /** LiteralOffloadedMetadata uri. */ + public uri: string; + + /** LiteralOffloadedMetadata sizeBytes. */ + public sizeBytes: Long; + + /** LiteralOffloadedMetadata inferredType. */ + public inferredType?: (flyteidl.core.ILiteralType|null); + + /** + * Creates a new LiteralOffloadedMetadata instance using the specified properties. + * @param [properties] Properties to set + * @returns LiteralOffloadedMetadata instance + */ + public static create(properties?: flyteidl.core.ILiteralOffloadedMetadata): flyteidl.core.LiteralOffloadedMetadata; + + /** + * Encodes the specified LiteralOffloadedMetadata message. Does not implicitly {@link flyteidl.core.LiteralOffloadedMetadata.verify|verify} messages. + * @param message LiteralOffloadedMetadata message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: flyteidl.core.ILiteralOffloadedMetadata, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a LiteralOffloadedMetadata message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns LiteralOffloadedMetadata + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): flyteidl.core.LiteralOffloadedMetadata; + + /** + * Verifies a LiteralOffloadedMetadata message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + } + /** Properties of a LiteralCollection. */ interface ILiteralCollection { diff --git a/flyteidl/gen/pb-js/flyteidl.js b/flyteidl/gen/pb-js/flyteidl.js index 6b78cbc030..042343eecf 100644 --- a/flyteidl/gen/pb-js/flyteidl.js +++ b/flyteidl/gen/pb-js/flyteidl.js @@ -8103,10 +8103,9 @@ * @property {flyteidl.core.IScalar|null} [scalar] Literal scalar * @property {flyteidl.core.ILiteralCollection|null} [collection] Literal collection * @property {flyteidl.core.ILiteralMap|null} [map] Literal map + * @property {flyteidl.core.ILiteralOffloadedMetadata|null} [offloadedMetadata] Literal offloadedMetadata * @property {string|null} [hash] Literal hash * @property {Object.|null} [metadata] Literal metadata - * @property {string|null} [uri] Literal uri - * @property {Long|null} [sizeBytes] Literal sizeBytes */ /** @@ -8149,6 +8148,14 @@ */ Literal.prototype.map = null; + /** + * Literal offloadedMetadata. + * @member {flyteidl.core.ILiteralOffloadedMetadata|null|undefined} offloadedMetadata + * @memberof flyteidl.core.Literal + * @instance + */ + Literal.prototype.offloadedMetadata = null; + /** * Literal hash. * @member {string} hash @@ -8165,33 +8172,17 @@ */ Literal.prototype.metadata = $util.emptyObject; - /** - * Literal uri. - * @member {string} uri - * @memberof flyteidl.core.Literal - * @instance - */ - Literal.prototype.uri = ""; - - /** - * Literal sizeBytes. - * @member {Long} sizeBytes - * @memberof flyteidl.core.Literal - * @instance - */ - Literal.prototype.sizeBytes = $util.Long ? $util.Long.fromBits(0,0,true) : 0; - // OneOf field names bound to virtual getters and setters var $oneOfFields; /** * Literal value. 
- * @member {"scalar"|"collection"|"map"|undefined} value + * @member {"scalar"|"collection"|"map"|"offloadedMetadata"|undefined} value * @memberof flyteidl.core.Literal * @instance */ Object.defineProperty(Literal.prototype, "value", { - get: $util.oneOfGetter($oneOfFields = ["scalar", "collection", "map"]), + get: $util.oneOfGetter($oneOfFields = ["scalar", "collection", "map", "offloadedMetadata"]), set: $util.oneOfSetter($oneOfFields) }); @@ -8230,10 +8221,8 @@ if (message.metadata != null && message.hasOwnProperty("metadata")) for (var keys = Object.keys(message.metadata), i = 0; i < keys.length; ++i) writer.uint32(/* id 5, wireType 2 =*/42).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 2 =*/18).string(message.metadata[keys[i]]).ldelim(); - if (message.uri != null && message.hasOwnProperty("uri")) - writer.uint32(/* id 6, wireType 2 =*/50).string(message.uri); - if (message.sizeBytes != null && message.hasOwnProperty("sizeBytes")) - writer.uint32(/* id 7, wireType 0 =*/56).uint64(message.sizeBytes); + if (message.offloadedMetadata != null && message.hasOwnProperty("offloadedMetadata")) + $root.flyteidl.core.LiteralOffloadedMetadata.encode(message.offloadedMetadata, writer.uint32(/* id 8, wireType 2 =*/66).fork()).ldelim(); return writer; }; @@ -8264,6 +8253,9 @@ case 3: message.map = $root.flyteidl.core.LiteralMap.decode(reader, reader.uint32()); break; + case 8: + message.offloadedMetadata = $root.flyteidl.core.LiteralOffloadedMetadata.decode(reader, reader.uint32()); + break; case 4: message.hash = reader.string(); break; @@ -8275,12 +8267,6 @@ reader.pos++; message.metadata[key] = reader.string(); break; - case 6: - message.uri = reader.string(); - break; - case 7: - message.sizeBytes = reader.uint64(); - break; default: reader.skipType(tag & 7); break; @@ -8329,6 +8315,16 @@ return "map." + error; } } + if (message.offloadedMetadata != null && message.hasOwnProperty("offloadedMetadata")) { + if (properties.value === 1) + return "value: multiple values"; + properties.value = 1; + { + var error = $root.flyteidl.core.LiteralOffloadedMetadata.verify(message.offloadedMetadata); + if (error) + return "offloadedMetadata." + error; + } + } if (message.hash != null && message.hasOwnProperty("hash")) if (!$util.isString(message.hash)) return "hash: string expected"; @@ -8340,16 +8336,156 @@ if (!$util.isString(message.metadata[key[i]])) return "metadata: string{k:string} expected"; } + return null; + }; + + return Literal; + })(); + + core.LiteralOffloadedMetadata = (function() { + + /** + * Properties of a LiteralOffloadedMetadata. + * @memberof flyteidl.core + * @interface ILiteralOffloadedMetadata + * @property {string|null} [uri] LiteralOffloadedMetadata uri + * @property {Long|null} [sizeBytes] LiteralOffloadedMetadata sizeBytes + * @property {flyteidl.core.ILiteralType|null} [inferredType] LiteralOffloadedMetadata inferredType + */ + + /** + * Constructs a new LiteralOffloadedMetadata. + * @memberof flyteidl.core + * @classdesc Represents a LiteralOffloadedMetadata. + * @implements ILiteralOffloadedMetadata + * @constructor + * @param {flyteidl.core.ILiteralOffloadedMetadata=} [properties] Properties to set + */ + function LiteralOffloadedMetadata(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * LiteralOffloadedMetadata uri. 
+ * @member {string} uri + * @memberof flyteidl.core.LiteralOffloadedMetadata + * @instance + */ + LiteralOffloadedMetadata.prototype.uri = ""; + + /** + * LiteralOffloadedMetadata sizeBytes. + * @member {Long} sizeBytes + * @memberof flyteidl.core.LiteralOffloadedMetadata + * @instance + */ + LiteralOffloadedMetadata.prototype.sizeBytes = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + + /** + * LiteralOffloadedMetadata inferredType. + * @member {flyteidl.core.ILiteralType|null|undefined} inferredType + * @memberof flyteidl.core.LiteralOffloadedMetadata + * @instance + */ + LiteralOffloadedMetadata.prototype.inferredType = null; + + /** + * Creates a new LiteralOffloadedMetadata instance using the specified properties. + * @function create + * @memberof flyteidl.core.LiteralOffloadedMetadata + * @static + * @param {flyteidl.core.ILiteralOffloadedMetadata=} [properties] Properties to set + * @returns {flyteidl.core.LiteralOffloadedMetadata} LiteralOffloadedMetadata instance + */ + LiteralOffloadedMetadata.create = function create(properties) { + return new LiteralOffloadedMetadata(properties); + }; + + /** + * Encodes the specified LiteralOffloadedMetadata message. Does not implicitly {@link flyteidl.core.LiteralOffloadedMetadata.verify|verify} messages. + * @function encode + * @memberof flyteidl.core.LiteralOffloadedMetadata + * @static + * @param {flyteidl.core.ILiteralOffloadedMetadata} message LiteralOffloadedMetadata message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + LiteralOffloadedMetadata.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.uri != null && message.hasOwnProperty("uri")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.uri); + if (message.sizeBytes != null && message.hasOwnProperty("sizeBytes")) + writer.uint32(/* id 2, wireType 0 =*/16).uint64(message.sizeBytes); + if (message.inferredType != null && message.hasOwnProperty("inferredType")) + $root.flyteidl.core.LiteralType.encode(message.inferredType, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + return writer; + }; + + /** + * Decodes a LiteralOffloadedMetadata message from the specified reader or buffer. + * @function decode + * @memberof flyteidl.core.LiteralOffloadedMetadata + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {flyteidl.core.LiteralOffloadedMetadata} LiteralOffloadedMetadata + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + LiteralOffloadedMetadata.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.flyteidl.core.LiteralOffloadedMetadata(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.uri = reader.string(); + break; + case 2: + message.sizeBytes = reader.uint64(); + break; + case 3: + message.inferredType = $root.flyteidl.core.LiteralType.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Verifies a LiteralOffloadedMetadata message. 
+ * @function verify + * @memberof flyteidl.core.LiteralOffloadedMetadata + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + LiteralOffloadedMetadata.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; if (message.uri != null && message.hasOwnProperty("uri")) if (!$util.isString(message.uri)) return "uri: string expected"; if (message.sizeBytes != null && message.hasOwnProperty("sizeBytes")) if (!$util.isInteger(message.sizeBytes) && !(message.sizeBytes && $util.isInteger(message.sizeBytes.low) && $util.isInteger(message.sizeBytes.high))) return "sizeBytes: integer|Long expected"; + if (message.inferredType != null && message.hasOwnProperty("inferredType")) { + var error = $root.flyteidl.core.LiteralType.verify(message.inferredType); + if (error) + return "inferredType." + error; + } return null; }; - return Literal; + return LiteralOffloadedMetadata; })(); core.LiteralCollection = (function() { diff --git a/flyteidl/gen/pb_python/flyteidl/core/literals_pb2.py b/flyteidl/gen/pb_python/flyteidl/core/literals_pb2.py index dc2f436e8e..9b0a9f9ed8 100644 --- a/flyteidl/gen/pb_python/flyteidl/core/literals_pb2.py +++ b/flyteidl/gen/pb_python/flyteidl/core/literals_pb2.py @@ -17,7 +17,7 @@ from flyteidl.core import types_pb2 as flyteidl_dot_core_dot_types__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1c\x66lyteidl/core/literals.proto\x12\rflyteidl.core\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x19\x66lyteidl/core/types.proto\"\x87\x02\n\tPrimitive\x12\x1a\n\x07integer\x18\x01 \x01(\x03H\x00R\x07integer\x12!\n\x0b\x66loat_value\x18\x02 \x01(\x01H\x00R\nfloatValue\x12#\n\x0cstring_value\x18\x03 \x01(\tH\x00R\x0bstringValue\x12\x1a\n\x07\x62oolean\x18\x04 \x01(\x08H\x00R\x07\x62oolean\x12\x38\n\x08\x64\x61tetime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00R\x08\x64\x61tetime\x12\x37\n\x08\x64uration\x18\x06 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00R\x08\x64urationB\x07\n\x05value\"\x06\n\x04Void\"Q\n\x04\x42lob\x12\x37\n\x08metadata\x18\x01 \x01(\x0b\x32\x1b.flyteidl.core.BlobMetadataR\x08metadata\x12\x10\n\x03uri\x18\x03 \x01(\tR\x03uri\";\n\x0c\x42lobMetadata\x12+\n\x04type\x18\x01 \x01(\x0b\x32\x17.flyteidl.core.BlobTypeR\x04type\"0\n\x06\x42inary\x12\x14\n\x05value\x18\x01 \x01(\x0cR\x05value\x12\x10\n\x03tag\x18\x02 \x01(\tR\x03tag\"I\n\x06Schema\x12\x10\n\x03uri\x18\x01 \x01(\tR\x03uri\x12-\n\x04type\x18\x03 \x01(\x0b\x32\x19.flyteidl.core.SchemaTypeR\x04type\"e\n\x05Union\x12,\n\x05value\x18\x01 \x01(\x0b\x32\x16.flyteidl.core.LiteralR\x05value\x12.\n\x04type\x18\x02 \x01(\x0b\x32\x1a.flyteidl.core.LiteralTypeR\x04type\"y\n\x19StructuredDatasetMetadata\x12\\\n\x17structured_dataset_type\x18\x01 \x01(\x0b\x32$.flyteidl.core.StructuredDatasetTypeR\x15structuredDatasetType\"k\n\x11StructuredDataset\x12\x10\n\x03uri\x18\x01 \x01(\tR\x03uri\x12\x44\n\x08metadata\x18\x02 \x01(\x0b\x32(.flyteidl.core.StructuredDatasetMetadataR\x08metadata\"\xf0\x03\n\x06Scalar\x12\x38\n\tprimitive\x18\x01 \x01(\x0b\x32\x18.flyteidl.core.PrimitiveH\x00R\tprimitive\x12)\n\x04\x62lob\x18\x02 \x01(\x0b\x32\x13.flyteidl.core.BlobH\x00R\x04\x62lob\x12/\n\x06\x62inary\x18\x03 \x01(\x0b\x32\x15.flyteidl.core.BinaryH\x00R\x06\x62inary\x12/\n\x06schema\x18\x04 \x01(\x0b\x32\x15.flyteidl.core.SchemaH\x00R\x06schema\x12\x32\n\tnone_type\x18\x05 
\x01(\x0b\x32\x13.flyteidl.core.VoidH\x00R\x08noneType\x12,\n\x05\x65rror\x18\x06 \x01(\x0b\x32\x14.flyteidl.core.ErrorH\x00R\x05\x65rror\x12\x33\n\x07generic\x18\x07 \x01(\x0b\x32\x17.google.protobuf.StructH\x00R\x07generic\x12Q\n\x12structured_dataset\x18\x08 \x01(\x0b\x32 .flyteidl.core.StructuredDatasetH\x00R\x11structuredDataset\x12,\n\x05union\x18\t \x01(\x0b\x32\x14.flyteidl.core.UnionH\x00R\x05unionB\x07\n\x05value\"\xfa\x02\n\x07Literal\x12/\n\x06scalar\x18\x01 \x01(\x0b\x32\x15.flyteidl.core.ScalarH\x00R\x06scalar\x12\x42\n\ncollection\x18\x02 \x01(\x0b\x32 .flyteidl.core.LiteralCollectionH\x00R\ncollection\x12-\n\x03map\x18\x03 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapH\x00R\x03map\x12\x12\n\x04hash\x18\x04 \x01(\tR\x04hash\x12@\n\x08metadata\x18\x05 \x03(\x0b\x32$.flyteidl.core.Literal.MetadataEntryR\x08metadata\x12\x10\n\x03uri\x18\x06 \x01(\tR\x03uri\x12\x1d\n\nsize_bytes\x18\x07 \x01(\x04R\tsizeBytes\x1a;\n\rMetadataEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\x42\x07\n\x05value\"G\n\x11LiteralCollection\x12\x32\n\x08literals\x18\x01 \x03(\x0b\x32\x16.flyteidl.core.LiteralR\x08literals\"\xa6\x01\n\nLiteralMap\x12\x43\n\x08literals\x18\x01 \x03(\x0b\x32\'.flyteidl.core.LiteralMap.LiteralsEntryR\x08literals\x1aS\n\rLiteralsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12,\n\x05value\x18\x02 \x01(\x0b\x32\x16.flyteidl.core.LiteralR\x05value:\x02\x38\x01\"O\n\x15\x42indingDataCollection\x12\x36\n\x08\x62indings\x18\x01 \x03(\x0b\x32\x1a.flyteidl.core.BindingDataR\x08\x62indings\"\xb2\x01\n\x0e\x42indingDataMap\x12G\n\x08\x62indings\x18\x01 \x03(\x0b\x32+.flyteidl.core.BindingDataMap.BindingsEntryR\x08\x62indings\x1aW\n\rBindingsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32\x1a.flyteidl.core.BindingDataR\x05value:\x02\x38\x01\"G\n\tUnionInfo\x12:\n\ntargetType\x18\x01 \x01(\x0b\x32\x1a.flyteidl.core.LiteralTypeR\ntargetType\"\xae\x02\n\x0b\x42indingData\x12/\n\x06scalar\x18\x01 \x01(\x0b\x32\x15.flyteidl.core.ScalarH\x00R\x06scalar\x12\x46\n\ncollection\x18\x02 \x01(\x0b\x32$.flyteidl.core.BindingDataCollectionH\x00R\ncollection\x12:\n\x07promise\x18\x03 \x01(\x0b\x32\x1e.flyteidl.core.OutputReferenceH\x00R\x07promise\x12\x31\n\x03map\x18\x04 \x01(\x0b\x32\x1d.flyteidl.core.BindingDataMapH\x00R\x03map\x12.\n\x05union\x18\x05 \x01(\x0b\x32\x18.flyteidl.core.UnionInfoR\x05unionB\x07\n\x05value\"Q\n\x07\x42inding\x12\x10\n\x03var\x18\x01 \x01(\tR\x03var\x12\x34\n\x07\x62inding\x18\x02 \x01(\x0b\x32\x1a.flyteidl.core.BindingDataR\x07\x62inding\"6\n\x0cKeyValuePair\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value\")\n\rRetryStrategy\x12\x18\n\x07retries\x18\x05 \x01(\rR\x07retriesB\xb3\x01\n\x11\x63om.flyteidl.coreB\rLiteralsProtoP\x01Z:github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core\xa2\x02\x03\x46\x43X\xaa\x02\rFlyteidl.Core\xca\x02\rFlyteidl\\Core\xe2\x02\x19\x46lyteidl\\Core\\GPBMetadata\xea\x02\x0e\x46lyteidl::Coreb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1c\x66lyteidl/core/literals.proto\x12\rflyteidl.core\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x19\x66lyteidl/core/types.proto\"\x87\x02\n\tPrimitive\x12\x1a\n\x07integer\x18\x01 \x01(\x03H\x00R\x07integer\x12!\n\x0b\x66loat_value\x18\x02 \x01(\x01H\x00R\nfloatValue\x12#\n\x0cstring_value\x18\x03 \x01(\tH\x00R\x0bstringValue\x12\x1a\n\x07\x62oolean\x18\x04 
\x01(\x08H\x00R\x07\x62oolean\x12\x38\n\x08\x64\x61tetime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00R\x08\x64\x61tetime\x12\x37\n\x08\x64uration\x18\x06 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00R\x08\x64urationB\x07\n\x05value\"\x06\n\x04Void\"Q\n\x04\x42lob\x12\x37\n\x08metadata\x18\x01 \x01(\x0b\x32\x1b.flyteidl.core.BlobMetadataR\x08metadata\x12\x10\n\x03uri\x18\x03 \x01(\tR\x03uri\";\n\x0c\x42lobMetadata\x12+\n\x04type\x18\x01 \x01(\x0b\x32\x17.flyteidl.core.BlobTypeR\x04type\"0\n\x06\x42inary\x12\x14\n\x05value\x18\x01 \x01(\x0cR\x05value\x12\x10\n\x03tag\x18\x02 \x01(\tR\x03tag\"I\n\x06Schema\x12\x10\n\x03uri\x18\x01 \x01(\tR\x03uri\x12-\n\x04type\x18\x03 \x01(\x0b\x32\x19.flyteidl.core.SchemaTypeR\x04type\"e\n\x05Union\x12,\n\x05value\x18\x01 \x01(\x0b\x32\x16.flyteidl.core.LiteralR\x05value\x12.\n\x04type\x18\x02 \x01(\x0b\x32\x1a.flyteidl.core.LiteralTypeR\x04type\"y\n\x19StructuredDatasetMetadata\x12\\\n\x17structured_dataset_type\x18\x01 \x01(\x0b\x32$.flyteidl.core.StructuredDatasetTypeR\x15structuredDatasetType\"k\n\x11StructuredDataset\x12\x10\n\x03uri\x18\x01 \x01(\tR\x03uri\x12\x44\n\x08metadata\x18\x02 \x01(\x0b\x32(.flyteidl.core.StructuredDatasetMetadataR\x08metadata\"\xf0\x03\n\x06Scalar\x12\x38\n\tprimitive\x18\x01 \x01(\x0b\x32\x18.flyteidl.core.PrimitiveH\x00R\tprimitive\x12)\n\x04\x62lob\x18\x02 \x01(\x0b\x32\x13.flyteidl.core.BlobH\x00R\x04\x62lob\x12/\n\x06\x62inary\x18\x03 \x01(\x0b\x32\x15.flyteidl.core.BinaryH\x00R\x06\x62inary\x12/\n\x06schema\x18\x04 \x01(\x0b\x32\x15.flyteidl.core.SchemaH\x00R\x06schema\x12\x32\n\tnone_type\x18\x05 \x01(\x0b\x32\x13.flyteidl.core.VoidH\x00R\x08noneType\x12,\n\x05\x65rror\x18\x06 \x01(\x0b\x32\x14.flyteidl.core.ErrorH\x00R\x05\x65rror\x12\x33\n\x07generic\x18\x07 \x01(\x0b\x32\x17.google.protobuf.StructH\x00R\x07generic\x12Q\n\x12structured_dataset\x18\x08 \x01(\x0b\x32 .flyteidl.core.StructuredDatasetH\x00R\x11structuredDataset\x12,\n\x05union\x18\t \x01(\x0b\x32\x14.flyteidl.core.UnionH\x00R\x05unionB\x07\n\x05value\"\xaf\x03\n\x07Literal\x12/\n\x06scalar\x18\x01 \x01(\x0b\x32\x15.flyteidl.core.ScalarH\x00R\x06scalar\x12\x42\n\ncollection\x18\x02 \x01(\x0b\x32 .flyteidl.core.LiteralCollectionH\x00R\ncollection\x12-\n\x03map\x18\x03 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapH\x00R\x03map\x12X\n\x12offloaded_metadata\x18\x08 \x01(\x0b\x32\'.flyteidl.core.LiteralOffloadedMetadataH\x00R\x11offloadedMetadata\x12\x12\n\x04hash\x18\x04 \x01(\tR\x04hash\x12@\n\x08metadata\x18\x05 \x03(\x0b\x32$.flyteidl.core.Literal.MetadataEntryR\x08metadata\x1a;\n\rMetadataEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\x42\x07\n\x05valueJ\x04\x08\x06\x10\x07J\x04\x08\x07\x10\x08\"\x8c\x01\n\x18LiteralOffloadedMetadata\x12\x10\n\x03uri\x18\x01 \x01(\tR\x03uri\x12\x1d\n\nsize_bytes\x18\x02 \x01(\x04R\tsizeBytes\x12?\n\rinferred_type\x18\x03 \x01(\x0b\x32\x1a.flyteidl.core.LiteralTypeR\x0cinferredType\"G\n\x11LiteralCollection\x12\x32\n\x08literals\x18\x01 \x03(\x0b\x32\x16.flyteidl.core.LiteralR\x08literals\"\xa6\x01\n\nLiteralMap\x12\x43\n\x08literals\x18\x01 \x03(\x0b\x32\'.flyteidl.core.LiteralMap.LiteralsEntryR\x08literals\x1aS\n\rLiteralsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12,\n\x05value\x18\x02 \x01(\x0b\x32\x16.flyteidl.core.LiteralR\x05value:\x02\x38\x01\"O\n\x15\x42indingDataCollection\x12\x36\n\x08\x62indings\x18\x01 \x03(\x0b\x32\x1a.flyteidl.core.BindingDataR\x08\x62indings\"\xb2\x01\n\x0e\x42indingDataMap\x12G\n\x08\x62indings\x18\x01 
\x03(\x0b\x32+.flyteidl.core.BindingDataMap.BindingsEntryR\x08\x62indings\x1aW\n\rBindingsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32\x1a.flyteidl.core.BindingDataR\x05value:\x02\x38\x01\"G\n\tUnionInfo\x12:\n\ntargetType\x18\x01 \x01(\x0b\x32\x1a.flyteidl.core.LiteralTypeR\ntargetType\"\xae\x02\n\x0b\x42indingData\x12/\n\x06scalar\x18\x01 \x01(\x0b\x32\x15.flyteidl.core.ScalarH\x00R\x06scalar\x12\x46\n\ncollection\x18\x02 \x01(\x0b\x32$.flyteidl.core.BindingDataCollectionH\x00R\ncollection\x12:\n\x07promise\x18\x03 \x01(\x0b\x32\x1e.flyteidl.core.OutputReferenceH\x00R\x07promise\x12\x31\n\x03map\x18\x04 \x01(\x0b\x32\x1d.flyteidl.core.BindingDataMapH\x00R\x03map\x12.\n\x05union\x18\x05 \x01(\x0b\x32\x18.flyteidl.core.UnionInfoR\x05unionB\x07\n\x05value\"Q\n\x07\x42inding\x12\x10\n\x03var\x18\x01 \x01(\tR\x03var\x12\x34\n\x07\x62inding\x18\x02 \x01(\x0b\x32\x1a.flyteidl.core.BindingDataR\x07\x62inding\"6\n\x0cKeyValuePair\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value\")\n\rRetryStrategy\x12\x18\n\x07retries\x18\x05 \x01(\rR\x07retriesB\xb3\x01\n\x11\x63om.flyteidl.coreB\rLiteralsProtoP\x01Z:github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core\xa2\x02\x03\x46\x43X\xaa\x02\rFlyteidl.Core\xca\x02\rFlyteidl\\Core\xe2\x02\x19\x46lyteidl\\Core\\GPBMetadata\xea\x02\x0e\x46lyteidl::Coreb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -53,29 +53,31 @@ _globals['_SCALAR']._serialized_start=1048 _globals['_SCALAR']._serialized_end=1544 _globals['_LITERAL']._serialized_start=1547 - _globals['_LITERAL']._serialized_end=1925 - _globals['_LITERAL_METADATAENTRY']._serialized_start=1857 - _globals['_LITERAL_METADATAENTRY']._serialized_end=1916 - _globals['_LITERALCOLLECTION']._serialized_start=1927 - _globals['_LITERALCOLLECTION']._serialized_end=1998 - _globals['_LITERALMAP']._serialized_start=2001 - _globals['_LITERALMAP']._serialized_end=2167 - _globals['_LITERALMAP_LITERALSENTRY']._serialized_start=2084 - _globals['_LITERALMAP_LITERALSENTRY']._serialized_end=2167 - _globals['_BINDINGDATACOLLECTION']._serialized_start=2169 - _globals['_BINDINGDATACOLLECTION']._serialized_end=2248 - _globals['_BINDINGDATAMAP']._serialized_start=2251 - _globals['_BINDINGDATAMAP']._serialized_end=2429 - _globals['_BINDINGDATAMAP_BINDINGSENTRY']._serialized_start=2342 - _globals['_BINDINGDATAMAP_BINDINGSENTRY']._serialized_end=2429 - _globals['_UNIONINFO']._serialized_start=2431 - _globals['_UNIONINFO']._serialized_end=2502 - _globals['_BINDINGDATA']._serialized_start=2505 - _globals['_BINDINGDATA']._serialized_end=2807 - _globals['_BINDING']._serialized_start=2809 - _globals['_BINDING']._serialized_end=2890 - _globals['_KEYVALUEPAIR']._serialized_start=2892 - _globals['_KEYVALUEPAIR']._serialized_end=2946 - _globals['_RETRYSTRATEGY']._serialized_start=2948 - _globals['_RETRYSTRATEGY']._serialized_end=2989 + _globals['_LITERAL']._serialized_end=1978 + _globals['_LITERAL_METADATAENTRY']._serialized_start=1898 + _globals['_LITERAL_METADATAENTRY']._serialized_end=1957 + _globals['_LITERALOFFLOADEDMETADATA']._serialized_start=1981 + _globals['_LITERALOFFLOADEDMETADATA']._serialized_end=2121 + _globals['_LITERALCOLLECTION']._serialized_start=2123 + _globals['_LITERALCOLLECTION']._serialized_end=2194 + _globals['_LITERALMAP']._serialized_start=2197 + _globals['_LITERALMAP']._serialized_end=2363 + _globals['_LITERALMAP_LITERALSENTRY']._serialized_start=2280 + 
_globals['_LITERALMAP_LITERALSENTRY']._serialized_end=2363 + _globals['_BINDINGDATACOLLECTION']._serialized_start=2365 + _globals['_BINDINGDATACOLLECTION']._serialized_end=2444 + _globals['_BINDINGDATAMAP']._serialized_start=2447 + _globals['_BINDINGDATAMAP']._serialized_end=2625 + _globals['_BINDINGDATAMAP_BINDINGSENTRY']._serialized_start=2538 + _globals['_BINDINGDATAMAP_BINDINGSENTRY']._serialized_end=2625 + _globals['_UNIONINFO']._serialized_start=2627 + _globals['_UNIONINFO']._serialized_end=2698 + _globals['_BINDINGDATA']._serialized_start=2701 + _globals['_BINDINGDATA']._serialized_end=3003 + _globals['_BINDING']._serialized_start=3005 + _globals['_BINDING']._serialized_end=3086 + _globals['_KEYVALUEPAIR']._serialized_start=3088 + _globals['_KEYVALUEPAIR']._serialized_end=3142 + _globals['_RETRYSTRATEGY']._serialized_start=3144 + _globals['_RETRYSTRATEGY']._serialized_end=3185 # @@protoc_insertion_point(module_scope) diff --git a/flyteidl/gen/pb_python/flyteidl/core/literals_pb2.pyi b/flyteidl/gen/pb_python/flyteidl/core/literals_pb2.pyi index 0dc2af20da..e2337f8efa 100644 --- a/flyteidl/gen/pb_python/flyteidl/core/literals_pb2.pyi +++ b/flyteidl/gen/pb_python/flyteidl/core/literals_pb2.pyi @@ -104,7 +104,7 @@ class Scalar(_message.Message): def __init__(self, primitive: _Optional[_Union[Primitive, _Mapping]] = ..., blob: _Optional[_Union[Blob, _Mapping]] = ..., binary: _Optional[_Union[Binary, _Mapping]] = ..., schema: _Optional[_Union[Schema, _Mapping]] = ..., none_type: _Optional[_Union[Void, _Mapping]] = ..., error: _Optional[_Union[_types_pb2.Error, _Mapping]] = ..., generic: _Optional[_Union[_struct_pb2.Struct, _Mapping]] = ..., structured_dataset: _Optional[_Union[StructuredDataset, _Mapping]] = ..., union: _Optional[_Union[Union, _Mapping]] = ...) -> None: ... class Literal(_message.Message): - __slots__ = ["scalar", "collection", "map", "hash", "metadata", "uri", "size_bytes"] + __slots__ = ["scalar", "collection", "map", "offloaded_metadata", "hash", "metadata"] class MetadataEntry(_message.Message): __slots__ = ["key", "value"] KEY_FIELD_NUMBER: _ClassVar[int] @@ -115,18 +115,26 @@ class Literal(_message.Message): SCALAR_FIELD_NUMBER: _ClassVar[int] COLLECTION_FIELD_NUMBER: _ClassVar[int] MAP_FIELD_NUMBER: _ClassVar[int] + OFFLOADED_METADATA_FIELD_NUMBER: _ClassVar[int] HASH_FIELD_NUMBER: _ClassVar[int] METADATA_FIELD_NUMBER: _ClassVar[int] - URI_FIELD_NUMBER: _ClassVar[int] - SIZE_BYTES_FIELD_NUMBER: _ClassVar[int] scalar: Scalar collection: LiteralCollection map: LiteralMap + offloaded_metadata: LiteralOffloadedMetadata hash: str metadata: _containers.ScalarMap[str, str] + def __init__(self, scalar: _Optional[_Union[Scalar, _Mapping]] = ..., collection: _Optional[_Union[LiteralCollection, _Mapping]] = ..., map: _Optional[_Union[LiteralMap, _Mapping]] = ..., offloaded_metadata: _Optional[_Union[LiteralOffloadedMetadata, _Mapping]] = ..., hash: _Optional[str] = ..., metadata: _Optional[_Mapping[str, str]] = ...) -> None: ... 
+ +class LiteralOffloadedMetadata(_message.Message): + __slots__ = ["uri", "size_bytes", "inferred_type"] + URI_FIELD_NUMBER: _ClassVar[int] + SIZE_BYTES_FIELD_NUMBER: _ClassVar[int] + INFERRED_TYPE_FIELD_NUMBER: _ClassVar[int] uri: str size_bytes: int - def __init__(self, scalar: _Optional[_Union[Scalar, _Mapping]] = ..., collection: _Optional[_Union[LiteralCollection, _Mapping]] = ..., map: _Optional[_Union[LiteralMap, _Mapping]] = ..., hash: _Optional[str] = ..., metadata: _Optional[_Mapping[str, str]] = ..., uri: _Optional[str] = ..., size_bytes: _Optional[int] = ...) -> None: ... + inferred_type: _types_pb2.LiteralType + def __init__(self, uri: _Optional[str] = ..., size_bytes: _Optional[int] = ..., inferred_type: _Optional[_Union[_types_pb2.LiteralType, _Mapping]] = ...) -> None: ... class LiteralCollection(_message.Message): __slots__ = ["literals"] diff --git a/flyteidl/gen/pb_rust/flyteidl.core.rs b/flyteidl/gen/pb_rust/flyteidl.core.rs index f2b73c9b11..441609be89 100644 --- a/flyteidl/gen/pb_rust/flyteidl.core.rs +++ b/flyteidl/gen/pb_rust/flyteidl.core.rs @@ -489,13 +489,7 @@ pub struct Literal { /// Additional metadata for literals. #[prost(map="string, string", tag="5")] pub metadata: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, - /// If this literal is offloaded, this field will contain metadata including the offload location. - #[prost(string, tag="6")] - pub uri: ::prost::alloc::string::String, - /// Includes information about the size of the literal. - #[prost(uint64, tag="7")] - pub size_bytes: u64, - #[prost(oneof="literal::Value", tags="1, 2, 3")] + #[prost(oneof="literal::Value", tags="1, 2, 3, 8")] pub value: ::core::option::Option, } /// Nested message and enum types in `Literal`. @@ -512,8 +506,26 @@ pub mod literal { /// A map of strings to literals. #[prost(message, tag="3")] Map(super::LiteralMap), + /// Offloaded literal metadata + /// When you deserialize the offloaded metadata, it would be of Literal and its type would be defined by LiteralType stored in offloaded_metadata. + #[prost(message, tag="8")] + OffloadedMetadata(super::LiteralOffloadedMetadata), } } +/// A message that contains the metadata of the offloaded data. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct LiteralOffloadedMetadata { + /// The location of the offloaded core.Literal. + #[prost(string, tag="1")] + pub uri: ::prost::alloc::string::String, + /// The size of the offloaded data. + #[prost(uint64, tag="2")] + pub size_bytes: u64, + /// The inferred literal type of the offloaded data. + #[prost(message, optional, tag="3")] + pub inferred_type: ::core::option::Option, +} /// A collection of literals. This is a workaround since oneofs in proto messages cannot contain a repeated field. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] diff --git a/flyteidl/protos/flyteidl/core/literals.proto b/flyteidl/protos/flyteidl/core/literals.proto index 00b03f9456..1eb004482c 100644 --- a/flyteidl/protos/flyteidl/core/literals.proto +++ b/flyteidl/protos/flyteidl/core/literals.proto @@ -93,6 +93,7 @@ message Scalar { // A simple value. This supports any level of nesting (e.g. array of array of array of Blobs) as well as simple primitives. message Literal { + reserved 6, 7; oneof value { // A simple value. Scalar scalar = 1; @@ -102,6 +103,10 @@ message Literal { // A map of strings to literals. 
LiteralMap map = 3; + + // Offloaded literal metadata + // When you deserialize the offloaded metadata, it would be of Literal and its type would be defined by LiteralType stored in offloaded_metadata. + LiteralOffloadedMetadata offloaded_metadata = 8; } // A hash representing this literal. @@ -111,12 +116,18 @@ message Literal { // Additional metadata for literals. map metadata = 5; +} + +// A message that contains the metadata of the offloaded data. +message LiteralOffloadedMetadata { + // The location of the offloaded core.Literal. + string uri = 1; - // If this literal is offloaded, this field will contain metadata including the offload location. - string uri = 6; + // The size of the offloaded data. + uint64 size_bytes = 2; - // Includes information about the size of the literal. - uint64 size_bytes = 7; + // The inferred literal type of the offloaded data. + LiteralType inferred_type = 3; } // A collection of literals. This is a workaround since oneofs in proto messages cannot contain a repeated field. From da2a5e653ff79ed8170ca1245e6cc0a0337054cc Mon Sep 17 00:00:00 2001 From: Kevin Su Date: Thu, 29 Aug 2024 18:11:19 -0700 Subject: [PATCH 48/65] Improve error message for MismatchingTypes (#5639) * wip Signed-off-by: Kevin Su * wip Signed-off-by: Kevin Su * wip Signed-off-by: Kevin Su * update tests Signed-off-by: Kevin Su * nit Signed-off-by: Kevin Su * fix tests Signed-off-by: Kevin Su * address comment Signed-off-by: Kevin Su * fix tests Signed-off-by: Future-Outlier * nit Signed-off-by: Kevin Su * lint Signed-off-by: Kevin Su * fix ci Signed-off-by: Kevin Su * lint Signed-off-by: Kevin Su --------- Signed-off-by: Kevin Su Signed-off-by: Future-Outlier Co-authored-by: Future-Outlier Signed-off-by: Bugra Gedik --- .github/workflows/single-binary.yml | 2 +- .../pkg/manager/impl/workflow_manager.go | 2 +- .../pkg/manager/impl/workflow_manager_test.go | 4 +-- .../pkg/workflowengine/impl/compiler.go | 2 +- .../pkg/compiler/errors/compiler_errors.go | 11 ++++++-- .../pkg/compiler/validators/bindings.go | 27 ++++++++++++++----- .../pkg/compiler/validators/bindings_test.go | 20 ++++++++++++++ 7 files changed, 54 insertions(+), 14 deletions(-) diff --git a/.github/workflows/single-binary.yml b/.github/workflows/single-binary.yml index d4cb79f4d5..c40b33e3a4 100644 --- a/.github/workflows/single-binary.yml +++ b/.github/workflows/single-binary.yml @@ -175,7 +175,7 @@ jobs: run: | python -m pip install --upgrade pip pip install uv - uv pip install --system flytekit flytekitplugins-deck-standard flytekitplugins-envd "numpy<2.0.0" pyarrow + uv pip install --system flytekit flytekitplugins-deck-standard "numpy<2.0.0" pyarrow uv pip freeze - name: Checkout flytesnacks uses: actions/checkout@v4 diff --git a/flyteadmin/pkg/manager/impl/workflow_manager.go b/flyteadmin/pkg/manager/impl/workflow_manager.go index e4cc5cc120..d842615921 100644 --- a/flyteadmin/pkg/manager/impl/workflow_manager.go +++ b/flyteadmin/pkg/manager/impl/workflow_manager.go @@ -147,7 +147,7 @@ func (w *WorkflowManager) CreateWorkflow( if err != nil { logger.Errorf(ctx, "Failed to compile workflow with err: %v", err) return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, - "failed to compile workflow for [%+v] with err %v", request.Id, err) + "failed to compile workflow for [%+v] with err: %v", request.Id, err) } err = validation.ValidateCompiledWorkflow( *request.Id, workflowClosure, w.config.RegistrationValidationConfiguration()) diff --git a/flyteadmin/pkg/manager/impl/workflow_manager_test.go 
b/flyteadmin/pkg/manager/impl/workflow_manager_test.go index 33626cae84..c05a5e4a57 100644 --- a/flyteadmin/pkg/manager/impl/workflow_manager_test.go +++ b/flyteadmin/pkg/manager/impl/workflow_manager_test.go @@ -227,7 +227,7 @@ func TestCreateWorkflow_CompilerGetRequirementsError(t *testing.T) { response, err := workflowManager.CreateWorkflow(context.Background(), request) utils.AssertEqualWithSanitizedRegex(t, fmt.Sprintf( "failed to compile workflow for [resource_type:WORKFLOW project:\"project\" domain:\"domain\" "+ - "name:\"name\" version:\"version\" ] with err %v", expectedErr.Error()), err.Error()) + "name:\"name\" version:\"version\" ] with err: %v", expectedErr.Error()), err.Error()) assert.Nil(t, response) } @@ -251,7 +251,7 @@ func TestCreateWorkflow_CompileWorkflowError(t *testing.T) { assert.Equal(t, codes.InvalidArgument, s.Code()) utils.AssertEqualWithSanitizedRegex(t, fmt.Sprintf( "failed to compile workflow for [resource_type:WORKFLOW project:\"project\" domain:\"domain\" "+ - "name:\"name\" version:\"version\" ] with err %v", expectedErr.Error()), err.Error()) + "name:\"name\" version:\"version\" ] with err: %v", expectedErr.Error()), err.Error()) } func TestCreateWorkflow_DatabaseError(t *testing.T) { diff --git a/flyteadmin/pkg/workflowengine/impl/compiler.go b/flyteadmin/pkg/workflowengine/impl/compiler.go index ebe8438f30..17c4c7ec4d 100644 --- a/flyteadmin/pkg/workflowengine/impl/compiler.go +++ b/flyteadmin/pkg/workflowengine/impl/compiler.go @@ -37,7 +37,7 @@ func (c *workflowCompiler) CompileWorkflow( compiledWorkflowClosure, err := compiler.CompileWorkflow(primaryWf, subworkflows, tasks, launchPlans) if err != nil { - return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "failed to compile workflow with err %v", err) + return nil, errors.NewFlyteAdminError(codes.InvalidArgument, err.Error()) } return compiledWorkflowClosure, nil } diff --git a/flytepropeller/pkg/compiler/errors/compiler_errors.go b/flytepropeller/pkg/compiler/errors/compiler_errors.go index 2e0e15367a..9b762f72ca 100755 --- a/flytepropeller/pkg/compiler/errors/compiler_errors.go +++ b/flytepropeller/pkg/compiler/errors/compiler_errors.go @@ -205,8 +205,15 @@ func NewDuplicateIDFoundErr(nodeID string) *CompileError { func NewMismatchingTypesErr(nodeID, fromVar, fromType, toType string) *CompileError { return newError( MismatchingTypes, - fmt.Sprintf("Variable [%v] (type [%v]) doesn't match expected type [%v].", fromVar, fromType, - toType), + fmt.Sprintf("Variable [%v] (type [%v]) doesn't match expected type [%v].", fromVar, fromType, toType), + nodeID, + ) +} + +func NewMismatchingVariablesErr(nodeID, fromVar, fromType, toVar, toType string) *CompileError { + return newError( + MismatchingTypes, + fmt.Sprintf("The output variable '%v' has type [%v], but it's assigned to the input variable '%v' which has type type [%v].", fromVar, fromType, toVar, toType), nodeID, ) } diff --git a/flytepropeller/pkg/compiler/validators/bindings.go b/flytepropeller/pkg/compiler/validators/bindings.go index 527dc091c9..337d04966d 100644 --- a/flytepropeller/pkg/compiler/validators/bindings.go +++ b/flytepropeller/pkg/compiler/validators/bindings.go @@ -1,6 +1,7 @@ package validators import ( + "fmt" "reflect" "k8s.io/apimachinery/pkg/util/sets" @@ -11,9 +12,10 @@ import ( "github.com/flyteorg/flyte/flytepropeller/pkg/compiler/typing" ) -func validateBinding(w c.WorkflowBuilder, nodeID c.NodeID, nodeParam string, binding *flyte.BindingData, +func validateBinding(w c.WorkflowBuilder, node c.Node, nodeParam 
string, binding *flyte.BindingData, expectedType *flyte.LiteralType, errs errors.CompileErrors, validateParamTypes bool) ( resolvedType *flyte.LiteralType, upstreamNodes []c.NodeID, ok bool) { + nodeID := node.GetId() // Non-scalar bindings will fail to introspect the type through a union type so we resolve them beforehand switch binding.GetValue().(type) { @@ -31,7 +33,7 @@ func validateBinding(w c.WorkflowBuilder, nodeID c.NodeID, nodeParam string, bin var ok bool for _, t := range expectedType.GetUnionType().GetVariants() { - resolvedType1, nodeIds1, ok1 := validateBinding(w, nodeID, nodeParam, binding, t, errors.NewCompileErrors(), validateParamTypes) + resolvedType1, nodeIds1, ok1 := validateBinding(w, node, nodeParam, binding, t, errors.NewCompileErrors(), validateParamTypes) if ok1 { if ok { errs.Collect(errors.NewAmbiguousBindingUnionValue(nodeID, nodeParam, expectedType.String(), binding.String(), matchingType.String(), t.String())) @@ -63,7 +65,7 @@ func validateBinding(w c.WorkflowBuilder, nodeID c.NodeID, nodeParam string, bin allNodeIds := make([]c.NodeID, 0, len(val.Collection.GetBindings())) var subType *flyte.LiteralType for _, v := range val.Collection.GetBindings() { - if resolvedType, nodeIds, ok := validateBinding(w, nodeID, nodeParam, v, expectedType.GetCollectionType(), errs.NewScope(), validateParamTypes); ok { + if resolvedType, nodeIds, ok := validateBinding(w, node, nodeParam, v, expectedType.GetCollectionType(), errs.NewScope(), validateParamTypes); ok { allNodeIds = append(allNodeIds, nodeIds...) subType = resolvedType } @@ -87,7 +89,7 @@ func validateBinding(w c.WorkflowBuilder, nodeID c.NodeID, nodeParam string, bin allNodeIds := make([]c.NodeID, 0, len(val.Map.GetBindings())) var subType *flyte.LiteralType for _, v := range val.Map.GetBindings() { - if resolvedType, nodeIds, ok := validateBinding(w, nodeID, nodeParam, v, expectedType.GetMapValueType(), errs.NewScope(), validateParamTypes); ok { + if resolvedType, nodeIds, ok := validateBinding(w, node, nodeParam, v, expectedType.GetMapValueType(), errs.NewScope(), validateParamTypes); ok { allNodeIds = append(allNodeIds, nodeIds...) subType = resolvedType } @@ -114,12 +116,22 @@ func validateBinding(w c.WorkflowBuilder, nodeID c.NodeID, nodeParam string, bin return nil, nil, !errs.HasErrors() } + inputVar := nodeParam + outputVar := val.Promise.Var + + if node.GetMetadata() != nil { + inputVar = fmt.Sprintf("%s.%s", node.GetMetadata().Name, nodeParam) + } + if upNode.GetMetadata() != nil { + outputVar = fmt.Sprintf("%s.%s", upNode.GetMetadata().Name, val.Promise.Var) + } + if param, paramFound := validateOutputVar(upNode, v.Name, errs.NewScope()); paramFound { sourceType := param.Type // If the variable has an index. We expect param to be a collection. 
if v.Index != nil { if cType := param.GetType().GetCollectionType(); cType == nil { - errs.Collect(errors.NewMismatchingTypesErr(nodeID, val.Promise.Var, param.Type.String(), expectedType.String())) + errs.Collect(errors.NewMismatchingVariablesErr(nodeID, outputVar, param.Type.String(), inputVar, expectedType.String())) } else { sourceType = cType } @@ -152,7 +164,8 @@ func validateBinding(w c.WorkflowBuilder, nodeID c.NodeID, nodeParam string, bin return param.GetType(), []c.NodeID{val.Promise.NodeId}, true } - errs.Collect(errors.NewMismatchingTypesErr(nodeID, val.Promise.Var, sourceType.String(), expectedType.String())) + errs.Collect(errors.NewMismatchingVariablesErr(node.GetId(), outputVar, sourceType.String(), inputVar, expectedType.String())) + return nil, nil, !errs.HasErrors() } } @@ -223,7 +236,7 @@ func ValidateBindings(w c.WorkflowBuilder, node c.Node, bindings []*flyte.Bindin } providedBindings.Insert(binding.GetVar()) - if resolvedType, upstreamNodes, bindingOk := validateBinding(w, node.GetId(), binding.GetVar(), binding.GetBinding(), + if resolvedType, upstreamNodes, bindingOk := validateBinding(w, node, binding.GetVar(), binding.GetBinding(), param.Type, errs.NewScope(), validateParamTypes); bindingOk { for _, upNode := range upstreamNodes { // Add implicit Edges diff --git a/flytepropeller/pkg/compiler/validators/bindings_test.go b/flytepropeller/pkg/compiler/validators/bindings_test.go index 182bee63ce..522c6bd677 100644 --- a/flytepropeller/pkg/compiler/validators/bindings_test.go +++ b/flytepropeller/pkg/compiler/validators/bindings_test.go @@ -246,6 +246,7 @@ func TestValidateBindings(t *testing.T) { t.Run("Promises", func(t *testing.T) { n := &mocks.NodeBuilder{} n.OnGetId().Return("node1") + n.OnGetMetadata().Return(&core.NodeMetadata{Name: "node1"}) n.OnGetInterface().Return(&core.TypedInterface{ Inputs: &core.VariableMap{ Variables: map[string]*core.Variable{}, @@ -257,6 +258,7 @@ func TestValidateBindings(t *testing.T) { n2 := &mocks.NodeBuilder{} n2.OnGetId().Return("node2") + n2.OnGetMetadata().Return(&core.NodeMetadata{Name: "node2"}) n2.OnGetOutputAliases().Return(nil) n2.OnGetInterface().Return(&core.TypedInterface{ Inputs: &core.VariableMap{ @@ -310,6 +312,7 @@ func TestValidateBindings(t *testing.T) { n := &mocks.NodeBuilder{} n.OnGetId().Return("node1") + n.OnGetMetadata().Return(&core.NodeMetadata{Name: "node1"}) n.OnGetInterface().Return(&core.TypedInterface{ Inputs: &core.VariableMap{ Variables: map[string]*core.Variable{}, @@ -321,6 +324,7 @@ func TestValidateBindings(t *testing.T) { n2 := &mocks.NodeBuilder{} n2.OnGetId().Return("node2") + n2.OnGetMetadata().Return(&core.NodeMetadata{Name: "node2"}) n2.OnGetOutputAliases().Return(nil) n2.OnGetInterface().Return(&core.TypedInterface{ Inputs: &core.VariableMap{ @@ -382,6 +386,7 @@ func TestValidateBindings(t *testing.T) { n := &mocks.NodeBuilder{} n.OnGetId().Return("node1") + n.OnGetMetadata().Return(&core.NodeMetadata{Name: "node1"}) n.OnGetInterface().Return(&core.TypedInterface{ Inputs: &core.VariableMap{ Variables: map[string]*core.Variable{}, @@ -393,6 +398,7 @@ func TestValidateBindings(t *testing.T) { n2 := &mocks.NodeBuilder{} n2.OnGetId().Return("node2") + n2.OnGetMetadata().Return(&core.NodeMetadata{Name: "node2"}) n2.OnGetOutputAliases().Return(nil) literalType := LiteralTypeForLiteral(coreutils.MustMakeLiteral(&structpb.Struct{})) literalType.Structure = &core.TypeStructure{} @@ -718,6 +724,7 @@ func TestValidateBindings(t *testing.T) { wf := &mocks.WorkflowBuilder{} n := 
&mocks.NodeBuilder{} n.OnGetId().Return("node1") + n.OnGetMetadata().Return(&core.NodeMetadata{Name: "node1"}) bindings := []*core.Binding{ { @@ -769,6 +776,9 @@ func TestValidateBindings(t *testing.T) { _, ok := ValidateBindings(wf, n, bindings, vars, true, c.EdgeDirectionBidirectional, compileErrors) assert.False(t, ok) assert.Equal(t, "MismatchingTypes", string(compileErrors.Errors().List()[0].Code())) + assert.Equal(t, "Code: MismatchingTypes, Node Id: node1, Description: Variable [x]"+ + " (type [union_type:{variants:{simple:INTEGER structure:{tag:\"int\"}}}]) doesn't match expected type"+ + " [union_type:{variants:{simple:INTEGER structure:{tag:\"int_other\"}}}].", compileErrors.Errors().List()[0].Error()) }) t.Run("List of Int to List of Unions Binding", func(t *testing.T) { @@ -1035,6 +1045,7 @@ func TestValidateBindings(t *testing.T) { t.Run("Union Promise Unambiguous", func(t *testing.T) { n := &mocks.NodeBuilder{} n.OnGetId().Return("node1") + n.OnGetMetadata().Return(&core.NodeMetadata{Name: "node1"}) n.OnGetInterface().Return(&core.TypedInterface{ Inputs: &core.VariableMap{ Variables: map[string]*core.Variable{}, @@ -1046,6 +1057,7 @@ func TestValidateBindings(t *testing.T) { n2 := &mocks.NodeBuilder{} n2.OnGetId().Return("node2") + n2.OnGetMetadata().Return(&core.NodeMetadata{Name: "node2"}) n2.OnGetOutputAliases().Return(nil) n2.OnGetInterface().Return(&core.TypedInterface{ Inputs: &core.VariableMap{ @@ -1116,6 +1128,7 @@ func TestValidateBindings(t *testing.T) { t.Run("Union Promise Ambiguous", func(t *testing.T) { n := &mocks.NodeBuilder{} n.OnGetId().Return("node1") + n.OnGetMetadata().Return(&core.NodeMetadata{Name: "n"}) n.OnGetInterface().Return(&core.TypedInterface{ Inputs: &core.VariableMap{ Variables: map[string]*core.Variable{}, @@ -1127,6 +1140,7 @@ func TestValidateBindings(t *testing.T) { n2 := &mocks.NodeBuilder{} n2.OnGetId().Return("node2") + n2.OnGetMetadata().Return(&core.NodeMetadata{Name: "n2"}) n2.OnGetOutputAliases().Return(nil) n2.OnGetInterface().Return(&core.TypedInterface{ Inputs: &core.VariableMap{ @@ -1196,11 +1210,16 @@ func TestValidateBindings(t *testing.T) { _, ok := ValidateBindings(wf, n, bindings, vars, true, c.EdgeDirectionBidirectional, compileErrors) assert.False(t, ok) assert.Equal(t, "MismatchingTypes", string(compileErrors.Errors().List()[0].Code())) + assert.Equal(t, "Code: MismatchingTypes, Node Id: node1, Description: The output variable 'n2.n2_out'"+ + " has type [simple:INTEGER], but it's assigned to the input variable 'n.x' which has type"+ + " type [union_type:{variants:{simple:STRING structure:{tag:\"str\"}} variants:{simple:INTEGER structure:{tag:\"int1\"}}"+ + " variants:{simple:INTEGER structure:{tag:\"int2\"}}}].", compileErrors.Errors().List()[0].Error()) }) t.Run("Union Promise Union Literal", func(t *testing.T) { n := &mocks.NodeBuilder{} n.OnGetId().Return("node1") + n.OnGetMetadata().Return(&core.NodeMetadata{Name: "node1"}) n.OnGetInterface().Return(&core.TypedInterface{ Inputs: &core.VariableMap{ Variables: map[string]*core.Variable{}, @@ -1212,6 +1231,7 @@ func TestValidateBindings(t *testing.T) { n2 := &mocks.NodeBuilder{} n2.OnGetId().Return("node2") + n2.OnGetMetadata().Return(&core.NodeMetadata{Name: "node2"}) n2.OnGetOutputAliases().Return(nil) n2.OnGetInterface().Return(&core.TypedInterface{ Inputs: &core.VariableMap{ From ef6fcc08737984d4371516221fc8b1fa88b856e7 Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Fri, 30 Aug 2024 23:55:14 +0800 Subject: [PATCH 49/65] [Docs] Echo Task (#5707) * add echo tasks 
Signed-off-by: Future-Outlier * update nikki's advice Signed-off-by: Future-Outlier Co-authored-by: nikki everett --------- Signed-off-by: Future-Outlier Co-authored-by: nikki everett Signed-off-by: Bugra Gedik --- .../advanced_composition/conditionals.md | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/docs/user_guide/advanced_composition/conditionals.md b/docs/user_guide/advanced_composition/conditionals.md index bbaf3dc27a..3afca88772 100644 --- a/docs/user_guide/advanced_composition/conditionals.md +++ b/docs/user_guide/advanced_composition/conditionals.md @@ -124,6 +124,25 @@ You can run the workflow locally as follows: :lines: 181-188 ``` +## Running a noop task in a conditional + +In some cases, you may want to skip the execution of a conditional workflow if a certain condition is not met. +You can achieve this by using the `echo` task, which simply returns the input value. + +:::{note} +To enable the echo plugin in the backend, add the plugin to Flyte's configuration file. +```yaml +task-plugins: + enabled-plugins: + - echo +``` +::: + +```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/89bf7bc7788802097904c5f9ffb75ba70ef980a6/examples/advanced_composition/advanced_composition/conditional.py +:caption: advanced_composition/conditional.py +:lines: 197-209 +``` + ## Run the example on the Flyte cluster To run the provided workflows on the Flyte cluster, use the following commands: @@ -170,4 +189,10 @@ pyflyte run --remote \ consume_task_output --radius 0.4 --seed 7 ``` +``` +pyflyte run --remote \ + https://raw.githubusercontent.com/flyteorg/flytesnacks/89bf7bc7788802097904c5f9ffb75ba70ef980a6/examples/advanced_composition/advanced_composition/conditional.py \ + noop_in_conditional --radius 0.4 --seed 5 +``` + [flytesnacks]: https://github.com/flyteorg/flytesnacks/tree/master/examples/advanced_composition From c5f7d84f3849d7aa826849ce476b6d61a5b96727 Mon Sep 17 00:00:00 2001 From: Wei-Yu Kao <115421902+wayner0628@users.noreply.github.com> Date: Sat, 31 Aug 2024 01:49:42 +0800 Subject: [PATCH 50/65] Improve execution name readability (#5637) Signed-off-by: wayner0628 Signed-off-by: Kevin Su Co-authored-by: Kevin Su Signed-off-by: Bugra Gedik --- flyteadmin/go.mod | 1 + flyteadmin/go.sum | 2 + .../async/schedule/aws/workflow_executor.go | 5 +- flyteadmin/pkg/common/executions.go | 13 ---- flyteadmin/pkg/common/executions_test.go | 23 ------- .../pkg/common/naming/execution_name.go | 30 +++++++++ .../pkg/common/naming/execution_name_test.go | 64 +++++++++++++++++++ flyteadmin/pkg/manager/impl/util/shared.go | 3 +- .../pkg/manager/impl/util/shared_test.go | 4 +- .../interfaces/application_configuration.go | 3 +- .../scheduler/executor/executor_impl.go | 21 ++---- go.mod | 1 + go.sum | 2 + 13 files changed, 112 insertions(+), 60 deletions(-) delete mode 100644 flyteadmin/pkg/common/executions_test.go create mode 100644 flyteadmin/pkg/common/naming/execution_name.go create mode 100644 flyteadmin/pkg/common/naming/execution_name_test.go diff --git a/flyteadmin/go.mod b/flyteadmin/go.mod index ac74384250..b9eba5b83a 100644 --- a/flyteadmin/go.mod +++ b/flyteadmin/go.mod @@ -48,6 +48,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 github.com/wI2L/jsondiff v0.5.0 + github.com/wolfeidau/humanhash v1.1.0 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 go.opentelemetry.io/otel v1.24.0 golang.org/x/oauth2 v0.16.0 diff --git a/flyteadmin/go.sum b/flyteadmin/go.sum index dba9da2e86..049add4bbc 
100644 --- a/flyteadmin/go.sum +++ b/flyteadmin/go.sum @@ -1297,6 +1297,8 @@ github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6Kllzaw github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/wI2L/jsondiff v0.5.0 h1:RRMTi/mH+R2aXcPe1VYyvGINJqQfC3R+KSEakuU1Ikw= github.com/wI2L/jsondiff v0.5.0/go.mod h1:qqG6hnK0Lsrz2BpIVCxWiK9ItsBCpIZQiv0izJjOZ9s= +github.com/wolfeidau/humanhash v1.1.0 h1:06KgtyyABJGBbrfMONrW7S+b5TTYVyrNB/jss5n7F3E= +github.com/wolfeidau/humanhash v1.1.0/go.mod h1:jkpynR1bfyfkmKEQudIC0osWKynFAoayRjzH9OJdVIg= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= diff --git a/flyteadmin/pkg/async/schedule/aws/workflow_executor.go b/flyteadmin/pkg/async/schedule/aws/workflow_executor.go index 918402836b..13d2041f9d 100644 --- a/flyteadmin/pkg/async/schedule/aws/workflow_executor.go +++ b/flyteadmin/pkg/async/schedule/aws/workflow_executor.go @@ -15,7 +15,7 @@ import ( "github.com/flyteorg/flyte/flyteadmin/pkg/async" scheduleInterfaces "github.com/flyteorg/flyte/flyteadmin/pkg/async/schedule/interfaces" - "github.com/flyteorg/flyte/flyteadmin/pkg/common" + "github.com/flyteorg/flyte/flyteadmin/pkg/common/naming" "github.com/flyteorg/flyte/flyteadmin/pkg/errors" "github.com/flyteorg/flyte/flyteadmin/pkg/manager/interfaces" runtimeInterfaces "github.com/flyteorg/flyte/flyteadmin/pkg/runtime/interfaces" @@ -129,7 +129,7 @@ func generateExecutionName(launchPlan admin.LaunchPlan, kickoffTime time.Time) s Name: launchPlan.Id.Name, }) randomSeed := kickoffTime.UnixNano() + int64(hashedIdentifier) - return common.GetExecutionName(randomSeed) + return naming.GetExecutionName(randomSeed) } func (e *workflowExecutor) formulateExecutionCreateRequest( @@ -207,7 +207,6 @@ func (e *workflowExecutor) run() error { continue } executionRequest := e.formulateExecutionCreateRequest(launchPlan, scheduledWorkflowExecutionRequest.KickoffTime) - ctx = contextutils.WithWorkflowID(ctx, fmt.Sprintf(workflowIdentifierFmt, executionRequest.Project, executionRequest.Domain, executionRequest.Name)) err = e.resolveKickoffTimeArg(scheduledWorkflowExecutionRequest, launchPlan, &executionRequest) diff --git a/flyteadmin/pkg/common/executions.go b/flyteadmin/pkg/common/executions.go index fbb5bdd6bd..4ac1ec7300 100644 --- a/flyteadmin/pkg/common/executions.go +++ b/flyteadmin/pkg/common/executions.go @@ -1,22 +1,9 @@ package common import ( - "fmt" - - "k8s.io/apimachinery/pkg/util/rand" - "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" ) -const ExecutionIDLength = 20 -const ExecutionStringFormat = "a%s" - -/* #nosec */ -func GetExecutionName(seed int64) string { - rand.Seed(seed) - return fmt.Sprintf(ExecutionStringFormat, rand.String(ExecutionIDLength-1)) -} - var terminalExecutionPhases = map[core.WorkflowExecution_Phase]bool{ core.WorkflowExecution_SUCCEEDED: true, core.WorkflowExecution_FAILED: true, diff --git a/flyteadmin/pkg/common/executions_test.go b/flyteadmin/pkg/common/executions_test.go deleted file mode 100644 index 628abd6e9d..0000000000 --- a/flyteadmin/pkg/common/executions_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package common - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -const AllowedExecutionIDStartCharStr = 
"abcdefghijklmnopqrstuvwxyz" -const AllowedExecutionIDStr = "abcdefghijklmnopqrstuvwxyz1234567890" - -var AllowedExecutionIDStartChars = []rune(AllowedExecutionIDStartCharStr) -var AllowedExecutionIDChars = []rune(AllowedExecutionIDStr) - -func TestGetExecutionName(t *testing.T) { - randString := GetExecutionName(time.Now().UnixNano()) - assert.Len(t, randString, ExecutionIDLength) - assert.Contains(t, AllowedExecutionIDStartChars, rune(randString[0])) - for i := 1; i < len(randString); i++ { - assert.Contains(t, AllowedExecutionIDChars, rune(randString[i])) - } -} diff --git a/flyteadmin/pkg/common/naming/execution_name.go b/flyteadmin/pkg/common/naming/execution_name.go new file mode 100644 index 0000000000..01aa3fe8b6 --- /dev/null +++ b/flyteadmin/pkg/common/naming/execution_name.go @@ -0,0 +1,30 @@ +package naming + +import ( + "fmt" + + "github.com/wolfeidau/humanhash" + "k8s.io/apimachinery/pkg/util/rand" + + "github.com/flyteorg/flyte/flyteadmin/pkg/runtime" + runtimeInterfaces "github.com/flyteorg/flyte/flyteadmin/pkg/runtime/interfaces" +) + +const ExecutionIDLength = 20 +const ExecutionIDLengthLimit = 63 +const ExecutionStringFormat = "a%s" + +var configProvider runtimeInterfaces.ApplicationConfiguration = runtime.NewApplicationConfigurationProvider() + +/* #nosec */ +func GetExecutionName(seed int64) string { + rand.Seed(seed) + config := configProvider.GetTopLevelConfig() + if config.FeatureGates.EnableFriendlyNames { + hashKey := []byte(rand.String(ExecutionIDLength)) + // Ignoring the error as it's guaranteed hash key longer than result in this context. + result, _ := humanhash.Humanize(hashKey, 4) + return result + } + return fmt.Sprintf(ExecutionStringFormat, rand.String(ExecutionIDLength-1)) +} diff --git a/flyteadmin/pkg/common/naming/execution_name_test.go b/flyteadmin/pkg/common/naming/execution_name_test.go new file mode 100644 index 0000000000..22729dbb9b --- /dev/null +++ b/flyteadmin/pkg/common/naming/execution_name_test.go @@ -0,0 +1,64 @@ +package naming + +import ( + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + runtimeInterfaces "github.com/flyteorg/flyte/flyteadmin/pkg/runtime/interfaces" + runtimeMocks "github.com/flyteorg/flyte/flyteadmin/pkg/runtime/mocks" +) + +const AllowedExecutionIDAlphabetStr = "abcdefghijklmnopqrstuvwxyz" +const AllowedExecutionIDAlphanumericStr = "abcdefghijklmnopqrstuvwxyz1234567890" +const AllowedExecutionIDFriendlyNameStr = "abcdefghijklmnopqrstuvwxyz-" + +var AllowedExecutionIDAlphabets = []rune(AllowedExecutionIDAlphabetStr) +var AllowedExecutionIDAlphanumerics = []rune(AllowedExecutionIDAlphanumericStr) +var AllowedExecutionIDFriendlyNameChars = []rune(AllowedExecutionIDFriendlyNameStr) + +func TestGetExecutionName(t *testing.T) { + originalConfigProvider := configProvider + defer func() { configProvider = originalConfigProvider }() + + mockConfigProvider := &runtimeMocks.MockApplicationProvider{} + configProvider = mockConfigProvider + + t.Run("general name", func(t *testing.T) { + appConfig := runtimeInterfaces.ApplicationConfig{ + FeatureGates: runtimeInterfaces.FeatureGates{ + EnableFriendlyNames: false, + }, + } + mockConfigProvider.SetTopLevelConfig(appConfig) + + randString := GetExecutionName(time.Now().UnixNano()) + assert.Len(t, randString, ExecutionIDLength) + assert.Contains(t, AllowedExecutionIDAlphabets, rune(randString[0])) + for i := 1; i < len(randString); i++ { + assert.Contains(t, AllowedExecutionIDAlphanumerics, rune(randString[i])) + } + }) + + t.Run("friendly name", func(t 
*testing.T) { + appConfig := runtimeInterfaces.ApplicationConfig{ + FeatureGates: runtimeInterfaces.FeatureGates{ + EnableFriendlyNames: true, + }, + } + mockConfigProvider.SetTopLevelConfig(appConfig) + + randString := GetExecutionName(time.Now().UnixNano()) + assert.LessOrEqual(t, len(randString), ExecutionIDLengthLimit) + for i := 0; i < len(randString); i++ { + assert.Contains(t, AllowedExecutionIDFriendlyNameChars, rune(randString[i])) + } + hyphenCount := strings.Count(randString, "-") + assert.Equal(t, 3, hyphenCount, "FriendlyName should contain exactly three hyphens") + words := strings.Split(randString, "-") + assert.Equal(t, 4, len(words), "FriendlyName should be split into exactly four words") + }) + +} diff --git a/flyteadmin/pkg/manager/impl/util/shared.go b/flyteadmin/pkg/manager/impl/util/shared.go index 24c97f416a..b1395697ef 100644 --- a/flyteadmin/pkg/manager/impl/util/shared.go +++ b/flyteadmin/pkg/manager/impl/util/shared.go @@ -8,6 +8,7 @@ import ( "google.golang.org/grpc/codes" "github.com/flyteorg/flyte/flyteadmin/pkg/common" + "github.com/flyteorg/flyte/flyteadmin/pkg/common/naming" "github.com/flyteorg/flyte/flyteadmin/pkg/errors" "github.com/flyteorg/flyte/flyteadmin/pkg/manager/impl/shared" "github.com/flyteorg/flyte/flyteadmin/pkg/manager/impl/validation" @@ -25,7 +26,7 @@ func GetExecutionName(request admin.ExecutionCreateRequest) string { if request.Name != "" { return request.Name } - return common.GetExecutionName(time.Now().UnixNano()) + return naming.GetExecutionName(time.Now().UnixNano()) } func GetTask(ctx context.Context, repo repoInterfaces.Repository, identifier core.Identifier) ( diff --git a/flyteadmin/pkg/manager/impl/util/shared_test.go b/flyteadmin/pkg/manager/impl/util/shared_test.go index 21a78997c3..75759485db 100644 --- a/flyteadmin/pkg/manager/impl/util/shared_test.go +++ b/flyteadmin/pkg/manager/impl/util/shared_test.go @@ -12,8 +12,8 @@ import ( "github.com/stretchr/testify/assert" "google.golang.org/grpc/codes" - "github.com/flyteorg/flyte/flyteadmin/pkg/common" commonMocks "github.com/flyteorg/flyte/flyteadmin/pkg/common/mocks" + "github.com/flyteorg/flyte/flyteadmin/pkg/common/naming" flyteAdminErrors "github.com/flyteorg/flyte/flyteadmin/pkg/errors" "github.com/flyteorg/flyte/flyteadmin/pkg/manager/impl/testutils" managerInterfaces "github.com/flyteorg/flyte/flyteadmin/pkg/manager/interfaces" @@ -42,7 +42,7 @@ func TestPopulateExecutionID(t *testing.T) { Domain: "domain", }) assert.NotEmpty(t, name) - assert.Len(t, name, common.ExecutionIDLength) + assert.Len(t, name, naming.ExecutionIDLength) } func TestPopulateExecutionID_ExistingName(t *testing.T) { diff --git a/flyteadmin/pkg/runtime/interfaces/application_configuration.go b/flyteadmin/pkg/runtime/interfaces/application_configuration.go index ca6dc60923..8be59abe14 100644 --- a/flyteadmin/pkg/runtime/interfaces/application_configuration.go +++ b/flyteadmin/pkg/runtime/interfaces/application_configuration.go @@ -49,7 +49,8 @@ type PostgresConfig struct { } type FeatureGates struct { - EnableArtifacts bool `json:"enableArtifacts" pflag:",Enable artifacts feature."` + EnableArtifacts bool `json:"enableArtifacts" pflag:",Enable artifacts feature."` + EnableFriendlyNames bool `json:"enableFriendlyNames" pflag:",Enable generation of friendly execution names feature."` } // ApplicationConfig is the base configuration to start admin diff --git a/flyteadmin/scheduler/executor/executor_impl.go b/flyteadmin/scheduler/executor/executor_impl.go index dffb98e1b6..f3fd86c6cf 100644 --- 
a/flyteadmin/scheduler/executor/executor_impl.go +++ b/flyteadmin/scheduler/executor/executor_impl.go @@ -2,7 +2,6 @@ package executor import ( "context" - "strings" "time" "github.com/prometheus/client_golang/prometheus" @@ -12,7 +11,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/retry" - "github.com/flyteorg/flyte/flyteadmin/scheduler/identifier" + "github.com/flyteorg/flyte/flyteadmin/pkg/common/naming" "github.com/flyteorg/flyte/flyteadmin/scheduler/repositories/models" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" @@ -53,23 +52,11 @@ func (w *executor) Execute(ctx context.Context, scheduledTime time.Time, s model } } - // Making the identifier deterministic using the hash of the identifier and scheduled time - executionIdentifier, err := identifier.GetExecutionIdentifier(ctx, core.Identifier{ - Project: s.Project, - Domain: s.Domain, - Name: s.Name, - Version: s.Version, - }, scheduledTime) - - if err != nil { - logger.Errorf(ctx, "failed to generate execution identifier for schedule %+v due to %v", s, err) - return err - } - + executionName := naming.GetExecutionName(time.Now().UnixNano()) executionRequest := &admin.ExecutionCreateRequest{ Project: s.Project, Domain: s.Domain, - Name: "f" + strings.ReplaceAll(executionIdentifier.String(), "-", "")[:19], + Name: executionName, Spec: &admin.ExecutionSpec{ LaunchPlan: &core.Identifier{ ResourceType: core.ResourceType_LAUNCH_PLAN, @@ -97,7 +84,7 @@ func (w *executor) Execute(ctx context.Context, scheduledTime time.Time, s model // Do maximum of 30 retries on failures with constant backoff factor opts := wait.Backoff{Duration: 3000, Factor: 2.0, Steps: 30} - err = retry.OnError(opts, + err := retry.OnError(opts, func(err error) bool { // For idempotent behavior ignore the AlreadyExists error which happens if we try to schedule a launchplan // for execution at the same time which is already available in admin. 
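Reader's note (not part of the patch): the `naming.GetExecutionName` helper introduced above either returns the classic random 20-character "a…" name or, when the `enableFriendlyNames` feature gate is on, a humanhash-derived friendly name. The following is a minimal, self-contained sketch of that friendly-name path, assuming the `github.com/wolfeidau/humanhash` and `k8s.io/apimachinery/pkg/util/rand` packages referenced in the diff; seeding and the feature-gate lookup are omitted, and the printed output is illustrative only.

```go
// Sketch of the friendly execution-name path (assumption: not the exact production code).
package main

import (
	"fmt"

	"github.com/wolfeidau/humanhash"
	"k8s.io/apimachinery/pkg/util/rand"
)

func main() {
	// A random 20-character key, mirroring rand.String(ExecutionIDLength) in GetExecutionName.
	hashKey := []byte(rand.String(20))

	// Humanize maps the key onto four dictionary words joined by hyphens.
	name, err := humanhash.Humanize(hashKey, 4)
	if err != nil {
		// The patch ignores this error because the key is always longer than the result.
		panic(err)
	}
	fmt.Println(name) // e.g. "papa-november-golf-kilo" (illustrative output)
}
```

The accompanying test in the patch asserts that such names contain exactly four hyphen-separated words and stay within the 63-character `ExecutionIDLengthLimit`.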
diff --git a/go.mod b/go.mod index 3a7098d3c0..8fd55ed61a 100644 --- a/go.mod +++ b/go.mod @@ -178,6 +178,7 @@ require ( github.com/tidwall/pretty v1.2.0 // indirect github.com/tidwall/sjson v1.2.5 // indirect github.com/wI2L/jsondiff v0.5.0 // indirect + github.com/wolfeidau/humanhash v1.1.0 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect diff --git a/go.sum b/go.sum index 05db1b9c1c..ae60f26800 100644 --- a/go.sum +++ b/go.sum @@ -1333,6 +1333,8 @@ github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6Kllzaw github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/wI2L/jsondiff v0.5.0 h1:RRMTi/mH+R2aXcPe1VYyvGINJqQfC3R+KSEakuU1Ikw= github.com/wI2L/jsondiff v0.5.0/go.mod h1:qqG6hnK0Lsrz2BpIVCxWiK9ItsBCpIZQiv0izJjOZ9s= +github.com/wolfeidau/humanhash v1.1.0 h1:06KgtyyABJGBbrfMONrW7S+b5TTYVyrNB/jss5n7F3E= +github.com/wolfeidau/humanhash v1.1.0/go.mod h1:jkpynR1bfyfkmKEQudIC0osWKynFAoayRjzH9OJdVIg= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= From b46e203262988aae8dfc792a650b571eaa4cacc7 Mon Sep 17 00:00:00 2001 From: Prafulla Mahindrakar Date: Fri, 30 Aug 2024 12:56:32 -0700 Subject: [PATCH 51/65] Configure imagePullPolicy to be Always pull on flyte sandbox environment (#5709) Signed-off-by: Bugra Gedik --- flyte-single-binary-local.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/flyte-single-binary-local.yaml b/flyte-single-binary-local.yaml index 4cb63e8d4d..e7350212a2 100644 --- a/flyte-single-binary-local.yaml +++ b/flyte-single-binary-local.yaml @@ -57,6 +57,7 @@ plugins: - FLYTE_AWS_ENDPOINT: http://flyte-sandbox-minio.flyte:9000 - FLYTE_AWS_ACCESS_KEY_ID: minio - FLYTE_AWS_SECRET_ACCESS_KEY: miniostorage + image-pull-policy: Always # Helps in better iteration of flytekit changes k8s-array: logs: config: From 2afe101c8eee4675f172338fbb200650f4967ac8 Mon Sep 17 00:00:00 2001 From: Kevin Su Date: Sun, 1 Sep 2024 18:30:33 -0700 Subject: [PATCH 52/65] should not set echo plugin as default (#5713) Signed-off-by: Kevin Su Signed-off-by: Bugra Gedik --- flyteplugins/go/tasks/plugins/testing/echo.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flyteplugins/go/tasks/plugins/testing/echo.go b/flyteplugins/go/tasks/plugins/testing/echo.go index 7c55d3862f..09c4dc53b1 100644 --- a/flyteplugins/go/tasks/plugins/testing/echo.go +++ b/flyteplugins/go/tasks/plugins/testing/echo.go @@ -181,7 +181,7 @@ func init() { taskStartTimes: make(map[string]time.Time), }, nil }, - IsDefault: true, + IsDefault: false, }, ) } From 9f630b8d2037eabbe90110900e8c9a8ea7319518 Mon Sep 17 00:00:00 2001 From: Wei-Yu Kao <115421902+wayner0628@users.noreply.github.com> Date: Mon, 2 Sep 2024 16:20:06 +0800 Subject: [PATCH 53/65] Move default execution name generation to flyteadmin (#5714) Signed-off-by: wayner0628 Signed-off-by: Bugra Gedik --- flytectl/cmd/create/execution_util.go | 5 ----- flytectl/go.mod | 2 +- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/flytectl/cmd/create/execution_util.go b/flytectl/cmd/create/execution_util.go index 7ebb757a29..bcb5c5639f 100644 
--- a/flytectl/cmd/create/execution_util.go +++ b/flytectl/cmd/create/execution_util.go @@ -4,14 +4,12 @@ import ( "context" "fmt" "io/ioutil" - "strings" cmdCore "github.com/flyteorg/flyte/flytectl/cmd/core" cmdGet "github.com/flyteorg/flyte/flytectl/cmd/get" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" "github.com/flyteorg/flyte/flytestdlib/logger" - "github.com/google/uuid" "sigs.k8s.io/yaml" ) @@ -149,9 +147,6 @@ func recoverExecution(ctx context.Context, executionName string, project string, func createExecutionRequest(ID *core.Identifier, inputs *core.LiteralMap, envs *admin.Envs, securityContext *core.SecurityContext, authRole *admin.AuthRole, targetExecName string, targetExecutionCluster string) *admin.ExecutionCreateRequest { - if len(targetExecName) == 0 { - targetExecName = "f" + strings.ReplaceAll(uuid.New().String(), "-", "")[:19] - } var clusterAssignment *admin.ClusterAssignment if executionConfig.ClusterPool != "" { clusterAssignment = &admin.ClusterAssignment{ClusterPoolName: executionConfig.ClusterPool} diff --git a/flytectl/go.mod b/flytectl/go.mod index 9e4baeea63..0298ad38f0 100644 --- a/flytectl/go.mod +++ b/flytectl/go.mod @@ -21,7 +21,6 @@ require ( github.com/go-ozzo/ozzo-validation/v4 v4.3.0 github.com/golang/protobuf v1.5.3 github.com/google/go-github/v42 v42.0.0 - github.com/google/uuid v1.6.0 github.com/hashicorp/go-version v1.3.0 github.com/hexops/gotextdiff v1.0.3 github.com/kataras/tablewriter v0.0.0-20180708051242-e063d29b7c23 @@ -102,6 +101,7 @@ require ( github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/s2a-go v0.1.7 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/gax-go/v2 v2.12.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect From 2dc91ca9899d166ab1d715619b6c90cab5c13b1b Mon Sep 17 00:00:00 2001 From: Jason Parraga Date: Mon, 2 Sep 2024 21:10:05 -0700 Subject: [PATCH 54/65] Update helm/docs per changes in supported task discovery (#5694) * Update helm/docs per changes in supported task discovery Signed-off-by: Jason Parraga * Cleanup flyte-binary Signed-off-by: Jason Parraga * fixes Signed-off-by: Jason Parraga --------- Signed-off-by: Jason Parraga Signed-off-by: Bugra Gedik --- charts/flyte-binary/README.md | 1 - charts/flyte-binary/values.yaml | 2 -- charts/flyte-core/README.md | 4 ++-- charts/flyte-core/values.yaml | 8 +++----- .../manifests/complete-agent.yaml | 8 +++----- docker/sandbox-bundled/manifests/complete.yaml | 4 ++-- docker/sandbox-bundled/manifests/dev.yaml | 4 ++-- docs/deployment/agents/airflow.rst | 11 +---------- docs/deployment/agents/bigquery.rst | 11 +---------- docs/deployment/agents/chatgpt.rst | 6 +----- docs/deployment/agents/databricks.rst | 18 +++--------------- docs/deployment/agents/mmcloud.rst | 2 -- docs/deployment/agents/openai_batch.rst | 12 ++---------- docs/deployment/agents/sagemaker_inference.rst | 14 ++------------ docs/deployment/agents/sensor.rst | 11 +---------- docs/deployment/agents/snowflake.rst | 11 +---------- .../generated/flytepropeller_config.rst | 3 --- docs/flyte_agents/developing_agents.md | 6 +----- 18 files changed, 25 insertions(+), 111 deletions(-) diff --git a/charts/flyte-binary/README.md b/charts/flyte-binary/README.md index cc39b38cdc..43e0a71437 100644 --- a/charts/flyte-binary/README.md +++ b/charts/flyte-binary/README.md 
@@ -25,7 +25,6 @@ Chart for basic single Flyte executable deployment | configuration.agentService.defaultAgent.endpoint | string | `"dns:///flyteagent.flyte.svc.cluster.local:8000"` | | | configuration.agentService.defaultAgent.insecure | bool | `true` | | | configuration.agentService.defaultAgent.timeouts.GetTask | string | `"10s"` | | -| configuration.agentService.supportedTaskTypes[0] | string | `"default_task"` | | | configuration.annotations | object | `{}` | | | configuration.auth.authorizedUris | list | `[]` | | | configuration.auth.clientSecretsExternalSecretRef | string | `""` | | diff --git a/charts/flyte-binary/values.yaml b/charts/flyte-binary/values.yaml index 0f7261d86e..3f87556c3e 100644 --- a/charts/flyte-binary/values.yaml +++ b/charts/flyte-binary/values.yaml @@ -168,8 +168,6 @@ configuration: timeouts: GetTask: 10s defaultTimeout: 10s - supportedTaskTypes: - - default_task # externalConfigMap Specify an existing, external ConfigMap to use as configuration for Flyte # If set, no Flyte configuration will be generated by this chart externalConfigMap: "" diff --git a/charts/flyte-core/README.md b/charts/flyte-core/README.md index 673ba7b6ef..ba11205264 100644 --- a/charts/flyte-core/README.md +++ b/charts/flyte-core/README.md @@ -195,11 +195,11 @@ helm install gateway bitnami/contour -n flyte | flyteadmin.serviceMonitor.scrapeTimeout | string | `"30s"` | Sets the timeout after which request to scrape metrics will time out | | flyteadmin.tolerations | list | `[]` | tolerations for Flyteadmin deployment | | flyteagent.enabled | bool | `false` | | -| flyteagent.plugin_config.plugins.agent-service | object | `{"defaultAgent":{"endpoint":"dns:///flyteagent.flyte.svc.cluster.local:8000","insecure":true},"supportedTaskTypes":["sensor"]}` | Agent service configuration for propeller. | +| flyteagent.plugin_config.plugins.agent-service | object | `{"defaultAgent":{"endpoint":"dns:///flyteagent.flyte.svc.cluster.local:8000","insecure":true},"supportedTaskTypes":[]}` | Agent service configuration for propeller. | | flyteagent.plugin_config.plugins.agent-service.defaultAgent | object | `{"endpoint":"dns:///flyteagent.flyte.svc.cluster.local:8000","insecure":true}` | The default agent service to use for plugin tasks. | | flyteagent.plugin_config.plugins.agent-service.defaultAgent.endpoint | string | `"dns:///flyteagent.flyte.svc.cluster.local:8000"` | The agent service endpoint propeller should connect to. | | flyteagent.plugin_config.plugins.agent-service.defaultAgent.insecure | bool | `true` | Whether the connection from propeller to the agent service should use TLS. | -| flyteagent.plugin_config.plugins.agent-service.supportedTaskTypes | list | `["sensor"]` | The task types supported by the default agent. | +| flyteagent.plugin_config.plugins.agent-service.supportedTaskTypes | list | `[]` | The task types supported by the default agent. As of #5460 these are discovered automatically and don't need to be configured. | | flyteagent.podLabels | object | `{}` | Labels for flyteagent pods | | flyteconsole.affinity | object | `{}` | affinity for Flyteconsole deployment | | flyteconsole.enabled | bool | `true` | | diff --git a/charts/flyte-core/values.yaml b/charts/flyte-core/values.yaml index 93c0d9b389..7442222e9b 100755 --- a/charts/flyte-core/values.yaml +++ b/charts/flyte-core/values.yaml @@ -287,11 +287,9 @@ flyteagent: endpoint: "dns:///flyteagent.flyte.svc.cluster.local:8000" # -- Whether the connection from propeller to the agent service should use TLS. 
insecure: true - # -- The task types supported by the default agent. - supportedTaskTypes: - - sensor - # -- Uncomment to enable task type that uses Flyte Agent - # - bigquery_query_job_task + # -- The task types supported by the default agent. As of #5460 these are discovered automatically and don't + # need to be configured. + supportedTaskTypes: [] # -- Labels for flyteagent pods podLabels: {} diff --git a/docker/sandbox-bundled/manifests/complete-agent.yaml b/docker/sandbox-bundled/manifests/complete-agent.yaml index 78a678ae34..1279b64c60 100644 --- a/docker/sandbox-bundled/manifests/complete-agent.yaml +++ b/docker/sandbox-bundled/manifests/complete-agent.yaml @@ -485,8 +485,6 @@ data: insecure: true timeouts: GetTask: 10s - supportedTaskTypes: - - default_task 002-database.yaml: | database: postgres: @@ -818,7 +816,7 @@ type: Opaque --- apiVersion: v1 data: - haSharedSecret: cWlOc1c1bnl5ZGI3YTlzSw== + haSharedSecret: WVl4Y1pFNm1JTkxOMjJpZQ== proxyPassword: "" proxyUsername: "" kind: Secret @@ -1249,7 +1247,7 @@ spec: metadata: annotations: checksum/cluster-resource-templates: 6fd9b172465e3089fcc59f738b92b8dc4d8939360c19de8ee65f68b0e7422035 - checksum/configuration: 6e8a4cc6177037f26cee65d09c37c010437ea3f0989a2a2dfef380fed9f468c2 + checksum/configuration: f746817691b502fb50f04992e148847f89c0a32d7df822dbda7f1f5fdf84f420 checksum/configuration-secret: 09216ffaa3d29e14f88b1f30af580d02a2a5e014de4d750b7f275cc07ed4e914 labels: app.kubernetes.io/component: flyte-binary @@ -1415,7 +1413,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: 7f8247a0b84f43018fdf11a598132b8a67ed9fde6573ffce801b725a6f955012 + checksum/secret: f0211dde23276e14b7254edd85e3afd32044562e409b56d79b27202e23dc224c labels: app: docker-registry release: flyte-sandbox diff --git a/docker/sandbox-bundled/manifests/complete.yaml b/docker/sandbox-bundled/manifests/complete.yaml index 5d46b89edf..e9e3007dc9 100644 --- a/docker/sandbox-bundled/manifests/complete.yaml +++ b/docker/sandbox-bundled/manifests/complete.yaml @@ -798,7 +798,7 @@ type: Opaque --- apiVersion: v1 data: - haSharedSecret: UUxqaW5SeGlBbFNlQzVoag== + haSharedSecret: Q3BDekMwb0JHM1pxaXAycg== proxyPassword: "" proxyUsername: "" kind: Secret @@ -1362,7 +1362,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: bea0c8f293b54e309a353e0e8563e709ad817d372d2b1dce1114188693aa3f12 + checksum/secret: 65e52b61d9d5af16c4217a1ea9f625d8d18e3251ede28f012cc14ef665412685 labels: app: docker-registry release: flyte-sandbox diff --git a/docker/sandbox-bundled/manifests/dev.yaml b/docker/sandbox-bundled/manifests/dev.yaml index 917645af33..d0983c6f0d 100644 --- a/docker/sandbox-bundled/manifests/dev.yaml +++ b/docker/sandbox-bundled/manifests/dev.yaml @@ -499,7 +499,7 @@ metadata: --- apiVersion: v1 data: - haSharedSecret: ZmdJNWs5RUg4cWNVTVBzRw== + haSharedSecret: RU9mMGpZWGNCcnp4cEJ3bg== proxyPassword: "" proxyUsername: "" kind: Secret @@ -934,7 +934,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: a896f2c43dff6c05c154b51e4c9ec21c9e2f03ecaf4c1fed045d84523219cf63 + checksum/secret: 90ba33230f60ed1ee81e3088a4c88b0c9408c36ad5d40270cf05f503149f25a8 labels: app: docker-registry release: flyte-sandbox diff --git a/docs/deployment/agents/airflow.rst b/docs/deployment/agents/airflow.rst index ad6a6dab36..174967e20c 100644 --- 
a/docs/deployment/agents/airflow.rst +++ b/docs/deployment/agents/airflow.rst @@ -21,7 +21,7 @@ Specify agent configuration kubectl edit configmap flyte-sandbox-config -n flyte .. code-block:: yaml - :emphasize-lines: 7,11,16 + :emphasize-lines: 7,11 tasks: task-plugins: @@ -35,11 +35,6 @@ Specify agent configuration - container_array: k8s-array - airflow: agent-service - plugins: - agent-service: - supportedTaskTypes: - - airflow - .. group-tab:: Flyte core Create a file named ``values-override.yaml`` and add the following configuration to it. @@ -63,10 +58,6 @@ Specify agent configuration sidecar: sidecar container_array: k8s-array airflow: agent-service - plugins: - agent-service: - supportedTaskTypes: - - airflow Upgrade the Flyte Helm release diff --git a/docs/deployment/agents/bigquery.rst b/docs/deployment/agents/bigquery.rst index d3e4ee490e..0d30d0d3b3 100644 --- a/docs/deployment/agents/bigquery.rst +++ b/docs/deployment/agents/bigquery.rst @@ -29,7 +29,7 @@ Specify agent configuration kubectl edit configmap flyte-sandbox-config -n flyte .. code-block:: yaml - :emphasize-lines: 7,11,16 + :emphasize-lines: 7,11 tasks: task-plugins: @@ -42,11 +42,6 @@ Specify agent configuration - container: container - container_array: k8s-array - bigquery_query_job_task: agent-service - - plugins: - agent-service: - supportedTaskTypes: - - bigquery_query_job_task .. group-tab:: Flyte core @@ -71,10 +66,6 @@ Specify agent configuration sidecar: sidecar container_array: k8s-array bigquery_query_job_task: agent-service - plugins: - agent-service: - supportedTaskTypes: - - bigquery_query_job_task Ensure that the propeller has the correct service account for BigQuery. diff --git a/docs/deployment/agents/chatgpt.rst b/docs/deployment/agents/chatgpt.rst index afc569222f..cb0b44fa39 100644 --- a/docs/deployment/agents/chatgpt.rst +++ b/docs/deployment/agents/chatgpt.rst @@ -20,7 +20,7 @@ Specify agent configuration kubectl edit configmap flyte-sandbox-config -n flyte .. code-block:: yaml - :emphasize-lines: 7,11,16 + :emphasize-lines: 7,11 tasks: task-plugins: @@ -36,8 +36,6 @@ Specify agent configuration plugins: agent-service: - supportedTaskTypes: - - chatgpt # Configuring the timeout is optional. # Tasks like using ChatGPT with a large model might require a longer time, # so we have the option to adjust the timeout setting here. @@ -70,8 +68,6 @@ Specify agent configuration chatgpt: agent-service plugins: agent-service: - supportedTaskTypes: - - chatgpt # Configuring the timeout is optional. # Tasks like using ChatGPT with a large model might require a longer time, # so we have the option to adjust the timeout setting here. diff --git a/docs/deployment/agents/databricks.rst b/docs/deployment/agents/databricks.rst index b21fab3c57..0458fb3667 100644 --- a/docs/deployment/agents/databricks.rst +++ b/docs/deployment/agents/databricks.rst @@ -139,7 +139,7 @@ Specify agent configuration kubectl edit configmap flyte-sandbox-config -n flyte .. code-block:: yaml - :emphasize-lines: 7,12,16 + :emphasize-lines: 7,12 tasks: task-plugins: @@ -153,17 +153,13 @@ Specify agent configuration - sidecar - k8s-array - agent-service - plugins: - agent-service: - supportedTaskTypes: - - spark .. group-tab:: Helm chart Edit the relevant YAML file to specify the plugin. .. 
code-block:: yaml - :emphasize-lines: 7,11,15 + :emphasize-lines: 7,11 tasks: task-plugins: @@ -176,17 +172,13 @@ Specify agent configuration - container: container - container_array: k8s-array - spark: agent-service - plugins: - agent-service: - supportedTaskTypes: - - spark .. group-tab:: Flyte core Create a file named ``values-override.yaml`` and add the following config to it: .. code-block:: yaml - :emphasize-lines: 9,14-17 + :emphasize-lines: 9 enabled_plugins: tasks: @@ -201,10 +193,6 @@ Specify agent configuration sidecar: sidecar container_array: k8s-array spark: agent-service - plugins: - agent-service: - supportedTaskTypes: - - spark Add the Databricks access token ------------------------------- diff --git a/docs/deployment/agents/mmcloud.rst b/docs/deployment/agents/mmcloud.rst index 422162af27..9ccb101aa2 100644 --- a/docs/deployment/agents/mmcloud.rst +++ b/docs/deployment/agents/mmcloud.rst @@ -71,8 +71,6 @@ Enable the MMCloud agent by adding the following config to the relevant YAML fil mmcloud-agent: endpoint: insecure: true - supportedTaskTypes: - - mmcloud_task agentForTaskTypes: - mmcloud_task: mmcloud-agent diff --git a/docs/deployment/agents/openai_batch.rst b/docs/deployment/agents/openai_batch.rst index 7aff9d262e..2cfa70471a 100644 --- a/docs/deployment/agents/openai_batch.rst +++ b/docs/deployment/agents/openai_batch.rst @@ -19,7 +19,7 @@ Specify agent configuration kubectl edit configmap flyte-sandbox-config -n flyte .. code-block:: yaml - :emphasize-lines: 7,11,15 + :emphasize-lines: 7,11 tasks: task-plugins: @@ -32,17 +32,13 @@ Specify agent configuration - container: container - container_array: k8s-array - openai-batch: agent-service - plugins: - agent-service: - supportedTaskTypes: - - openai-batch .. group-tab:: Flyte core Create a file named ``values-override.yaml`` and add the following configuration to it: .. code-block:: yaml - :emphasize-lines: 9,14,18 + :emphasize-lines: 9,14 configmap: enabled_plugins: @@ -58,10 +54,6 @@ Specify agent configuration sidecar: sidecar container_array: k8s-array openai-batch: agent-service - plugins: - agent-service: - supportedTaskTypes: - - openai-batch Add the OpenAI API token ------------------------ diff --git a/docs/deployment/agents/sagemaker_inference.rst b/docs/deployment/agents/sagemaker_inference.rst index 5ceb248c2d..3f03e08f55 100644 --- a/docs/deployment/agents/sagemaker_inference.rst +++ b/docs/deployment/agents/sagemaker_inference.rst @@ -19,7 +19,7 @@ Specify agent configuration kubectl edit configmap flyte-sandbox-config -n flyte .. code-block:: yaml - :emphasize-lines: 7,11-12,16-17 + :emphasize-lines: 7,11-12 tasks: task-plugins: @@ -33,18 +33,13 @@ Specify agent configuration - container_array: k8s-array - boto: agent-service - sagemaker-endpoint: agent-service - plugins: - agent-service: - supportedTaskTypes: - - boto - - sagemaker-endpoint .. group-tab:: Flyte core Create a file named ``values-override.yaml`` and add the following configuration to it: .. 
code-block:: yaml - :emphasize-lines: 9,14-15,19-20 + :emphasize-lines: 9,14-15 configmap: enabled_plugins: @@ -61,11 +56,6 @@ Specify agent configuration container_array: k8s-array boto: agent-service sagemaker-endpoint: agent-service - plugins: - agent-service: - supportedTaskTypes: - - boto - - sagemaker-endpoint AWS credentials --------------- diff --git a/docs/deployment/agents/sensor.rst b/docs/deployment/agents/sensor.rst index 958e5d896a..312e34bcd1 100644 --- a/docs/deployment/agents/sensor.rst +++ b/docs/deployment/agents/sensor.rst @@ -56,7 +56,7 @@ Enable the sensor agent by adding the following config to the relevant YAML file kubectl edit configmap flyte-sandbox-config -n flyte .. code-block:: yaml - :emphasize-lines: 7,11,16 + :emphasize-lines: 7,11 tasks: task-plugins: @@ -69,11 +69,6 @@ Enable the sensor agent by adding the following config to the relevant YAML file - container: container - container_array: k8s-array - sensor: agent-service - - plugins: - agent-service: - supportedTaskTypes: - - sensor .. group-tab:: Flyte core @@ -98,10 +93,6 @@ Enable the sensor agent by adding the following config to the relevant YAML file sidecar: sidecar container_array: k8s-array sensor: agent-service - plugins: - agent-service: - supportedTaskTypes: - - sensor Upgrade the deployment diff --git a/docs/deployment/agents/snowflake.rst b/docs/deployment/agents/snowflake.rst index d6ee74125b..dad62a3795 100644 --- a/docs/deployment/agents/snowflake.rst +++ b/docs/deployment/agents/snowflake.rst @@ -35,7 +35,7 @@ Specify agent configuration kubectl edit configmap flyte-sandbox-config -n flyte .. code-block:: yaml - :emphasize-lines: 7,11,16 + :emphasize-lines: 7,11 tasks: task-plugins: @@ -49,11 +49,6 @@ Specify agent configuration - container_array: k8s-array - snowflake: agent-service - plugins: - agent-service: - supportedTaskTypes: - - snowflake - .. group-tab:: Flyte core Create a file named ``values-override.yaml`` and add the following configuration to it. @@ -77,10 +72,6 @@ Specify agent configuration sidecar: sidecar container_array: k8s-array snowflake: agent-service - plugins: - agent-service: - supportedTaskTypes: - - snowflake Ensure that the propeller has the correct service account for Snowflake. diff --git a/docs/deployment/configuration/generated/flytepropeller_config.rst b/docs/deployment/configuration/generated/flytepropeller_config.rst index be6f7ee7f0..554ebec849 100644 --- a/docs/deployment/configuration/generated/flytepropeller_config.rst +++ b/docs/deployment/configuration/generated/flytepropeller_config.rst @@ -994,9 +994,6 @@ agent-service (`agent.Config`_) Value: 50 ProjectScopeResourceConstraint: Value: 100 - supportedTaskTypes: - - task_type_1 - - task_type_2 webApi: caching: maxSystemFailures: 5 diff --git a/docs/flyte_agents/developing_agents.md b/docs/flyte_agents/developing_agents.md index ee989b812f..fd310088ef 100644 --- a/docs/flyte_agents/developing_agents.md +++ b/docs/flyte_agents/developing_agents.md @@ -197,10 +197,6 @@ you can route particular task requests to designated agent services by adjusting ```yaml plugins: agent-service: - supportedTaskTypes: - - bigquery_query_job_task - - default_task - - custom_task # By default, all requests will be sent to the default agent. 
defaultAgent: endpoint: "dns:///flyteagent.flyte.svc.cluster.local:8000" @@ -224,4 +220,4 @@ you can route particular task requests to designated agent services by adjusting agentForTaskTypes: # It will override the default agent for custom_task, which means propeller will send the request to this agent. - custom_task: custom_agent -``` \ No newline at end of file +``` From 573516f68122df04134ea9a8a09019689d6311ba Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Tue, 3 Sep 2024 13:11:26 +0800 Subject: [PATCH 55/65] [flyteagent] Add Logging for Agent Supported Task Types (#5718) * Add Logging for Agent-Supported Task Types Signed-off-by: Future-Outlier * use make(map[string]struct{}) Signed-off-by: Future-Outlier --------- Signed-off-by: Future-Outlier Signed-off-by: Bugra Gedik --- flyteplugins/go/tasks/plugins/webapi/agent/client.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/flyteplugins/go/tasks/plugins/webapi/agent/client.go b/flyteplugins/go/tasks/plugins/webapi/agent/client.go index 35e6662107..148113fb38 100644 --- a/flyteplugins/go/tasks/plugins/webapi/agent/client.go +++ b/flyteplugins/go/tasks/plugins/webapi/agent/client.go @@ -3,6 +3,7 @@ package agent import ( "context" "crypto/x509" + "strings" "golang.org/x/exp/maps" "google.golang.org/grpc" @@ -127,19 +128,26 @@ func getAgentRegistry(ctx context.Context, cs *ClientSet) Registry { continue } + agentSupportedTaskCategories := make(map[string]struct{}) for _, agent := range res.GetAgents() { deprecatedSupportedTaskTypes := agent.SupportedTaskTypes for _, supportedTaskType := range deprecatedSupportedTaskTypes { agent := &Agent{AgentDeployment: agentDeployment, IsSync: agent.IsSync} newAgentRegistry[supportedTaskType] = map[int32]*Agent{defaultTaskTypeVersion: agent} + agentSupportedTaskCategories[supportedTaskType] = struct{}{} } supportedTaskCategories := agent.SupportedTaskCategories for _, supportedCategory := range supportedTaskCategories { agent := &Agent{AgentDeployment: agentDeployment, IsSync: agent.IsSync} - newAgentRegistry[supportedCategory.GetName()] = map[int32]*Agent{supportedCategory.GetVersion(): agent} + supportedCategoryName := supportedCategory.GetName() + newAgentRegistry[supportedCategoryName] = map[int32]*Agent{supportedCategory.GetVersion(): agent} + agentSupportedTaskCategories[supportedCategoryName] = struct{}{} } + } + logger.Infof(ctx, "AgentDeployment [%v] supports the following task types: [%v]", agentDeployment.Endpoint, + strings.Join(maps.Keys(agentSupportedTaskCategories), ", ")) } // If the agent doesn't implement the metadata service, we construct the registry based on the configuration @@ -160,6 +168,7 @@ func getAgentRegistry(ctx context.Context, cs *ClientSet) Registry { } } + logger.Infof(ctx, "AgentDeployments support the following task types: [%v]", strings.Join(maps.Keys(newAgentRegistry), ", ")) return newAgentRegistry } From 9070fe7d418a2a1bc561033113df5e58ab03220c Mon Sep 17 00:00:00 2001 From: Samhita Alla Date: Wed, 4 Sep 2024 02:36:46 +0530 Subject: [PATCH 56/65] extend pod customization to include init containers (#5685) * apply pod config to init containers Signed-off-by: Samhita Alla * remove container Signed-off-by: Samhita Alla * add test Signed-off-by: Samhita Alla * Add bool to check init containers Signed-off-by: Eduardo Apolinario --------- Signed-off-by: Samhita Alla Signed-off-by: Eduardo Apolinario Co-authored-by: Eduardo Apolinario Signed-off-by: Bugra Gedik --- .../tasks/pluginmachinery/flytek8s/pod_helper.go | 9 +++++++++ 
.../go/tasks/plugins/k8s/pod/container_test.go | 16 ++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper.go index db62aeb4e7..e8252090df 100644 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper.go @@ -348,6 +348,15 @@ func ApplyFlytePodConfiguration(ctx context.Context, tCtx pluginsCore.TaskExecut IncludeConsoleURL: hasExternalLinkType(taskTemplate), } + // iterate over the initContainers first + for index := range podSpec.InitContainers { + var resourceMode = ResourceCustomizationModeEnsureExistingResourcesInRange + + if err := AddFlyteCustomizationsToContainer(ctx, templateParameters, resourceMode, &podSpec.InitContainers[index]); err != nil { + return nil, nil, err + } + } + resourceRequests := make([]v1.ResourceRequirements, 0, len(podSpec.Containers)) var primaryContainer *v1.Container for index, container := range podSpec.Containers { diff --git a/flyteplugins/go/tasks/plugins/k8s/pod/container_test.go b/flyteplugins/go/tasks/plugins/k8s/pod/container_test.go index 5d89e2f0ec..f9d49b2448 100644 --- a/flyteplugins/go/tasks/plugins/k8s/pod/container_test.go +++ b/flyteplugins/go/tasks/plugins/k8s/pod/container_test.go @@ -49,6 +49,13 @@ func dummyContainerTaskTemplate(command []string, args []string) *core.TaskTempl func dummyContainerTaskTemplateWithPodSpec(command []string, args []string) *core.TaskTemplate { podSpec := v1.PodSpec{ + InitContainers: []v1.Container{ + v1.Container{ + Name: "test-image", + Command: command, + Args: args, + }, + }, Containers: []v1.Container{ v1.Container{ Name: "test-image", @@ -174,24 +181,28 @@ func TestContainerTaskExecutor_BuildResource(t *testing.T) { taskTemplate *core.TaskTemplate taskMetadata pluginsCore.TaskExecutionMetadata expectServiceAccount string + checkInitContainer bool }{ { name: "BuildResource", taskTemplate: dummyContainerTaskTemplate(command, args), taskMetadata: dummyContainerTaskMetadata(containerResourceRequirements, nil, true, ""), expectServiceAccount: serviceAccount, + checkInitContainer: false, }, { name: "BuildResource_PodTemplate", taskTemplate: dummyContainerTaskTemplateWithPodSpec(command, args), taskMetadata: dummyContainerTaskMetadata(containerResourceRequirements, nil, true, ""), expectServiceAccount: podTemplateServiceAccount, + checkInitContainer: true, }, { name: "BuildResource_SecurityContext", taskTemplate: dummyContainerTaskTemplate(command, args), taskMetadata: dummyContainerTaskMetadata(containerResourceRequirements, nil, false, ""), expectServiceAccount: securityContextServiceAccount, + checkInitContainer: false, }, } for _, tc := range testCases { @@ -213,6 +224,11 @@ func TestContainerTaskExecutor_BuildResource(t *testing.T) { assert.Equal(t, command, j.Spec.Containers[0].Command) assert.Equal(t, []string{"test-data-reference"}, j.Spec.Containers[0].Args) + if tc.checkInitContainer { + assert.Equal(t, command, j.Spec.InitContainers[0].Command) + assert.Equal(t, []string{"test-data-reference"}, j.Spec.InitContainers[0].Args) + } + assert.Equal(t, tc.expectServiceAccount, j.Spec.ServiceAccountName) }) } From 4a7cb55fe7b7a875e7d372e08ce2b74d804b58bf Mon Sep 17 00:00:00 2001 From: Nikki Everett Date: Thu, 5 Sep 2024 01:58:20 -0500 Subject: [PATCH 57/65] Update "Try Serverless" language in Quickstart guide (#5698) * update langauge Signed-off-by: nikki everett * Update docs/quickstart_guide.md 
Co-authored-by: Haytham Abuelfutuh Signed-off-by: Nikki Everett --------- Signed-off-by: nikki everett Signed-off-by: Nikki Everett Co-authored-by: Haytham Abuelfutuh Signed-off-by: Bugra Gedik --- docs/quickstart_guide.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/quickstart_guide.md b/docs/quickstart_guide.md index e2e03cc8c5..244f0934c6 100644 --- a/docs/quickstart_guide.md +++ b/docs/quickstart_guide.md @@ -6,12 +6,12 @@ In this guide, you will create and run a Flyte workflow in a local Python enviro ````{dropdown} Try Flyte in your browser :animate: fade-in-slide-down -Union Serverless is a version of Flyte hosted by [Union](https://www.union.ai/) that you can try in your browser. Sign up for the waitlist below and check out the [Union Serverless Quickstart guide](https://docs.union.ai/serverless/quick-start). +Union Serverless is a SaaS offering by [Union](https://www.union.ai/) built on Flyte. Sign up below and check out the [Union Serverless Quickstart guide](https://docs.union.ai/serverless/quick-start). ```{button-link} https://signup.union.ai/ :color: warning -Join the Union Serverless waitlist +Sign up for Union Serverless ``` ```` From 8c303f4d74f4931ca364295ecfa13265deabe758 Mon Sep 17 00:00:00 2001 From: Jason Parraga Date: Thu, 5 Sep 2024 04:46:10 -0700 Subject: [PATCH 58/65] Refactor flyteadmin to pass proto structs as pointers (#5717) Signed-off-by: Bugra Gedik --- flyteadmin/.golangci.yml | 5 +- flyteadmin/dataproxy/service.go | 32 ++-- flyteadmin/dataproxy/service_test.go | 16 +- .../implementations/cloudevent_publisher.go | 12 +- .../node_execution_event_writer.go | 6 +- .../node_execution_event_writer_test.go | 2 +- .../workflow_execution_event_writer.go | 6 +- .../workflow_execution_event_writer_test.go | 2 +- .../async/events/interfaces/node_execution.go | 2 +- .../events/interfaces/workflow_execution.go | 2 +- .../mocks/node_execution_event_writer.go | 2 +- .../mocks/workflow_execution_event_writer.go | 2 +- flyteadmin/pkg/async/notifications/email.go | 34 ++-- .../pkg/async/notifications/email_test.go | 10 +- .../implementations/aws_emailer.go | 4 +- .../implementations/aws_emailer_test.go | 8 +- .../implementations/aws_processor.go | 4 +- .../implementations/aws_processor_test.go | 4 +- .../implementations/gcp_processor.go | 4 +- .../implementations/gcp_processor_test.go | 4 +- .../implementations/noop_notifications.go | 2 +- .../implementations/sandbox_processor.go | 4 +- .../implementations/sandbox_processor_test.go | 6 +- .../implementations/sendgrid_emailer.go | 4 +- .../implementations/sendgrid_emailer_test.go | 2 +- .../async/notifications/interfaces/emailer.go | 2 +- .../async/notifications/mocks/processor.go | 4 +- .../schedule/aws/cloud_watch_scheduler.go | 12 +- .../aws/cloud_watch_scheduler_test.go | 16 +- .../pkg/async/schedule/aws/serialization.go | 8 +- .../async/schedule/aws/serialization_test.go | 4 +- flyteadmin/pkg/async/schedule/aws/shared.go | 2 +- .../pkg/async/schedule/aws/shared_test.go | 2 +- .../async/schedule/aws/workflow_executor.go | 24 +-- .../schedule/aws/workflow_executor_test.go | 30 +-- .../schedule/interfaces/event_scheduler.go | 8 +- .../schedule/mocks/mock_event_scheduler.go | 6 +- .../async/schedule/noop/event_scheduler.go | 2 +- flyteadmin/pkg/common/flyte_url.go | 8 +- flyteadmin/pkg/common/flyte_url_test.go | 8 +- .../data/implementations/aws_remote_url.go | 10 +- .../data/implementations/gcp_remote_url.go | 10 +- .../data/implementations/noop_remote_url.go | 6 +- 
flyteadmin/pkg/data/interfaces/remote.go | 2 +- flyteadmin/pkg/data/mocks/remote.go | 6 +- .../impl/description_entity_manager.go | 6 +- .../impl/description_entity_manager_test.go | 16 +- .../pkg/manager/impl/execution_manager.go | 108 +++++------ .../manager/impl/execution_manager_test.go | 174 +++++++++--------- .../executions/quality_of_service_test.go | 2 +- .../pkg/manager/impl/executions/queues.go | 4 +- .../manager/impl/executions/queues_test.go | 16 +- .../pkg/manager/impl/launch_plan_manager.go | 58 +++--- .../manager/impl/launch_plan_manager_test.go | 92 ++++----- .../pkg/manager/impl/metrics_manager.go | 28 +-- .../pkg/manager/impl/metrics_manager_test.go | 10 +- .../pkg/manager/impl/named_entity_manager.go | 12 +- .../manager/impl/named_entity_manager_test.go | 12 +- .../manager/impl/node_execution_manager.go | 34 ++-- .../impl/node_execution_manager_test.go | 88 ++++----- .../pkg/manager/impl/project_manager.go | 18 +- .../pkg/manager/impl/project_manager_test.go | 30 +-- .../impl/resources/resource_manager.go | 40 ++-- .../impl/resources/resource_manager_test.go | 34 ++-- flyteadmin/pkg/manager/impl/signal_manager.go | 10 +- .../pkg/manager/impl/signal_manager_test.go | 20 +- .../manager/impl/task_execution_manager.go | 28 +-- .../impl/task_execution_manager_test.go | 24 +-- flyteadmin/pkg/manager/impl/task_manager.go | 20 +- .../pkg/manager/impl/task_manager_test.go | 16 +- .../manager/impl/testutils/mock_requests.go | 39 ++-- flyteadmin/pkg/manager/impl/util/data.go | 16 +- flyteadmin/pkg/manager/impl/util/data_test.go | 34 ++-- flyteadmin/pkg/manager/impl/util/filters.go | 6 +- .../pkg/manager/impl/util/filters_test.go | 4 +- flyteadmin/pkg/manager/impl/util/shared.go | 30 +-- .../pkg/manager/impl/util/shared_test.go | 72 ++++---- .../impl/util/single_task_execution.go | 22 +-- .../impl/util/single_task_execution_test.go | 8 +- .../impl/validation/attributes_validator.go | 16 +- .../validation/attributes_validator_test.go | 50 ++--- .../impl/validation/execution_validator.go | 4 +- .../validation/execution_validator_test.go | 12 +- .../impl/validation/launch_plan_validator.go | 4 +- .../validation/launch_plan_validator_test.go | 12 +- .../impl/validation/named_entity_validator.go | 6 +- .../validation/named_entity_validator_test.go | 36 ++-- .../validation/node_execution_validator.go | 4 +- .../node_execution_validator_test.go | 14 +- .../impl/validation/project_validator.go | 8 +- .../impl/validation/project_validator_test.go | 44 ++--- .../impl/validation/signal_validator.go | 12 +- .../impl/validation/signal_validator_test.go | 26 +-- .../validation/task_execution_validator.go | 4 +- .../task_execution_validator_test.go | 16 +- .../manager/impl/validation/task_validator.go | 16 +- .../impl/validation/task_validator_test.go | 14 +- .../pkg/manager/impl/validation/validation.go | 12 +- .../impl/validation/validation_test.go | 54 +++--- .../impl/validation/workflow_validator.go | 6 +- .../validation/workflow_validator_test.go | 4 +- .../pkg/manager/impl/workflow_manager.go | 40 ++-- .../pkg/manager/impl/workflow_manager_test.go | 30 +-- .../manager/interfaces/description_entity.go | 4 +- .../pkg/manager/interfaces/execution.go | 18 +- .../pkg/manager/interfaces/launch_plan.go | 14 +- flyteadmin/pkg/manager/interfaces/metrics.go | 2 +- .../pkg/manager/interfaces/named_entity.go | 6 +- .../pkg/manager/interfaces/node_execution.go | 12 +- flyteadmin/pkg/manager/interfaces/project.go | 10 +- flyteadmin/pkg/manager/interfaces/resource.go | 20 +- 
flyteadmin/pkg/manager/interfaces/signal.go | 6 +- flyteadmin/pkg/manager/interfaces/task.go | 8 +- .../pkg/manager/interfaces/task_execution.go | 8 +- flyteadmin/pkg/manager/interfaces/workflow.go | 8 +- flyteadmin/pkg/manager/mocks/execution.go | 36 ++-- flyteadmin/pkg/manager/mocks/launch_plan.go | 28 +-- .../pkg/manager/mocks/metrics_interface.go | 8 +- flyteadmin/pkg/manager/mocks/named_entity.go | 12 +- .../pkg/manager/mocks/node_execution.go | 22 +-- flyteadmin/pkg/manager/mocks/project.go | 20 +- flyteadmin/pkg/manager/mocks/resource.go | 34 ++-- .../pkg/manager/mocks/signal_interface.go | 48 ++--- flyteadmin/pkg/manager/mocks/task.go | 12 +- .../pkg/manager/mocks/task_execution.go | 16 +- flyteadmin/pkg/manager/mocks/workflow.go | 12 +- .../gormimpl/node_execution_repo_test.go | 8 +- .../gormimpl/task_execution_repo_test.go | 2 +- .../interfaces/node_execution_repo.go | 2 +- .../interfaces/task_execution_repo.go | 2 +- .../transformers/description_entity.go | 2 +- .../transformers/description_entity_test.go | 2 +- .../repositories/transformers/execution.go | 4 +- .../transformers/execution_event.go | 2 +- .../transformers/execution_event_test.go | 2 +- .../transformers/execution_test.go | 30 +-- .../repositories/transformers/launch_plan.go | 8 +- .../transformers/launch_plan_test.go | 12 +- .../transformers/node_execution_event.go | 2 +- .../transformers/node_execution_event_test.go | 2 +- .../pkg/repositories/transformers/project.go | 8 +- .../repositories/transformers/project_test.go | 2 +- .../pkg/repositories/transformers/resource.go | 16 +- .../transformers/resource_test.go | 10 +- .../pkg/repositories/transformers/signal.go | 10 +- .../repositories/transformers/signal_test.go | 14 +- .../pkg/repositories/transformers/task.go | 6 +- .../repositories/transformers/task_test.go | 2 +- .../pkg/repositories/transformers/workflow.go | 2 +- flyteadmin/pkg/rpc/adminservice/attributes.go | 56 +----- .../rpc/adminservice/description_entity.go | 13 +- flyteadmin/pkg/rpc/adminservice/execution.go | 53 +----- .../pkg/rpc/adminservice/launch_plan.go | 39 +--- .../pkg/rpc/adminservice/named_entity.go | 21 +-- .../pkg/rpc/adminservice/node_execution.go | 34 +--- flyteadmin/pkg/rpc/adminservice/project.go | 28 +-- flyteadmin/pkg/rpc/adminservice/task.go | 23 +-- .../pkg/rpc/adminservice/task_execution.go | 26 +-- .../rpc/adminservice/tests/execution_test.go | 41 ++--- .../adminservice/tests/launch_plan_test.go | 12 +- .../adminservice/tests/node_execution_test.go | 18 +- .../adminservice/tests/project_domain_test.go | 8 +- .../rpc/adminservice/tests/project_test.go | 10 +- .../adminservice/tests/task_execution_test.go | 34 ++-- .../pkg/rpc/adminservice/tests/task_test.go | 10 +- .../rpc/adminservice/tests/workflow_test.go | 4 +- flyteadmin/pkg/rpc/adminservice/workflow.go | 24 +-- flyteadmin/pkg/rpc/signal_service.go | 17 +- flyteadmin/pkg/rpc/signal_service_test.go | 36 ---- .../interfaces/application_configuration.go | 4 +- .../mocks/quality_of_service_configuration.go | 10 +- .../quality_of_service_configuration.go | 2 +- .../mocks/mock_configuration_provider.go | 2 +- .../runtime/quality_of_service_provider.go | 6 +- .../workflowengine/impl/interface_provider.go | 20 +- .../impl/interface_provider_test.go | 6 +- .../workflowengine/impl/prepare_execution.go | 5 +- .../scheduler/dbapi/event_scheduler_impl.go | 4 +- .../dbapi/event_scheduler_impl_test.go | 16 +- flyteadmin/scheduler/identifier/identifier.go | 8 +- flyteadmin/tests/task_test.go | 24 +-- flyteadmin/tests/workflow_test.go | 2 
+- flytectl/cmd/compile/compile.go | 4 +- flytepropeller/pkg/compiler/admin.go | 14 +- flytepropeller/pkg/compiler/admin_test.go | 4 +- flytepropeller/pkg/compiler/builders.go | 4 +- flytepropeller/pkg/compiler/common/id_set.go | 2 +- flytepropeller/pkg/compiler/common/index.go | 2 +- .../pkg/compiler/common/mocks/task.go | 12 +- .../pkg/compiler/common/mocks/workflow.go | 32 ++-- .../compiler/common/mocks/workflow_builder.go | 34 ++-- flytepropeller/pkg/compiler/requirements.go | 4 +- .../pkg/compiler/validators/bindings_test.go | 8 +- .../pkg/compiler/validators/interface.go | 6 +- .../pkg/compiler/validators/interface_test.go | 12 +- .../pkg/compiler/validators/node.go | 4 +- .../pkg/compiler/workflow_compiler_test.go | 4 +- .../nodes/dynamic/dynamic_workflow.go | 5 +- 198 files changed, 1472 insertions(+), 1733 deletions(-) diff --git a/flyteadmin/.golangci.yml b/flyteadmin/.golangci.yml index 76977148dc..4dbb031812 100644 --- a/flyteadmin/.golangci.yml +++ b/flyteadmin/.golangci.yml @@ -36,5 +36,6 @@ linters-settings: - prefix(github.com/flyteorg) skip-generated: true issues: - exclude: - - copylocks + exclude-rules: + - path: pkg/workflowengine/impl/prepare_execution.go + text: "copies lock" diff --git a/flyteadmin/dataproxy/service.go b/flyteadmin/dataproxy/service.go index 5bb7a16632..d61998835f 100644 --- a/flyteadmin/dataproxy/service.go +++ b/flyteadmin/dataproxy/service.go @@ -161,7 +161,7 @@ func (s Service) CreateDownloadLink(ctx context.Context, req *service.CreateDown // Lookup task, node, workflow execution var nativeURL string if nodeExecutionIDEnvelope, casted := req.GetSource().(*service.CreateDownloadLinkRequest_NodeExecutionId); casted { - node, err := s.nodeExecutionManager.GetNodeExecution(ctx, admin.NodeExecutionGetRequest{ + node, err := s.nodeExecutionManager.GetNodeExecution(ctx, &admin.NodeExecutionGetRequest{ Id: nodeExecutionIDEnvelope.NodeExecutionId, }) @@ -309,9 +309,9 @@ func (s Service) validateResolveArtifactRequest(req *service.GetDataRequest) err // GetCompleteTaskExecutionID returns the task execution identifier for the task execution with the Task ID filled in. // The one coming from the node execution doesn't have this as this is not data encapsulated in the flyte url. 
-func (s Service) GetCompleteTaskExecutionID(ctx context.Context, taskExecID core.TaskExecutionIdentifier) (*core.TaskExecutionIdentifier, error) { +func (s Service) GetCompleteTaskExecutionID(ctx context.Context, taskExecID *core.TaskExecutionIdentifier) (*core.TaskExecutionIdentifier, error) { - taskExecs, err := s.taskExecutionManager.ListTaskExecutions(ctx, admin.TaskExecutionListRequest{ + taskExecs, err := s.taskExecutionManager.ListTaskExecutions(ctx, &admin.TaskExecutionListRequest{ NodeExecutionId: taskExecID.GetNodeExecutionId(), Limit: 1, Filters: fmt.Sprintf("eq(retry_attempt,%s)", strconv.Itoa(int(taskExecID.RetryAttempt))), @@ -326,9 +326,9 @@ func (s Service) GetCompleteTaskExecutionID(ctx context.Context, taskExecID core return taskExec.Id, nil } -func (s Service) GetTaskExecutionID(ctx context.Context, attempt int, nodeExecID core.NodeExecutionIdentifier) (*core.TaskExecutionIdentifier, error) { - taskExecs, err := s.taskExecutionManager.ListTaskExecutions(ctx, admin.TaskExecutionListRequest{ - NodeExecutionId: &nodeExecID, +func (s Service) GetTaskExecutionID(ctx context.Context, attempt int, nodeExecID *core.NodeExecutionIdentifier) (*core.TaskExecutionIdentifier, error) { + taskExecs, err := s.taskExecutionManager.ListTaskExecutions(ctx, &admin.TaskExecutionListRequest{ + NodeExecutionId: nodeExecID, Limit: 1, Filters: fmt.Sprintf("eq(retry_attempt,%s)", strconv.Itoa(attempt)), }) @@ -342,11 +342,11 @@ func (s Service) GetTaskExecutionID(ctx context.Context, attempt int, nodeExecID return taskExec.Id, nil } -func (s Service) GetDataFromNodeExecution(ctx context.Context, nodeExecID core.NodeExecutionIdentifier, ioType common.ArtifactType, name string) ( +func (s Service) GetDataFromNodeExecution(ctx context.Context, nodeExecID *core.NodeExecutionIdentifier, ioType common.ArtifactType, name string) ( *service.GetDataResponse, error) { - resp, err := s.nodeExecutionManager.GetNodeExecutionData(ctx, admin.NodeExecutionGetDataRequest{ - Id: &nodeExecID, + resp, err := s.nodeExecutionManager.GetNodeExecutionData(ctx, &admin.NodeExecutionGetDataRequest{ + Id: nodeExecID, }) if err != nil { return nil, err @@ -361,7 +361,7 @@ func (s Service) GetDataFromNodeExecution(ctx context.Context, nodeExecID core.N // Assume deck, and create a download link request dlRequest := service.CreateDownloadLinkRequest{ ArtifactType: service.ArtifactType_ARTIFACT_TYPE_DECK, - Source: &service.CreateDownloadLinkRequest_NodeExecutionId{NodeExecutionId: &nodeExecID}, + Source: &service.CreateDownloadLinkRequest_NodeExecutionId{NodeExecutionId: nodeExecID}, } resp, err := s.CreateDownloadLink(ctx, &dlRequest) if err != nil { @@ -391,12 +391,12 @@ func (s Service) GetDataFromNodeExecution(ctx context.Context, nodeExecID core.N }, nil } -func (s Service) GetDataFromTaskExecution(ctx context.Context, taskExecID core.TaskExecutionIdentifier, ioType common.ArtifactType, name string) ( +func (s Service) GetDataFromTaskExecution(ctx context.Context, taskExecID *core.TaskExecutionIdentifier, ioType common.ArtifactType, name string) ( *service.GetDataResponse, error) { var lm *core.LiteralMap - reqT := admin.TaskExecutionGetDataRequest{ - Id: &taskExecID, + reqT := &admin.TaskExecutionGetDataRequest{ + Id: taskExecID, } resp, err := s.taskExecutionManager.GetTaskExecutionData(ctx, reqT) if err != nil { @@ -445,13 +445,13 @@ func (s Service) GetData(ctx context.Context, req *service.GetDataRequest) ( } if execution.NodeExecID != nil { - return s.GetDataFromNodeExecution(ctx, *execution.NodeExecID, 
execution.IOType, execution.LiteralName) + return s.GetDataFromNodeExecution(ctx, execution.NodeExecID, execution.IOType, execution.LiteralName) } else if execution.PartialTaskExecID != nil { - taskExecID, err := s.GetCompleteTaskExecutionID(ctx, *execution.PartialTaskExecID) + taskExecID, err := s.GetCompleteTaskExecutionID(ctx, execution.PartialTaskExecID) if err != nil { return nil, err } - return s.GetDataFromTaskExecution(ctx, *taskExecID, execution.IOType, execution.LiteralName) + return s.GetDataFromTaskExecution(ctx, taskExecID, execution.IOType, execution.LiteralName) } return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "failed to parse get data request %v", req) diff --git a/flyteadmin/dataproxy/service_test.go b/flyteadmin/dataproxy/service_test.go index 3716b98914..81193e106b 100644 --- a/flyteadmin/dataproxy/service_test.go +++ b/flyteadmin/dataproxy/service_test.go @@ -160,7 +160,7 @@ func TestCreateUploadLocationMore(t *testing.T) { func TestCreateDownloadLink(t *testing.T) { dataStore := commonMocks.GetMockStorageClient() nodeExecutionManager := &mocks.MockNodeExecutionManager{} - nodeExecutionManager.SetGetNodeExecutionFunc(func(ctx context.Context, request admin.NodeExecutionGetRequest) (*admin.NodeExecution, error) { + nodeExecutionManager.SetGetNodeExecutionFunc(func(ctx context.Context, request *admin.NodeExecutionGetRequest) (*admin.NodeExecution, error) { return &admin.NodeExecution{ Closure: &admin.NodeExecutionClosure{ DeckUri: "s3://something/something", @@ -282,14 +282,14 @@ func TestService_GetData(t *testing.T) { } nodeExecutionManager.SetGetNodeExecutionDataFunc( - func(ctx context.Context, request admin.NodeExecutionGetDataRequest) (*admin.NodeExecutionGetDataResponse, error) { + func(ctx context.Context, request *admin.NodeExecutionGetDataRequest) (*admin.NodeExecutionGetDataResponse, error) { return &admin.NodeExecutionGetDataResponse{ FullInputs: inputsLM, FullOutputs: outputsLM, }, nil }, ) - taskExecutionManager.SetListTaskExecutionsCallback(func(ctx context.Context, request admin.TaskExecutionListRequest) (*admin.TaskExecutionList, error) { + taskExecutionManager.SetListTaskExecutionsCallback(func(ctx context.Context, request *admin.TaskExecutionListRequest) (*admin.TaskExecutionList, error) { return &admin.TaskExecutionList{ TaskExecutions: []*admin.TaskExecution{ { @@ -315,7 +315,7 @@ func TestService_GetData(t *testing.T) { }, }, nil }) - taskExecutionManager.SetGetTaskExecutionDataCallback(func(ctx context.Context, request admin.TaskExecutionGetDataRequest) (*admin.TaskExecutionGetDataResponse, error) { + taskExecutionManager.SetGetTaskExecutionDataCallback(func(ctx context.Context, request *admin.TaskExecutionGetDataRequest) (*admin.TaskExecutionGetDataResponse, error) { return &admin.TaskExecutionGetDataResponse{ FullInputs: inputsLM, FullOutputs: outputsLM, @@ -388,10 +388,10 @@ func TestService_Error(t *testing.T) { assert.NoError(t, err) t.Run("get a working set of urls without retry attempt", func(t *testing.T) { - taskExecutionManager.SetListTaskExecutionsCallback(func(ctx context.Context, request admin.TaskExecutionListRequest) (*admin.TaskExecutionList, error) { + taskExecutionManager.SetListTaskExecutionsCallback(func(ctx context.Context, request *admin.TaskExecutionListRequest) (*admin.TaskExecutionList, error) { return nil, errors.NewFlyteAdminErrorf(1, "not found") }) - nodeExecID := core.NodeExecutionIdentifier{ + nodeExecID := &core.NodeExecutionIdentifier{ NodeId: "n0", ExecutionId: &core.WorkflowExecutionIdentifier{ 
Project: "proj", @@ -404,13 +404,13 @@ func TestService_Error(t *testing.T) { }) t.Run("get a working set of urls without retry attempt", func(t *testing.T) { - taskExecutionManager.SetListTaskExecutionsCallback(func(ctx context.Context, request admin.TaskExecutionListRequest) (*admin.TaskExecutionList, error) { + taskExecutionManager.SetListTaskExecutionsCallback(func(ctx context.Context, request *admin.TaskExecutionListRequest) (*admin.TaskExecutionList, error) { return &admin.TaskExecutionList{ TaskExecutions: nil, Token: "", }, nil }) - nodeExecID := core.NodeExecutionIdentifier{ + nodeExecID := &core.NodeExecutionIdentifier{ NodeId: "n0", ExecutionId: &core.WorkflowExecutionIdentifier{ Project: "proj", diff --git a/flyteadmin/pkg/async/cloudevent/implementations/cloudevent_publisher.go b/flyteadmin/pkg/async/cloudevent/implementations/cloudevent_publisher.go index 46bd0f0ede..228db852d0 100644 --- a/flyteadmin/pkg/async/cloudevent/implementations/cloudevent_publisher.go +++ b/flyteadmin/pkg/async/cloudevent/implementations/cloudevent_publisher.go @@ -204,8 +204,8 @@ func getNodeExecutionContext(ctx context.Context, identifier *core.NodeExecution // This is a rough copy of the ListTaskExecutions function in TaskExecutionManager. It can be deprecated once we move the processing out of Admin itself. // Just return the highest retry attempt. -func (c *CloudEventWrappedPublisher) getLatestTaskExecutions(ctx context.Context, nodeExecutionID core.NodeExecutionIdentifier) (*admin.TaskExecution, error) { - ctx = getNodeExecutionContext(ctx, &nodeExecutionID) +func (c *CloudEventWrappedPublisher) getLatestTaskExecutions(ctx context.Context, nodeExecutionID *core.NodeExecutionIdentifier) (*admin.TaskExecution, error) { + ctx = getNodeExecutionContext(ctx, nodeExecutionID) identifierFilters, err := util.GetNodeExecutionIdentifierFilters(ctx, nodeExecutionID) if err != nil { @@ -283,7 +283,7 @@ func (c *CloudEventWrappedPublisher) TransformNodeExecutionEvent(ctx context.Con var taskExecID *core.TaskExecutionIdentifier var typedInterface *core.TypedInterface - lte, err := c.getLatestTaskExecutions(ctx, *rawEvent.Id) + lte, err := c.getLatestTaskExecutions(ctx, rawEvent.Id) if err != nil { logger.Errorf(ctx, "failed to get latest task execution for node exec id [%+v] with err: %v", rawEvent.Id, err) return nil, err @@ -353,7 +353,7 @@ func (c *CloudEventWrappedPublisher) Publish(ctx context.Context, notificationTy phase = e.Phase.String() eventTime = e.OccurredAt.AsTime() - dummyNodeExecutionID := core.NodeExecutionIdentifier{ + dummyNodeExecutionID := &core.NodeExecutionIdentifier{ NodeId: "end-node", ExecutionId: e.ExecutionId, } @@ -378,7 +378,7 @@ func (c *CloudEventWrappedPublisher) Publish(ctx context.Context, notificationTy if e.ParentNodeExecutionId == nil { return fmt.Errorf("parent node execution id is nil for task execution [%+v]", e) } - eventSource = common.FlyteURLKeyFromNodeExecutionIDRetry(*e.ParentNodeExecutionId, + eventSource = common.FlyteURLKeyFromNodeExecutionIDRetry(e.ParentNodeExecutionId, int(e.RetryAttempt)) finalMsg, err = c.TransformTaskExecutionEvent(ctx, e) if err != nil { @@ -392,7 +392,7 @@ func (c *CloudEventWrappedPublisher) Publish(ctx context.Context, notificationTy phase = e.Phase.String() eventTime = e.OccurredAt.AsTime() eventID = fmt.Sprintf("%v.%v", executionID, phase) - eventSource = common.FlyteURLKeyFromNodeExecutionID(*msgType.Event.Id) + eventSource = common.FlyteURLKeyFromNodeExecutionID(msgType.Event.Id) finalMsg, err = 
c.TransformNodeExecutionEvent(ctx, e) if err != nil { logger.Errorf(ctx, "Failed to transform node execution event with error: %v", err) diff --git a/flyteadmin/pkg/async/events/implementations/node_execution_event_writer.go b/flyteadmin/pkg/async/events/implementations/node_execution_event_writer.go index 623baf354d..3f59496626 100644 --- a/flyteadmin/pkg/async/events/implementations/node_execution_event_writer.go +++ b/flyteadmin/pkg/async/events/implementations/node_execution_event_writer.go @@ -14,10 +14,10 @@ import ( // events, node execution processing doesn't have to wait on these to be committed. type nodeExecutionEventWriter struct { db repositoryInterfaces.Repository - events chan admin.NodeExecutionEventRequest + events chan *admin.NodeExecutionEventRequest } -func (w *nodeExecutionEventWriter) Write(event admin.NodeExecutionEventRequest) { +func (w *nodeExecutionEventWriter) Write(event *admin.NodeExecutionEventRequest) { w.events <- event } @@ -40,6 +40,6 @@ func (w *nodeExecutionEventWriter) Run() { func NewNodeExecutionEventWriter(db repositoryInterfaces.Repository, bufferSize int) interfaces.NodeExecutionEventWriter { return &nodeExecutionEventWriter{ db: db, - events: make(chan admin.NodeExecutionEventRequest, bufferSize), + events: make(chan *admin.NodeExecutionEventRequest, bufferSize), } } diff --git a/flyteadmin/pkg/async/events/implementations/node_execution_event_writer_test.go b/flyteadmin/pkg/async/events/implementations/node_execution_event_writer_test.go index 4fafd19f69..3271ff5452 100644 --- a/flyteadmin/pkg/async/events/implementations/node_execution_event_writer_test.go +++ b/flyteadmin/pkg/async/events/implementations/node_execution_event_writer_test.go @@ -12,7 +12,7 @@ import ( func TestNodeExecutionEventWriter(t *testing.T) { db := mocks.NewMockRepository() - event := admin.NodeExecutionEventRequest{ + event := &admin.NodeExecutionEventRequest{ RequestId: "request_id", Event: &event2.NodeExecutionEvent{ Id: &core.NodeExecutionIdentifier{ diff --git a/flyteadmin/pkg/async/events/implementations/workflow_execution_event_writer.go b/flyteadmin/pkg/async/events/implementations/workflow_execution_event_writer.go index 7521dee4b8..e4f63b44f9 100644 --- a/flyteadmin/pkg/async/events/implementations/workflow_execution_event_writer.go +++ b/flyteadmin/pkg/async/events/implementations/workflow_execution_event_writer.go @@ -14,10 +14,10 @@ import ( // events, workflow execution processing doesn't have to wait on these to be committed. 
type workflowExecutionEventWriter struct { db repositoryInterfaces.Repository - events chan admin.WorkflowExecutionEventRequest + events chan *admin.WorkflowExecutionEventRequest } -func (w *workflowExecutionEventWriter) Write(event admin.WorkflowExecutionEventRequest) { +func (w *workflowExecutionEventWriter) Write(event *admin.WorkflowExecutionEventRequest) { w.events <- event } @@ -40,6 +40,6 @@ func (w *workflowExecutionEventWriter) Run() { func NewWorkflowExecutionEventWriter(db repositoryInterfaces.Repository, bufferSize int) interfaces.WorkflowExecutionEventWriter { return &workflowExecutionEventWriter{ db: db, - events: make(chan admin.WorkflowExecutionEventRequest, bufferSize), + events: make(chan *admin.WorkflowExecutionEventRequest, bufferSize), } } diff --git a/flyteadmin/pkg/async/events/implementations/workflow_execution_event_writer_test.go b/flyteadmin/pkg/async/events/implementations/workflow_execution_event_writer_test.go index db52cb809f..ce8dd390df 100644 --- a/flyteadmin/pkg/async/events/implementations/workflow_execution_event_writer_test.go +++ b/flyteadmin/pkg/async/events/implementations/workflow_execution_event_writer_test.go @@ -12,7 +12,7 @@ import ( func TestWorkflowExecutionEventWriter(t *testing.T) { db := mocks.NewMockRepository() - event := admin.WorkflowExecutionEventRequest{ + event := &admin.WorkflowExecutionEventRequest{ RequestId: "request_id", Event: &event2.WorkflowExecutionEvent{ ExecutionId: &core.WorkflowExecutionIdentifier{ diff --git a/flyteadmin/pkg/async/events/interfaces/node_execution.go b/flyteadmin/pkg/async/events/interfaces/node_execution.go index d6163db526..5e62e3b69b 100644 --- a/flyteadmin/pkg/async/events/interfaces/node_execution.go +++ b/flyteadmin/pkg/async/events/interfaces/node_execution.go @@ -8,5 +8,5 @@ import ( type NodeExecutionEventWriter interface { Run() - Write(nodeExecutionEvent admin.NodeExecutionEventRequest) + Write(nodeExecutionEvent *admin.NodeExecutionEventRequest) } diff --git a/flyteadmin/pkg/async/events/interfaces/workflow_execution.go b/flyteadmin/pkg/async/events/interfaces/workflow_execution.go index f730ddc993..d503ea0934 100644 --- a/flyteadmin/pkg/async/events/interfaces/workflow_execution.go +++ b/flyteadmin/pkg/async/events/interfaces/workflow_execution.go @@ -8,5 +8,5 @@ import ( type WorkflowExecutionEventWriter interface { Run() - Write(workflowExecutionEvent admin.WorkflowExecutionEventRequest) + Write(workflowExecutionEvent *admin.WorkflowExecutionEventRequest) } diff --git a/flyteadmin/pkg/async/events/mocks/node_execution_event_writer.go b/flyteadmin/pkg/async/events/mocks/node_execution_event_writer.go index b51639c609..f50149b6db 100644 --- a/flyteadmin/pkg/async/events/mocks/node_execution_event_writer.go +++ b/flyteadmin/pkg/async/events/mocks/node_execution_event_writer.go @@ -19,6 +19,6 @@ func (_m *NodeExecutionEventWriter) Run() { } // Write provides a mock function with given fields: nodeExecutionEvent -func (_m *NodeExecutionEventWriter) Write(nodeExecutionEvent admin.NodeExecutionEventRequest) { +func (_m *NodeExecutionEventWriter) Write(nodeExecutionEvent *admin.NodeExecutionEventRequest) { _m.Called(nodeExecutionEvent) } diff --git a/flyteadmin/pkg/async/events/mocks/workflow_execution_event_writer.go b/flyteadmin/pkg/async/events/mocks/workflow_execution_event_writer.go index 1c366f4cd5..616f3334f9 100644 --- a/flyteadmin/pkg/async/events/mocks/workflow_execution_event_writer.go +++ b/flyteadmin/pkg/async/events/mocks/workflow_execution_event_writer.go @@ -19,6 +19,6 @@ func (_m 
*WorkflowExecutionEventWriter) Run() { } // Write provides a mock function with given fields: workflowExecutionEvent -func (_m *WorkflowExecutionEventWriter) Write(workflowExecutionEvent admin.WorkflowExecutionEventRequest) { +func (_m *WorkflowExecutionEventWriter) Write(workflowExecutionEvent *admin.WorkflowExecutionEventRequest) { _m.Called(workflowExecutionEvent) } diff --git a/flyteadmin/pkg/async/notifications/email.go b/flyteadmin/pkg/async/notifications/email.go index 94eb71719c..a89210cead 100644 --- a/flyteadmin/pkg/async/notifications/email.go +++ b/flyteadmin/pkg/async/notifications/email.go @@ -8,7 +8,7 @@ import ( "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" ) -type GetTemplateValue func(admin.WorkflowExecutionEventRequest, *admin.Execution) string +type GetTemplateValue func(*admin.WorkflowExecutionEventRequest, *admin.Execution) string const executionError = " The execution failed with error: [%s]." @@ -29,58 +29,58 @@ const launchPlanName = "launch_plan.name" const launchPlanVersion = "launch_plan.version" const replaceAllInstances = -1 -func getProject(_ admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { +func getProject(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { return exec.Id.Project } -func getDomain(_ admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { +func getDomain(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { return exec.Id.Domain } -func getName(_ admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { +func getName(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { return exec.Id.Name } -func getPhase(request admin.WorkflowExecutionEventRequest, _ *admin.Execution) string { +func getPhase(request *admin.WorkflowExecutionEventRequest, _ *admin.Execution) string { return strings.ToLower(request.Event.Phase.String()) } -func getError(request admin.WorkflowExecutionEventRequest, _ *admin.Execution) string { +func getError(request *admin.WorkflowExecutionEventRequest, _ *admin.Execution) string { if request.Event.GetError() != nil { return fmt.Sprintf(executionError, request.Event.GetError().Message) } return "" } -func getWorkflowProject(_ admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { +func getWorkflowProject(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { return exec.Closure.WorkflowId.Project } -func getWorkflowDomain(_ admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { +func getWorkflowDomain(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { return exec.Closure.WorkflowId.Domain } -func getWorkflowName(_ admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { +func getWorkflowName(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { return exec.Closure.WorkflowId.Name } -func getWorkflowVersion(_ admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { +func getWorkflowVersion(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { return exec.Closure.WorkflowId.Version } -func getLaunchPlanProject(_ admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { +func getLaunchPlanProject(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { return exec.Spec.LaunchPlan.Project } -func getLaunchPlanDomain(_ admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { +func getLaunchPlanDomain(_ *admin.WorkflowExecutionEventRequest, exec 
*admin.Execution) string { return exec.Spec.LaunchPlan.Domain } -func getLaunchPlanName(_ admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { +func getLaunchPlanName(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { return exec.Spec.LaunchPlan.Name } -func getLaunchPlanVersion(_ admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { +func getLaunchPlanVersion(_ *admin.WorkflowExecutionEventRequest, exec *admin.Execution) string { return exec.Spec.LaunchPlan.Version } @@ -100,7 +100,7 @@ var getTemplateValueFuncs = map[string]GetTemplateValue{ launchPlanVersion: getLaunchPlanVersion, } -func substituteEmailParameters(message string, request admin.WorkflowExecutionEventRequest, execution *admin.Execution) string { +func substituteEmailParameters(message string, request *admin.WorkflowExecutionEventRequest, execution *admin.Execution) string { for template, function := range getTemplateValueFuncs { message = strings.Replace(message, fmt.Sprintf(substitutionParam, template), function(request, execution), replaceAllInstances) message = strings.Replace(message, fmt.Sprintf(substitutionParamNoSpaces, template), function(request, execution), replaceAllInstances) @@ -112,8 +112,8 @@ func substituteEmailParameters(message string, request admin.WorkflowExecutionEv // in customizable email fields set in the flyteadmin application notifications config. func ToEmailMessageFromWorkflowExecutionEvent( config runtimeInterfaces.NotificationsConfig, - emailNotification admin.EmailNotification, - request admin.WorkflowExecutionEventRequest, + emailNotification *admin.EmailNotification, + request *admin.WorkflowExecutionEventRequest, execution *admin.Execution) *admin.EmailMessage { return &admin.EmailMessage{ diff --git a/flyteadmin/pkg/async/notifications/email_test.go b/flyteadmin/pkg/async/notifications/email_test.go index 5a9cf01c36..35f351a45d 100644 --- a/flyteadmin/pkg/async/notifications/email_test.go +++ b/flyteadmin/pkg/async/notifications/email_test.go @@ -52,7 +52,7 @@ var workflowExecution = &admin.Execution{ func TestSubstituteEmailParameters(t *testing.T) { message := "{{ unused }}. 
{{project }} and {{ domain }} and {{ name }} ended up in {{ phase }}.{{ error }}" - request := admin.WorkflowExecutionEventRequest{ + request := &admin.WorkflowExecutionEventRequest{ Event: &event.WorkflowExecutionEvent{ Phase: core.WorkflowExecution_SUCCEEDED, }, @@ -88,7 +88,7 @@ func TestSubstituteAllTemplates(t *testing.T) { messageTemplate = append(messageTemplate, template) desiredResult = append(desiredResult, result) } - request := admin.WorkflowExecutionEventRequest{ + request := &admin.WorkflowExecutionEventRequest{ Event: &event.WorkflowExecutionEvent{ Phase: core.WorkflowExecution_SUCCEEDED, }, @@ -117,7 +117,7 @@ func TestSubstituteAllTemplatesNoSpaces(t *testing.T) { messageTemplate = append(messageTemplate, template) desiredResult = append(desiredResult, result) } - request := admin.WorkflowExecutionEventRequest{ + request := &admin.WorkflowExecutionEventRequest{ Event: &event.WorkflowExecutionEvent{ Phase: core.WorkflowExecution_SUCCEEDED, }, @@ -136,12 +136,12 @@ func TestToEmailMessageFromWorkflowExecutionEvent(t *testing.T) { Subject: "Notice: Execution \"{{ name }}\" has succeeded in \"{{ domain }}\".", }, } - emailNotification := admin.EmailNotification{ + emailNotification := &admin.EmailNotification{ RecipientsEmail: []string{ "a@example.com", "b@example.org", }, } - request := admin.WorkflowExecutionEventRequest{ + request := &admin.WorkflowExecutionEventRequest{ Event: &event.WorkflowExecutionEvent{ Phase: core.WorkflowExecution_ABORTED, }, diff --git a/flyteadmin/pkg/async/notifications/implementations/aws_emailer.go b/flyteadmin/pkg/async/notifications/implementations/aws_emailer.go index 72985c9548..712bd7080d 100644 --- a/flyteadmin/pkg/async/notifications/implementations/aws_emailer.go +++ b/flyteadmin/pkg/async/notifications/implementations/aws_emailer.go @@ -21,7 +21,7 @@ type AwsEmailer struct { awsEmail sesiface.SESAPI } -func FlyteEmailToSesEmailInput(email admin.EmailMessage) ses.SendEmailInput { +func FlyteEmailToSesEmailInput(email *admin.EmailMessage) ses.SendEmailInput { var toAddress []*string for _, toEmail := range email.RecipientsEmail { // SES email input takes an array of pointers to strings so we have to create a new one for each email @@ -51,7 +51,7 @@ func FlyteEmailToSesEmailInput(email admin.EmailMessage) ses.SendEmailInput { } } -func (e *AwsEmailer) SendEmail(ctx context.Context, email admin.EmailMessage) error { +func (e *AwsEmailer) SendEmail(ctx context.Context, email *admin.EmailMessage) error { emailInput := FlyteEmailToSesEmailInput(email) _, err := e.awsEmail.SendEmail(&emailInput) e.systemMetrics.SendTotal.Inc() diff --git a/flyteadmin/pkg/async/notifications/implementations/aws_emailer_test.go b/flyteadmin/pkg/async/notifications/implementations/aws_emailer_test.go index c06d818eec..01a2a06273 100644 --- a/flyteadmin/pkg/async/notifications/implementations/aws_emailer_test.go +++ b/flyteadmin/pkg/async/notifications/implementations/aws_emailer_test.go @@ -32,7 +32,7 @@ func TestAwsEmailer_SendEmail(t *testing.T) { mockAwsEmail := mocks.SESClient{} var awsSES sesiface.SESAPI = &mockAwsEmail expectedSenderEmail := "no-reply@example.com" - emailNotification := admin.EmailMessage{ + emailNotification := &admin.EmailMessage{ SubjectLine: "Notice: Execution \"name\" has succeeded in \"domain\".", SenderEmail: "no-reply@example.com", RecipientsEmail: []string{ @@ -67,7 +67,7 @@ func TestAwsEmailer_SendEmail(t *testing.T) { } func TestFlyteEmailToSesEmailInput(t *testing.T) { - emailNotification := admin.EmailMessage{ + 
emailNotification := &admin.EmailMessage{ SubjectLine: "Notice: Execution \"name\" has succeeded in \"domain\".", SenderEmail: "no-reply@example.com", RecipientsEmail: []string{ @@ -97,7 +97,7 @@ func TestAwsEmailer_SendEmailError(t *testing.T) { testEmail := NewAwsEmailer(getNotificationsConfig(), promutils.NewTestScope(), awsSES) - emailNotification := admin.EmailMessage{ + emailNotification := &admin.EmailMessage{ SubjectLine: "Notice: Execution \"name\" has succeeded in \"domain\".", SenderEmail: "no-reply@example.com", RecipientsEmail: []string{ @@ -125,7 +125,7 @@ func TestAwsEmailer_SendEmailEmailOutput(t *testing.T) { testEmail := NewAwsEmailer(getNotificationsConfig(), promutils.NewTestScope(), awsSES) - emailNotification := admin.EmailMessage{ + emailNotification := &admin.EmailMessage{ SubjectLine: "Notice: Execution \"name\" has succeeded in \"domain\".", SenderEmail: "no-reply@example.com", RecipientsEmail: []string{ diff --git a/flyteadmin/pkg/async/notifications/implementations/aws_processor.go b/flyteadmin/pkg/async/notifications/implementations/aws_processor.go index fb3b3c2a1b..0e1dceb53c 100644 --- a/flyteadmin/pkg/async/notifications/implementations/aws_processor.go +++ b/flyteadmin/pkg/async/notifications/implementations/aws_processor.go @@ -36,7 +36,7 @@ func (p *Processor) StartProcessing() { } func (p *Processor) run() error { - var emailMessage admin.EmailMessage + emailMessage := &admin.EmailMessage{} var err error for msg := range p.sub.Start() { p.systemMetrics.MessageTotal.Inc() @@ -83,7 +83,7 @@ func (p *Processor) run() error { continue } - if err = proto.Unmarshal(notificationBytes, &emailMessage); err != nil { + if err = proto.Unmarshal(notificationBytes, emailMessage); err != nil { logger.Debugf(context.Background(), "failed to unmarshal to notification object from decoded string[%s] from message [%s] with err: %v", valueString, stringMsg, err) p.systemMetrics.MessageDecodingError.Inc() p.markMessageDone(msg) diff --git a/flyteadmin/pkg/async/notifications/implementations/aws_processor_test.go b/flyteadmin/pkg/async/notifications/implementations/aws_processor_test.go index ef27f1f3a8..e566fdd740 100644 --- a/flyteadmin/pkg/async/notifications/implementations/aws_processor_test.go +++ b/flyteadmin/pkg/async/notifications/implementations/aws_processor_test.go @@ -30,7 +30,7 @@ func TestProcessor_StartProcessing(t *testing.T) { // Because the message stored in Amazon SQS is a JSON of the SNS output, store the test output in the JSON Messages. 
testSubscriber.JSONMessages = append(testSubscriber.JSONMessages, testSubscriberMessage) - sendEmailValidationFunc := func(ctx context.Context, email admin.EmailMessage) error { + sendEmailValidationFunc := func(ctx context.Context, email *admin.EmailMessage) error { assert.Equal(t, email.Body, testEmail.Body) assert.Equal(t, email.RecipientsEmail, testEmail.RecipientsEmail) assert.Equal(t, email.SubjectLine, testEmail.SubjectLine) @@ -115,7 +115,7 @@ func TestProcessor_StartProcessingError(t *testing.T) { func TestProcessor_StartProcessingEmailError(t *testing.T) { initializeProcessor() emailError := errors.New("error sending email") - sendEmailErrorFunc := func(ctx context.Context, email admin.EmailMessage) error { + sendEmailErrorFunc := func(ctx context.Context, email *admin.EmailMessage) error { return emailError } mockEmailer.SetSendEmailFunc(sendEmailErrorFunc) diff --git a/flyteadmin/pkg/async/notifications/implementations/gcp_processor.go b/flyteadmin/pkg/async/notifications/implementations/gcp_processor.go index 54e4f4a592..b1e97ca7a1 100644 --- a/flyteadmin/pkg/async/notifications/implementations/gcp_processor.go +++ b/flyteadmin/pkg/async/notifications/implementations/gcp_processor.go @@ -39,12 +39,12 @@ func (p *GcpProcessor) StartProcessing() { } func (p *GcpProcessor) run() error { - var emailMessage admin.EmailMessage + emailMessage := &admin.EmailMessage{} for msg := range p.sub.Start() { p.systemMetrics.MessageTotal.Inc() - if err := proto.Unmarshal(msg.Message(), &emailMessage); err != nil { + if err := proto.Unmarshal(msg.Message(), emailMessage); err != nil { logger.Debugf(context.Background(), "failed to unmarshal to notification object message [%s] with err: %v", string(msg.Message()), err) p.systemMetrics.MessageDecodingError.Inc() p.markMessageDone(msg) diff --git a/flyteadmin/pkg/async/notifications/implementations/gcp_processor_test.go b/flyteadmin/pkg/async/notifications/implementations/gcp_processor_test.go index da5bda2610..5ad49a7257 100644 --- a/flyteadmin/pkg/async/notifications/implementations/gcp_processor_test.go +++ b/flyteadmin/pkg/async/notifications/implementations/gcp_processor_test.go @@ -34,7 +34,7 @@ func TestGcpProcessor_StartProcessing(t *testing.T) { testGcpProcessor := NewGcpProcessor(&testGcpSubscriber, &mockGcpEmailer, promutils.NewTestScope()) - sendEmailValidationFunc := func(ctx context.Context, email admin.EmailMessage) error { + sendEmailValidationFunc := func(ctx context.Context, email *admin.EmailMessage) error { assert.Equal(t, email.Body, testEmail.Body) assert.Equal(t, email.RecipientsEmail, testEmail.RecipientsEmail) assert.Equal(t, email.SubjectLine, testEmail.SubjectLine) @@ -81,7 +81,7 @@ func TestGcpProcessor_StartProcessingError(t *testing.T) { func TestGcpProcessor_StartProcessingEmailError(t *testing.T) { initializeGcpSubscriber() emailError := errors.New("error sending email") - sendEmailErrorFunc := func(ctx context.Context, email admin.EmailMessage) error { + sendEmailErrorFunc := func(ctx context.Context, email *admin.EmailMessage) error { return emailError } mockGcpEmailer.SetSendEmailFunc(sendEmailErrorFunc) diff --git a/flyteadmin/pkg/async/notifications/implementations/noop_notifications.go b/flyteadmin/pkg/async/notifications/implementations/noop_notifications.go index 4da316f6b2..03dfa063ea 100644 --- a/flyteadmin/pkg/async/notifications/implementations/noop_notifications.go +++ b/flyteadmin/pkg/async/notifications/implementations/noop_notifications.go @@ -14,7 +14,7 @@ import ( // Email to use when there is no 
email configuration. type NoopEmail struct{} -func (n *NoopEmail) SendEmail(ctx context.Context, email admin.EmailMessage) error { +func (n *NoopEmail) SendEmail(ctx context.Context, email *admin.EmailMessage) error { logger.Debugf(ctx, "received noop SendEmail request with subject [%s] and recipient [%s]", email.SubjectLine, strings.Join(email.RecipientsEmail, ",")) return nil diff --git a/flyteadmin/pkg/async/notifications/implementations/sandbox_processor.go b/flyteadmin/pkg/async/notifications/implementations/sandbox_processor.go index 2cb83da406..9fb5e34bc7 100644 --- a/flyteadmin/pkg/async/notifications/implementations/sandbox_processor.go +++ b/flyteadmin/pkg/async/notifications/implementations/sandbox_processor.go @@ -27,12 +27,12 @@ func (p *SandboxProcessor) StartProcessing() { } func (p *SandboxProcessor) run() error { - var emailMessage admin.EmailMessage + emailMessage := &admin.EmailMessage{} for { select { case msg := <-p.subChan: - err := proto.Unmarshal(msg, &emailMessage) + err := proto.Unmarshal(msg, emailMessage) if err != nil { logger.Errorf(context.Background(), "error with unmarshalling message [%v]", err) return err diff --git a/flyteadmin/pkg/async/notifications/implementations/sandbox_processor_test.go b/flyteadmin/pkg/async/notifications/implementations/sandbox_processor_test.go index d0ee9ee31b..83594284a9 100644 --- a/flyteadmin/pkg/async/notifications/implementations/sandbox_processor_test.go +++ b/flyteadmin/pkg/async/notifications/implementations/sandbox_processor_test.go @@ -19,7 +19,7 @@ func TestSandboxProcessor_StartProcessingSuccess(t *testing.T) { msgChan <- msg testSandboxProcessor := NewSandboxProcessor(msgChan, &mockSandboxEmailer) - sendEmailValidationFunc := func(ctx context.Context, email admin.EmailMessage) error { + sendEmailValidationFunc := func(ctx context.Context, email *admin.EmailMessage) error { assert.Equal(t, testEmail.Body, email.Body) assert.Equal(t, testEmail.RecipientsEmail, email.RecipientsEmail) assert.Equal(t, testEmail.SubjectLine, email.SubjectLine) @@ -43,7 +43,7 @@ func TestSandboxProcessor_StartProcessingError(t *testing.T) { msgChan <- msg emailError := errors.New("error running processor") - sendEmailValidationFunc := func(ctx context.Context, email admin.EmailMessage) error { + sendEmailValidationFunc := func(ctx context.Context, email *admin.EmailMessage) error { return emailError } mockSandboxEmailer.SetSendEmailFunc(sendEmailValidationFunc) @@ -70,7 +70,7 @@ func TestSandboxProcessor_StartProcessingEmailError(t *testing.T) { testSandboxProcessor := NewSandboxProcessor(msgChan, &mockSandboxEmailer) emailError := errors.New("error sending email") - sendEmailValidationFunc := func(ctx context.Context, email admin.EmailMessage) error { + sendEmailValidationFunc := func(ctx context.Context, email *admin.EmailMessage) error { return emailError } diff --git a/flyteadmin/pkg/async/notifications/implementations/sendgrid_emailer.go b/flyteadmin/pkg/async/notifications/implementations/sendgrid_emailer.go index 54f53859f3..c8386bd41e 100644 --- a/flyteadmin/pkg/async/notifications/implementations/sendgrid_emailer.go +++ b/flyteadmin/pkg/async/notifications/implementations/sendgrid_emailer.go @@ -30,7 +30,7 @@ func getEmailAddresses(addresses []string) []*mail.Email { return sendgridAddresses } -func getSendgridEmail(adminEmail admin.EmailMessage) *mail.SGMailV3 { +func getSendgridEmail(adminEmail *admin.EmailMessage) *mail.SGMailV3 { m := mail.NewV3Mail() // This from email address is really here as a formality. 
For sendgrid specifically, the sender email is determined // from the api key that's used, not what you send along here. @@ -60,7 +60,7 @@ func getAPIKey(config runtimeInterfaces.EmailServerConfig) string { return strings.TrimSpace(string(apiKeyFile)) } -func (s SendgridEmailer) SendEmail(ctx context.Context, email admin.EmailMessage) error { +func (s SendgridEmailer) SendEmail(ctx context.Context, email *admin.EmailMessage) error { m := getSendgridEmail(email) s.systemMetrics.SendTotal.Inc() response, err := s.client.Send(m) diff --git a/flyteadmin/pkg/async/notifications/implementations/sendgrid_emailer_test.go b/flyteadmin/pkg/async/notifications/implementations/sendgrid_emailer_test.go index bfedb152d0..eafad84b2c 100644 --- a/flyteadmin/pkg/async/notifications/implementations/sendgrid_emailer_test.go +++ b/flyteadmin/pkg/async/notifications/implementations/sendgrid_emailer_test.go @@ -21,7 +21,7 @@ func TestAddresses(t *testing.T) { } func TestGetEmail(t *testing.T) { - emailNotification := admin.EmailMessage{ + emailNotification := &admin.EmailMessage{ SubjectLine: "Notice: Execution \"name\" has succeeded in \"domain\".", SenderEmail: "no-reply@example.com", RecipientsEmail: []string{ diff --git a/flyteadmin/pkg/async/notifications/interfaces/emailer.go b/flyteadmin/pkg/async/notifications/interfaces/emailer.go index 54b6ad5574..f6874cf580 100644 --- a/flyteadmin/pkg/async/notifications/interfaces/emailer.go +++ b/flyteadmin/pkg/async/notifications/interfaces/emailer.go @@ -9,5 +9,5 @@ import ( // The implementation of Emailer needs to be passed to the implementation of Processor // in order for emails to be sent. type Emailer interface { - SendEmail(ctx context.Context, email admin.EmailMessage) error + SendEmail(ctx context.Context, email *admin.EmailMessage) error } diff --git a/flyteadmin/pkg/async/notifications/mocks/processor.go b/flyteadmin/pkg/async/notifications/mocks/processor.go index a60bb26f96..178d68490e 100644 --- a/flyteadmin/pkg/async/notifications/mocks/processor.go +++ b/flyteadmin/pkg/async/notifications/mocks/processor.go @@ -29,7 +29,7 @@ func (m *MockSubscriber) Stop() error { return nil } -type SendEmailFunc func(ctx context.Context, email admin.EmailMessage) error +type SendEmailFunc func(ctx context.Context, email *admin.EmailMessage) error type MockEmailer struct { sendEmailFunc SendEmailFunc @@ -39,7 +39,7 @@ func (m *MockEmailer) SetSendEmailFunc(sendEmail SendEmailFunc) { m.sendEmailFunc = sendEmail } -func (m *MockEmailer) SendEmail(ctx context.Context, email admin.EmailMessage) error { +func (m *MockEmailer) SendEmail(ctx context.Context, email *admin.EmailMessage) error { if m.sendEmailFunc != nil { return m.sendEmailFunc(ctx, email) } diff --git a/flyteadmin/pkg/async/schedule/aws/cloud_watch_scheduler.go b/flyteadmin/pkg/async/schedule/aws/cloud_watch_scheduler.go index 768d195766..9c3cb166b5 100644 --- a/flyteadmin/pkg/async/schedule/aws/cloud_watch_scheduler.go +++ b/flyteadmin/pkg/async/schedule/aws/cloud_watch_scheduler.go @@ -68,7 +68,7 @@ type cloudWatchScheduler struct { metrics cloudWatchSchedulerMetrics } -func getScheduleName(scheduleNamePrefix string, identifier core.Identifier) string { +func getScheduleName(scheduleNamePrefix string, identifier *core.Identifier) string { hashedIdentifier := hashIdentifier(identifier) if len(scheduleNamePrefix) > 0 { return fmt.Sprintf(scheduleNameFormat, scheduleNamePrefix, hashedIdentifier) @@ -76,12 +76,12 @@ func getScheduleName(scheduleNamePrefix string, identifier core.Identifier) stri return 
fmt.Sprintf("%d", hashedIdentifier) } -func getScheduleDescription(identifier core.Identifier) string { +func getScheduleDescription(identifier *core.Identifier) string { return fmt.Sprintf(scheduleDescriptionFormat, identifier.Project, identifier.Domain, identifier.Name) } -func getScheduleExpression(schedule admin.Schedule) (string, error) { +func getScheduleExpression(schedule *admin.Schedule) (string, error) { if schedule.GetCronExpression() != "" { return fmt.Sprintf(cronExpression, schedule.GetCronExpression()), nil } @@ -171,11 +171,11 @@ func (s *cloudWatchScheduler) AddSchedule(ctx context.Context, input scheduleInt } func (s *cloudWatchScheduler) CreateScheduleInput(ctx context.Context, appConfig *appInterfaces.SchedulerConfig, - identifier core.Identifier, schedule *admin.Schedule) (scheduleInterfaces.AddScheduleInput, error) { + identifier *core.Identifier, schedule *admin.Schedule) (scheduleInterfaces.AddScheduleInput, error) { payload, err := SerializeScheduleWorkflowPayload( schedule.GetKickoffTimeInputArg(), - admin.NamedEntityIdentifier{ + &admin.NamedEntityIdentifier{ Project: identifier.Project, Domain: identifier.Domain, Name: identifier.Name, @@ -194,7 +194,7 @@ func (s *cloudWatchScheduler) CreateScheduleInput(ctx context.Context, appConfig addScheduleInput := scheduleInterfaces.AddScheduleInput{ Identifier: identifier, - ScheduleExpression: *schedule, + ScheduleExpression: schedule, Payload: payload, ScheduleNamePrefix: scheduleNamePrefix, } diff --git a/flyteadmin/pkg/async/schedule/aws/cloud_watch_scheduler_test.go b/flyteadmin/pkg/async/schedule/aws/cloud_watch_scheduler_test.go index bb32163d1f..9b24e1baa9 100644 --- a/flyteadmin/pkg/async/schedule/aws/cloud_watch_scheduler_test.go +++ b/flyteadmin/pkg/async/schedule/aws/cloud_watch_scheduler_test.go @@ -26,7 +26,7 @@ var expectedError = flyteAdminErrors.NewFlyteAdminError(codes.Internal, "foo") var testSerializedPayload = fmt.Sprintf("event triggered at '%s'", awsTimestampPlaceholder) -var testSchedulerIdentifier = core.Identifier{ +var testSchedulerIdentifier = &core.Identifier{ Project: "project", Domain: "domain", Name: "name", @@ -55,7 +55,7 @@ func TestGetScheduleDescription(t *testing.T) { } func TestGetScheduleExpression(t *testing.T) { - expression, err := getScheduleExpression(admin.Schedule{ + expression, err := getScheduleExpression(&admin.Schedule{ ScheduleExpression: &admin.Schedule_CronExpression{ CronExpression: "foo", }, @@ -63,7 +63,7 @@ func TestGetScheduleExpression(t *testing.T) { assert.Nil(t, err) assert.Equal(t, "cron(foo)", expression) - expression, err = getScheduleExpression(admin.Schedule{ + expression, err = getScheduleExpression(&admin.Schedule{ ScheduleExpression: &admin.Schedule_Rate{ Rate: &admin.FixedRate{ Value: 1, @@ -74,7 +74,7 @@ func TestGetScheduleExpression(t *testing.T) { assert.Nil(t, err) assert.Equal(t, "rate(1 day)", expression) - expression, err = getScheduleExpression(admin.Schedule{ + expression, err = getScheduleExpression(&admin.Schedule{ ScheduleExpression: &admin.Schedule_Rate{ Rate: &admin.FixedRate{ Value: 2, @@ -85,7 +85,7 @@ func TestGetScheduleExpression(t *testing.T) { assert.Nil(t, err) assert.Equal(t, "rate(2 hours)", expression) - _, err = getScheduleExpression(admin.Schedule{}) + _, err = getScheduleExpression(&admin.Schedule{}) assert.Equal(t, codes.InvalidArgument, err.(flyteAdminErrors.FlyteAdminError).Code()) } @@ -133,7 +133,7 @@ func TestAddSchedule(t *testing.T) { assert.Nil(t, scheduler.AddSchedule(context.Background(), 
scheduleInterfaces.AddScheduleInput{ Identifier: testSchedulerIdentifier, - ScheduleExpression: admin.Schedule{ + ScheduleExpression: &admin.Schedule{ ScheduleExpression: &admin.Schedule_Rate{ Rate: &admin.FixedRate{ Value: 1, @@ -168,7 +168,7 @@ func TestAddSchedule_PutRuleError(t *testing.T) { err := scheduler.AddSchedule(context.Background(), scheduleInterfaces.AddScheduleInput{ Identifier: testSchedulerIdentifier, - ScheduleExpression: admin.Schedule{ + ScheduleExpression: &admin.Schedule{ ScheduleExpression: &admin.Schedule_Rate{ Rate: &admin.FixedRate{ Value: 1, @@ -195,7 +195,7 @@ func TestAddSchedule_PutTargetsError(t *testing.T) { err := scheduler.AddSchedule(context.Background(), scheduleInterfaces.AddScheduleInput{ Identifier: testSchedulerIdentifier, - ScheduleExpression: admin.Schedule{ + ScheduleExpression: &admin.Schedule{ ScheduleExpression: &admin.Schedule_Rate{ Rate: &admin.FixedRate{ Value: 1, diff --git a/flyteadmin/pkg/async/schedule/aws/serialization.go b/flyteadmin/pkg/async/schedule/aws/serialization.go index 833235f4fc..8162a9a265 100644 --- a/flyteadmin/pkg/async/schedule/aws/serialization.go +++ b/flyteadmin/pkg/async/schedule/aws/serialization.go @@ -35,13 +35,13 @@ type ScheduledWorkflowExecutionRequest struct { // The name of the kickoff time input argument in the workflow definition. This will be filled with kickoff time. KickoffTimeArg string // The desired launch plan identifier to trigger on schedule event firings. - LaunchPlanIdentifier admin.NamedEntityIdentifier + LaunchPlanIdentifier *admin.NamedEntityIdentifier } // This produces a function that is used to serialize messages enqueued on the cloudwatch scheduler. func SerializeScheduleWorkflowPayload( - kickoffTimeArg string, launchPlanIdentifier admin.NamedEntityIdentifier) (*string, error) { - payload, err := proto.Marshal(&launchPlanIdentifier) + kickoffTimeArg string, launchPlanIdentifier *admin.NamedEntityIdentifier) (*string, error) { + payload, err := proto.Marshal(launchPlanIdentifier) if err != nil { return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "failed to marshall launch plan with err: %v", err) } @@ -81,6 +81,6 @@ func DeserializeScheduleWorkflowPayload(payload []byte) (ScheduledWorkflowExecut return ScheduledWorkflowExecutionRequest{ KickoffTime: kickoffTime, KickoffTimeArg: scheduleWorkflowPayload.KickoffTimeArg, - LaunchPlanIdentifier: launchPlanIdentifier, + LaunchPlanIdentifier: &launchPlanIdentifier, }, nil } diff --git a/flyteadmin/pkg/async/schedule/aws/serialization_test.go b/flyteadmin/pkg/async/schedule/aws/serialization_test.go index 5b14866247..ae6a293ecf 100644 --- a/flyteadmin/pkg/async/schedule/aws/serialization_test.go +++ b/flyteadmin/pkg/async/schedule/aws/serialization_test.go @@ -15,7 +15,7 @@ import ( const testKickoffTimeArg = "kickoff time arg" -var testLaunchPlanIdentifier = admin.NamedEntityIdentifier{ +var testLaunchPlanIdentifier = &admin.NamedEntityIdentifier{ Name: "name", Project: "project", Domain: "domain", @@ -38,7 +38,7 @@ func TestDeserializeScheduleWorkflowPayload(t *testing.T) { time.Date(2017, 12, 22, 18, 43, 48, 0, time.UTC), scheduledWorkflowExecutionRequest.KickoffTime) assert.Equal(t, testKickoffTimeArg, scheduledWorkflowExecutionRequest.KickoffTimeArg) - assert.True(t, proto.Equal(&testLaunchPlanIdentifier, &scheduledWorkflowExecutionRequest.LaunchPlanIdentifier), + assert.True(t, proto.Equal(testLaunchPlanIdentifier, scheduledWorkflowExecutionRequest.LaunchPlanIdentifier), 
fmt.Sprintf("scheduledWorkflowExecutionRequest.LaunchPlanIdentifier %v", &scheduledWorkflowExecutionRequest.LaunchPlanIdentifier)) } diff --git a/flyteadmin/pkg/async/schedule/aws/shared.go b/flyteadmin/pkg/async/schedule/aws/shared.go index 2a0590cff6..3868e05799 100644 --- a/flyteadmin/pkg/async/schedule/aws/shared.go +++ b/flyteadmin/pkg/async/schedule/aws/shared.go @@ -9,7 +9,7 @@ import ( "github.com/flyteorg/flyte/flytestdlib/logger" ) -func hashIdentifier(identifier core.Identifier) uint64 { +func hashIdentifier(identifier *core.Identifier) uint64 { h := fnv.New64() _, err := h.Write([]byte(fmt.Sprintf(scheduleNameInputsFormat, identifier.Project, identifier.Domain, identifier.Name))) diff --git a/flyteadmin/pkg/async/schedule/aws/shared_test.go b/flyteadmin/pkg/async/schedule/aws/shared_test.go index 4fb0ccd515..7833d0980d 100644 --- a/flyteadmin/pkg/async/schedule/aws/shared_test.go +++ b/flyteadmin/pkg/async/schedule/aws/shared_test.go @@ -9,7 +9,7 @@ import ( ) func TestHashIdentifier(t *testing.T) { - identifier := core.Identifier{ + identifier := &core.Identifier{ Project: "project", Domain: "domain", Name: "name", diff --git a/flyteadmin/pkg/async/schedule/aws/workflow_executor.go b/flyteadmin/pkg/async/schedule/aws/workflow_executor.go index 13d2041f9d..523fdd077e 100644 --- a/flyteadmin/pkg/async/schedule/aws/workflow_executor.go +++ b/flyteadmin/pkg/async/schedule/aws/workflow_executor.go @@ -61,7 +61,7 @@ var doNotconsumeBase64 = false // The kickoff time argument isn't required for scheduled workflows. However, if it exists we substitute the kick-off // time value for the input argument. func (e *workflowExecutor) resolveKickoffTimeArg( - request ScheduledWorkflowExecutionRequest, launchPlan admin.LaunchPlan, + request ScheduledWorkflowExecutionRequest, launchPlan *admin.LaunchPlan, executionRequest *admin.ExecutionCreateRequest) error { if request.KickoffTimeArg == "" || launchPlan.Closure.ExpectedInputs == nil { logger.Debugf(context.Background(), "No kickoff time to resolve for scheduled workflow execution: [%s/%s/%s]", @@ -100,8 +100,8 @@ func (e *workflowExecutor) resolveKickoffTimeArg( return nil } -func (e *workflowExecutor) getActiveLaunchPlanVersion(launchPlanIdentifier *admin.NamedEntityIdentifier) (admin.LaunchPlan, error) { - launchPlans, err := e.launchPlanManager.ListLaunchPlans(context.Background(), admin.ResourceListRequest{ +func (e *workflowExecutor) getActiveLaunchPlanVersion(launchPlanIdentifier *admin.NamedEntityIdentifier) (*admin.LaunchPlan, error) { + launchPlans, err := e.launchPlanManager.ListLaunchPlans(context.Background(), &admin.ResourceListRequest{ Id: launchPlanIdentifier, Filters: activeLaunchPlanFilter, Limit: 1, @@ -110,20 +110,20 @@ func (e *workflowExecutor) getActiveLaunchPlanVersion(launchPlanIdentifier *admi logger.Warningf(context.Background(), "failed to find active launch plan with identifier [%+v]", launchPlanIdentifier) e.metrics.NoActiveLaunchPlanVersionsFound.Inc() - return admin.LaunchPlan{}, err + return &admin.LaunchPlan{}, err } if len(launchPlans.LaunchPlans) != 1 { e.metrics.GreaterThan1LaunchPlanVersionsFound.Inc() logger.Warningf(context.Background(), "failed to get exactly one active launch plan for identifier: %+v", launchPlanIdentifier) - return admin.LaunchPlan{}, errors.NewFlyteAdminErrorf(codes.Internal, + return &admin.LaunchPlan{}, errors.NewFlyteAdminErrorf(codes.Internal, "failed to get exactly one active launch plan for identifier: %+v", launchPlanIdentifier) } - return *launchPlans.LaunchPlans[0], nil + 
return launchPlans.LaunchPlans[0], nil } -func generateExecutionName(launchPlan admin.LaunchPlan, kickoffTime time.Time) string { - hashedIdentifier := hashIdentifier(core.Identifier{ +func generateExecutionName(launchPlan *admin.LaunchPlan, kickoffTime time.Time) string { + hashedIdentifier := hashIdentifier(&core.Identifier{ Project: launchPlan.Id.Project, Domain: launchPlan.Id.Domain, Name: launchPlan.Id.Name, @@ -133,7 +133,7 @@ func generateExecutionName(launchPlan admin.LaunchPlan, kickoffTime time.Time) s } func (e *workflowExecutor) formulateExecutionCreateRequest( - launchPlan admin.LaunchPlan, kickoffTime time.Time) admin.ExecutionCreateRequest { + launchPlan *admin.LaunchPlan, kickoffTime time.Time) *admin.ExecutionCreateRequest { // Deterministically assign a name based on the schedule kickoff time/launch plan definition. name := generateExecutionName(launchPlan, kickoffTime) logger.Debugf(context.Background(), "generated name [%s] for scheduled execution with launch plan [%+v]", @@ -147,7 +147,7 @@ func (e *workflowExecutor) formulateExecutionCreateRequest( logger.Warningf(context.Background(), "failed to serialize kickoff time [%v] to proto with err: %v", kickoffTime, err) } - executionRequest := admin.ExecutionCreateRequest{ + executionRequest := &admin.ExecutionCreateRequest{ Project: launchPlan.Id.Project, Domain: launchPlan.Id.Domain, Name: name, @@ -189,7 +189,7 @@ func (e *workflowExecutor) run() error { logger.Debugf(context.Background(), "Processing scheduled workflow execution event: %+v", scheduledWorkflowExecutionRequest) - launchPlan, err := e.getActiveLaunchPlanVersion(&scheduledWorkflowExecutionRequest.LaunchPlanIdentifier) + launchPlan, err := e.getActiveLaunchPlanVersion(scheduledWorkflowExecutionRequest.LaunchPlanIdentifier) if err != nil { // In the rare case that a scheduled event fires right before a user disables the currently active launch // plan version (and triggers deleting the schedule rule) there may be no active launch plans. 
This is fine, @@ -209,7 +209,7 @@ func (e *workflowExecutor) run() error { executionRequest := e.formulateExecutionCreateRequest(launchPlan, scheduledWorkflowExecutionRequest.KickoffTime) ctx = contextutils.WithWorkflowID(ctx, fmt.Sprintf(workflowIdentifierFmt, executionRequest.Project, executionRequest.Domain, executionRequest.Name)) - err = e.resolveKickoffTimeArg(scheduledWorkflowExecutionRequest, launchPlan, &executionRequest) + err = e.resolveKickoffTimeArg(scheduledWorkflowExecutionRequest, launchPlan, executionRequest) if err != nil { e.metrics.FailedResolveKickoffTimeArg.Inc() logger.Error(context.Background(), err.Error()) diff --git a/flyteadmin/pkg/async/schedule/aws/workflow_executor_test.go b/flyteadmin/pkg/async/schedule/aws/workflow_executor_test.go index 0479e89073..f6fc9b9693 100644 --- a/flyteadmin/pkg/async/schedule/aws/workflow_executor_test.go +++ b/flyteadmin/pkg/async/schedule/aws/workflow_executor_test.go @@ -26,14 +26,14 @@ import ( const testKickoffTime = "kickoff time arg" var testKickoffTimestamp = time.Date(2017, 12, 22, 18, 43, 48, 0, time.UTC) -var testIdentifier = admin.NamedEntityIdentifier{ +var testIdentifier = &admin.NamedEntityIdentifier{ Name: "name", Project: "project", Domain: "domain", } var protoTestTimestamp, _ = ptypes.TimestampProto(testKickoffTimestamp) -var testKickoffTimeProtoLiteral = core.Literal{ +var testKickoffTimeProtoLiteral = &core.Literal{ Value: &core.Literal_Scalar{ Scalar: &core.Scalar{ Value: &core.Scalar_Primitive{ @@ -72,7 +72,7 @@ func TestResolveKickoffTimeArg(t *testing.T) { KickoffTimeArg: testKickoffTime, KickoffTime: testKickoffTimestamp, } - launchPlan := admin.LaunchPlan{ + launchPlan := &admin.LaunchPlan{ Closure: &admin.LaunchPlanClosure{ ExpectedInputs: &core.ParameterMap{ Parameters: map[string]*core.Parameter{ @@ -81,7 +81,7 @@ func TestResolveKickoffTimeArg(t *testing.T) { }, }, } - executionRequest := admin.ExecutionCreateRequest{ + executionRequest := &admin.ExecutionCreateRequest{ Project: testIdentifier.Project, Domain: testIdentifier.Domain, Name: testIdentifier.Name, @@ -90,11 +90,11 @@ func TestResolveKickoffTimeArg(t *testing.T) { }, } testExecutor := newWorkflowExecutorForTest(nil, nil, nil) - err := testExecutor.resolveKickoffTimeArg(scheduleRequest, launchPlan, &executionRequest) + err := testExecutor.resolveKickoffTimeArg(scheduleRequest, launchPlan, executionRequest) assert.Nil(t, err) assert.Contains(t, executionRequest.Inputs.Literals, testKickoffTime) assert.Equal(t, testKickoffTimeProtoLiteral, - *executionRequest.Inputs.Literals[testKickoffTime]) + executionRequest.Inputs.Literals[testKickoffTime]) } func TestResolveKickoffTimeArg_NoKickoffTimeArg(t *testing.T) { @@ -102,7 +102,7 @@ func TestResolveKickoffTimeArg_NoKickoffTimeArg(t *testing.T) { KickoffTimeArg: testKickoffTime, KickoffTime: testKickoffTimestamp, } - launchPlan := admin.LaunchPlan{ + launchPlan := &admin.LaunchPlan{ Closure: &admin.LaunchPlanClosure{ ExpectedInputs: &core.ParameterMap{ Parameters: map[string]*core.Parameter{ @@ -111,7 +111,7 @@ func TestResolveKickoffTimeArg_NoKickoffTimeArg(t *testing.T) { }, }, } - executionRequest := admin.ExecutionCreateRequest{ + executionRequest := &admin.ExecutionCreateRequest{ Project: testIdentifier.Project, Domain: testIdentifier.Domain, Name: testIdentifier.Name, @@ -120,7 +120,7 @@ func TestResolveKickoffTimeArg_NoKickoffTimeArg(t *testing.T) { }, } testExecutor := newWorkflowExecutorForTest(nil, nil, nil) - err := testExecutor.resolveKickoffTimeArg(scheduleRequest, launchPlan, 
&executionRequest) + err := testExecutor.resolveKickoffTimeArg(scheduleRequest, launchPlan, executionRequest) assert.Nil(t, err) assert.NotContains(t, executionRequest.Inputs.Literals, testKickoffTime) } @@ -140,7 +140,7 @@ func TestGetActiveLaunchPlanVersion(t *testing.T) { launchPlanManager := mocks.NewMockLaunchPlanManager() launchPlanManager.(*mocks.MockLaunchPlanManager).SetListLaunchPlansCallback( - func(ctx context.Context, request admin.ResourceListRequest) ( + func(ctx context.Context, request *admin.ResourceListRequest) ( *admin.LaunchPlanList, error) { assert.True(t, proto.Equal(launchPlanNamedIdentifier, request.Id)) assert.Equal(t, "eq(state,1)", request.Filters) @@ -169,7 +169,7 @@ func TestGetActiveLaunchPlanVersion_ManagerError(t *testing.T) { expectedErr := errors.New("expected error") launchPlanManager := mocks.NewMockLaunchPlanManager() launchPlanManager.(*mocks.MockLaunchPlanManager).SetListLaunchPlansCallback( - func(ctx context.Context, request admin.ResourceListRequest) ( + func(ctx context.Context, request *admin.ResourceListRequest) ( *admin.LaunchPlanList, error) { return nil, expectedErr }) @@ -185,7 +185,7 @@ func TestFormulateExecutionCreateRequest(t *testing.T) { Name: "baz", Version: "12345", } - launchPlan := admin.LaunchPlan{ + launchPlan := &admin.LaunchPlan{ Spec: &admin.LaunchPlanSpec{ WorkflowId: &core.Identifier{ Project: "project", @@ -232,21 +232,21 @@ func TestRun(t *testing.T) { testExecutionManager := mocks.MockExecutionManager{} var messagesSeen int testExecutionManager.SetCreateCallback(func( - ctx context.Context, request admin.ExecutionCreateRequest, requestedAt time.Time) ( + ctx context.Context, request *admin.ExecutionCreateRequest, requestedAt time.Time) ( *admin.ExecutionCreateResponse, error) { assert.Equal(t, "project", request.Project) assert.Equal(t, "domain", request.Domain) assert.Equal(t, "ar8fphnlc5wh9dksjncj", request.Name) if messagesSeen == 0 { assert.Contains(t, request.Inputs.Literals, testKickoffTime) - assert.Equal(t, testKickoffTimeProtoLiteral, *request.Inputs.Literals[testKickoffTime]) + assert.Equal(t, testKickoffTimeProtoLiteral, request.Inputs.Literals[testKickoffTime]) } messagesSeen++ return &admin.ExecutionCreateResponse{}, nil }) launchPlanManager := mocks.NewMockLaunchPlanManager() launchPlanManager.(*mocks.MockLaunchPlanManager).SetListLaunchPlansCallback( - func(ctx context.Context, request admin.ResourceListRequest) ( + func(ctx context.Context, request *admin.ResourceListRequest) ( *admin.LaunchPlanList, error) { assert.Equal(t, "project", request.Id.Project) assert.Equal(t, "domain", request.Id.Domain) diff --git a/flyteadmin/pkg/async/schedule/interfaces/event_scheduler.go b/flyteadmin/pkg/async/schedule/interfaces/event_scheduler.go index 8502eae167..dad2574bd2 100644 --- a/flyteadmin/pkg/async/schedule/interfaces/event_scheduler.go +++ b/flyteadmin/pkg/async/schedule/interfaces/event_scheduler.go @@ -11,9 +11,9 @@ import ( type AddScheduleInput struct { // Defines the unique identifier associated with the schedule - Identifier core.Identifier + Identifier *core.Identifier // Defines the schedule expression. - ScheduleExpression admin.Schedule + ScheduleExpression *admin.Schedule // Message payload encoded as an CloudWatch event rule InputTemplate. Payload *string // Optional: The application-wide prefix to be applied for schedule names. 
@@ -22,7 +22,7 @@ type AddScheduleInput struct { type RemoveScheduleInput struct { // Defines the unique identifier associated with the schedule - Identifier core.Identifier + Identifier *core.Identifier // Optional: The application-wide prefix to be applied for schedule names. ScheduleNamePrefix string } @@ -32,7 +32,7 @@ type EventScheduler interface { AddSchedule(ctx context.Context, input AddScheduleInput) error // CreateScheduleInput using the scheduler config and launch plan identifier and schedule - CreateScheduleInput(ctx context.Context, appConfig *appInterfaces.SchedulerConfig, identifier core.Identifier, + CreateScheduleInput(ctx context.Context, appConfig *appInterfaces.SchedulerConfig, identifier *core.Identifier, schedule *admin.Schedule) (AddScheduleInput, error) // Removes an existing schedule. diff --git a/flyteadmin/pkg/async/schedule/mocks/mock_event_scheduler.go b/flyteadmin/pkg/async/schedule/mocks/mock_event_scheduler.go index a1bcc9bee7..fb9aebe34e 100644 --- a/flyteadmin/pkg/async/schedule/mocks/mock_event_scheduler.go +++ b/flyteadmin/pkg/async/schedule/mocks/mock_event_scheduler.go @@ -18,15 +18,15 @@ type MockEventScheduler struct { } func (s *MockEventScheduler) CreateScheduleInput(ctx context.Context, appConfig *runtimeInterfaces.SchedulerConfig, - identifier core.Identifier, schedule *admin.Schedule) (interfaces.AddScheduleInput, error) { + identifier *core.Identifier, schedule *admin.Schedule) (interfaces.AddScheduleInput, error) { payload, _ := aws.SerializeScheduleWorkflowPayload( schedule.GetKickoffTimeInputArg(), - admin.NamedEntityIdentifier{ + &admin.NamedEntityIdentifier{ Project: identifier.Project, Domain: identifier.Domain, Name: identifier.Name, }) - return interfaces.AddScheduleInput{Identifier: identifier, ScheduleExpression: *schedule, Payload: payload}, nil + return interfaces.AddScheduleInput{Identifier: identifier, ScheduleExpression: schedule, Payload: payload}, nil } func (s *MockEventScheduler) AddSchedule(ctx context.Context, input interfaces.AddScheduleInput) error { diff --git a/flyteadmin/pkg/async/schedule/noop/event_scheduler.go b/flyteadmin/pkg/async/schedule/noop/event_scheduler.go index ed05858607..1a6ac3c7d2 100644 --- a/flyteadmin/pkg/async/schedule/noop/event_scheduler.go +++ b/flyteadmin/pkg/async/schedule/noop/event_scheduler.go @@ -13,7 +13,7 @@ import ( type EventScheduler struct{} -func (s *EventScheduler) CreateScheduleInput(ctx context.Context, appConfig *runtimeInterfaces.SchedulerConfig, identifier core.Identifier, schedule *admin.Schedule) (interfaces.AddScheduleInput, error) { +func (s *EventScheduler) CreateScheduleInput(ctx context.Context, appConfig *runtimeInterfaces.SchedulerConfig, identifier *core.Identifier, schedule *admin.Schedule) (interfaces.AddScheduleInput, error) { panic("implement me") } diff --git a/flyteadmin/pkg/common/flyte_url.go b/flyteadmin/pkg/common/flyte_url.go index 49ba984cd5..f5245ac238 100644 --- a/flyteadmin/pkg/common/flyte_url.go +++ b/flyteadmin/pkg/common/flyte_url.go @@ -125,7 +125,7 @@ func ParseFlyteURLToExecution(flyteURL string) (ParsedExecution, error) { } -func FlyteURLsFromNodeExecutionID(nodeExecutionID core.NodeExecutionIdentifier, deck bool) *admin.FlyteURLs { +func FlyteURLsFromNodeExecutionID(nodeExecutionID *core.NodeExecutionIdentifier, deck bool) *admin.FlyteURLs { base := fmt.Sprintf("flyte://v1/%s/%s/%s/%s", nodeExecutionID.ExecutionId.Project, nodeExecutionID.ExecutionId.Domain, nodeExecutionID.ExecutionId.Name, nodeExecutionID.NodeId) @@ -142,7 +142,7 @@ func 
FlyteURLsFromNodeExecutionID(nodeExecutionID core.NodeExecutionIdentifier, // FlyteURLKeyFromNodeExecutionID is a modified version of the function above. // This constructs a fully unique prefix, and when post-pended with the output name, forms a fully unique name for // the artifact service (including the project/domain of course, which the artifact service will add). -func FlyteURLKeyFromNodeExecutionID(nodeExecutionID core.NodeExecutionIdentifier) string { +func FlyteURLKeyFromNodeExecutionID(nodeExecutionID *core.NodeExecutionIdentifier) string { res := fmt.Sprintf("%s/%s", nodeExecutionID.ExecutionId.Name, nodeExecutionID.NodeId) return res @@ -150,13 +150,13 @@ func FlyteURLKeyFromNodeExecutionID(nodeExecutionID core.NodeExecutionIdentifier // FlyteURLKeyFromNodeExecutionIDRetry is a modified version of the function above. // See the uniqueness comment above. -func FlyteURLKeyFromNodeExecutionIDRetry(nodeExecutionID core.NodeExecutionIdentifier, retry int) string { +func FlyteURLKeyFromNodeExecutionIDRetry(nodeExecutionID *core.NodeExecutionIdentifier, retry int) string { res := fmt.Sprintf("%s/%s/%s", nodeExecutionID.ExecutionId.Name, nodeExecutionID.NodeId, strconv.Itoa(retry)) return res } -func FlyteURLsFromTaskExecutionID(taskExecutionID core.TaskExecutionIdentifier, deck bool) *admin.FlyteURLs { +func FlyteURLsFromTaskExecutionID(taskExecutionID *core.TaskExecutionIdentifier, deck bool) *admin.FlyteURLs { base := fmt.Sprintf("flyte://v1/%s/%s/%s/%s/%s", taskExecutionID.NodeExecutionId.ExecutionId.Project, taskExecutionID.NodeExecutionId.ExecutionId.Domain, taskExecutionID.NodeExecutionId.ExecutionId.Name, taskExecutionID.NodeExecutionId.NodeId, strconv.Itoa(int(taskExecutionID.RetryAttempt))) diff --git a/flyteadmin/pkg/common/flyte_url_test.go b/flyteadmin/pkg/common/flyte_url_test.go index a3d3141459..a0cbfcda2b 100644 --- a/flyteadmin/pkg/common/flyte_url_test.go +++ b/flyteadmin/pkg/common/flyte_url_test.go @@ -10,7 +10,7 @@ import ( func TestFlyteURLsFromNodeExecutionID(t *testing.T) { t.Run("with deck", func(t *testing.T) { - ne := core.NodeExecutionIdentifier{ + ne := &core.NodeExecutionIdentifier{ NodeId: "n0-dn0-n1", ExecutionId: &core.WorkflowExecutionIdentifier{ Project: "fs", @@ -25,7 +25,7 @@ func TestFlyteURLsFromNodeExecutionID(t *testing.T) { }) t.Run("without deck", func(t *testing.T) { - ne := core.NodeExecutionIdentifier{ + ne := &core.NodeExecutionIdentifier{ NodeId: "n0-dn0-n1", ExecutionId: &core.WorkflowExecutionIdentifier{ Project: "fs", @@ -42,7 +42,7 @@ func TestFlyteURLsFromNodeExecutionID(t *testing.T) { func TestFlyteURLsFromTaskExecutionID(t *testing.T) { t.Run("with deck", func(t *testing.T) { - te := core.TaskExecutionIdentifier{ + te := &core.TaskExecutionIdentifier{ TaskId: &core.Identifier{ ResourceType: core.ResourceType_TASK, Project: "fs", @@ -67,7 +67,7 @@ func TestFlyteURLsFromTaskExecutionID(t *testing.T) { }) t.Run("without deck", func(t *testing.T) { - te := core.TaskExecutionIdentifier{ + te := &core.TaskExecutionIdentifier{ TaskId: &core.Identifier{ ResourceType: core.ResourceType_TASK, Project: "fs", diff --git a/flyteadmin/pkg/data/implementations/aws_remote_url.go b/flyteadmin/pkg/data/implementations/aws_remote_url.go index aa71309dea..db8af08d49 100644 --- a/flyteadmin/pkg/data/implementations/aws_remote_url.go +++ b/flyteadmin/pkg/data/implementations/aws_remote_url.go @@ -52,12 +52,12 @@ func (a *AWSRemoteURL) splitURI(ctx context.Context, uri string) (AWSS3Object, e }, nil } -func (a *AWSRemoteURL) Get(ctx context.Context, 
uri string) (admin.UrlBlob, error) { +func (a *AWSRemoteURL) Get(ctx context.Context, uri string) (*admin.UrlBlob, error) { logger.Debugf(ctx, "Getting signed url for - %s", uri) s3URI, err := a.splitURI(ctx, uri) if err != nil { logger.Debugf(ctx, "failed to extract s3 bucket and key from uri: %s", uri) - return admin.UrlBlob{}, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid uri: %s", uri) + return &admin.UrlBlob{}, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid uri: %s", uri) } // First, get the size of the url blob. headResult, err := a.s3Client.HeadObject(&s3.HeadObjectInput{ @@ -66,7 +66,7 @@ func (a *AWSRemoteURL) Get(ctx context.Context, uri string) (admin.UrlBlob, erro }) if err != nil { logger.Debugf(ctx, "failed to get object size for %s with %v", uri, err) - return admin.UrlBlob{}, errors.NewFlyteAdminErrorf( + return &admin.UrlBlob{}, errors.NewFlyteAdminErrorf( codes.Internal, "failed to get object size for %s with %v", uri, err) } @@ -79,14 +79,14 @@ func (a *AWSRemoteURL) Get(ctx context.Context, uri string) (admin.UrlBlob, erro if err != nil { logger.Warning(ctx, "failed to presign url for uri [%s] for %v with err %v", uri, a.presignDuration, err) - return admin.UrlBlob{}, errors.NewFlyteAdminErrorf(codes.Internal, + return &admin.UrlBlob{}, errors.NewFlyteAdminErrorf(codes.Internal, "failed to presign url for uri [%s] for %v with err %v", uri, a.presignDuration, err) } var contentLength int64 if headResult.ContentLength != nil { contentLength = *headResult.ContentLength } - return admin.UrlBlob{ + return &admin.UrlBlob{ Url: urlStr, Bytes: contentLength, }, nil diff --git a/flyteadmin/pkg/data/implementations/gcp_remote_url.go b/flyteadmin/pkg/data/implementations/gcp_remote_url.go index 79bd8b29a9..3a8dc98679 100644 --- a/flyteadmin/pkg/data/implementations/gcp_remote_url.go +++ b/flyteadmin/pkg/data/implementations/gcp_remote_url.go @@ -118,19 +118,19 @@ func (g *GCPRemoteURL) signURL(ctx context.Context, gcsURI GCPGCSObject) (string return gcs.SignedURL(gcsURI.bucket, gcsURI.object, opts) } -func (g *GCPRemoteURL) Get(ctx context.Context, uri string) (admin.UrlBlob, error) { +func (g *GCPRemoteURL) Get(ctx context.Context, uri string) (*admin.UrlBlob, error) { logger.Debugf(ctx, "Getting signed url for - %s", uri) gcsURI, err := g.splitURI(ctx, uri) if err != nil { logger.Debugf(ctx, "failed to extract gcs bucket and object from uri: %s", uri) - return admin.UrlBlob{}, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid uri: %s", uri) + return &admin.UrlBlob{}, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid uri: %s", uri) } // First, get the size of the url blob. 
attrs, err := g.gcsClient.Bucket(gcsURI.bucket).Object(gcsURI.object).Attrs(ctx) if err != nil { logger.Debugf(ctx, "failed to get object size for %s with %v", uri, err) - return admin.UrlBlob{}, errors.NewFlyteAdminErrorf( + return &admin.UrlBlob{}, errors.NewFlyteAdminErrorf( codes.Internal, "failed to get object size for %s with %v", uri, err) } @@ -138,10 +138,10 @@ func (g *GCPRemoteURL) Get(ctx context.Context, uri string) (admin.UrlBlob, erro if err != nil { logger.Warning(ctx, "failed to presign url for uri [%s] for %v with err %v", uri, g.signDuration, err) - return admin.UrlBlob{}, errors.NewFlyteAdminErrorf(codes.Internal, + return &admin.UrlBlob{}, errors.NewFlyteAdminErrorf(codes.Internal, "failed to presign url for uri [%s] for %v with err %v", uri, g.signDuration, err) } - return admin.UrlBlob{ + return &admin.UrlBlob{ Url: urlStr, Bytes: attrs.Size, }, nil diff --git a/flyteadmin/pkg/data/implementations/noop_remote_url.go b/flyteadmin/pkg/data/implementations/noop_remote_url.go index 9e623550de..520b2adf5e 100644 --- a/flyteadmin/pkg/data/implementations/noop_remote_url.go +++ b/flyteadmin/pkg/data/implementations/noop_remote_url.go @@ -16,13 +16,13 @@ type NoopRemoteURL struct { remoteDataStoreClient storage.DataStore } -func (n *NoopRemoteURL) Get(ctx context.Context, uri string) (admin.UrlBlob, error) { +func (n *NoopRemoteURL) Get(ctx context.Context, uri string) (*admin.UrlBlob, error) { metadata, err := n.remoteDataStoreClient.Head(ctx, storage.DataReference(uri)) if err != nil { - return admin.UrlBlob{}, errors.NewFlyteAdminErrorf(codes.Internal, + return &admin.UrlBlob{}, errors.NewFlyteAdminErrorf(codes.Internal, "failed to get metadata for uri: %s with err: %v", uri, err) } - return admin.UrlBlob{ + return &admin.UrlBlob{ Url: uri, Bytes: metadata.Size(), }, nil diff --git a/flyteadmin/pkg/data/interfaces/remote.go b/flyteadmin/pkg/data/interfaces/remote.go index 049c9a0465..a027162ad9 100644 --- a/flyteadmin/pkg/data/interfaces/remote.go +++ b/flyteadmin/pkg/data/interfaces/remote.go @@ -9,5 +9,5 @@ import ( // Defines an interface for fetching pre-signed URLs. type RemoteURLInterface interface { // TODO: Refactor for URI to be of type DataReference. 
We should package a FromString-like function in flytestdlib - Get(ctx context.Context, uri string) (admin.UrlBlob, error) + Get(ctx context.Context, uri string) (*admin.UrlBlob, error) } diff --git a/flyteadmin/pkg/data/mocks/remote.go b/flyteadmin/pkg/data/mocks/remote.go index 3f7b0f64e0..25e08ab1ca 100644 --- a/flyteadmin/pkg/data/mocks/remote.go +++ b/flyteadmin/pkg/data/mocks/remote.go @@ -9,14 +9,14 @@ import ( // Mock implementation of a RemoteURLInterface type MockRemoteURL struct { - GetCallback func(ctx context.Context, uri string) (admin.UrlBlob, error) + GetCallback func(ctx context.Context, uri string) (*admin.UrlBlob, error) } -func (m *MockRemoteURL) Get(ctx context.Context, uri string) (admin.UrlBlob, error) { +func (m *MockRemoteURL) Get(ctx context.Context, uri string) (*admin.UrlBlob, error) { if m.GetCallback != nil { return m.GetCallback(ctx, uri) } - return admin.UrlBlob{}, nil + return &admin.UrlBlob{}, nil } func NewMockRemoteURL() interfaces.RemoteURLInterface { diff --git a/flyteadmin/pkg/manager/impl/description_entity_manager.go b/flyteadmin/pkg/manager/impl/description_entity_manager.go index 4e0e070ad8..a7affd5e88 100644 --- a/flyteadmin/pkg/manager/impl/description_entity_manager.go +++ b/flyteadmin/pkg/manager/impl/description_entity_manager.go @@ -32,17 +32,17 @@ type DescriptionEntityManager struct { metrics DescriptionEntityMetrics } -func (d *DescriptionEntityManager) GetDescriptionEntity(ctx context.Context, request admin.ObjectGetRequest) ( +func (d *DescriptionEntityManager) GetDescriptionEntity(ctx context.Context, request *admin.ObjectGetRequest) ( *admin.DescriptionEntity, error) { if err := validation.ValidateDescriptionEntityGetRequest(request); err != nil { logger.Errorf(ctx, "invalid request [%+v]: %v", request, err) return nil, err } ctx = contextutils.WithProjectDomain(ctx, request.Id.Project, request.Id.Domain) - return util.GetDescriptionEntity(ctx, d.db, *request.Id) + return util.GetDescriptionEntity(ctx, d.db, request.Id) } -func (d *DescriptionEntityManager) ListDescriptionEntity(ctx context.Context, request admin.DescriptionEntityListRequest) (*admin.DescriptionEntityList, error) { +func (d *DescriptionEntityManager) ListDescriptionEntity(ctx context.Context, request *admin.DescriptionEntityListRequest) (*admin.DescriptionEntityList, error) { // Check required fields if err := validation.ValidateDescriptionEntityListRequest(request); err != nil { return nil, err diff --git a/flyteadmin/pkg/manager/impl/description_entity_manager_test.go b/flyteadmin/pkg/manager/impl/description_entity_manager_test.go index 33cab0350d..dbcab8bdb1 100644 --- a/flyteadmin/pkg/manager/impl/description_entity_manager_test.go +++ b/flyteadmin/pkg/manager/impl/description_entity_manager_test.go @@ -46,13 +46,13 @@ func TestDescriptionEntityManager_Get(t *testing.T) { repository := getMockRepositoryForDETest() manager := NewDescriptionEntityManager(repository, getMockConfigForDETest(), mockScope.NewTestScope()) - response, err := manager.GetDescriptionEntity(context.Background(), admin.ObjectGetRequest{ + response, err := manager.GetDescriptionEntity(context.Background(), &admin.ObjectGetRequest{ Id: &descriptionEntityIdentifier, }) assert.NoError(t, err) assert.NotNil(t, response) - response, err = manager.GetDescriptionEntity(context.Background(), admin.ObjectGetRequest{ + response, err = manager.GetDescriptionEntity(context.Background(), &admin.ObjectGetRequest{ Id: &badDescriptionEntityIdentifier, }) assert.Error(t, err) @@ -64,7 +64,7 @@ func 
TestDescriptionEntityManager_List(t *testing.T) { manager := NewDescriptionEntityManager(repository, getMockConfigForDETest(), mockScope.NewTestScope()) t.Run("failed to validate a request", func(t *testing.T) { - response, err := manager.ListDescriptionEntity(context.Background(), admin.DescriptionEntityListRequest{ + response, err := manager.ListDescriptionEntity(context.Background(), &admin.DescriptionEntityListRequest{ Id: &admin.NamedEntityIdentifier{ Name: "flyte", }, @@ -74,7 +74,7 @@ func TestDescriptionEntityManager_List(t *testing.T) { }) t.Run("failed to sort description entity", func(t *testing.T) { - response, err := manager.ListDescriptionEntity(context.Background(), admin.DescriptionEntityListRequest{ + response, err := manager.ListDescriptionEntity(context.Background(), &admin.DescriptionEntityListRequest{ ResourceType: core.ResourceType_TASK, Id: &admin.NamedEntityIdentifier{ Name: "flyte", @@ -89,7 +89,7 @@ func TestDescriptionEntityManager_List(t *testing.T) { }) t.Run("failed to validate token", func(t *testing.T) { - response, err := manager.ListDescriptionEntity(context.Background(), admin.DescriptionEntityListRequest{ + response, err := manager.ListDescriptionEntity(context.Background(), &admin.DescriptionEntityListRequest{ ResourceType: core.ResourceType_TASK, Id: &admin.NamedEntityIdentifier{ Name: "flyte", @@ -104,7 +104,7 @@ func TestDescriptionEntityManager_List(t *testing.T) { }) t.Run("list description entities in the task", func(t *testing.T) { - response, err := manager.ListDescriptionEntity(context.Background(), admin.DescriptionEntityListRequest{ + response, err := manager.ListDescriptionEntity(context.Background(), &admin.DescriptionEntityListRequest{ ResourceType: core.ResourceType_TASK, Id: &admin.NamedEntityIdentifier{ Name: "flyte", @@ -118,7 +118,7 @@ func TestDescriptionEntityManager_List(t *testing.T) { }) t.Run("list description entities in the workflow", func(t *testing.T) { - response, err := manager.ListDescriptionEntity(context.Background(), admin.DescriptionEntityListRequest{ + response, err := manager.ListDescriptionEntity(context.Background(), &admin.DescriptionEntityListRequest{ ResourceType: core.ResourceType_WORKFLOW, Id: &admin.NamedEntityIdentifier{ Name: "flyte", @@ -132,7 +132,7 @@ func TestDescriptionEntityManager_List(t *testing.T) { }) t.Run("failed to get filter", func(t *testing.T) { - response, err := manager.ListDescriptionEntity(context.Background(), admin.DescriptionEntityListRequest{ + response, err := manager.ListDescriptionEntity(context.Background(), &admin.DescriptionEntityListRequest{ ResourceType: core.ResourceType_WORKFLOW, Id: &admin.NamedEntityIdentifier{ Name: "flyte", diff --git a/flyteadmin/pkg/manager/impl/execution_manager.go b/flyteadmin/pkg/manager/impl/execution_manager.go index 13521cedbb..6ae9a61a52 100644 --- a/flyteadmin/pkg/manager/impl/execution_manager.go +++ b/flyteadmin/pkg/manager/impl/execution_manager.go @@ -105,7 +105,7 @@ func getUser(ctx context.Context) string { } func (m *ExecutionManager) populateExecutionQueue( - ctx context.Context, identifier core.Identifier, compiledWorkflow *core.CompiledWorkflowClosure) { + ctx context.Context, identifier *core.Identifier, compiledWorkflow *core.CompiledWorkflowClosure) { queueConfig := m.queueAllocator.GetQueue(ctx, identifier) for _, task := range compiledWorkflow.Tasks { container := task.Template.GetContainer() @@ -287,7 +287,7 @@ func (m *ExecutionManager) getInheritedExecMetadata(ctx context.Context, request parentNodeExecutionID = 
parentNodeExecutionModel.ID - sourceExecutionModel, err := util.GetExecutionModel(ctx, m.db, *requestSpec.Metadata.ParentNodeExecution.ExecutionId) + sourceExecutionModel, err := util.GetExecutionModel(ctx, m.db, requestSpec.Metadata.ParentNodeExecution.ExecutionId) if err != nil { logger.Errorf(ctx, "Failed to get workflow execution [%+v] that launched this execution [%+v] with error %v", requestSpec.Metadata.ParentNodeExecution, workflowExecutionID, err) @@ -321,7 +321,7 @@ func (m *ExecutionManager) getInheritedExecMetadata(ctx context.Context, request func (m *ExecutionManager) getExecutionConfig(ctx context.Context, request *admin.ExecutionCreateRequest, launchPlan *admin.LaunchPlan) (*admin.WorkflowExecutionConfig, error) { - workflowExecConfig := admin.WorkflowExecutionConfig{} + workflowExecConfig := &admin.WorkflowExecutionConfig{} // Merge the request spec into workflowExecConfig workflowExecConfig = util.MergeIntoExecConfig(workflowExecConfig, request.Spec) @@ -399,7 +399,7 @@ func (m *ExecutionManager) getExecutionConfig(ctx context.Context, request *admi logger.Infof(ctx, "getting the workflow execution config from application configuration") // Defaults to one from the application config - return &workflowExecConfig, nil + return workflowExecConfig, nil } func (m *ExecutionManager) getClusterAssignment(ctx context.Context, request *admin.ExecutionCreateRequest) ( @@ -428,7 +428,7 @@ func (m *ExecutionManager) getClusterAssignment(ctx context.Context, request *ad } func (m *ExecutionManager) launchSingleTaskExecution( - ctx context.Context, request admin.ExecutionCreateRequest, requestedAt time.Time) ( + ctx context.Context, request *admin.ExecutionCreateRequest, requestedAt time.Time) ( context.Context, *models.Execution, error) { taskModel, err := m.db.TaskRepo().Get(ctx, repositoryInterfaces.Identifier{ @@ -483,12 +483,12 @@ func (m *ExecutionManager) launchSingleTaskExecution( } name := util.GetExecutionName(request) - workflowExecutionID := core.WorkflowExecutionIdentifier{ + workflowExecutionID := &core.WorkflowExecutionIdentifier{ Project: request.Project, Domain: request.Domain, Name: name, } - ctx = getExecutionContext(ctx, &workflowExecutionID) + ctx = getExecutionContext(ctx, workflowExecutionID) namespace := common.GetNamespaceName( m.config.NamespaceMappingConfiguration().GetNamespaceTemplate(), workflowExecutionID.Project, workflowExecutionID.Domain) @@ -501,7 +501,7 @@ func (m *ExecutionManager) launchSingleTaskExecution( // Get the node execution (if any) that launched this execution var parentNodeExecutionID uint var sourceExecutionID uint - parentNodeExecutionID, sourceExecutionID, err = m.getInheritedExecMetadata(ctx, requestSpec, &workflowExecutionID) + parentNodeExecutionID, sourceExecutionID, err = m.getInheritedExecMetadata(ctx, requestSpec, workflowExecutionID) if err != nil { return nil, nil, err } @@ -513,7 +513,7 @@ func (m *ExecutionManager) launchSingleTaskExecution( } // Dynamically assign execution queues. 
- m.populateExecutionQueue(ctx, *workflow.Id, workflow.Closure.CompiledWorkflow) + m.populateExecutionQueue(ctx, workflow.Id, workflow.Closure.CompiledWorkflow) inputsURI, err := common.OffloadLiteralMap(ctx, m.storageClient, request.Inputs, workflowExecutionID.Project, workflowExecutionID.Domain, workflowExecutionID.Name, shared.Inputs) if err != nil { @@ -523,7 +523,7 @@ func (m *ExecutionManager) launchSingleTaskExecution( if err != nil { return nil, nil, err } - executionConfig, err := m.getExecutionConfig(ctx, &request, nil) + executionConfig, err := m.getExecutionConfig(ctx, request, nil) if err != nil { return nil, nil, err } @@ -548,7 +548,7 @@ func (m *ExecutionManager) launchSingleTaskExecution( rawOutputDataConfig = executionConfig.RawOutputDataConfig } - clusterAssignment, err := m.getClusterAssignment(ctx, &request) + clusterAssignment, err := m.getClusterAssignment(ctx, request) if err != nil { return nil, nil, err } @@ -571,7 +571,7 @@ func (m *ExecutionManager) launchSingleTaskExecution( ExecutionClusterLabel: executionClusterLabel, } - overrides, err := m.addPluginOverrides(ctx, &workflowExecutionID, workflowExecutionID.Name, "") + overrides, err := m.addPluginOverrides(ctx, workflowExecutionID, workflowExecutionID.Name, "") if err != nil { return nil, nil, err } @@ -586,7 +586,7 @@ func (m *ExecutionManager) launchSingleTaskExecution( workflowExecutor := plugins.Get[workflowengineInterfaces.WorkflowExecutor](m.pluginRegistry, plugins.PluginIDWorkflowExecutor) execInfo, err := workflowExecutor.Execute(ctx, workflowengineInterfaces.ExecutionData{ Namespace: namespace, - ExecutionID: &workflowExecutionID, + ExecutionID: workflowExecutionID, ReferenceWorkflowName: workflow.Id.Name, ReferenceLaunchPlanName: launchPlan.Id.Name, WorkflowClosure: workflow.Closure.CompiledWorkflow, @@ -598,7 +598,7 @@ func (m *ExecutionManager) launchSingleTaskExecution( if err != nil { m.systemMetrics.PropellerFailures.Inc() logger.Infof(ctx, "Failed to execute workflow %+v with execution id %+v and inputs %+v with err %v", - request, workflowExecutionID, request.Inputs, err) + request, &workflowExecutionID, request.Inputs, err) return nil, nil, err } executionCreatedAt := time.Now() @@ -692,7 +692,7 @@ func resolveSecurityCtx(ctx context.Context, executionConfigSecurityCtx *core.Se // getStringFromInput should be called when a tag or partition value is a binding to an input. the input is looked up // from the input map and the binding, and an error is returned if the input key is not in the map. 
-func (m *ExecutionManager) getStringFromInput(ctx context.Context, inputBinding core.InputBindingData, inputs map[string]*core.Literal) (string, error) { +func (m *ExecutionManager) getStringFromInput(ctx context.Context, inputBinding *core.InputBindingData, inputs map[string]*core.Literal) (string, error) { inputName := inputBinding.GetVar() if inputName == "" { @@ -732,7 +732,7 @@ func (m *ExecutionManager) getLabelValue(ctx context.Context, l *core.LabelValue return "", errors.NewFlyteAdminErrorf(codes.InvalidArgument, "label value is nil") } if l.GetInputBinding() != nil { - return m.getStringFromInput(ctx, *l.GetInputBinding(), inputs) + return m.getStringFromInput(ctx, l.GetInputBinding(), inputs) } if l.GetStaticValue() != "" { return l.GetStaticValue(), nil @@ -740,7 +740,7 @@ func (m *ExecutionManager) getLabelValue(ctx context.Context, l *core.LabelValue return "", errors.NewFlyteAdminErrorf(codes.InvalidArgument, "label value is empty") } -func (m *ExecutionManager) fillInTemplateArgs(ctx context.Context, query core.ArtifactQuery, inputs map[string]*core.Literal) (core.ArtifactQuery, error) { +func (m *ExecutionManager) fillInTemplateArgs(ctx context.Context, query *core.ArtifactQuery, inputs map[string]*core.Literal) (*core.ArtifactQuery, error) { if query.GetUri() != "" { // If a query string, then just pass it through, nothing to fill in. return query, nil @@ -805,7 +805,7 @@ func (m *ExecutionManager) fillInTemplateArgs(ctx context.Context, query core.Ar } } - return core.ArtifactQuery{ + return &core.ArtifactQuery{ Identifier: &core.ArtifactQuery_ArtifactId{ ArtifactId: &core.ArtifactID{ ArtifactKey: &core.ArtifactKey{ @@ -825,7 +825,7 @@ func (m *ExecutionManager) fillInTemplateArgs(ctx context.Context, query core.Ar } func (m *ExecutionManager) launchExecutionAndPrepareModel( - ctx context.Context, request admin.ExecutionCreateRequest, requestedAt time.Time) ( + ctx context.Context, request *admin.ExecutionCreateRequest, requestedAt time.Time) ( context.Context, *models.Execution, []*models.ExecutionTag, error) { err := validation.ValidateExecutionRequest(ctx, request, m.db, m.config.ApplicationConfiguration()) @@ -840,7 +840,7 @@ func (m *ExecutionManager) launchExecutionAndPrepareModel( return ctx, model, nil, err } - launchPlanModel, err := util.GetLaunchPlanModel(ctx, m.db, *request.Spec.LaunchPlan) + launchPlanModel, err := util.GetLaunchPlanModel(ctx, m.db, request.Spec.LaunchPlan) if err != nil { logger.Debugf(ctx, "Failed to get launch plan model for ExecutionCreateRequest %+v with err %v", request, err) return nil, nil, nil, err @@ -870,7 +870,7 @@ func (m *ExecutionManager) launchExecutionAndPrepareModel( return nil, nil, nil, err } - workflowModel, err := util.GetWorkflowModel(ctx, m.db, *launchPlan.Spec.WorkflowId) + workflowModel, err := util.GetWorkflowModel(ctx, m.db, launchPlan.Spec.WorkflowId) if err != nil { logger.Debugf(ctx, "Failed to get workflow with id %+v with err %v", launchPlan.Spec.WorkflowId, err) return nil, nil, nil, err @@ -889,12 +889,12 @@ func (m *ExecutionManager) launchExecutionAndPrepareModel( workflow.Closure = closure name := util.GetExecutionName(request) - workflowExecutionID := core.WorkflowExecutionIdentifier{ + workflowExecutionID := &core.WorkflowExecutionIdentifier{ Project: request.Project, Domain: request.Domain, Name: name, } - ctx = getExecutionContext(ctx, &workflowExecutionID) + ctx = getExecutionContext(ctx, workflowExecutionID) var requestSpec = request.Spec if requestSpec.Metadata == nil { requestSpec.Metadata = 
&admin.ExecutionMetadata{} @@ -905,7 +905,7 @@ func (m *ExecutionManager) launchExecutionAndPrepareModel( // Get the node and parent execution (if any) that launched this execution var parentNodeExecutionID uint var sourceExecutionID uint - parentNodeExecutionID, sourceExecutionID, err = m.getInheritedExecMetadata(ctx, requestSpec, &workflowExecutionID) + parentNodeExecutionID, sourceExecutionID, err = m.getInheritedExecMetadata(ctx, requestSpec, workflowExecutionID) if err != nil { return nil, nil, nil, err } @@ -917,7 +917,7 @@ func (m *ExecutionManager) launchExecutionAndPrepareModel( } // Dynamically assign execution queues. - m.populateExecutionQueue(ctx, *workflow.Id, workflow.Closure.CompiledWorkflow) + m.populateExecutionQueue(ctx, workflow.Id, workflow.Closure.CompiledWorkflow) inputsURI, err := common.OffloadLiteralMap(ctx, m.storageClient, executionInputs, workflowExecutionID.Project, workflowExecutionID.Domain, workflowExecutionID.Name, shared.Inputs) if err != nil { @@ -928,7 +928,7 @@ func (m *ExecutionManager) launchExecutionAndPrepareModel( return nil, nil, nil, err } - executionConfig, err := m.getExecutionConfig(ctx, &request, launchPlan) + executionConfig, err := m.getExecutionConfig(ctx, request, launchPlan) if err != nil { return nil, nil, nil, err } @@ -953,7 +953,7 @@ func (m *ExecutionManager) launchExecutionAndPrepareModel( rawOutputDataConfig = executionConfig.RawOutputDataConfig } - clusterAssignment, err := m.getClusterAssignment(ctx, &request) + clusterAssignment, err := m.getClusterAssignment(ctx, request) if err != nil { return nil, nil, nil, err } @@ -977,7 +977,7 @@ func (m *ExecutionManager) launchExecutionAndPrepareModel( ExecutionClusterLabel: executionClusterLabel, } - overrides, err := m.addPluginOverrides(ctx, &workflowExecutionID, launchPlan.GetSpec().WorkflowId.Name, launchPlan.Id.Name) + overrides, err := m.addPluginOverrides(ctx, workflowExecutionID, launchPlan.GetSpec().WorkflowId.Name, launchPlan.Id.Name) if err != nil { return nil, nil, nil, err } @@ -1027,7 +1027,7 @@ func (m *ExecutionManager) launchExecutionAndPrepareModel( workflowExecutor := plugins.Get[workflowengineInterfaces.WorkflowExecutor](m.pluginRegistry, plugins.PluginIDWorkflowExecutor) execInfo, execErr := workflowExecutor.Execute(ctx, workflowengineInterfaces.ExecutionData{ Namespace: namespace, - ExecutionID: &workflowExecutionID, + ExecutionID: workflowExecutionID, ReferenceWorkflowName: workflow.Id.Name, ReferenceLaunchPlanName: launchPlan.Id.Name, WorkflowClosure: workflow.Closure.CompiledWorkflow, @@ -1065,7 +1065,7 @@ func (m *ExecutionManager) launchExecutionAndPrepareModel( // Inserts an execution model into the database store and emits platform metrics. 
func (m *ExecutionManager) createExecutionModel( ctx context.Context, executionModel *models.Execution, executionTagModel []*models.ExecutionTag) (*core.WorkflowExecutionIdentifier, error) { - workflowExecutionIdentifier := core.WorkflowExecutionIdentifier{ + workflowExecutionIdentifier := &core.WorkflowExecutionIdentifier{ Project: executionModel.ExecutionKey.Project, Domain: executionModel.ExecutionKey.Domain, Name: executionModel.ExecutionKey.Name, @@ -1080,11 +1080,11 @@ func (m *ExecutionManager) createExecutionModel( m.systemMetrics.ExecutionsCreated.Inc() m.systemMetrics.SpecSizeBytes.Observe(float64(len(executionModel.Spec))) m.systemMetrics.ClosureSizeBytes.Observe(float64(len(executionModel.Closure))) - return &workflowExecutionIdentifier, nil + return workflowExecutionIdentifier, nil } func (m *ExecutionManager) CreateExecution( - ctx context.Context, request admin.ExecutionCreateRequest, requestedAt time.Time) ( + ctx context.Context, request *admin.ExecutionCreateRequest, requestedAt time.Time) ( *admin.ExecutionCreateResponse, error) { // Prior to flyteidl v0.15.0, Inputs was held in ExecutionSpec. Ensure older clients continue to work. @@ -1108,9 +1108,9 @@ func (m *ExecutionManager) CreateExecution( } func (m *ExecutionManager) RelaunchExecution( - ctx context.Context, request admin.ExecutionRelaunchRequest, requestedAt time.Time) ( + ctx context.Context, request *admin.ExecutionRelaunchRequest, requestedAt time.Time) ( *admin.ExecutionCreateResponse, error) { - existingExecutionModel, err := util.GetExecutionModel(ctx, m.db, *request.Id) + existingExecutionModel, err := util.GetExecutionModel(ctx, m.db, request.Id) if err != nil { logger.Debugf(ctx, "Failed to get execution model for request [%+v] with err %v", request, err) return nil, err @@ -1144,7 +1144,7 @@ func (m *ExecutionManager) RelaunchExecution( executionSpec.OverwriteCache = request.GetOverwriteCache() var executionModel *models.Execution var executionTagModel []*models.ExecutionTag - ctx, executionModel, executionTagModel, err = m.launchExecutionAndPrepareModel(ctx, admin.ExecutionCreateRequest{ + ctx, executionModel, executionTagModel, err = m.launchExecutionAndPrepareModel(ctx, &admin.ExecutionCreateRequest{ Project: request.Id.Project, Domain: request.Id.Domain, Name: request.Name, @@ -1166,9 +1166,9 @@ func (m *ExecutionManager) RelaunchExecution( } func (m *ExecutionManager) RecoverExecution( - ctx context.Context, request admin.ExecutionRecoverRequest, requestedAt time.Time) ( + ctx context.Context, request *admin.ExecutionRecoverRequest, requestedAt time.Time) ( *admin.ExecutionCreateResponse, error) { - existingExecutionModel, err := util.GetExecutionModel(ctx, m.db, *request.Id) + existingExecutionModel, err := util.GetExecutionModel(ctx, m.db, request.Id) if err != nil { logger.Debugf(ctx, "Failed to get execution model for request [%+v] with err %v", request, err) return nil, err @@ -1196,7 +1196,7 @@ func (m *ExecutionManager) RecoverExecution( executionSpec.Metadata.ReferenceExecution = existingExecution.Id var executionModel *models.Execution var executionTagModel []*models.ExecutionTag - ctx, executionModel, executionTagModel, err = m.launchExecutionAndPrepareModel(ctx, admin.ExecutionCreateRequest{ + ctx, executionModel, executionTagModel, err = m.launchExecutionAndPrepareModel(ctx, &admin.ExecutionCreateRequest{ Project: request.Id.Project, Domain: request.Id.Domain, Name: request.Name, @@ -1232,7 +1232,7 @@ func (m *ExecutionManager) emitScheduledWorkflowMetrics( "[%s/%s/%s]", 
executionModel.Project, executionModel.Domain, executionModel.Name) return } - launchPlan, err := util.GetLaunchPlan(context.Background(), m.db, *execution.Spec.LaunchPlan) + launchPlan, err := util.GetLaunchPlan(context.Background(), m.db, execution.Spec.LaunchPlan) if err != nil { logger.Warningf(context.Background(), "failed to find launch plan when emitting scheduled workflow execution stats with for "+ @@ -1345,7 +1345,7 @@ func (m *ExecutionManager) emitOverallWorkflowExecutionTime( watch.Observe(*executionModel.ExecutionCreatedAt, terminalEventTime) } -func (m *ExecutionManager) CreateWorkflowEvent(ctx context.Context, request admin.WorkflowExecutionEventRequest) ( +func (m *ExecutionManager) CreateWorkflowEvent(ctx context.Context, request *admin.WorkflowExecutionEventRequest) ( *admin.WorkflowExecutionEventResponse, error) { err := validation.ValidateCreateWorkflowEventRequest(request, m.config.ApplicationConfiguration().GetRemoteDataConfig().MaxSizeInBytes) if err != nil { @@ -1356,7 +1356,7 @@ func (m *ExecutionManager) CreateWorkflowEvent(ctx context.Context, request admi logger.Debugf(ctx, "Received workflow execution event for [%+v] transitioning to phase [%v]", request.Event.ExecutionId, request.Event.Phase) - executionModel, err := util.GetExecutionModel(ctx, m.db, *request.Event.ExecutionId) + executionModel, err := util.GetExecutionModel(ctx, m.db, request.Event.ExecutionId) if err != nil { logger.Debugf(ctx, "failed to find execution [%+v] for recorded event [%s]: %v", request.Event.ExecutionId, request.RequestId, err) @@ -1441,14 +1441,14 @@ func (m *ExecutionManager) CreateWorkflowEvent(ctx context.Context, request admi } } - if err := m.eventPublisher.Publish(ctx, proto.MessageName(&request), &request); err != nil { + if err := m.eventPublisher.Publish(ctx, proto.MessageName(request), request); err != nil { m.systemMetrics.PublishEventError.Inc() logger.Infof(ctx, "error publishing event [%+v] with err: [%v]", request.RequestId, err) } go func() { ceCtx := context.TODO() - if err := m.cloudEventPublisher.Publish(ceCtx, proto.MessageName(&request), &request); err != nil { + if err := m.cloudEventPublisher.Publish(ceCtx, proto.MessageName(request), request); err != nil { m.systemMetrics.PublishEventError.Inc() logger.Infof(ctx, "error publishing cloud event [%+v] with err: [%v]", request.RequestId, err) } @@ -1458,13 +1458,13 @@ func (m *ExecutionManager) CreateWorkflowEvent(ctx context.Context, request admi } func (m *ExecutionManager) GetExecution( - ctx context.Context, request admin.WorkflowExecutionGetRequest) (*admin.Execution, error) { + ctx context.Context, request *admin.WorkflowExecutionGetRequest) (*admin.Execution, error) { if err := validation.ValidateWorkflowExecutionIdentifier(request.Id); err != nil { logger.Debugf(ctx, "GetExecution request [%+v] failed validation with err: %v", request, err) return nil, err } ctx = getExecutionContext(ctx, request.Id) - executionModel, err := util.GetExecutionModel(ctx, m.db, *request.Id) + executionModel, err := util.GetExecutionModel(ctx, m.db, request.Id) if err != nil { logger.Debugf(ctx, "Failed to get execution model for request [%+v] with err: %v", request, err) return nil, err @@ -1483,14 +1483,14 @@ func (m *ExecutionManager) GetExecution( return execution, nil } -func (m *ExecutionManager) UpdateExecution(ctx context.Context, request admin.ExecutionUpdateRequest, +func (m *ExecutionManager) UpdateExecution(ctx context.Context, request *admin.ExecutionUpdateRequest, requestedAt time.Time) 
(*admin.ExecutionUpdateResponse, error) { if err := validation.ValidateWorkflowExecutionIdentifier(request.Id); err != nil { logger.Debugf(ctx, "UpdateExecution request [%+v] failed validation with err: %v", request, err) return nil, err } ctx = getExecutionContext(ctx, request.Id) - executionModel, err := util.GetExecutionModel(ctx, m.db, *request.Id) + executionModel, err := util.GetExecutionModel(ctx, m.db, request.Id) if err != nil { logger.Debugf(ctx, "Failed to get execution model for request [%+v] with err: %v", request, err) return nil, err @@ -1509,9 +1509,9 @@ func (m *ExecutionManager) UpdateExecution(ctx context.Context, request admin.Ex } func (m *ExecutionManager) GetExecutionData( - ctx context.Context, request admin.WorkflowExecutionGetDataRequest) (*admin.WorkflowExecutionGetDataResponse, error) { + ctx context.Context, request *admin.WorkflowExecutionGetDataRequest) (*admin.WorkflowExecutionGetDataResponse, error) { ctx = getExecutionContext(ctx, request.Id) - executionModel, err := util.GetExecutionModel(ctx, m.db, *request.Id) + executionModel, err := util.GetExecutionModel(ctx, m.db, request.Id) if err != nil { logger.Debugf(ctx, "Failed to get execution model for request [%+v] with err: %v", request, err) return nil, err @@ -1565,7 +1565,7 @@ func (m *ExecutionManager) GetExecutionData( } func (m *ExecutionManager) ListExecutions( - ctx context.Context, request admin.ResourceListRequest) (*admin.ExecutionList, error) { + ctx context.Context, request *admin.ResourceListRequest) (*admin.ExecutionList, error) { // Check required fields if err := validation.ValidateResourceListRequest(request); err != nil { logger.Debugf(ctx, "ListExecutions request [%+v] failed validation with err: %v", request, err) @@ -1642,7 +1642,7 @@ func (m *ExecutionManager) ListExecutions( // publishNotifications will only forward major errors because the assumption made is all of the objects // that are being manipulated have already been validated/manipulated by Flyte itself. // Note: This method should be refactored somewhere else once the interaction with pushing to SNS. -func (m *ExecutionManager) publishNotifications(ctx context.Context, request admin.WorkflowExecutionEventRequest, +func (m *ExecutionManager) publishNotifications(ctx context.Context, request *admin.WorkflowExecutionEventRequest, execution models.Execution) error { // Notifications are stored in the Spec object of an admin.Execution object. adminExecution, err := transformers.FromExecutionModel(ctx, execution, transformers.DefaultExecutionTransformerOptions) @@ -1670,7 +1670,7 @@ func (m *ExecutionManager) publishNotifications(ctx context.Context, request adm // Currently all three supported notifications use email underneath to send the notification. // Convert Slack and PagerDuty into an EmailNotification type. - var emailNotification admin.EmailNotification + emailNotification := &admin.EmailNotification{} if notification.GetEmail() != nil { emailNotification.RecipientsEmail = notification.GetEmail().GetRecipientsEmail() } else if notification.GetPagerDuty() != nil { @@ -1692,7 +1692,7 @@ func (m *ExecutionManager) publishNotifications(ctx context.Context, request adm *m.config.ApplicationConfiguration().GetNotificationsConfig(), emailNotification, request, adminExecution) // Errors seen while publishing a message are considered non-fatal to the method and will not result // in the method returning an error. 
- if err = m.notificationClient.Publish(ctx, proto.MessageName(&emailNotification), email); err != nil { + if err = m.notificationClient.Publish(ctx, proto.MessageName(emailNotification), email); err != nil { m.systemMetrics.PublishNotificationError.Inc() logger.Infof(ctx, "error publishing email notification [%+v] with err: [%v]", notification, err) } @@ -1701,7 +1701,7 @@ func (m *ExecutionManager) publishNotifications(ctx context.Context, request adm } func (m *ExecutionManager) TerminateExecution( - ctx context.Context, request admin.ExecutionTerminateRequest) (*admin.ExecutionTerminateResponse, error) { + ctx context.Context, request *admin.ExecutionTerminateRequest) (*admin.ExecutionTerminateResponse, error) { if err := validation.ValidateWorkflowExecutionIdentifier(request.Id); err != nil { logger.Debugf(ctx, "received terminate execution request: %v with invalid identifier: %v", request, err) return nil, err diff --git a/flyteadmin/pkg/manager/impl/execution_manager_test.go b/flyteadmin/pkg/manager/impl/execution_manager_test.go index 58e50c0444..1cf2713083 100644 --- a/flyteadmin/pkg/manager/impl/execution_manager_test.go +++ b/flyteadmin/pkg/manager/impl/execution_manager_test.go @@ -154,7 +154,7 @@ func getLegacyExecutionRequest() *admin.ExecutionCreateRequest { r := testutils.GetExecutionRequest() r.Spec.Inputs = r.Inputs r.Inputs = nil - return &r + return r } func getMockNamespaceMappingConfig() runtimeInterfaces.NamespaceMappingConfiguration { @@ -190,7 +190,7 @@ func setDefaultLpCallbackForExecTest(repository interfaces.Repository) { }, } - lpSpecBytes, _ := proto.Marshal(&lpSpec) + lpSpecBytes, _ := proto.Marshal(lpSpec) lpClosure := admin.LaunchPlanClosure{ ExpectedInputs: lpSpec.DefaultInputs, } @@ -358,7 +358,7 @@ func TestCreateExecution(t *testing.T) { r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &mockExecutor) qosProvider := &runtimeIFaceMocks.QualityOfServiceConfiguration{} - qosProvider.OnGetTierExecutionValues().Return(map[core.QualityOfService_Tier]core.QualityOfServiceSpec{ + qosProvider.OnGetTierExecutionValues().Return(map[core.QualityOfService_Tier]*core.QualityOfServiceSpec{ core.QualityOfService_HIGH: { QueueingBudget: ptypes.DurationProto(10 * time.Minute), }, @@ -405,7 +405,7 @@ func TestCreateExecutionFromWorkflowNode(t *testing.T) { repository := getMockRepositoryForExecTest() setDefaultLpCallbackForExecTest(repository) - parentNodeExecutionID := core.NodeExecutionIdentifier{ + parentNodeExecutionID := &core.NodeExecutionIdentifier{ ExecutionId: &core.WorkflowExecutionIdentifier{ Project: "project", Domain: "domain", @@ -459,7 +459,7 @@ func TestCreateExecutionFromWorkflowNode(t *testing.T) { err := proto.Unmarshal(input.Spec, &spec) assert.NoError(t, err) assert.Equal(t, admin.ExecutionMetadata_CHILD_WORKFLOW, spec.Metadata.Mode) - assert.True(t, proto.Equal(&parentNodeExecutionID, spec.Metadata.ParentNodeExecution)) + assert.True(t, proto.Equal(parentNodeExecutionID, spec.Metadata.ParentNodeExecution)) assert.EqualValues(t, input.ParentNodeExecutionID, 1) assert.EqualValues(t, input.SourceExecutionID, 2) assert.Equal(t, 2, int(spec.Metadata.Nesting)) @@ -482,7 +482,7 @@ func TestCreateExecutionFromWorkflowNode(t *testing.T) { request := testutils.GetExecutionRequest() request.Spec.Metadata = &admin.ExecutionMetadata{ Mode: admin.ExecutionMetadata_CHILD_WORKFLOW, - ParentNodeExecution: &parentNodeExecutionID, + ParentNodeExecution: parentNodeExecutionID, } response, err := execManager.CreateExecution(context.Background(), request, requestedAt) 
assert.Nil(t, err) @@ -636,7 +636,7 @@ func TestCreateExecutionPropellerFailure(t *testing.T) { r := plugins.NewRegistry() r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &mockExecutor) qosProvider := &runtimeIFaceMocks.QualityOfServiceConfiguration{} - qosProvider.OnGetTierExecutionValues().Return(map[core.QualityOfService_Tier]core.QualityOfServiceSpec{ + qosProvider.OnGetTierExecutionValues().Return(map[core.QualityOfService_Tier]*core.QualityOfServiceSpec{ core.QualityOfService_HIGH: { QueueingBudget: ptypes.DurationProto(10 * time.Minute), }, @@ -863,7 +863,7 @@ func TestCreateExecutionNoNotifications(t *testing.T) { // CreateExecutionRequest. lpSpec := testutils.GetSampleLpSpecForTest() lpSpec.EntityMetadata.Notifications = nil - lpSpecBytes, _ := proto.Marshal(&lpSpec) + lpSpecBytes, _ := proto.Marshal(lpSpec) lpClosure := admin.LaunchPlanClosure{ ExpectedInputs: lpSpec.DefaultInputs, } @@ -1464,7 +1464,7 @@ func TestRelaunchExecution(t *testing.T) { repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback(exCreateFunc) // Issue request. - response, err := execManager.RelaunchExecution(context.Background(), admin.ExecutionRelaunchRequest{ + response, err := execManager.RelaunchExecution(context.Background(), &admin.ExecutionRelaunchRequest{ Id: &core.WorkflowExecutionIdentifier{ Project: "project", Domain: "domain", @@ -1512,7 +1512,7 @@ func TestRelaunchExecution_GetExistingFailure(t *testing.T) { }) // Issue request. - _, err := execManager.RelaunchExecution(context.Background(), admin.ExecutionRelaunchRequest{ + _, err := execManager.RelaunchExecution(context.Background(), &admin.ExecutionRelaunchRequest{ Id: &core.WorkflowExecutionIdentifier{ Project: "project", Domain: "domain", @@ -1553,7 +1553,7 @@ func TestRelaunchExecution_CreateFailure(t *testing.T) { }) // Issue request. 
- _, err := execManager.RelaunchExecution(context.Background(), admin.ExecutionRelaunchRequest{ + _, err := execManager.RelaunchExecution(context.Background(), &admin.ExecutionRelaunchRequest{ Id: &core.WorkflowExecutionIdentifier{ Project: "project", Domain: "domain", @@ -1605,7 +1605,7 @@ func TestRelaunchExecutionInterruptibleOverride(t *testing.T) { } repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback(exCreateFunc) - _, err := execManager.RelaunchExecution(context.Background(), admin.ExecutionRelaunchRequest{ + _, err := execManager.RelaunchExecution(context.Background(), &admin.ExecutionRelaunchRequest{ Id: &core.WorkflowExecutionIdentifier{ Project: "project", Domain: "domain", @@ -1656,7 +1656,7 @@ func TestRelaunchExecutionOverwriteCacheOverride(t *testing.T) { } repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback(exCreateFunc) - asd, err := execManager.RelaunchExecution(context.Background(), admin.ExecutionRelaunchRequest{ + asd, err := execManager.RelaunchExecution(context.Background(), &admin.ExecutionRelaunchRequest{ Id: &core.WorkflowExecutionIdentifier{ Project: "project", Domain: "domain", @@ -1691,7 +1691,7 @@ func TestRelaunchExecutionOverwriteCacheOverride(t *testing.T) { } repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback(exCreateFunc) - asd, err := execManager.RelaunchExecution(context.Background(), admin.ExecutionRelaunchRequest{ + asd, err := execManager.RelaunchExecution(context.Background(), &admin.ExecutionRelaunchRequest{ Id: &core.WorkflowExecutionIdentifier{ Project: "project", Domain: "domain", @@ -1726,7 +1726,7 @@ func TestRelaunchExecutionOverwriteCacheOverride(t *testing.T) { } repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback(exCreateFunc) - asd, err := execManager.RelaunchExecution(context.Background(), admin.ExecutionRelaunchRequest{ + asd, err := execManager.RelaunchExecution(context.Background(), &admin.ExecutionRelaunchRequest{ Id: &core.WorkflowExecutionIdentifier{ Project: "project", Domain: "domain", @@ -1780,7 +1780,7 @@ func TestRelaunchExecutionEnvsOverride(t *testing.T) { } repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback(exCreateFunc) - _, err := execManager.RelaunchExecution(context.Background(), admin.ExecutionRelaunchRequest{ + _, err := execManager.RelaunchExecution(context.Background(), &admin.ExecutionRelaunchRequest{ Id: &core.WorkflowExecutionIdentifier{ Project: "project", Domain: "domain", @@ -1829,7 +1829,7 @@ func TestRecoverExecution(t *testing.T) { repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback(exCreateFunc) // Issue request. 
- response, err := execManager.RecoverExecution(context.Background(), admin.ExecutionRecoverRequest{ + response, err := execManager.RecoverExecution(context.Background(), &admin.ExecutionRecoverRequest{ Id: &core.WorkflowExecutionIdentifier{ Project: "project", Domain: "domain", @@ -1919,7 +1919,7 @@ func TestRecoverExecution_RecoveredChildNode(t *testing.T) { NodeId: "parent", } repository.NodeExecutionRepo().(*repositoryMocks.MockNodeExecutionRepo).SetGetCallback(func(ctx context.Context, input interfaces.NodeExecutionResource) (models.NodeExecution, error) { - assert.True(t, proto.Equal(&parentNodeExecution, &input.NodeExecutionIdentifier)) + assert.True(t, proto.Equal(&parentNodeExecution, input.NodeExecutionIdentifier)) return models.NodeExecution{ BaseModel: models.BaseModel{ @@ -1929,7 +1929,7 @@ func TestRecoverExecution_RecoveredChildNode(t *testing.T) { }) // Issue request. - response, err := execManager.RecoverExecution(context.Background(), admin.ExecutionRecoverRequest{ + response, err := execManager.RecoverExecution(context.Background(), &admin.ExecutionRecoverRequest{ Id: &core.WorkflowExecutionIdentifier{ Project: "project", Domain: "domain", @@ -1977,7 +1977,7 @@ func TestRecoverExecution_GetExistingFailure(t *testing.T) { }) // Issue request. - _, err := execManager.RecoverExecution(context.Background(), admin.ExecutionRecoverRequest{ + _, err := execManager.RecoverExecution(context.Background(), &admin.ExecutionRecoverRequest{ Id: &core.WorkflowExecutionIdentifier{ Project: "project", Domain: "domain", @@ -2016,7 +2016,7 @@ func TestRecoverExecution_GetExistingInputsFailure(t *testing.T) { repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetGetCallback(executionGetFunc) // Issue request. - _, err := execManager.RecoverExecution(context.Background(), admin.ExecutionRecoverRequest{ + _, err := execManager.RecoverExecution(context.Background(), &admin.ExecutionRecoverRequest{ Id: &core.WorkflowExecutionIdentifier{ Project: "project", Domain: "domain", @@ -2069,7 +2069,7 @@ func TestRecoverExecutionInterruptibleOverride(t *testing.T) { repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback(exCreateFunc) // Issue request. - response, err := execManager.RecoverExecution(context.Background(), admin.ExecutionRecoverRequest{ + response, err := execManager.RecoverExecution(context.Background(), &admin.ExecutionRecoverRequest{ Id: &core.WorkflowExecutionIdentifier{ Project: "project", Domain: "domain", @@ -2130,7 +2130,7 @@ func TestRecoverExecutionOverwriteCacheOverride(t *testing.T) { repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback(exCreateFunc) // Issue request. - response, err := execManager.RecoverExecution(context.Background(), admin.ExecutionRecoverRequest{ + response, err := execManager.RecoverExecution(context.Background(), &admin.ExecutionRecoverRequest{ Id: &core.WorkflowExecutionIdentifier{ Project: "project", Domain: "domain", @@ -2192,7 +2192,7 @@ func TestRecoverExecutionEnvsOverride(t *testing.T) { repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback(exCreateFunc) // Issue request. 
- response, err := execManager.RecoverExecution(context.Background(), admin.ExecutionRecoverRequest{ + response, err := execManager.RecoverExecution(context.Background(), &admin.ExecutionRecoverRequest{ Id: &core.WorkflowExecutionIdentifier{ Project: "project", Domain: "domain", @@ -2259,7 +2259,7 @@ func TestCreateWorkflowEvent(t *testing.T) { return nil } repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetUpdateCallback(updateExecutionFunc) - request := admin.WorkflowExecutionEventRequest{ + request := &admin.WorkflowExecutionEventRequest{ RequestId: "1", Event: &event.WorkflowExecutionEvent{ ExecutionId: &executionIdentifier, @@ -2307,7 +2307,7 @@ func TestCreateWorkflowEvent_TerminalState(t *testing.T) { r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &defaultTestExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - resp, err := execManager.CreateWorkflowEvent(context.Background(), admin.WorkflowExecutionEventRequest{ + resp, err := execManager.CreateWorkflowEvent(context.Background(), &admin.WorkflowExecutionEventRequest{ RequestId: "1", Event: &event.WorkflowExecutionEvent{ ExecutionId: &executionIdentifier, @@ -2347,7 +2347,7 @@ func TestCreateWorkflowEvent_NoRunningToQueued(t *testing.T) { r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &defaultTestExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - resp, err := execManager.CreateWorkflowEvent(context.Background(), admin.WorkflowExecutionEventRequest{ + resp, err := execManager.CreateWorkflowEvent(context.Background(), &admin.WorkflowExecutionEventRequest{ RequestId: "1", Event: &event.WorkflowExecutionEvent{ ExecutionId: &executionIdentifier, @@ -2380,7 +2380,7 @@ func TestCreateWorkflowEvent_CurrentlyAborting(t *testing.T) { } repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetUpdateCallback(updateExecutionFunc) - req := admin.WorkflowExecutionEventRequest{ + req := &admin.WorkflowExecutionEventRequest{ RequestId: "1", Event: &event.WorkflowExecutionEvent{ ExecutionId: &executionIdentifier, @@ -2421,7 +2421,7 @@ func TestCreateWorkflowEvent_StartedRunning(t *testing.T) { executionGetFunc := makeExecutionGetFunc(t, closureBytes, nil) repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetGetCallback(executionGetFunc) - closure := admin.ExecutionClosure{ + closure := &admin.ExecutionClosure{ Phase: core.WorkflowExecution_RUNNING, StartedAt: occurredAtProto, UpdatedAt: occurredAtProto, @@ -2430,7 +2430,7 @@ func TestCreateWorkflowEvent_StartedRunning(t *testing.T) { OccurredAt: testutils.MockCreatedAtProto, }, } - closureBytes, _ := proto.Marshal(&closure) + closureBytes, _ := proto.Marshal(closure) updateExecutionFunc := func( context context.Context, execution models.Execution) error { assert.Equal(t, "project", execution.Project) @@ -2448,7 +2448,7 @@ func TestCreateWorkflowEvent_StartedRunning(t *testing.T) { } repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetUpdateCallback(updateExecutionFunc) occurredAtTimestamp, _ := ptypes.TimestampProto(occurredAt) - 
request := admin.WorkflowExecutionEventRequest{ + request := &admin.WorkflowExecutionEventRequest{ RequestId: "1", Event: &event.WorkflowExecutionEvent{ ExecutionId: &executionIdentifier, @@ -2496,7 +2496,7 @@ func TestCreateWorkflowEvent_DuplicateRunning(t *testing.T) { r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &defaultTestExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) occurredAtTimestamp, _ := ptypes.TimestampProto(occurredAt) - resp, err := execManager.CreateWorkflowEvent(context.Background(), admin.WorkflowExecutionEventRequest{ + resp, err := execManager.CreateWorkflowEvent(context.Background(), &admin.WorkflowExecutionEventRequest{ RequestId: "1", Event: &event.WorkflowExecutionEvent{ ExecutionId: &executionIdentifier, @@ -2539,7 +2539,7 @@ func TestCreateWorkflowEvent_InvalidPhaseChange(t *testing.T) { r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &defaultTestExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) occurredAtTimestamp, _ := ptypes.TimestampProto(occurredAt) - resp, err := execManager.CreateWorkflowEvent(context.Background(), admin.WorkflowExecutionEventRequest{ + resp, err := execManager.CreateWorkflowEvent(context.Background(), &admin.WorkflowExecutionEventRequest{ RequestId: "1", Event: &event.WorkflowExecutionEvent{ ExecutionId: &executionIdentifier, @@ -2592,7 +2592,7 @@ func TestCreateWorkflowEvent_ClusterReassignmentOnQueued(t *testing.T) { occurredAtTimestamp, _ := ptypes.TimestampProto(occurredAt) mockDbEventWriter := &eventWriterMocks.WorkflowExecutionEventWriter{} - request := admin.WorkflowExecutionEventRequest{ + request := &admin.WorkflowExecutionEventRequest{ RequestId: "1", Event: &event.WorkflowExecutionEvent{ ExecutionId: &executionIdentifier, @@ -2628,7 +2628,7 @@ func TestCreateWorkflowEvent_InvalidEvent(t *testing.T) { r := plugins.NewRegistry() r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &defaultTestExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - resp, err := execManager.CreateWorkflowEvent(context.Background(), admin.WorkflowExecutionEventRequest{ + resp, err := execManager.CreateWorkflowEvent(context.Background(), &admin.WorkflowExecutionEventRequest{ RequestId: "1", Event: &event.WorkflowExecutionEvent{ ExecutionId: &executionIdentifier, @@ -2658,7 +2658,7 @@ func TestCreateWorkflowEvent_UpdateModelError(t *testing.T) { r := plugins.NewRegistry() r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &defaultTestExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - resp, err := 
execManager.CreateWorkflowEvent(context.Background(), admin.WorkflowExecutionEventRequest{ + resp, err := execManager.CreateWorkflowEvent(context.Background(), &admin.WorkflowExecutionEventRequest{ RequestId: "1", Event: &event.WorkflowExecutionEvent{ ExecutionId: &executionIdentifier, @@ -2693,7 +2693,7 @@ func TestCreateWorkflowEvent_DatabaseGetError(t *testing.T) { r := plugins.NewRegistry() r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &defaultTestExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - resp, err := execManager.CreateWorkflowEvent(context.Background(), admin.WorkflowExecutionEventRequest{ + resp, err := execManager.CreateWorkflowEvent(context.Background(), &admin.WorkflowExecutionEventRequest{ RequestId: "1", Event: &event.WorkflowExecutionEvent{ ExecutionId: &executionIdentifier, @@ -2729,7 +2729,7 @@ func TestCreateWorkflowEvent_DatabaseUpdateError(t *testing.T) { r := plugins.NewRegistry() r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &defaultTestExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - resp, err := execManager.CreateWorkflowEvent(context.Background(), admin.WorkflowExecutionEventRequest{ + resp, err := execManager.CreateWorkflowEvent(context.Background(), &admin.WorkflowExecutionEventRequest{ RequestId: "1", Event: &event.WorkflowExecutionEvent{ ExecutionId: &executionIdentifier, @@ -2775,7 +2775,7 @@ func TestCreateWorkflowEvent_IncompatibleCluster(t *testing.T) { r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &defaultTestExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) occurredAtTimestamp, _ := ptypes.TimestampProto(occurredAt) - resp, err := execManager.CreateWorkflowEvent(context.Background(), admin.WorkflowExecutionEventRequest{ + resp, err := execManager.CreateWorkflowEvent(context.Background(), &admin.WorkflowExecutionEventRequest{ RequestId: "1", Event: &event.WorkflowExecutionEvent{ ExecutionId: &executionIdentifier, @@ -2832,7 +2832,7 @@ func TestGetExecution(t *testing.T) { r := plugins.NewRegistry() r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &defaultTestExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - execution, err := execManager.GetExecution(context.Background(), admin.WorkflowExecutionGetRequest{ + execution, err := execManager.GetExecution(context.Background(), &admin.WorkflowExecutionGetRequest{ Id: &executionIdentifier, }) assert.NoError(t, err) @@ -2855,7 +2855,7 @@ func TestGetExecution_DatabaseError(t *testing.T) { r := plugins.NewRegistry() r.RegisterDefault(plugins.PluginIDWorkflowExecutor, 
&defaultTestExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - execution, err := execManager.GetExecution(context.Background(), admin.WorkflowExecutionGetRequest{ + execution, err := execManager.GetExecution(context.Background(), &admin.WorkflowExecutionGetRequest{ Id: &executionIdentifier, }) assert.Nil(t, execution) @@ -2887,7 +2887,7 @@ func TestGetExecution_TransformerError(t *testing.T) { r := plugins.NewRegistry() r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &defaultTestExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - execution, err := execManager.GetExecution(context.Background(), admin.WorkflowExecutionGetRequest{ + execution, err := execManager.GetExecution(context.Background(), &admin.WorkflowExecutionGetRequest{ Id: &executionIdentifier, }) assert.Nil(t, execution) @@ -2900,7 +2900,7 @@ func TestUpdateExecution(t *testing.T) { r := plugins.NewRegistry() r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &defaultTestExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - _, err := execManager.UpdateExecution(context.Background(), admin.ExecutionUpdateRequest{ + _, err := execManager.UpdateExecution(context.Background(), &admin.ExecutionUpdateRequest{ Id: &core.WorkflowExecutionIdentifier{ Project: "project", Domain: "domain", @@ -2922,7 +2922,7 @@ func TestUpdateExecution(t *testing.T) { r := plugins.NewRegistry() r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &defaultTestExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - updateResponse, err := execManager.UpdateExecution(context.Background(), admin.ExecutionUpdateRequest{ + updateResponse, err := execManager.UpdateExecution(context.Background(), &admin.ExecutionUpdateRequest{ Id: &executionIdentifier, }, time.Now()) assert.NoError(t, err) @@ -2943,7 +2943,7 @@ func TestUpdateExecution(t *testing.T) { r := plugins.NewRegistry() r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &defaultTestExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - updateResponse, err := execManager.UpdateExecution(context.Background(), admin.ExecutionUpdateRequest{ + updateResponse, err := execManager.UpdateExecution(context.Background(), &admin.ExecutionUpdateRequest{ Id: &executionIdentifier, State: admin.ExecutionState_EXECUTION_ARCHIVED, }, time.Now()) @@ -2961,7 +2961,7 @@ func 
TestUpdateExecution(t *testing.T) { r := plugins.NewRegistry() r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &defaultTestExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - _, err := execManager.UpdateExecution(context.Background(), admin.ExecutionUpdateRequest{ + _, err := execManager.UpdateExecution(context.Background(), &admin.ExecutionUpdateRequest{ Id: &executionIdentifier, State: admin.ExecutionState_EXECUTION_ARCHIVED, }, time.Now()) @@ -2978,7 +2978,7 @@ func TestUpdateExecution(t *testing.T) { r := plugins.NewRegistry() r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &defaultTestExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - _, err := execManager.UpdateExecution(context.Background(), admin.ExecutionUpdateRequest{ + _, err := execManager.UpdateExecution(context.Background(), &admin.ExecutionUpdateRequest{ Id: &executionIdentifier, State: admin.ExecutionState_EXECUTION_ARCHIVED, }, time.Now()) @@ -3049,7 +3049,7 @@ func TestListExecutions(t *testing.T) { r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &defaultTestExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - executionList, err := execManager.ListExecutions(context.Background(), admin.ResourceListRequest{ + executionList, err := execManager.ListExecutions(context.Background(), &admin.ResourceListRequest{ Id: &admin.NamedEntityIdentifier{ Project: projectValue, Domain: domainValue, @@ -3081,7 +3081,7 @@ func TestListExecutions_MissingParameters(t *testing.T) { r := plugins.NewRegistry() r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &defaultTestExecutor) execManager := NewExecutionManager(repositoryMocks.NewMockRepository(), r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - _, err := execManager.ListExecutions(context.Background(), admin.ResourceListRequest{ + _, err := execManager.ListExecutions(context.Background(), &admin.ResourceListRequest{ Id: &admin.NamedEntityIdentifier{ Domain: domainValue, }, @@ -3090,7 +3090,7 @@ func TestListExecutions_MissingParameters(t *testing.T) { assert.Error(t, err) assert.Equal(t, codes.InvalidArgument, err.(flyteAdminErrors.FlyteAdminError).Code()) - _, err = execManager.ListExecutions(context.Background(), admin.ResourceListRequest{ + _, err = execManager.ListExecutions(context.Background(), &admin.ResourceListRequest{ Id: &admin.NamedEntityIdentifier{ Project: projectValue, }, @@ -3099,7 +3099,7 @@ func TestListExecutions_MissingParameters(t *testing.T) { assert.Error(t, err) assert.Equal(t, codes.InvalidArgument, err.(flyteAdminErrors.FlyteAdminError).Code()) - _, err = 
execManager.ListExecutions(context.Background(), admin.ResourceListRequest{ + _, err = execManager.ListExecutions(context.Background(), &admin.ResourceListRequest{ Id: &admin.NamedEntityIdentifier{ Project: projectValue, Domain: domainValue, @@ -3120,7 +3120,7 @@ func TestListExecutions_DatabaseError(t *testing.T) { r := plugins.NewRegistry() r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &defaultTestExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - _, err := execManager.ListExecutions(context.Background(), admin.ResourceListRequest{ + _, err := execManager.ListExecutions(context.Background(), &admin.ResourceListRequest{ Id: &admin.NamedEntityIdentifier{ Project: projectValue, Domain: domainValue, @@ -3154,7 +3154,7 @@ func TestListExecutions_TransformerError(t *testing.T) { r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &defaultTestExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - executionList, err := execManager.ListExecutions(context.Background(), admin.ResourceListRequest{ + executionList, err := execManager.ListExecutions(context.Background(), &admin.ResourceListRequest{ Id: &admin.NamedEntityIdentifier{ Project: projectValue, Domain: domainValue, @@ -3192,7 +3192,7 @@ func TestExecutionManager_PublishNotifications(t *testing.T) { } // Currently this doesn't do anything special as the code to invoke pushing to SNS isn't enabled yet. // This sets up the skeleton for it and appeases the go lint overlords. 
- workflowRequest := admin.WorkflowExecutionEventRequest{ + workflowRequest := &admin.WorkflowExecutionEventRequest{ Event: &event.WorkflowExecutionEvent{ Phase: core.WorkflowExecution_FAILED, OutputResult: &event.WorkflowExecutionEvent_Error{ @@ -3204,7 +3204,7 @@ func TestExecutionManager_PublishNotifications(t *testing.T) { ExecutionId: &executionIdentifier, }, } - var execClosure = admin.ExecutionClosure{ + var execClosure = &admin.ExecutionClosure{ Notifications: testutils.GetExecutionRequest().Spec.GetNotifications().Notifications, WorkflowId: &core.Identifier{ ResourceType: core.ResourceType_WORKFLOW, @@ -3244,7 +3244,7 @@ func TestExecutionManager_PublishNotifications(t *testing.T) { execClosure.Notifications = append(execClosure.Notifications, extraNotifications[0]) execClosure.Notifications = append(execClosure.Notifications, extraNotifications[1]) - execClosureBytes, _ := proto.Marshal(&execClosure) + execClosureBytes, _ := proto.Marshal(execClosure) executionModel := models.Execution{ ExecutionKey: models.ExecutionKey{ Project: "project", @@ -3273,7 +3273,7 @@ func TestExecutionManager_PublishNotificationsTransformError(t *testing.T) { notificationClient: &mockPublisher, } - workflowRequest := admin.WorkflowExecutionEventRequest{ + workflowRequest := &admin.WorkflowExecutionEventRequest{ Event: &event.WorkflowExecutionEvent{ Phase: core.WorkflowExecution_FAILED, OutputResult: &event.WorkflowExecutionEvent_Error{ @@ -3331,7 +3331,7 @@ func TestExecutionManager_TestExecutionManager_PublishNotificationsTransformErro } // Currently this doesn't do anything special as the code to invoke pushing to SNS isn't enabled yet. // This sets up the skeleton for it and appeases the go lint overlords. - workflowRequest := admin.WorkflowExecutionEventRequest{ + workflowRequest := &admin.WorkflowExecutionEventRequest{ Event: &event.WorkflowExecutionEvent{ Phase: core.WorkflowExecution_FAILED, OutputResult: &event.WorkflowExecutionEvent_Error{ @@ -3343,7 +3343,7 @@ func TestExecutionManager_TestExecutionManager_PublishNotificationsTransformErro ExecutionId: &executionIdentifier, }, } - var execClosure = admin.ExecutionClosure{ + var execClosure = &admin.ExecutionClosure{ Notifications: testutils.GetExecutionRequest().Spec.GetNotifications().Notifications, WorkflowId: &core.Identifier{ ResourceType: core.ResourceType_WORKFLOW, @@ -3353,7 +3353,7 @@ func TestExecutionManager_TestExecutionManager_PublishNotificationsTransformErro Version: "wf_version", }, } - execClosureBytes, _ := proto.Marshal(&execClosure) + execClosureBytes, _ := proto.Marshal(execClosure) executionModel := models.Execution{ ExecutionKey: models.ExecutionKey{ Project: "project", @@ -3385,7 +3385,7 @@ func TestExecutionManager_PublishNotificationsNoPhaseMatch(t *testing.T) { } // Currently this doesn't do anything special as the code to invoke pushing to SNS isn't enabled yet. // This sets up the skeleton for it and appeases the go lint overlords. 
- workflowRequest := admin.WorkflowExecutionEventRequest{ + workflowRequest := &admin.WorkflowExecutionEventRequest{ Event: &event.WorkflowExecutionEvent{ Phase: core.WorkflowExecution_SUCCEEDED, OutputResult: &event.WorkflowExecutionEvent_OutputUri{ @@ -3394,10 +3394,10 @@ func TestExecutionManager_PublishNotificationsNoPhaseMatch(t *testing.T) { ExecutionId: &executionIdentifier, }, } - var execClosure = admin.ExecutionClosure{ + var execClosure = &admin.ExecutionClosure{ Notifications: testutils.GetExecutionRequest().Spec.GetNotifications().Notifications, } - execClosureBytes, _ := proto.Marshal(&execClosure) + execClosureBytes, _ := proto.Marshal(execClosure) executionModel := models.Execution{ ExecutionKey: models.ExecutionKey{ Project: "project", @@ -3460,7 +3460,7 @@ func TestTerminateExecution(t *testing.T) { identity, err := auth.NewIdentityContext("", principal, "", time.Now(), sets.NewString(), nil, nil) assert.NoError(t, err) ctx := identity.WithContext(context.Background()) - resp, err := execManager.TerminateExecution(ctx, admin.ExecutionTerminateRequest{ + resp, err := execManager.TerminateExecution(ctx, &admin.ExecutionTerminateRequest{ Id: &core.WorkflowExecutionIdentifier{ Project: "project", Domain: "domain", @@ -3492,7 +3492,7 @@ func TestTerminateExecution_PropellerError(t *testing.T) { }) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - resp, err := execManager.TerminateExecution(context.Background(), admin.ExecutionTerminateRequest{ + resp, err := execManager.TerminateExecution(context.Background(), &admin.ExecutionTerminateRequest{ Id: &core.WorkflowExecutionIdentifier{ Project: "project", Domain: "domain", @@ -3523,7 +3523,7 @@ func TestTerminateExecution_DatabaseError(t *testing.T) { r := plugins.NewRegistry() r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &mockExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - resp, err := execManager.TerminateExecution(context.Background(), admin.ExecutionTerminateRequest{ + resp, err := execManager.TerminateExecution(context.Background(), &admin.ExecutionTerminateRequest{ Id: &core.WorkflowExecutionIdentifier{ Project: "project", Domain: "domain", @@ -3553,7 +3553,7 @@ func TestTerminateExecution_AlreadyTerminated(t *testing.T) { }, nil }) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - resp, err := execManager.TerminateExecution(context.Background(), admin.ExecutionTerminateRequest{ + resp, err := execManager.TerminateExecution(context.Background(), &admin.ExecutionTerminateRequest{ Id: &core.WorkflowExecutionIdentifier{ Project: "project", Domain: "domain", @@ -3601,20 +3601,20 @@ func TestGetExecutionData(t *testing.T) { } mockExecutionRemoteURL := dataMocks.NewMockRemoteURL() mockExecutionRemoteURL.(*dataMocks.MockRemoteURL).GetCallback = func( - ctx context.Context, uri 
string) (admin.UrlBlob, error) { + ctx context.Context, uri string) (*admin.UrlBlob, error) { if uri == outputURI { - return admin.UrlBlob{ + return &admin.UrlBlob{ Url: "outputs", Bytes: 200, }, nil } else if strings.HasSuffix(uri, shared.Inputs) { - return admin.UrlBlob{ + return &admin.UrlBlob{ Url: "inputs", Bytes: 200, }, nil } - return admin.UrlBlob{}, errors.New("unexpected input") + return &admin.UrlBlob{}, errors.New("unexpected input") } mockStorage := commonMocks.GetMockStorageClient() fullInputs := &core.LiteralMap{ @@ -3645,7 +3645,7 @@ func TestGetExecutionData(t *testing.T) { r := plugins.NewRegistry() r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &defaultTestExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), mockStorage, mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - dataResponse, err := execManager.GetExecutionData(context.Background(), admin.WorkflowExecutionGetDataRequest{ + dataResponse, err := execManager.GetExecutionData(context.Background(), &admin.WorkflowExecutionGetDataRequest{ Id: &executionIdentifier, }) assert.Nil(t, err) @@ -3777,7 +3777,7 @@ func TestGetExecution_Legacy(t *testing.T) { r := plugins.NewRegistry() r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &defaultTestExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - execution, err := execManager.GetExecution(context.Background(), admin.WorkflowExecutionGetRequest{ + execution, err := execManager.GetExecution(context.Background(), &admin.WorkflowExecutionGetRequest{ Id: &executionIdentifier, }) assert.NoError(t, err) @@ -3819,20 +3819,20 @@ func TestGetExecutionData_LegacyModel(t *testing.T) { } mockExecutionRemoteURL := dataMocks.NewMockRemoteURL() mockExecutionRemoteURL.(*dataMocks.MockRemoteURL).GetCallback = func( - ctx context.Context, uri string) (admin.UrlBlob, error) { + ctx context.Context, uri string) (*admin.UrlBlob, error) { if uri == outputURI { - return admin.UrlBlob{ + return &admin.UrlBlob{ Url: "outputs", Bytes: 200, }, nil } else if strings.HasSuffix(uri, shared.Inputs) { - return admin.UrlBlob{ + return &admin.UrlBlob{ Url: "inputs", Bytes: 200, }, nil } - return admin.UrlBlob{}, errors.New("unexpected input") + return &admin.UrlBlob{}, errors.New("unexpected input") } repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetGetCallback(executionGetFunc) @@ -3840,7 +3840,7 @@ func TestGetExecutionData_LegacyModel(t *testing.T) { r := plugins.NewRegistry() r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &defaultTestExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), storageClient, mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - dataResponse, err := execManager.GetExecutionData(context.Background(), admin.WorkflowExecutionGetDataRequest{ + dataResponse, err := execManager.GetExecutionData(context.Background(), &admin.WorkflowExecutionGetDataRequest{ Id: &executionIdentifier, }) assert.Nil(t, err) @@ -3888,7 +3888,7 @@ func TestCreateExecution_LegacyClient(t *testing.T) { r := plugins.NewRegistry() 
r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &mockExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - response, err := execManager.CreateExecution(context.Background(), *getLegacyExecutionRequest(), requestedAt) + response, err := execManager.CreateExecution(context.Background(), getLegacyExecutionRequest(), requestedAt) assert.Nil(t, err) expectedResponse := &admin.ExecutionCreateResponse{ @@ -3939,7 +3939,7 @@ func TestRelaunchExecution_LegacyModel(t *testing.T) { repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetCreateCallback(exCreateFunc) // Issue request. - response, err := execManager.RelaunchExecution(context.Background(), admin.ExecutionRelaunchRequest{ + response, err := execManager.RelaunchExecution(context.Background(), &admin.ExecutionRelaunchRequest{ Id: &core.WorkflowExecutionIdentifier{ Project: "project", Domain: "domain", @@ -4031,7 +4031,7 @@ func TestListExecutions_LegacyModel(t *testing.T) { r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &defaultTestExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - executionList, err := execManager.ListExecutions(context.Background(), admin.ResourceListRequest{ + executionList, err := execManager.ListExecutions(context.Background(), &admin.ResourceListRequest{ Id: &admin.NamedEntityIdentifier{ Project: projectValue, Domain: domainValue, @@ -4494,7 +4494,7 @@ func TestCreateSingleTaskExecution(t *testing.T) { r := plugins.NewRegistry() r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &mockExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), mockStorage, mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, workflowManager, namedEntityManager, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) - request := admin.ExecutionCreateRequest{ + request := &admin.ExecutionCreateRequest{ Project: "flytekit", Domain: "production", Name: "singletaskexec", @@ -4521,7 +4521,7 @@ func TestCreateSingleTaskExecution(t *testing.T) { } marshaller := jsonpb.Marshaler{} - _, ferr := marshaller.MarshalToString(&request) + _, ferr := marshaller.MarshalToString(request) assert.NoError(t, ferr) // test once to create an initial launchplan @@ -5757,7 +5757,7 @@ func TestQueryTemplate(t *testing.T) { } p := &core.Partitions{Value: pMap} - q := core.ArtifactQuery{ + q := &core.ArtifactQuery{ Identifier: &core.ArtifactQuery_ArtifactId{ ArtifactId: &core.ArtifactID{ ArtifactKey: ak, @@ -5769,7 +5769,7 @@ func TestQueryTemplate(t *testing.T) { filledQuery, err := m.fillInTemplateArgs(ctx, q, otherInputs.Literals) assert.NoError(t, err) - assert.True(t, proto.Equal(&q, &filledQuery)) + assert.True(t, proto.Equal(q, filledQuery)) }) t.Run("template date-times, both in explicit tp and not", func(t *testing.T) { @@ -5779,7 +5779,7 @@ func TestQueryTemplate(t *testing.T) { } p := &core.Partitions{Value: pMap} - q := core.ArtifactQuery{ + q := &core.ArtifactQuery{ Identifier: &core.ArtifactQuery_ArtifactId{ ArtifactId: &core.ArtifactID{ ArtifactKey: 
ak, @@ -5803,7 +5803,7 @@ func TestQueryTemplate(t *testing.T) { } p := &core.Partitions{Value: pMap} - q := core.ArtifactQuery{ + q := &core.ArtifactQuery{ Identifier: &core.ArtifactQuery_ArtifactId{ ArtifactId: &core.ArtifactID{ ArtifactKey: ak, diff --git a/flyteadmin/pkg/manager/impl/executions/quality_of_service_test.go b/flyteadmin/pkg/manager/impl/executions/quality_of_service_test.go index 86763a672d..41a04ec2bc 100644 --- a/flyteadmin/pkg/manager/impl/executions/quality_of_service_test.go +++ b/flyteadmin/pkg/manager/impl/executions/quality_of_service_test.go @@ -38,7 +38,7 @@ func getQualityOfServiceWithDuration(duration time.Duration) *core.QualityOfServ func getMockConfig() runtimeInterfaces.Configuration { mockConfig := mocks.NewMockConfigurationProvider(nil, nil, nil, nil, nil, nil) provider := &runtimeIFaceMocks.QualityOfServiceConfiguration{} - provider.OnGetTierExecutionValues().Return(map[core.QualityOfService_Tier]core.QualityOfServiceSpec{ + provider.OnGetTierExecutionValues().Return(map[core.QualityOfService_Tier]*core.QualityOfServiceSpec{ core.QualityOfService_HIGH: { QueueingBudget: ptypes.DurationProto(10 * time.Minute), }, diff --git a/flyteadmin/pkg/manager/impl/executions/queues.go b/flyteadmin/pkg/manager/impl/executions/queues.go index 5e4706700c..90a5951a33 100644 --- a/flyteadmin/pkg/manager/impl/executions/queues.go +++ b/flyteadmin/pkg/manager/impl/executions/queues.go @@ -25,7 +25,7 @@ type queues = []singleQueueConfiguration type queueConfig = map[tag]queues type QueueAllocator interface { - GetQueue(ctx context.Context, identifier core.Identifier) singleQueueConfiguration + GetQueue(ctx context.Context, identifier *core.Identifier) singleQueueConfiguration } type queueAllocatorImpl struct { @@ -52,7 +52,7 @@ func (q *queueAllocatorImpl) refreshExecutionQueues(executionQueues []runtimeInt q.queueConfigMap = queueConfigMap } -func (q *queueAllocatorImpl) GetQueue(ctx context.Context, identifier core.Identifier) singleQueueConfiguration { +func (q *queueAllocatorImpl) GetQueue(ctx context.Context, identifier *core.Identifier) singleQueueConfiguration { // NOTE: If refreshing the execution queues & workflow configs on every call to GetQueue becomes too slow we should // investigate caching the computed queue assignments. 
executionQueues := q.config.QueueConfiguration().GetExecutionQueues() diff --git a/flyteadmin/pkg/manager/impl/executions/queues_test.go b/flyteadmin/pkg/manager/impl/executions/queues_test.go index 808a482fd3..baa1bef9b9 100644 --- a/flyteadmin/pkg/manager/impl/executions/queues_test.go +++ b/flyteadmin/pkg/manager/impl/executions/queues_test.go @@ -67,22 +67,22 @@ func TestGetQueue(t *testing.T) { queueConfig := singleQueueConfiguration{ DynamicQueue: "queue dynamic", } - assert.Equal(t, queueConfig, queueAllocator.GetQueue(context.Background(), core.Identifier{ + assert.Equal(t, queueConfig, queueAllocator.GetQueue(context.Background(), &core.Identifier{ Project: "project", Domain: "domain", Name: "name", })) - assert.EqualValues(t, singleQueueConfiguration{}, queueAllocator.GetQueue(context.Background(), core.Identifier{ + assert.EqualValues(t, singleQueueConfiguration{}, queueAllocator.GetQueue(context.Background(), &core.Identifier{ Project: "project", Domain: "domain", Name: "name2", })) - assert.EqualValues(t, singleQueueConfiguration{}, queueAllocator.GetQueue(context.Background(), core.Identifier{ + assert.EqualValues(t, singleQueueConfiguration{}, queueAllocator.GetQueue(context.Background(), &core.Identifier{ Project: "project", Domain: "domain2", Name: "name", })) - assert.EqualValues(t, singleQueueConfiguration{}, queueAllocator.GetQueue(context.Background(), core.Identifier{ + assert.EqualValues(t, singleQueueConfiguration{}, queueAllocator.GetQueue(context.Background(), &core.Identifier{ Project: "project2", Domain: "domain", Name: "name", @@ -174,7 +174,7 @@ func TestGetQueueDefaults(t *testing.T) { assert.Equal(t, singleQueueConfiguration{ DynamicQueue: "default dynamic", }, queueAllocator.GetQueue( - context.Background(), core.Identifier{ + context.Background(), &core.Identifier{ Project: "unmatched", Domain: "domain", Name: "workflow", @@ -182,7 +182,7 @@ func TestGetQueueDefaults(t *testing.T) { assert.EqualValues(t, singleQueueConfiguration{ DynamicQueue: "queue1 dynamic", }, queueAllocator.GetQueue( - context.Background(), core.Identifier{ + context.Background(), &core.Identifier{ Project: "project", Domain: "UNMATCHED", Name: "workflow", @@ -190,7 +190,7 @@ func TestGetQueueDefaults(t *testing.T) { assert.EqualValues(t, singleQueueConfiguration{ DynamicQueue: "queue2 dynamic", }, queueAllocator.GetQueue( - context.Background(), core.Identifier{ + context.Background(), &core.Identifier{ Project: "project", Domain: "domain", Name: "UNMATCHED", @@ -198,7 +198,7 @@ func TestGetQueueDefaults(t *testing.T) { assert.Equal(t, singleQueueConfiguration{ DynamicQueue: "queue3 dynamic", }, queueAllocator.GetQueue( - context.Background(), core.Identifier{ + context.Background(), &core.Identifier{ Project: "project", Domain: "domain", Name: "workflow", diff --git a/flyteadmin/pkg/manager/impl/launch_plan_manager.go b/flyteadmin/pkg/manager/impl/launch_plan_manager.go index 57936313e5..74f0571f86 100644 --- a/flyteadmin/pkg/manager/impl/launch_plan_manager.go +++ b/flyteadmin/pkg/manager/impl/launch_plan_manager.go @@ -52,15 +52,15 @@ func (m *LaunchPlanManager) getNamedEntityContext(ctx context.Context, identifie func (m *LaunchPlanManager) CreateLaunchPlan( ctx context.Context, - request admin.LaunchPlanCreateRequest) (*admin.LaunchPlanCreateResponse, error) { + request *admin.LaunchPlanCreateRequest) (*admin.LaunchPlanCreateResponse, error) { if err := validation.ValidateIdentifier(request.GetSpec().GetWorkflowId(), common.Workflow); err != nil { logger.Debugf(ctx, "Failed to 
validate provided workflow ID for CreateLaunchPlan with err: %v", err) return nil, err } - workflowModel, err := util.GetWorkflowModel(ctx, m.db, *request.Spec.WorkflowId) + workflowModel, err := util.GetWorkflowModel(ctx, m.db, request.Spec.WorkflowId) if err != nil { logger.Debugf(ctx, "Failed to get workflow with id [%+v] for CreateLaunchPlan with id [%+v] with err %v", - *request.Spec.WorkflowId, request.Id) + request.Spec.WorkflowId, request.Id) return nil, err } var workflowInterface core.TypedInterface @@ -69,7 +69,7 @@ func (m *LaunchPlanManager) CreateLaunchPlan( if err != nil { logger.Errorf(ctx, "Failed to unmarshal TypedInterface for workflow [%+v] with err: %v", - *request.Spec.WorkflowId, err) + request.Spec.WorkflowId, err) return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to unmarshal workflow inputs") } } @@ -79,16 +79,16 @@ func (m *LaunchPlanManager) CreateLaunchPlan( } ctx = getLaunchPlanContext(ctx, request.Id) launchPlan := transformers.CreateLaunchPlan(request, workflowInterface.Outputs) - launchPlanDigest, err := util.GetLaunchPlanDigest(ctx, &launchPlan) + launchPlanDigest, err := util.GetLaunchPlanDigest(ctx, launchPlan) if err != nil { logger.Errorf(ctx, "failed to compute launch plan digest for [%+v] with err: %v", launchPlan.Id, err) return nil, err } - existingLaunchPlanModel, err := util.GetLaunchPlanModel(ctx, m.db, *request.Id) + existingLaunchPlanModel, err := util.GetLaunchPlanModel(ctx, m.db, request.Id) if err == nil { if bytes.Equal(existingLaunchPlanModel.Digest, launchPlanDigest) { - return nil, errors.NewLaunchPlanExistsIdenticalStructureError(ctx, &request) + return nil, errors.NewLaunchPlanExistsIdenticalStructureError(ctx, request) } existingLaunchPlan, transformerErr := transformers.FromLaunchPlanModel(existingLaunchPlanModel) if transformerErr != nil { @@ -96,7 +96,7 @@ func (m *LaunchPlanManager) CreateLaunchPlan( return nil, transformerErr } // A launch plan exists with different structure - return nil, errors.NewLaunchPlanExistsDifferentStructureError(ctx, &request, existingLaunchPlan.Spec, launchPlan.Spec) + return nil, errors.NewLaunchPlanExistsDifferentStructureError(ctx, request, existingLaunchPlan.Spec, launchPlan.Spec) } launchPlanModel, err := @@ -138,7 +138,7 @@ func (m *LaunchPlanManager) updateLaunchPlanModelState(launchPlan *models.Launch return nil } -func isScheduleEmpty(launchPlanSpec admin.LaunchPlanSpec) bool { +func isScheduleEmpty(launchPlanSpec *admin.LaunchPlanSpec) bool { schedule := launchPlanSpec.GetEntityMetadata().GetSchedule() if schedule == nil { return true @@ -155,8 +155,8 @@ func isScheduleEmpty(launchPlanSpec admin.LaunchPlanSpec) bool { return true } -func (m *LaunchPlanManager) enableSchedule(ctx context.Context, launchPlanIdentifier core.Identifier, - launchPlanSpec admin.LaunchPlanSpec) error { +func (m *LaunchPlanManager) enableSchedule(ctx context.Context, launchPlanIdentifier *core.Identifier, + launchPlanSpec *admin.LaunchPlanSpec) error { addScheduleInput, err := m.scheduler.CreateScheduleInput(ctx, m.config.ApplicationConfiguration().GetSchedulerConfig(), launchPlanIdentifier, @@ -169,7 +169,7 @@ func (m *LaunchPlanManager) enableSchedule(ctx context.Context, launchPlanIdenti } func (m *LaunchPlanManager) disableSchedule( - ctx context.Context, launchPlanIdentifier core.Identifier) error { + ctx context.Context, launchPlanIdentifier *core.Identifier) error { return m.scheduler.RemoveSchedule(ctx, scheduleInterfaces.RemoveScheduleInput{ Identifier: launchPlanIdentifier, ScheduleNamePrefix: 
m.config.ApplicationConfiguration().GetSchedulerConfig().EventSchedulerConfig.ScheduleNamePrefix, @@ -178,21 +178,21 @@ func (m *LaunchPlanManager) disableSchedule( func (m *LaunchPlanManager) updateSchedules( ctx context.Context, newlyActiveLaunchPlan models.LaunchPlan, formerlyActiveLaunchPlan *models.LaunchPlan) error { - var newlyActiveLaunchPlanSpec admin.LaunchPlanSpec - err := proto.Unmarshal(newlyActiveLaunchPlan.Spec, &newlyActiveLaunchPlanSpec) + newlyActiveLaunchPlanSpec := &admin.LaunchPlanSpec{} + err := proto.Unmarshal(newlyActiveLaunchPlan.Spec, newlyActiveLaunchPlanSpec) if err != nil { logger.Errorf(ctx, "failed to unmarshal newly enabled launch plan spec") return errors.NewFlyteAdminErrorf(codes.Internal, "failed to unmarshal newly enabled launch plan spec") } - launchPlanIdentifier := core.Identifier{ + launchPlanIdentifier := &core.Identifier{ Project: newlyActiveLaunchPlan.Project, Domain: newlyActiveLaunchPlan.Domain, Name: newlyActiveLaunchPlan.Name, Version: newlyActiveLaunchPlan.Version, } - var formerlyActiveLaunchPlanSpec admin.LaunchPlanSpec + formerlyActiveLaunchPlanSpec := &admin.LaunchPlanSpec{} if formerlyActiveLaunchPlan != nil { - err = proto.Unmarshal(formerlyActiveLaunchPlan.Spec, &formerlyActiveLaunchPlanSpec) + err = proto.Unmarshal(formerlyActiveLaunchPlan.Spec, formerlyActiveLaunchPlanSpec) if err != nil { return errors.NewFlyteAdminErrorf(codes.Internal, "failed to unmarshal formerly enabled launch plan spec") } @@ -200,7 +200,7 @@ func (m *LaunchPlanManager) updateSchedules( if !isScheduleEmpty(formerlyActiveLaunchPlanSpec) { // Disable previous schedule - formerlyActiveLaunchPlanIdentifier := core.Identifier{ + formerlyActiveLaunchPlanIdentifier := &core.Identifier{ Project: formerlyActiveLaunchPlan.Project, Domain: formerlyActiveLaunchPlan.Domain, Name: formerlyActiveLaunchPlan.Name, @@ -221,13 +221,13 @@ func (m *LaunchPlanManager) updateSchedules( return nil } -func (m *LaunchPlanManager) disableLaunchPlan(ctx context.Context, request admin.LaunchPlanUpdateRequest) ( +func (m *LaunchPlanManager) disableLaunchPlan(ctx context.Context, request *admin.LaunchPlanUpdateRequest) ( *admin.LaunchPlanUpdateResponse, error) { if err := validation.ValidateIdentifier(request.Id, common.LaunchPlan); err != nil { logger.Debugf(ctx, "can't disable launch plan [%+v] with invalid identifier: %v", request.Id, err) return nil, err } - launchPlanModel, err := util.GetLaunchPlanModel(ctx, m.db, *request.Id) + launchPlanModel, err := util.GetLaunchPlanModel(ctx, m.db, request.Id) if err != nil { logger.Debugf(ctx, "couldn't find launch plan [%+v] to disable with err: %v", request.Id, err) return nil, err @@ -247,7 +247,7 @@ func (m *LaunchPlanManager) disableLaunchPlan(ctx context.Context, request admin "failed to unmarshal launch plan spec when disabling schedule for %+v", request.Id) } if launchPlanSpec.EntityMetadata != nil && launchPlanSpec.EntityMetadata.Schedule != nil { - err = m.disableSchedule(ctx, core.Identifier{ + err = m.disableSchedule(ctx, &core.Identifier{ Project: launchPlanModel.Project, Domain: launchPlanModel.Domain, Name: launchPlanModel.Name, @@ -266,7 +266,7 @@ func (m *LaunchPlanManager) disableLaunchPlan(ctx context.Context, request admin return &admin.LaunchPlanUpdateResponse{}, nil } -func (m *LaunchPlanManager) enableLaunchPlan(ctx context.Context, request admin.LaunchPlanUpdateRequest) ( +func (m *LaunchPlanManager) enableLaunchPlan(ctx context.Context, request *admin.LaunchPlanUpdateRequest) ( *admin.LaunchPlanUpdateResponse, error) { 
newlyActiveLaunchPlanModel, err := m.db.LaunchPlanRepo().Get(ctx, repoInterfaces.Identifier{ Project: request.Id.Project, @@ -329,7 +329,7 @@ func (m *LaunchPlanManager) enableLaunchPlan(ctx context.Context, request admin. } -func (m *LaunchPlanManager) UpdateLaunchPlan(ctx context.Context, request admin.LaunchPlanUpdateRequest) ( +func (m *LaunchPlanManager) UpdateLaunchPlan(ctx context.Context, request *admin.LaunchPlanUpdateRequest) ( *admin.LaunchPlanUpdateResponse, error) { if err := validation.ValidateIdentifier(request.Id, common.LaunchPlan); err != nil { logger.Debugf(ctx, "can't update launch plan [%+v] state, invalid identifier: %v", request.Id, err) @@ -347,17 +347,17 @@ func (m *LaunchPlanManager) UpdateLaunchPlan(ctx context.Context, request admin. } } -func (m *LaunchPlanManager) GetLaunchPlan(ctx context.Context, request admin.ObjectGetRequest) ( +func (m *LaunchPlanManager) GetLaunchPlan(ctx context.Context, request *admin.ObjectGetRequest) ( *admin.LaunchPlan, error) { if err := validation.ValidateIdentifier(request.Id, common.LaunchPlan); err != nil { logger.Debugf(ctx, "can't get launch plan [%+v] with invalid identifier: %v", request.Id, err) return nil, err } ctx = getLaunchPlanContext(ctx, request.Id) - return util.GetLaunchPlan(ctx, m.db, *request.Id) + return util.GetLaunchPlan(ctx, m.db, request.Id) } -func (m *LaunchPlanManager) GetActiveLaunchPlan(ctx context.Context, request admin.ActiveLaunchPlanRequest) ( +func (m *LaunchPlanManager) GetActiveLaunchPlan(ctx context.Context, request *admin.ActiveLaunchPlanRequest) ( *admin.LaunchPlan, error) { if err := validation.ValidateActiveLaunchPlanRequest(request); err != nil { logger.Debugf(ctx, "can't get active launch plan [%+v] with invalid request: %v", request.Id, err) @@ -389,7 +389,7 @@ func (m *LaunchPlanManager) GetActiveLaunchPlan(ctx context.Context, request adm return transformers.FromLaunchPlanModel(output.LaunchPlans[0]) } -func (m *LaunchPlanManager) ListLaunchPlans(ctx context.Context, request admin.ResourceListRequest) ( +func (m *LaunchPlanManager) ListLaunchPlans(ctx context.Context, request *admin.ResourceListRequest) ( *admin.LaunchPlanList, error) { // Check required fields @@ -447,7 +447,7 @@ func (m *LaunchPlanManager) ListLaunchPlans(ctx context.Context, request admin.R }, nil } -func (m *LaunchPlanManager) ListActiveLaunchPlans(ctx context.Context, request admin.ActiveLaunchPlanListRequest) ( +func (m *LaunchPlanManager) ListActiveLaunchPlans(ctx context.Context, request *admin.ActiveLaunchPlanListRequest) ( *admin.LaunchPlanList, error) { // Check required fields @@ -501,7 +501,7 @@ func (m *LaunchPlanManager) ListActiveLaunchPlans(ctx context.Context, request a } // At least project name and domain must be specified along with limit. 
-func (m *LaunchPlanManager) ListLaunchPlanIds(ctx context.Context, request admin.NamedEntityIdentifierListRequest) ( +func (m *LaunchPlanManager) ListLaunchPlanIds(ctx context.Context, request *admin.NamedEntityIdentifierListRequest) ( *admin.NamedEntityIdentifierList, error) { ctx = contextutils.WithProjectDomain(ctx, request.Project, request.Domain) filters, err := util.GetDbFilters(util.FilterSpec{ diff --git a/flyteadmin/pkg/manager/impl/launch_plan_manager_test.go b/flyteadmin/pkg/manager/impl/launch_plan_manager_test.go index 3e5b36793e..d40d7c5e1f 100644 --- a/flyteadmin/pkg/manager/impl/launch_plan_manager_test.go +++ b/flyteadmin/pkg/manager/impl/launch_plan_manager_test.go @@ -32,7 +32,7 @@ import ( var active = int32(admin.LaunchPlanState_ACTIVE) var inactive = int32(admin.LaunchPlanState_INACTIVE) var mockScheduler = mocks.NewMockEventScheduler() -var launchPlanIdentifier = core.Identifier{ +var launchPlanIdentifier = &core.Identifier{ ResourceType: core.ResourceType_LAUNCH_PLAN, Project: project, Domain: domain, @@ -40,7 +40,7 @@ var launchPlanIdentifier = core.Identifier{ Version: version, } -var launchPlanNamedIdentifier = core.Identifier{ +var launchPlanNamedIdentifier = &core.Identifier{ Project: project, Domain: domain, Name: name, @@ -128,8 +128,8 @@ func TestLaunchPlanManager_GetLaunchPlan(t *testing.T) { }, nil } repository.LaunchPlanRepo().(*repositoryMocks.MockLaunchPlanRepo).SetGetCallback(launchPlanGetFunc) - response, err := lpManager.GetLaunchPlan(context.Background(), admin.ObjectGetRequest{ - Id: &launchPlanIdentifier, + response, err := lpManager.GetLaunchPlan(context.Background(), &admin.ObjectGetRequest{ + Id: launchPlanIdentifier, }) assert.NoError(t, err) assert.NotNil(t, response) @@ -183,7 +183,7 @@ func TestLaunchPlanManager_GetActiveLaunchPlan(t *testing.T) { }, nil } repository.LaunchPlanRepo().(*repositoryMocks.MockLaunchPlanRepo).SetListCallback(launchPlanListFunc) - response, err := lpManager.GetActiveLaunchPlan(context.Background(), admin.ActiveLaunchPlanRequest{ + response, err := lpManager.GetActiveLaunchPlan(context.Background(), &admin.ActiveLaunchPlanRequest{ Id: &admin.NamedEntityIdentifier{ Project: lpRequest.Id.Project, Domain: lpRequest.Id.Domain, @@ -203,7 +203,7 @@ func TestLaunchPlanManager_GetActiveLaunchPlan_NoneActive(t *testing.T) { return interfaces.LaunchPlanCollectionOutput{}, nil } repository.LaunchPlanRepo().(*repositoryMocks.MockLaunchPlanRepo).SetListCallback(launchPlanListFunc) - response, err := lpManager.GetActiveLaunchPlan(context.Background(), admin.ActiveLaunchPlanRequest{ + response, err := lpManager.GetActiveLaunchPlan(context.Background(), &admin.ActiveLaunchPlanRequest{ Id: &admin.NamedEntityIdentifier{ Project: lpRequest.Id.Project, Domain: lpRequest.Id.Domain, @@ -217,7 +217,7 @@ func TestLaunchPlanManager_GetActiveLaunchPlan_NoneActive(t *testing.T) { func TestLaunchPlanManager_GetActiveLaunchPlan_InvalidRequest(t *testing.T) { repository := getMockRepositoryForLpTest() lpManager := NewLaunchPlanManager(repository, getMockConfigForLpTest(), mockScheduler, mockScope.NewTestScope()) - response, err := lpManager.GetActiveLaunchPlan(context.Background(), admin.ActiveLaunchPlanRequest{ + response, err := lpManager.GetActiveLaunchPlan(context.Background(), &admin.ActiveLaunchPlanRequest{ Id: &admin.NamedEntityIdentifier{ Domain: domain, Name: name, @@ -394,7 +394,7 @@ func makeLaunchPlanRepoGetCallback(t *testing.T) repositoryMocks.GetLaunchPlanFu func TestEnableSchedule(t *testing.T) { repository := 
getMockRepositoryForLpTest() mockScheduler := mocks.NewMockEventScheduler() - scheduleExpression := admin.Schedule{ + scheduleExpression := &admin.Schedule{ ScheduleExpression: &admin.Schedule_Rate{ Rate: &admin.FixedRate{ Value: 2, @@ -404,8 +404,8 @@ func TestEnableSchedule(t *testing.T) { } mockScheduler.(*mocks.MockEventScheduler).SetAddScheduleFunc( func(ctx context.Context, input scheduleInterfaces.AddScheduleInput) error { - assert.True(t, proto.Equal(&launchPlanNamedIdentifier, &input.Identifier)) - assert.True(t, proto.Equal(&scheduleExpression, &input.ScheduleExpression)) + assert.True(t, proto.Equal(launchPlanNamedIdentifier, input.Identifier)) + assert.True(t, proto.Equal(scheduleExpression, input.ScheduleExpression)) assert.Equal(t, "{\"time\":