package k8s

import (
    "context"
    "fmt"
    "time"

    "golang.org/x/time/rate"
    v1 "k8s.io/api/core/v1"
    k8serrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    k8stypes "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/validation"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/util/workqueue"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/event"
    "sigs.k8s.io/controller-runtime/pkg/handler"
    "sigs.k8s.io/controller-runtime/pkg/predicate"
    "sigs.k8s.io/controller-runtime/pkg/source"

    "github.com/flyteorg/flyte/flyteplugins/go/tasks/errors"
    pluginsCore "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/core"
    "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/flytek8s/config"
    "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/io"
    "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/ioutils"
    "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/k8s"
    pluginsUtils "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/utils"
    compiler "github.com/flyteorg/flyte/flytepropeller/pkg/compiler/transformers/k8s"
    "github.com/flyteorg/flyte/flytepropeller/pkg/controller/nodes/task/backoff"
    nodeTaskConfig "github.com/flyteorg/flyte/flytepropeller/pkg/controller/nodes/task/config"
    "github.com/flyteorg/flyte/flytestdlib/contextutils"
    stdErrors "github.com/flyteorg/flyte/flytestdlib/errors"
    "github.com/flyteorg/flyte/flytestdlib/logger"
    "github.com/flyteorg/flyte/flytestdlib/promutils"
    "github.com/flyteorg/flyte/flytestdlib/promutils/labeled"
)
const finalizer = "flyte/flytek8s"

const pluginStateVersion = 1

type PluginPhase uint8

const (
    PluginPhaseNotStarted PluginPhase = iota
    PluginPhaseAllocationTokenAcquired
    PluginPhaseStarted
)
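
// PluginState is the per-task state the PluginManager persists between evaluation rounds: how far
// resource creation has progressed (Phase), the last phase/version/reason reported by the underlying
// k8s plugin (K8sPluginState), and the timestamp of the most recent k8s event already surfaced to the
// user (LastEventUpdate).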
type PluginState struct {
    Phase           PluginPhase
    K8sPluginState  k8s.PluginState
    LastEventUpdate time.Time
}
type PluginMetrics struct {
    Scope           promutils.Scope
    GetCacheMiss    labeled.StopWatch
    GetCacheHit     labeled.StopWatch
    GetAPILatency   labeled.StopWatch
    ResourceDeleted labeled.Counter
}
func newPluginMetrics(s promutils.Scope) PluginMetrics {
    return PluginMetrics{
        Scope: s,
        GetCacheMiss: labeled.NewStopWatch("get_cache_miss", "Cache miss on get resource calls.",
            time.Millisecond, s),
        GetCacheHit: labeled.NewStopWatch("get_cache_hit", "Cache hit on get resource calls.",
            time.Millisecond, s),
        GetAPILatency: labeled.NewStopWatch("get_api", "Latency for APIServer Get calls.",
            time.Millisecond, s),
        ResourceDeleted: labeled.NewCounter("pods_deleted", "Counts how many times CheckTaskStatus is"+
            " called with a deleted resource.", s),
    }
}
func isK8sObjectNotExists(err error) bool {
    return k8serrors.IsNotFound(err) || k8serrors.IsGone(err) || k8serrors.IsResourceExpired(err)
}
// PluginManager is a generic plugin for managing k8s resources. Plugin writers wishing to manage k8s
// resources can use the simplified API specified in pluginmachinery.core.
type PluginManager struct {
    id              string
    plugin          k8s.Plugin
    resourceToWatch runtime.Object
    kubeClient      pluginsCore.KubeClient
    metrics         PluginMetrics
    // Per namespace-resource
    backOffController    *backoff.Controller
    resourceLevelMonitor *ResourceLevelMonitor
    eventWatcher         EventWatcher
}
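
// addObjectMetadata stamps the object built by the plugin with the task's namespace and generated
// name, labels/annotations merged from the plugin config defaults, the object itself, and the task
// execution metadata, an owner reference to the workflow owner (unless the plugin disables owner
// reference injection), and the flyte finalizer when finalizer injection is enabled. Names that are
// not valid DNS-1123 subdomains are converted to a compatible form.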
func (e *PluginManager) addObjectMetadata(taskCtx pluginsCore.TaskExecutionMetadata, o client.Object, cfg *config.K8sPluginConfig) {
    o.SetNamespace(taskCtx.GetNamespace())
    o.SetAnnotations(pluginsUtils.UnionMaps(cfg.DefaultAnnotations, o.GetAnnotations(), pluginsUtils.CopyMap(taskCtx.GetAnnotations())))
    o.SetLabels(pluginsUtils.UnionMaps(cfg.DefaultLabels, o.GetLabels(), pluginsUtils.CopyMap(taskCtx.GetLabels())))
    o.SetName(taskCtx.GetTaskExecutionID().GetGeneratedName())

    if !e.plugin.GetProperties().DisableInjectOwnerReferences {
        o.SetOwnerReferences([]metav1.OwnerReference{taskCtx.GetOwnerReference()})
    }

    if cfg.InjectFinalizer && !e.plugin.GetProperties().DisableInjectFinalizer {
        f := append(o.GetFinalizers(), finalizer)
        o.SetFinalizers(f)
    }

    if errs := validation.IsDNS1123Subdomain(o.GetName()); len(errs) > 0 {
        o.SetName(pluginsUtils.ConvertToDNS1123SubdomainCompatibleString(o.GetName()))
    }
}
func (e *PluginManager) GetProperties() pluginsCore.PluginProperties {
    props := e.plugin.GetProperties()
    return pluginsCore.PluginProperties{
        GeneratedNameMaxLength: props.GeneratedNameMaxLength,
    }
}

func (e *PluginManager) GetID() string {
    return e.id
}
func (e *PluginManager) getPodEffectiveResourceLimits(ctx context.Context, pod *v1.Pod) v1.ResourceList {
    podRequestedResources := make(v1.ResourceList)
    initContainersRequestedResources := make(v1.ResourceList)
    containersRequestedResources := make(v1.ResourceList)

    // Collect the resource requests from all the containers in the pod whose creation is to be attempted
    // to decide whether we should try the pod creation during the back off period

    // Calculating the effective init resource limits based on the official definition:
    // https://kubernetes.io/docs/concepts/workloads/pods/init-containers/#resources
    // "The highest of any particular resource request or limit defined on all init containers is the effective init request/limit"
    for _, initContainer := range pod.Spec.InitContainers {
        for r, q := range initContainer.Resources.Limits {
            if currentQuantity, found := initContainersRequestedResources[r]; !found || q.Cmp(currentQuantity) > 0 {
                initContainersRequestedResources[r] = q
            }
        }
    }

    for _, container := range pod.Spec.Containers {
        for k, v := range container.Resources.Limits {
            quantity := containersRequestedResources[k]
            quantity.Add(v)
            containersRequestedResources[k] = quantity
        }
    }

    for k, v := range initContainersRequestedResources {
        podRequestedResources[k] = v
    }

    // https://kubernetes.io/docs/concepts/workloads/pods/init-containers/#resources
    // "The Pod’s effective request/limit for a resource is the higher of:
    // - the sum of all app containers request/limit for a resource
    // - the effective init request/limit for a resource"
    for k, qC := range containersRequestedResources {
        if qI, found := podRequestedResources[k]; !found || qC.Cmp(qI) > 0 {
            podRequestedResources[k] = qC
        }
    }

    formattedResources := make([]string, 0, len(podRequestedResources))
    for resourceName, quantity := range podRequestedResources {
        formattedResources = append(formattedResources, fmt.Sprintf("{[%v]: [%v]}", resourceName, quantity.String()))
    }

    logger.Infof(ctx, "The resource requirement for creating Pod [%v/%v] is [%+v]\n",
        pod.Namespace, pod.Name, formattedResources)

    return podRequestedResources
}
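
// Worked example for getPodEffectiveResourceLimits (illustrative numbers, not taken from this
// codebase): for a pod with one init container limited to cpu=2 and two app containers limited to
// cpu=1 and cpu=1.5, the effective init limit is 2 (the max across init containers) and the
// app-container sum is 2.5, so the pod's effective cpu requirement used for back-off decisions is 2.5
// (the higher of the two).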
func (e *PluginManager) launchResource(ctx context.Context, tCtx pluginsCore.TaskExecutionContext) (pluginsCore.Transition, error) {
    tmpl, err := tCtx.TaskReader().Read(ctx)
    if err != nil {
        return pluginsCore.Transition{}, err
    }

    k8sTaskCtxMetadata, err := newTaskExecutionMetadata(tCtx.TaskExecutionMetadata(), tmpl)
    if err != nil {
        return pluginsCore.Transition{}, err
    }

    k8sTaskCtx := newTaskExecutionContext(tCtx, k8sTaskCtxMetadata)

    o, err := e.plugin.BuildResource(ctx, k8sTaskCtx)
    if err != nil {
        return pluginsCore.UnknownTransition, err
    }

    e.addObjectMetadata(k8sTaskCtxMetadata, o, config.GetK8sPluginConfig())
    logger.Infof(ctx, "Creating Object: Type:[%v], Object:[%v/%v]", o.GetObjectKind().GroupVersionKind(), o.GetNamespace(), o.GetName())

    key := backoff.ComposeResourceKey(o)

    pod, casted := o.(*v1.Pod)
    if e.backOffController != nil && casted {
        podRequestedResources := e.getPodEffectiveResourceLimits(ctx, pod)

        cfg := nodeTaskConfig.GetConfig()
        backOffHandler := e.backOffController.GetOrCreateHandler(ctx, key, cfg.BackOffConfig.BaseSecond, cfg.BackOffConfig.MaxDuration.Duration)

        err = backOffHandler.Handle(ctx, func() error {
            return e.kubeClient.GetClient().Create(ctx, o)
        }, podRequestedResources)
    } else {
        err = e.kubeClient.GetClient().Create(ctx, o)
    }

    if err != nil && !k8serrors.IsAlreadyExists(err) {
        if backoff.IsResourceQuotaExceeded(err) && !backoff.IsResourceRequestsEligible(err) {
            // if task resources exceed resource quotas then permanently fail because the task will
            // be stuck waiting for resources until the `node-active-deadline` terminates the node.
            logger.Errorf(ctx, "task resource requests exceed k8s resource limits. err: %v", err)
            return pluginsCore.DoTransition(pluginsCore.PhaseInfoFailure("ResourceRequestsExceedLimits",
                fmt.Sprintf("requested resources exceed limits: %v", err.Error()), nil)), nil
        } else if stdErrors.IsCausedBy(err, errors.BackOffError) {
            logger.Warnf(ctx, "Failed to launch job, resource quota exceeded. err: %v", err)
            return pluginsCore.DoTransition(pluginsCore.PhaseInfoWaitingForResourcesInfo(time.Now(), pluginsCore.DefaultPhaseVersion, fmt.Sprintf("Exceeded resourcequota: %s", err.Error()), nil)), nil
        } else if e.backOffController == nil && backoff.IsResourceQuotaExceeded(err) {
            logger.Warnf(ctx, "Failed to launch job, resource quota exceeded and the operation is not guarded by back-off. err: %v", err)
            return pluginsCore.DoTransition(pluginsCore.PhaseInfoWaitingForResourcesInfo(time.Now(), pluginsCore.DefaultPhaseVersion, fmt.Sprintf("Exceeded resourcequota: %s", err.Error()), nil)), nil
        } else if k8serrors.IsForbidden(err) {
            return pluginsCore.DoTransition(pluginsCore.PhaseInfoRetryableFailure("RuntimeFailure", err.Error(), nil)), nil
        } else if k8serrors.IsBadRequest(err) || k8serrors.IsInvalid(err) {
            logger.Errorf(ctx, "Badly formatted resource for plugin [%s], err %s", e.id, err)
            // return pluginsCore.DoTransition(pluginsCore.PhaseInfoFailure("BadTaskFormat", err.Error(), nil)), nil
        } else if k8serrors.IsRequestEntityTooLargeError(err) {
            logger.Errorf(ctx, "Badly formatted resource for plugin [%s], err %s", e.id, err)
            return pluginsCore.DoTransition(pluginsCore.PhaseInfoFailure("EntityTooLarge", err.Error(), nil)), nil
        }

        reason := k8serrors.ReasonForError(err)
        logger.Errorf(ctx, "Failed to launch job, system error. err: %v", err)
        return pluginsCore.UnknownTransition, errors.Wrapf(stdErrors.ErrorCode(reason), err, "failed to create resource")
    }

    return pluginsCore.DoTransition(pluginsCore.PhaseInfoQueued(time.Now(), pluginsCore.DefaultPhaseVersion, "task submitted to K8s")), nil
}
func (e *PluginManager) getResource(ctx context.Context, tCtx pluginsCore.TaskExecutionContext) (client.Object, error) {
    o, err := e.plugin.BuildIdentityResource(ctx, tCtx.TaskExecutionMetadata())
    if err != nil {
        logger.Errorf(ctx, "Failed to build the Resource with name: %v. Error: %v", tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetGeneratedName(), err)
        return nil, err
    }
    e.addObjectMetadata(tCtx.TaskExecutionMetadata(), o, config.GetK8sPluginConfig())
    return o, nil
}
func (e *PluginManager) checkResourcePhase(ctx context.Context, tCtx pluginsCore.TaskExecutionContext, o client.Object, k8sPluginState *k8s.PluginState) (pluginsCore.Transition, error) {
    nsName := k8stypes.NamespacedName{Namespace: o.GetNamespace(), Name: o.GetName()}

    // Attempt to get resource from informer cache, if not found, retrieve it from API server.
    if err := e.kubeClient.GetClient().Get(ctx, nsName, o); err != nil {
        if isK8sObjectNotExists(err) {
            // This happens sometimes because a node gets removed and K8s deletes the pod. This will result in a
            // Pod does not exist error. This should be retried using the retry policy
            logger.Warningf(ctx, "Failed to find the Resource with name: %v. Error: %v", nsName, err)
            failureReason := fmt.Sprintf("resource not found, name [%s]. reason: %s", nsName.String(), err.Error())
            return pluginsCore.DoTransition(pluginsCore.PhaseInfoSystemRetryableFailure("ResourceDeletedExternally", failureReason, nil)), nil
        }

        logger.Warningf(ctx, "Failed to retrieve Resource Details with name: %v. Error: %v", nsName, err)
        return pluginsCore.UnknownTransition, err
    }

    if o.GetDeletionTimestamp() != nil {
        e.metrics.ResourceDeleted.Inc(ctx)
    }

    pCtx := newPluginContext(tCtx, k8sPluginState)
    p, err := e.plugin.GetTaskPhase(ctx, pCtx, o)
    if err != nil {
        logger.Warnf(ctx, "failed to check status of resource in plugin [%s], with error: %s", e.GetID(), err.Error())
        return pluginsCore.UnknownTransition, err
    }

    if p.Phase() == pluginsCore.PhaseSuccess {
        var opReader io.OutputReader
        if pCtx.ow == nil {
            logger.Infof(ctx, "Plugin [%s] returned no outputReader, assuming file based outputs", e.id)
            opReader = ioutils.NewRemoteFileOutputReader(ctx, tCtx.DataStore(), tCtx.OutputWriter(), 0)
        } else {
            logger.Infof(ctx, "Plugin [%s] returned outputReader", e.id)
            opReader = pCtx.ow.GetReader()
        }

        err := tCtx.OutputWriter().Put(ctx, opReader)
        if err != nil {
            return pluginsCore.UnknownTransition, err
        }

        return pluginsCore.DoTransition(p), nil
    }

    if !p.Phase().IsTerminal() && o.GetDeletionTimestamp() != nil {
        // If the object has been deleted, that is, it has a deletion timestamp, but is not in a terminal state, we should
        // mark the task as a retryable failure. We've seen this happen when a kubelet disappears - all pods running on
        // the node are marked with a deletionTimestamp, but our finalizers prevent the pod from being deleted.
        // This can also happen when a user deletes a Pod directly.
        failureReason := fmt.Sprintf("object [%s] terminated in the background, manually", nsName.String())
        return pluginsCore.DoTransition(pluginsCore.PhaseInfoSystemRetryableFailure("UnexpectedObjectDeletion", failureReason, nil)), nil
    }

    return pluginsCore.DoTransition(p), nil
}
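
// Handle drives one evaluation round for the task. On the first round (PluginPhaseNotStarted) it
// builds and creates the k8s resource via launchResource; on later rounds it rebuilds the identity
// resource and asks the plugin for the current phase via checkResourcePhase. When an event watcher is
// configured, k8s events newer than the last persisted update are attached as additional reasons and
// the phase version is bumped so they are reported. The resulting plugin state is persisted for the
// next round.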
func (e PluginManager) Handle(ctx context.Context, tCtx pluginsCore.TaskExecutionContext) (pluginsCore.Transition, error) {
    // read phase state
    pluginState := PluginState{}
    if v, err := tCtx.PluginStateReader().Get(&pluginState); err != nil {
        if v != pluginStateVersion {
            return pluginsCore.DoTransition(pluginsCore.PhaseInfoRetryableFailure(errors.CorruptedPluginState,
                fmt.Sprintf("plugin state version mismatch expected [%d] got [%d]", pluginStateVersion, v), nil)), nil
        }
        return pluginsCore.UnknownTransition, errors.Wrapf(errors.CorruptedPluginState, err, "Failed to unmarshal custom plugin state")
    }

    // evaluate plugin
    var err error
    var transition pluginsCore.Transition
    var o client.Object
    pluginPhase := pluginState.Phase
    if pluginState.Phase == PluginPhaseNotStarted {
        transition, err = e.launchResource(ctx, tCtx)
        if err == nil && transition.Info().Phase() == pluginsCore.PhaseQueued {
            pluginPhase = PluginPhaseStarted
        }
    } else {
        o, err = e.getResource(ctx, tCtx)
        if err != nil {
            transition, err = pluginsCore.DoTransition(pluginsCore.PhaseInfoFailure("BadTaskDefinition",
                fmt.Sprintf("Failed to build resource, caused by: %s", err.Error()), nil)), nil
        } else {
            transition, err = e.checkResourcePhase(ctx, tCtx, o, &pluginState.K8sPluginState)
        }
    }

    if err != nil {
        return transition, err
    }

    // Add events since last update
    version := transition.Info().Version()
    lastEventUpdate := pluginState.LastEventUpdate
    if e.eventWatcher != nil && o != nil {
        nsName := k8stypes.NamespacedName{Namespace: o.GetNamespace(), Name: o.GetName()}
        recentEvents := e.eventWatcher.List(nsName, lastEventUpdate)
        if len(recentEvents) > 0 {
            taskInfo := transition.Info().Info()
            taskInfo.AdditionalReasons = make([]pluginsCore.ReasonInfo, 0, len(recentEvents))
            for _, event := range recentEvents {
                taskInfo.AdditionalReasons = append(taskInfo.AdditionalReasons,
                    pluginsCore.ReasonInfo{Reason: event.Note, OccurredAt: &event.CreatedAt})
                lastEventUpdate = event.CreatedAt
            }
            // Bump the version to ensure newly added events are picked up
            version++
        }
    }

    // persist any changes in phase state
    newPluginState := PluginState{
        Phase: pluginPhase,
        K8sPluginState: k8s.PluginState{
            Phase:        transition.Info().Phase(),
            PhaseVersion: version,
            Reason:       transition.Info().Reason(),
        },
        LastEventUpdate: lastEventUpdate,
    }
    if pluginState != newPluginState {
        if err := tCtx.PluginStateWriter().Put(pluginStateVersion, &newPluginState); err != nil {
            return pluginsCore.UnknownTransition, err
        }
    }

    return transition, nil
}
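
// Abort best-effort terminates the underlying k8s resource when a task is killed. By default the
// resource is deleted; a plugin implementing k8s.PluginAbortOverride may instead request a Patch or
// an Update of the resource, optionally falling back to deletion on error via DeleteOnErr.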
func (e PluginManager) Abort(ctx context.Context, tCtx pluginsCore.TaskExecutionContext) error {
    logger.Infof(ctx, "KillTask invoked. We will attempt to delete object [%v].",
        tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetGeneratedName())

    o, err := e.getResource(ctx, tCtx)
    if err != nil {
        logger.Errorf(ctx, "%v", err)
        return nil
    }

    deleteResource := true
    abortOverride, hasAbortOverride := e.plugin.(k8s.PluginAbortOverride)

    resourceToFinalize := o
    var behavior k8s.AbortBehavior

    if hasAbortOverride {
        behavior, err = abortOverride.OnAbort(ctx, tCtx, o)
        deleteResource = err == nil && behavior.DeleteResource
        if err == nil && behavior.Resource != nil {
            resourceToFinalize = behavior.Resource
        }
    }

    if err != nil {
        // an error from OnAbort falls through to the error handling below
    } else if deleteResource {
        err = e.kubeClient.GetClient().Delete(ctx, resourceToFinalize)
    } else {
        if behavior.Patch != nil && behavior.Update == nil {
            err = e.kubeClient.GetClient().Patch(ctx, resourceToFinalize, behavior.Patch.Patch, behavior.Patch.Options...)
        } else if behavior.Patch == nil && behavior.Update != nil {
            err = e.kubeClient.GetClient().Update(ctx, resourceToFinalize, behavior.Update.Options...)
        } else {
            err = errors.Errorf(errors.RuntimeFailure, "AbortBehavior for resource %v must specify either a Patch or an Update operation if DeleteResource is set to false. Only one can be supplied.", resourceToFinalize.GetName())
        }
        if behavior.DeleteOnErr && err != nil {
            logger.Warningf(ctx, "Failed to apply AbortBehavior for resource %v with error %v. Will attempt to delete resource.", resourceToFinalize.GetName(), err)
            err = e.kubeClient.GetClient().Delete(ctx, resourceToFinalize)
        }
    }

    if err != nil && !isK8sObjectNotExists(err) {
        logger.Warningf(ctx, "Failed to abort Resource with name: %v/%v. Error: %v",
            resourceToFinalize.GetNamespace(), resourceToFinalize.GetName(), err)
        return err
    }

    return nil
}
func (e *PluginManager) clearFinalizers(ctx context.Context, o client.Object) error {
    if len(o.GetFinalizers()) > 0 {
        o.SetFinalizers([]string{})
        err := e.kubeClient.GetClient().Update(ctx, o)
        if err != nil && !isK8sObjectNotExists(err) {
            logger.Warningf(ctx, "Failed to clear finalizers for Resource with name: %v/%v. Error: %v",
                o.GetNamespace(), o.GetName(), err)
            return err
        }
    } else {
        logger.Debugf(ctx, "Finalizers are already empty for Resource with name: %v/%v",
            o.GetNamespace(), o.GetName())
    }
    return nil
}
func (e *PluginManager) Finalize(ctx context.Context, tCtx pluginsCore.TaskExecutionContext) (err error) {
    errs := stdErrors.ErrorCollection{}
    var nsName k8stypes.NamespacedName
    cfg := config.GetK8sPluginConfig()

    o, err := e.getResource(ctx, tCtx)
    if err != nil {
        logger.Errorf(ctx, "%v", err)
        return nil
    }

    nsName = k8stypes.NamespacedName{Namespace: o.GetNamespace(), Name: o.GetName()}

    // Attempt to clean up finalizers so that the object may be deleted/garbage collected. We try to clear them for all
    // objects, regardless of whether InjectFinalizer is configured, to handle all cases where InjectFinalizer is
    // enabled/disabled during object execution.
    if err := e.kubeClient.GetClient().Get(ctx, nsName, o); err != nil {
        if isK8sObjectNotExists(err) {
            return nil
        }
        // This happens sometimes because a node gets removed and K8s deletes the pod. This will result in a
        // Pod does not exist error. This should be retried using the retry policy
        logger.Warningf(ctx, "Failed in finalizing get Resource with name: %v. Error: %v", nsName, err)
        return err
    }

    // This must happen after sending admin event. It's safe against partial failures because if the event failed, we will
    // simply retry in the next round. If the event succeeded but this failed, we will try again the next round to send
    // the same event (idempotent) and then come here again...
    err = e.clearFinalizers(ctx, o)
    if err != nil {
        errs.Append(err)
    }

    // If we should delete the resource when finalize is called, do a best effort delete.
    if cfg.DeleteResourceOnFinalize && !e.plugin.GetProperties().DisableDeleteResourceOnFinalize {
        // Attempt to delete resource, if not found, return success.
        if err := e.kubeClient.GetClient().Delete(ctx, o); err != nil {
            if isK8sObjectNotExists(err) {
                return errs.ErrorOrDefault()
            }
            // This happens sometimes because a node gets removed and K8s deletes the pod. This will result in a
            // Pod does not exist error. This should be retried using the retry policy
            logger.Warningf(ctx, "Failed in finalizing. Failed to delete Resource with name: %v. Error: %v", nsName, err)
            errs.Append(fmt.Errorf("finalize: failed to delete resource with name [%v]. Error: %w", nsName, err))
        }
    }

    return errs.ErrorOrDefault()
}
func NewPluginManagerWithBackOff(ctx context.Context, iCtx pluginsCore.SetupContext, entry k8s.PluginEntry, backOffController *backoff.Controller,
    monitorIndex *ResourceMonitorIndex, kubeClientset kubernetes.Interface) (*PluginManager, error) {

    mgr, err := NewPluginManager(ctx, iCtx, entry, monitorIndex, kubeClientset)
    if err == nil {
        mgr.backOffController = backOffController
    }
    return mgr, err
}
// NewPluginManager creates a K8s generic task executor. This provides an easier way to build task executors that create K8s resources.
func NewPluginManager(ctx context.Context, iCtx pluginsCore.SetupContext, entry k8s.PluginEntry,
    monitorIndex *ResourceMonitorIndex, kubeClientset kubernetes.Interface) (*PluginManager, error) {

    if iCtx.EnqueueOwner() == nil {
        return nil, errors.Errorf(errors.PluginInitializationFailed, "Failed to initialize plugin, enqueue Owner cannot be nil or empty.")
    }

    kubeClient := iCtx.KubeClient()
    if entry.CustomKubeClient != nil {
        kc, err := entry.CustomKubeClient(ctx)
        if err != nil {
            return nil, err
        }
        if kc != nil {
            kubeClient = kc
        }
    }

    if kubeClient == nil {
        return nil, errors.Errorf(errors.PluginInitializationFailed, "Failed to initialize K8sResource Plugin, Kubeclient cannot be nil!")
    }

    logger.Infof(ctx, "Initializing K8s plugin [%s]", entry.ID)

    src := source.Kind(iCtx.KubeClient().GetCache(), entry.ResourceToWatch)

    workflowParentPredicate := func(o metav1.Object) bool {
        if entry.Plugin.GetProperties().DisableInjectOwnerReferences {
            return true
        }

        ownerReference := metav1.GetControllerOf(o)
        if ownerReference != nil {
            if ownerReference.Kind == iCtx.OwnerKind() {
                return true
            }
        }

        return false
    }

    metricsScope := iCtx.MetricsScope().NewSubScope(entry.ID)
    updateCount := labeled.NewCounter("informer_update", "Update events from informer", metricsScope)
    droppedUpdateCount := labeled.NewCounter("informer_update_dropped", "Update events from informer that have the same resource version", metricsScope)
    genericCount := labeled.NewCounter("informer_generic", "Generic events from informer", metricsScope)

    enqueueOwner := iCtx.EnqueueOwner()
    err := src.Start(
        ctx,
        // Handlers
        handler.Funcs{
            CreateFunc: func(ctx context.Context, evt event.CreateEvent, q2 workqueue.RateLimitingInterface) {
                logger.Debugf(context.Background(), "Create received for %s, ignoring.", evt.Object.GetName())
            },
            UpdateFunc: func(ctx context.Context, evt event.UpdateEvent, q2 workqueue.RateLimitingInterface) {
                if evt.ObjectNew == nil {
                    logger.Warn(context.Background(), "Received an Update event with nil ObjectNew.")
                } else if evt.ObjectOld == nil || evt.ObjectOld.GetResourceVersion() != evt.ObjectNew.GetResourceVersion() {
                    // attempt to enqueue this task's owner by retrieving the workflowID from the resource labels
                    newCtx := contextutils.WithNamespace(context.Background(), evt.ObjectNew.GetNamespace())
                    workflowID, exists := evt.ObjectNew.GetLabels()[compiler.ExecutionIDLabel]
                    if exists {
                        logger.Debugf(ctx, "Enqueueing owner for updated object [%v/%v]", evt.ObjectNew.GetNamespace(), evt.ObjectNew.GetName())
                        namespacedName := k8stypes.NamespacedName{
                            Name:      workflowID,
                            Namespace: evt.ObjectNew.GetNamespace(),
                        }
                        if err := enqueueOwner(namespacedName); err != nil {
                            logger.Warnf(context.Background(), "Failed to handle Update event for object [%v]", namespacedName)
                        }
                        updateCount.Inc(newCtx)
                    }
                } else {
                    newCtx := contextutils.WithNamespace(context.Background(), evt.ObjectNew.GetNamespace())
                    droppedUpdateCount.Inc(newCtx)
                }
            },
            DeleteFunc: func(ctx context.Context, evt event.DeleteEvent, q2 workqueue.RateLimitingInterface) {
                logger.Debugf(context.Background(), "Delete received for %s, ignoring.", evt.Object.GetName())
            },
            GenericFunc: func(ctx context.Context, evt event.GenericEvent, q2 workqueue.RateLimitingInterface) {
                logger.Debugf(context.Background(), "Generic received for %s, ignoring.", evt.Object.GetName())
                genericCount.Inc(ctx)
            },
        },
        // Queue - configured for high throughput so we very infrequently rate limit node updates
        workqueue.NewNamedRateLimitingQueue(&workqueue.BucketRateLimiter{
            Limiter: rate.NewLimiter(rate.Limit(10000), 10000),
        }, entry.ResourceToWatch.GetObjectKind().GroupVersionKind().Kind),
        // Predicates
        predicate.Funcs{
            CreateFunc: func(createEvent event.CreateEvent) bool {
                return false
            },
            UpdateFunc: func(updateEvent event.UpdateEvent) bool {
                // TODO we should filter out events in case there are no updates observed between the old and new?
                return workflowParentPredicate(updateEvent.ObjectNew)
            },
            DeleteFunc: func(deleteEvent event.DeleteEvent) bool {
                return false
            },
            GenericFunc: func(genericEvent event.GenericEvent) bool {
                return workflowParentPredicate(genericEvent.Object)
            },
        })

    if err != nil {
        return nil, err
    }

    gvk, err := getPluginGvk(entry.ResourceToWatch)
    if err != nil {
        return nil, err
    }

    var eventWatcher EventWatcher
    if config.GetK8sPluginConfig().SendObjectEvents {
        eventWatcher, err = NewEventWatcher(ctx, gvk, kubeClientset)
        if err != nil {
            return nil, err
        }
    }

    // Construct the collector that will emit a gauge indicating current levels of the resource that this K8s plugin operates on
    rm := monitorIndex.GetOrCreateResourceLevelMonitor(ctx, metricsScope, kubeClient.GetCache(), gvk)
    // Start the poller and gauge emitter
    rm.RunCollectorOnce(ctx)

    return &PluginManager{
        id:                   entry.ID,
        plugin:               entry.Plugin,
        resourceToWatch:      entry.ResourceToWatch,
        metrics:              newPluginMetrics(metricsScope),
        kubeClient:           kubeClient,
        resourceLevelMonitor: rm,
        eventWatcher:         eventWatcher,
    }, nil
}
func getPluginGvk(resourceToWatch runtime.Object) (schema.GroupVersionKind, error) {
    kinds, _, err := scheme.Scheme.ObjectKinds(resourceToWatch)
    if err != nil && len(kinds) == 0 {
        return schema.GroupVersionKind{}, errors.Errorf(errors.PluginInitializationFailed, "No kind in schema for %v", resourceToWatch)
    }
    return kinds[0], nil
}
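
// Illustrative registration sketch (not part of this file; identifiers such as setupCtx, podPlugin,
// monitorIndex, and clientset are assumed to be provided by the caller, and the PluginEntry fields
// shown are only those referenced above):
//
//	entry := k8s.PluginEntry{
//		ID:              "container", // hypothetical plugin ID
//		ResourceToWatch: &v1.Pod{},
//		Plugin:          podPlugin,
//	}
//	mgr, err := NewPluginManager(ctx, setupCtx, entry, monitorIndex, clientset)
//	if err != nil {
//		// initialization failed; surface the error to the caller
//	}
//	_ = mgr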