feat: EventSource and Sensor HA without extra RBAC #1163
@@ -0,0 +1,158 @@ (new file)

```go
package leaderelection

import (
	"context"

	"github.com/fsnotify/fsnotify"
	"github.com/nats-io/graft"
	nats "github.com/nats-io/nats.go"
	"github.com/pkg/errors"
	"github.com/spf13/viper"
	"go.uber.org/zap"

	"github.com/argoproj/argo-events/common"
	"github.com/argoproj/argo-events/common/logging"
	eventbusdriver "github.com/argoproj/argo-events/eventbus/driver"
	apicommon "github.com/argoproj/argo-events/pkg/apis/common"
	eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1"
)

// Elector runs leader election and invokes the given callbacks on
// leadership transitions.
type Elector interface {
	RunOrDie(context.Context, LeaderCallbacks)
}

type LeaderCallbacks struct {
	OnStartedLeading func(context.Context)
	OnStoppedLeading func()
}

// NewEventBusElector returns an Elector backed by the event bus, so leader
// election requires no extra Kubernetes RBAC (e.g. no Lease objects).
func NewEventBusElector(ctx context.Context, eventBusConfig eventbusv1alpha1.BusConfig, clusterName string, clusterSize int) (Elector, error) {
	logger := logging.FromContext(ctx)
	var eventBusType apicommon.EventBusType
	var eventBusAuth *eventbusv1alpha1.AuthStrategy
	if eventBusConfig.NATS != nil {
		eventBusType = apicommon.EventBusNATS
		eventBusAuth = eventBusConfig.NATS.Auth
	} else {
		return nil, errors.New("invalid event bus")
	}
	var auth *eventbusdriver.Auth
	cred := &eventbusdriver.AuthCredential{}
	if eventBusAuth == nil || *eventBusAuth == eventbusv1alpha1.AuthStrategyNone {
		auth = &eventbusdriver.Auth{
			Strategy: eventbusv1alpha1.AuthStrategyNone,
		}
	} else {
		// Load the auth credential from the mounted file and watch it for changes.
		v := viper.New()
		v.SetConfigName("auth")
		v.SetConfigType("yaml")
		v.AddConfigPath(common.EventBusAuthFileMountPath)
		err := v.ReadInConfig()
		if err != nil {
			return nil, errors.Errorf("failed to load auth.yaml. err: %+v", err)
		}
		err = v.Unmarshal(cred)
		if err != nil {
			logger.Errorw("failed to unmarshal auth.yaml", zap.Error(err))
			return nil, err
		}
		v.WatchConfig()
		v.OnConfigChange(func(e fsnotify.Event) {
			logger.Info("eventbus auth config file changed.")
			err = v.Unmarshal(cred)
			if err != nil {
				logger.Errorw("failed to unmarshal auth.yaml after reloading", zap.Error(err))
			}
		})
		auth = &eventbusdriver.Auth{
			Strategy:    *eventBusAuth,
			Crendential: cred, // NB: "Crendential" is the field's actual spelling in eventbusdriver
		}
	}
	var elector Elector
	switch eventBusType {
	case apicommon.EventBusNATS:
		elector = &natsEventBusElector{
			clusterName: clusterName,
			size:        clusterSize,
			url:         eventBusConfig.NATS.URL,
			auth:        auth,
		}
	default:
		return nil, errors.New("invalid eventbus type")
	}
	return elector, nil
}

type natsEventBusElector struct {
	clusterName string
	size        int
	url         string
	auth        *eventbusdriver.Auth
}

func (e *natsEventBusElector) RunOrDie(ctx context.Context, callbacks LeaderCallbacks) {
	log := logging.FromContext(ctx)
	ci := graft.ClusterInfo{Name: e.clusterName, Size: e.size}
	opts := &nats.DefaultOptions
	opts.Url = e.url
	if e.auth.Strategy == eventbusv1alpha1.AuthStrategyToken {
		opts.Token = e.auth.Crendential.Token
	}
	rpc, err := graft.NewNatsRpc(opts)
	if err != nil {
		log.Fatalw("failed to new Nats Rpc", zap.Error(err))
	}
	errChan := make(chan error)
	stateChangeChan := make(chan graft.StateChange)
	handler := graft.NewChanHandler(stateChangeChan, errChan)
	node, err := graft.New(ci, handler, rpc, "/tmp/graft.log")
	if err != nil {
		log.Fatalw("failed to new a node", zap.Error(err))
	}
	defer node.Close()

	cctx, cancel := context.WithCancel(ctx)
	if node.State() == graft.LEADER {
		log.Info("I'm the LEADER, starting ...")
		go callbacks.OnStartedLeading(cctx)
	} else {
		log.Info("Not the LEADER, stand by ...")
	}

	handleStateChange := func(sc graft.StateChange) {
		switch sc.To {
		case graft.LEADER:
			log.Info("I'm the LEADER, starting ...")
			go callbacks.OnStartedLeading(cctx)
		case graft.FOLLOWER, graft.CANDIDATE:
			log.Infof("Becoming a %v, stand by ...", sc.To)
			if sc.From == graft.LEADER {
				// Stepping down: stop the running service, then prepare a
				// fresh context for the next time this node becomes leader.
				cancel()
				callbacks.OnStoppedLeading()
				cctx, cancel = context.WithCancel(ctx)
			}
		case graft.CLOSED:
			if sc.From == graft.LEADER {
				cancel()
				callbacks.OnStoppedLeading()
			}
			log.Fatal("Leader elector connection was CLOSED")
		default:
			log.Fatalf("Unknown state: %s", sc.To)
		}
	}

	for {
		select {
		case <-ctx.Done():
			log.Info("exiting...")
			cancel()
			return
		case sc := <-stateChangeChan:
			handleStateChange(sc)
		case err := <-errChan:
			log.Errorw("Error happened", zap.Error(err))
		}
	}
}
```
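For orientation, here is a minimal sketch of how an adaptor could use this elector. The import path, `busConfig`, `run`, and the cluster name/size are illustrative assumptions, not code from this PR:

```go
package main

import (
	"context"

	"go.uber.org/zap"

	"github.com/argoproj/argo-events/common/leaderelection" // assumed import path
	"github.com/argoproj/argo-events/common/logging"
	eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1"
)

// startWithLeaderElection wraps a service entry point (run) so that only the
// elected leader among the replicas actually runs it.
func startWithLeaderElection(ctx context.Context, busConfig eventbusv1alpha1.BusConfig, run func(context.Context) error) error {
	logger := logging.FromContext(ctx)
	// The cluster name must be unique per EventSource/Sensor; the size should
	// match the number of replicas contending for leadership.
	elector, err := leaderelection.NewEventBusElector(ctx, busConfig, "my-eventsource", 3)
	if err != nil {
		return err
	}
	elector.RunOrDie(ctx, leaderelection.LeaderCallbacks{
		OnStartedLeading: func(cctx context.Context) {
			// cctx is cancelled when this replica loses leadership.
			if err := run(cctx); err != nil {
				logger.Fatalw("failed to start service", zap.Error(err))
			}
		},
		OnStoppedLeading: func() {
			logger.Info("stopped leading; the service was cancelled via its context")
		},
	})
	return nil
}
```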
@@ -20,8 +20,6 @@ import (

```go
	"github.com/argoproj/argo-events/common"
	controllerscommon "github.com/argoproj/argo-events/controllers/common"
	"github.com/argoproj/argo-events/eventsources"
	apicommon "github.com/argoproj/argo-events/pkg/apis/common"
	eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1"
	"github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1"
)
```
@@ -221,9 +219,14 @@ func buildDeployment(args *AdaptorArgs, eventBus *eventbusv1alpha1.EventBus) (*a | |
}, | ||
}, | ||
}) | ||
emptyDirVolName := "tmp-volume" | ||
volumes = append(volumes, corev1.Volume{ | ||
Name: emptyDirVolName, VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, | ||
}) | ||
deploymentSpec.Template.Spec.Volumes = volumes | ||
volumeMounts := deploymentSpec.Template.Spec.Containers[0].VolumeMounts | ||
volumeMounts = append(volumeMounts, corev1.VolumeMount{Name: "auth-volume", MountPath: common.EventBusAuthFileMountPath}) | ||
volumeMounts = append(volumeMounts, corev1.VolumeMount{Name: emptyDirVolName, MountPath: "/tmp"}) | ||
deploymentSpec.Template.Spec.Containers[0].VolumeMounts = volumeMounts | ||
} | ||
} else { | ||
|
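In isolation, the pattern being added looks like the following sketch. The helper name is hypothetical (the real change inlines this inside buildDeployment), and the rationale comment is an inference from the elector code above, which writes /tmp/graft.log:

```go
package main

import (
	corev1 "k8s.io/api/core/v1"
)

// addTmpVolume mirrors the change above: declare an emptyDir volume on the
// pod and mount it at /tmp in the first container.
func addTmpVolume(podSpec *corev1.PodSpec) {
	const volName = "tmp-volume"
	podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{
		Name:         volName,
		VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
	})
	c := &podSpec.Containers[0]
	c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{
		Name:      volName,
		MountPath: "/tmp", // graft writes /tmp/graft.log (see the elector above)
	})
}
```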
@@ -332,26 +335,6 @@ func buildDeploymentSpec(args *AdaptorArgs) (*appv1.DeploymentSpec, error) {

```go
		spec.Template.Spec.PriorityClassName = args.EventSource.Spec.Template.PriorityClassName
		spec.Template.Spec.Priority = args.EventSource.Spec.Template.Priority
	}
	allEventTypes := eventsources.GetEventingServers(args.EventSource, nil)
	recreateTypes := make(map[apicommon.EventSourceType]bool)
	for _, esType := range apicommon.RecreateStrategyEventSources {
		recreateTypes[esType] = true
	}
	recreates := 0
	for eventType := range allEventTypes {
		if _, ok := recreateTypes[eventType]; ok {
			recreates++
			break
		}
	}
	if recreates > 0 && replicas == 1 {
		// For those event types, if there's only 1 replica, use recreate strategy.
		// If replicas > 1, which means HA is available for them, rolling update strategy
		// is better.
		spec.Strategy = appv1.DeploymentStrategy{
			Type: appv1.RecreateDeploymentStrategyType,
		}
	}
	return spec, nil
}
```

Review comment (on the removed block): We don't need this any more; with leader election, all the event source deployments can run with the default rolling update strategy.
@@ -185,9 +185,14 @@ func buildDeployment(args *AdaptorArgs, eventBus *eventbusv1alpha1.EventBus) (*a

```go
				},
			},
		})
		emptyDirVolName := "tmp-volume"
		volumes = append(volumes, corev1.Volume{
			Name: emptyDirVolName, VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
		})
		deploymentSpec.Template.Spec.Volumes = volumes
		volumeMounts := deploymentSpec.Template.Spec.Containers[0].VolumeMounts
		volumeMounts = append(volumeMounts, corev1.VolumeMount{Name: "auth-volume", MountPath: common.EventBusAuthFileMountPath})
		volumeMounts = append(volumeMounts, corev1.VolumeMount{Name: emptyDirVolName, MountPath: "/tmp"})
		deploymentSpec.Template.Spec.Containers[0].VolumeMounts = volumeMounts
	}
} else {
```

Review comment (on emptyDirVolName): minor - maybe just call this …
Reply: done.
@@ -270,10 +275,6 @@ func buildDeploymentSpec(args *AdaptorArgs) (*appv1.DeploymentSpec, error) {

```go
			MatchLabels: args.Labels,
		},
		Replicas: &replicas,
		Strategy: appv1.DeploymentStrategy{
			// Event bus does not allow multiple clients with same clientID to connect to the server at the same time.
			Type: appv1.RecreateDeploymentStrategyType,
		},
		Template: corev1.PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{
				Labels: podTemplateLabels,
```

Review comment (on the removed Strategy): With leader election, sensor deployments can also run with the default rolling update strategy.
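To see why rolling updates are now safe, consider that during a rollout the old and new pods briefly run side by side, but graft guarantees at most one LEADER per election group. A standalone sketch of two contending nodes follows; it assumes a NATS server at nats://localhost:4222, and the names are illustrative:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/nats-io/graft"
	nats "github.com/nats-io/nats.go"
)

// startNode joins a two-member election group and prints state transitions.
func startNode(name string) *graft.Node {
	ci := graft.ClusterInfo{Name: "demo", Size: 2}
	opts := nats.GetDefaultOptions()
	opts.Url = "nats://localhost:4222" // assumed local NATS server
	rpc, err := graft.NewNatsRpc(&opts)
	if err != nil {
		log.Fatal(err)
	}
	errChan := make(chan error, 1)
	scChan := make(chan graft.StateChange, 1)
	node, err := graft.New(ci, graft.NewChanHandler(scChan, errChan), rpc, "/tmp/graft-"+name+".log")
	if err != nil {
		log.Fatal(err)
	}
	go func() {
		for sc := range scChan {
			fmt.Printf("%s: %v -> %v\n", name, sc.From, sc.To)
		}
	}()
	return node
}

func main() {
	// Simulate a rolling update: both pods exist at once, but only one of
	// them can ever be LEADER, so the service never runs twice.
	oldPod := startNode("pod-old")
	newPod := startNode("pod-new")
	defer oldPod.Close()
	defer newPod.Close()
	time.Sleep(10 * time.Second)
}
```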
Review comment: normally defer a cancel?
Reply: When the current node changes from leader to non-leader (line 128-133), cancel() needs to be called to terminate the running service, and a new cctx/cancel pair has to be re-initiated. Not quite sure if defer cancel() still works in that case; let me do more testing.
Reply: Using defer works, updated.
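The subtlety behind this exchange, in a standalone sketch (not the PR's final code): defer evaluates the deferred function value immediately, so a bare `defer cancel()` would always invoke the first cancel func even after the variable is reassigned; wrapping the call in a closure reads the variable at return time instead.

```go
package main

import (
	"context"
	"fmt"
)

func main() {
	ctx := context.Background()

	cctx, cancel := context.WithCancel(ctx)
	// The closure reads `cancel` when it runs (at function return), so the
	// latest cancel func is invoked even after the reassignment below.
	// A bare `defer cancel()` would capture the first cancel func here and
	// always cancel the first context only.
	defer func() { cancel() }()

	// Simulate stepping down from leadership: stop the old run, start fresh.
	cancel()
	cctx, cancel = context.WithCancel(ctx)

	fmt.Println("old run cancelled, new context live:", cctx.Err() == nil) // true
}
```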