Manual Backport of Fix issue where terminating gateway service resolvers weren't properly cleaned up into release/1.14.x (#16558)

* Fix issue where terminating gateway service resolvers weren't properly cleaned up

* Add integration test for cleaning up resolvers

* Add changelog entry

* Use state test and drop integration test
Andrew Stucki authored Mar 7, 2023
1 parent 38701d9 commit 89cee20
Showing 4 changed files with 53 additions and 1 deletion.
3 changes: 3 additions & 0 deletions .changelog/16498.txt
@@ -0,0 +1,3 @@
```release-note:bug
proxycfg: fix a bug where terminating gateways were not cleaning up deleted service resolvers for their referenced services
```
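
The fix the changelog describes amounts to treating an empty watch result as a deletion and clearing the gateway's cached resolver state. Below is a minimal, self-contained Go sketch of that bookkeeping using stand-in types; only the field names ServiceResolvers and ServiceResolversSet mirror the snapshot fields touched in the terminating_gateway.go hunk further down, everything else is illustrative.

```go
package main

import "fmt"

// Stand-in for a service-resolver config entry; the real type is
// structs.ServiceResolverConfigEntry in Consul's agent/structs package.
type resolverEntry struct{ Name string }

// Stand-in for the terminating-gateway portion of a proxycfg snapshot.
type gatewaySnapshot struct {
	ServiceResolvers    map[string]*resolverEntry // latest resolver per linked service
	ServiceResolversSet map[string]bool           // whether a watch result has been seen
}

// handleResolverUpdate models the fixed behavior: a nil entry from the watch
// means the resolver was deleted, so stale state is removed instead of lingering.
func (s *gatewaySnapshot) handleResolverUpdate(service string, entry *resolverEntry) {
	if entry != nil {
		s.ServiceResolvers[service] = entry
		s.ServiceResolversSet[service] = true
	} else {
		delete(s.ServiceResolvers, service)
		s.ServiceResolversSet[service] = false
	}
}

func main() {
	snap := &gatewaySnapshot{
		ServiceResolvers:    map[string]*resolverEntry{},
		ServiceResolversSet: map[string]bool{},
	}
	snap.handleResolverUpdate("db", &resolverEntry{Name: "db"}) // resolver written
	snap.handleResolverUpdate("db", nil)                        // resolver deleted
	fmt.Println(len(snap.ServiceResolvers), snap.ServiceResolversSet["db"]) // 0 false
}
```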
22 changes: 22 additions & 0 deletions agent/proxycfg/state_test.go
@@ -2093,6 +2093,28 @@ func TestState_WatchesAndUpdates(t *testing.T) {
 				require.Equal(t, dbResolver.Entry, snap.TerminatingGateway.ServiceResolvers[db])
 			},
 		},
+		{
+			requiredWatches: map[string]verifyWatchRequest{
+				"service-resolver:" + db.String(): genVerifyResolverWatch("db", "dc1", structs.ServiceResolver),
+			},
+			events: []UpdateEvent{
+				{
+					CorrelationID: "service-resolver:" + db.String(),
+					Result: &structs.ConfigEntryResponse{
+						Entry: nil,
+					},
+					Err: nil,
+				},
+			},
+			verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) {
+				require.True(t, snap.Valid(), "gateway with service list is valid")
+				// Finally ensure we cleaned up the resolver
+				require.Equal(t, []structs.ServiceName{db}, snap.TerminatingGateway.ValidServices())
+
+				require.False(t, snap.TerminatingGateway.ServiceResolversSet[db])
+				require.Nil(t, snap.TerminatingGateway.ServiceResolvers[db])
+			},
+		},
 		{
 			events: []UpdateEvent{
 				{
7 changes: 6 additions & 1 deletion agent/proxycfg/terminating_gateway.go
@@ -354,8 +354,13 @@ func (s *handlerTerminatingGateway) handleUpdate(ctx context.Context, u UpdateEv
 		// There should only ever be one entry for a service resolver within a namespace
 		if resolver, ok := resp.Entry.(*structs.ServiceResolverConfigEntry); ok {
 			snap.TerminatingGateway.ServiceResolvers[sn] = resolver
+			snap.TerminatingGateway.ServiceResolversSet[sn] = true
+		} else {
+			// we likely have a deleted service resolver, and our cast is a nil
+			// cast, so clear this out
+			delete(snap.TerminatingGateway.ServiceResolvers, sn)
+			snap.TerminatingGateway.ServiceResolversSet[sn] = false
 		}
-		snap.TerminatingGateway.ServiceResolversSet[sn] = true
 
 	case strings.HasPrefix(u.CorrelationID, serviceIntentionsIDPrefix):
 		resp, ok := u.Result.(structs.Intentions)
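
The new else branch relies on how Go type assertions behave once the config entry has been deleted: the watch still fires, but resp.Entry is nil, so asserting it to *structs.ServiceResolverConfigEntry yields ok == false (the "nil cast" the comment refers to), and the handler falls through to the cleanup path. A small self-contained illustration of that mechanic, with a stand-in interface rather than Consul's real types:

```go
package main

import "fmt"

// configEntry stands in for the interface held by structs.ConfigEntryResponse.Entry.
type configEntry interface{ GetName() string }

type serviceResolverConfigEntry struct{ name string }

func (s *serviceResolverConfigEntry) GetName() string { return s.name }

func main() {
	// After the resolver is deleted, the watch delivers a response whose Entry is nil,
	// just like the `Entry: nil` event in the new state_test.go case above.
	var entry configEntry

	// The assertion fails on a nil interface value, so ok is false and the handler
	// must take the cleanup branch instead of writing a resolver into the snapshot.
	resolver, ok := entry.(*serviceResolverConfigEntry)
	fmt.Printf("resolver=%v ok=%v\n", resolver, ok) // resolver=<nil> ok=false
}
```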
22 changes: 22 additions & 0 deletions test/integration/connect/envoy/helpers.bash
@@ -359,15 +359,37 @@ function assert_upstream_has_endpoints_in_status_once {

  GOT_COUNT=$(get_upstream_endpoint_in_status_count $HOSTPORT $CLUSTER_NAME $HEALTH_STATUS)

  echo "GOT: $GOT_COUNT"
  [ "$GOT_COUNT" -eq $EXPECT_COUNT ]
}

function assert_upstream_missing_once {
  local HOSTPORT=$1
  local CLUSTER_NAME=$2

  run get_upstream_endpoint $HOSTPORT $CLUSTER_NAME
  [ "$status" -eq 0 ]
  echo "$output"
  [ "" == "$output" ]
}

function assert_upstream_missing {
  local HOSTPORT=$1
  local CLUSTER_NAME=$2
  run retry_long assert_upstream_missing_once $HOSTPORT $CLUSTER_NAME
  echo "OUTPUT: $output $status"

  [ "$status" -eq 0 ]
}

function assert_upstream_has_endpoints_in_status {
  local HOSTPORT=$1
  local CLUSTER_NAME=$2
  local HEALTH_STATUS=$3
  local EXPECT_COUNT=$4
  run retry_long assert_upstream_has_endpoints_in_status_once $HOSTPORT $CLUSTER_NAME $HEALTH_STATUS $EXPECT_COUNT
  echo "$output"

  [ "$status" -eq 0 ]
}
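
The Envoy integration test itself was dropped in favor of the state test, but these helpers remain in helpers.bash. A hypothetical call site in a suite's verify.bats file might look like the sketch below; the admin host:port and cluster name are illustrative placeholders, not values from this commit:

```bash
@test "terminating gateway removes the db cluster after its resolver is deleted" {
  # HOSTPORT is the gateway's Envoy admin endpoint; the cluster name is whatever
  # subset cluster the deleted resolver used to generate (both are examples here).
  assert_upstream_missing localhost:20000 v1.db.default.primary
}
```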

