Add CEL validation test for targetRef in ClientSettingsPolicy #3623
tests/cel/clientsettingspolicy_test.go
@@ -0,0 +1,226 @@
package cel

import (
    "context"
    "fmt"
    "strings"
    "testing"
    "time"

    "k8s.io/apimachinery/pkg/runtime"
    controllerruntime "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    gatewayv1alpha2 "sigs.k8s.io/gateway-api/apis/v1alpha2"

    ngfAPIv1alpha1 "github.com/nginx/nginx-gateway-fabric/apis/v1alpha1"
)

const (
    GatewayKind   = "Gateway"
    HTTPRouteKind = "HTTPRoute"
    GRPCRouteKind = "GRPCRoute"
    TCPRouteKind  = "TCPRoute"
    InvalidKind   = "InvalidKind"
)

const (
    GatewayGroup   = "gateway.networking.k8s.io"
    InvalidGroup   = "invalid.networking.k8s.io"
    DiscoveryGroup = "discovery.k8s.io/v1"
)

const (
    ExpectedTargetRefKindError  = `TargetRef Kind must be one of: Gateway, HTTPRoute, or GRPCRoute`
    ExpectedTargetRefGroupError = `TargetRef Group must be gateway.networking.k8s.io.`
)

const (
    PolicyName      = "test-policy"
    TargetRefFormat = "targetRef-name-%d"
)

func TestClientSettingsPoliciesTargetRefKind(t *testing.T) {
    tests := []struct {
        policySpec ngfAPIv1alpha1.ClientSettingsPolicySpec
        name       string
        wantErrors []string
    }{
        {
            name: "Validate TargetRef of kind Gateway is allowed",
            policySpec: ngfAPIv1alpha1.ClientSettingsPolicySpec{
                TargetRef: gatewayv1alpha2.LocalPolicyTargetReference{
                    Kind:  GatewayKind,
                    Group: GatewayGroup,
                },
            },
        },
        {
            name: "Validate TargetRef of kind HTTPRoute is allowed",
            policySpec: ngfAPIv1alpha1.ClientSettingsPolicySpec{
                TargetRef: gatewayv1alpha2.LocalPolicyTargetReference{
                    Kind:  HTTPRouteKind,
                    Group: GatewayGroup,
                },
            },
        },
        {
            name: "Validate TargetRef of kind GRPCRoute is allowed",
            policySpec: ngfAPIv1alpha1.ClientSettingsPolicySpec{
                TargetRef: gatewayv1alpha2.LocalPolicyTargetReference{
                    Kind:  GRPCRouteKind,
                    Group: GatewayGroup,
                },
            },
        },
        {
            name:       "Validate Invalid TargetRef Kind is not allowed",
            wantErrors: []string{ExpectedTargetRefKindError},
            policySpec: ngfAPIv1alpha1.ClientSettingsPolicySpec{
                TargetRef: gatewayv1alpha2.LocalPolicyTargetReference{
                    Kind:  InvalidKind,
                    Group: GatewayGroup,
                },
            },
        },
        {
            name:       "Validate TCPRoute TargetRef Kind is not allowed",
            wantErrors: []string{ExpectedTargetRefKindError},
            policySpec: ngfAPIv1alpha1.ClientSettingsPolicySpec{
                TargetRef: gatewayv1alpha2.LocalPolicyTargetReference{
                    Kind:  TCPRouteKind,
                    Group: GatewayGroup,
                },
            },
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            validateClientSettingsPolicy(t, tt)
        })
    }
}

func TestClientSettingsPoliciesTargetRefGroup(t *testing.T) {
    tests := []struct {
        policySpec ngfAPIv1alpha1.ClientSettingsPolicySpec
        name       string
        wantErrors []string
    }{
        {
            name: "Validate gateway.networking.k8s.io TargetRef Group is allowed",
            policySpec: ngfAPIv1alpha1.ClientSettingsPolicySpec{
                TargetRef: gatewayv1alpha2.LocalPolicyTargetReference{
                    Kind:  GatewayKind,
                    Group: GatewayGroup,
                },
            },
        },
        {
            name:       "Validate invalid.networking.k8s.io TargetRef Group is not allowed",
            wantErrors: []string{ExpectedTargetRefGroupError},
            policySpec: ngfAPIv1alpha1.ClientSettingsPolicySpec{
                TargetRef: gatewayv1alpha2.LocalPolicyTargetReference{
                    Kind:  GatewayKind,
                    Group: InvalidGroup,
                },
            },
        },
        {
            name:       "Validate discovery.k8s.io/v1 TargetRef Group is not allowed",
            wantErrors: []string{ExpectedTargetRefGroupError},
            policySpec: ngfAPIv1alpha1.ClientSettingsPolicySpec{
                TargetRef: gatewayv1alpha2.LocalPolicyTargetReference{
                    Kind:  GatewayKind,
                    Group: DiscoveryGroup,
                },
            },
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            validateClientSettingsPolicy(t, tt)
Review thread on validateClientSettingsPolicy(t, tt):

- We can also add t.Parallel() to each loop iteration.
- Interesting, I've never seen that before.
- @sjberman I made this change so that each policy created has a unique name during the parallel test runs. Even with this, the test runs are inconsistent. It looks like some of them are being created in parallel so fast that they still get the same names. I noticed it when I tried making new tests for the ClientKeepAliveTimeout validation:

      === NAME  TestClientSettingsPoliciesKeepAliveTimeout/Validate_KeepAliveTimeout_is_not_set
          clientsettingspolicy_test.go:205:
              Unexpected error:
                  <*errors.StatusError | 0x14000516a00>:
                  clientsettingspolicies.gateway.nginx.org "test-policy-1753786044059919000" already exists
                  {
                      ErrStatus: {
                          TypeMeta: {Kind: "", APIVersion: ""},
                          ListMeta: {
                              SelfLink: "",
                              ResourceVersion: "",
                              Continue: "",
                              RemainingItemCount: nil,
                          },
                          Status: "Failure",
                          Message: "clientsettingspolicies.gateway.nginx.org \"test-policy-1753786044059919000\" already exists",
                          Reason: "AlreadyExists",
                          Details: {
                              Name: "test-policy-1753786044059919000",
                              Group: "gateway.nginx.org",
                              Kind: "clientsettingspolicies",
                              UID: "",
                              Causes: nil,
                              RetryAfterSeconds: 0,
                          },
                          Code: 409,
                      },
                  }
              occurred
      --- FAIL: TestClientSettingsPoliciesKeepAliveTimeout (0.00s)

  The results are more consistent if I remove t.Parallel().
- Hm, okay. Maybe because this is running on a live system. That's fine then.
- Hmm, this is interesting:

      golangci-lint-tests......................................................Failed
      - hook id: golangci-lint-full
      - exit code: 1

      tests/cel/clientsettingspolicy_test.go:42:6: TestClientSettingsPoliciesTargetRefKind's subtests should call t.Parallel (tparallel)
      func TestClientSettingsPoliciesTargetRefKind(t *testing.T) {
           ^
      tests/cel/clientsettingspolicy_test.go:105:6: TestClientSettingsPoliciesTargetRefGroup's subtests should call t.Parallel (tparallel)
      func TestClientSettingsPoliciesTargetRefGroup(t *testing.T) {
           ^
      2 issues:
      * tparallel: 2
- Oh fun, well then let's see about making this work. Maybe instead of using the unix time in the name we just use a random uuid.
- @sjberman I swapped out the use of …
- Would an index id or counter variable appended to the name work?
        })
    }
}

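A minimal sketch of what the thread above converges on: calling t.Parallel() in each subtest (which also satisfies the tparallel linter) and deriving a collision-free object name from a random UUID rather than time.Now().UnixNano(). The uuid package choice and exact wiring are assumptions, not code from this diff:

// Hypothetical variant of the subtest loop and policy naming; illustration only.
for _, tt := range tests {
    t.Run(tt.name, func(t *testing.T) {
        t.Parallel() // run the table cases concurrently
        validateClientSettingsPolicy(t, tt)
    })
}

// Inside validateClientSettingsPolicy, a random UUID (k8s.io/apimachinery/pkg/util/uuid)
// would avoid the AlreadyExists collisions seen with timestamp-based names:
clientSettingsPolicy.ObjectMeta.Name = fmt.Sprintf("test-policy-%s", uuid.NewUUID())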
Review comment on validateClientSettingsPolicy: "Something to just keep in mind for when we write more CEL tests for other policies, is that a lot of this function (and …". A hedged sketch of a shared helper along those lines appears after the function below.

func validateClientSettingsPolicy(t *testing.T, tt struct {
    policySpec ngfAPIv1alpha1.ClientSettingsPolicySpec
    name       string
    wantErrors []string
},
) {
    t.Helper()

    // Get Kubernetes client from test framework
    // This should be set up by your test framework to connect to a real cluster
    k8sClient := getKubernetesClient(t)

    policySpec := tt.policySpec
    policySpec.TargetRef.Name = gatewayv1alpha2.ObjectName(fmt.Sprintf(TargetRefFormat, time.Now().UnixNano()))

    clientSettingsPolicy := &ngfAPIv1alpha1.ClientSettingsPolicy{
        ObjectMeta: controllerruntime.ObjectMeta{
            Name:      PolicyName,
            Namespace: "default",
        },
        Spec: policySpec,
    }

    err := k8sClient.Create(context.Background(), clientSettingsPolicy)
Review thread on this Create call:

- Since the k8sClient is using the actual Kubernetes API server, I wonder if we need to add a timeout. It probably is fine without it, but just curious.
- Yea, when creating we typically do have a context to make sure it doesn't block there. Like these constants: https://github.com/nginx/nginx-gateway-fabric/blob/main/tests/framework/timeout.go
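A hedged sketch of the timeout idea raised above, bounding the API call with a context deadline; the 30-second value is an arbitrary placeholder, not one of the constants from tests/framework/timeout.go:

// Hypothetical: bound the Create call instead of using context.Background() directly.
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()

err := k8sClient.Create(ctx, clientSettingsPolicy)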

    // Clean up after test
    defer func() {
        _ = k8sClient.Delete(context.Background(), clientSettingsPolicy)
    }()

    // Check if we expected errors
    if len(tt.wantErrors) == 0 {
        if err != nil {
            t.Errorf("expected no error but got: %v", err)
        }

        return
    }

    // We expected errors - validation should have failed
    if err == nil {
        t.Errorf("expected validation error but policy was accepted")
        return
    }

    // Check that we got the expected error messages
    var missingErrors []string
    for _, wantError := range tt.wantErrors {
        if !strings.Contains(err.Error(), wantError) {
            missingErrors = append(missingErrors, wantError)
        }
    }
    if len(missingErrors) != 0 {
        t.Errorf("missing expected errors: %v, got: %v", missingErrors, err)
    }
}
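Following the review note on validateClientSettingsPolicy above, a hedged sketch of how the create/cleanup/error-matching steps could be shared across future policy CEL tests; the helper name and the client.Object-based signature are assumptions, not code from this PR:

// Hypothetical shared helper: each policy's test table supplies a typed object,
// while creation, cleanup, and error matching stay in one place.
func validatePolicy(t *testing.T, policy client.Object, wantErrors []string) {
    t.Helper()

    k8sClient := getKubernetesClient(t)

    err := k8sClient.Create(context.Background(), policy)
    defer func() {
        _ = k8sClient.Delete(context.Background(), policy)
    }()

    if len(wantErrors) == 0 {
        if err != nil {
            t.Errorf("expected no error but got: %v", err)
        }
        return
    }

    if err == nil {
        t.Errorf("expected validation error but policy was accepted")
        return
    }

    for _, wantError := range wantErrors {
        if !strings.Contains(err.Error(), wantError) {
            t.Errorf("missing expected error %q in: %v", wantError, err)
        }
    }
}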

// getKubernetesClient returns a client connected to a real Kubernetes cluster.
func getKubernetesClient(t *testing.T) client.Client {
    t.Helper()
    // Use controller-runtime to get cluster connection
    k8sConfig, err := controllerruntime.GetConfig()
    if err != nil {
        t.Skipf("Cannot connect to Kubernetes cluster: %v", err)
    }

    // Set up scheme with NGF types
    scheme := runtime.NewScheme()
    if err := ngfAPIv1alpha1.AddToScheme(scheme); err != nil {
        t.Fatalf("Failed to add NGF schema: %v", err)
    }

    // Create client
    k8sClient, err := client.New(k8sConfig, client.Options{Scheme: scheme})
    if err != nil {
        t.Skipf("Cannot create k8s client: %v", err)
    }

    return k8sClient
}