From 15e87cb49519afc58c05068c25c39acb0825ce07 Mon Sep 17 00:00:00 2001
From: Stefan Bueringer
Date: Mon, 16 Sep 2024 08:29:40 +0200
Subject: [PATCH 1/6] Verify PR titles with shell script
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stefan Büringer buringerst@vmware.com
---
 .github/workflows/verify.yml | 20 ++++++-------
 hack/verify-pr-title.sh      | 54 ++++++++++++++++++++++++++++++++++++
 2 files changed, 64 insertions(+), 10 deletions(-)
 create mode 100755 hack/verify-pr-title.sh

diff --git a/.github/workflows/verify.yml b/.github/workflows/verify.yml
index e24f962101..a5a3e85c7b 100644
--- a/.github/workflows/verify.yml
+++ b/.github/workflows/verify.yml
@@ -1,17 +1,17 @@
+name: PR title verifier
+
 on:
   pull_request_target:
-    types: [opened, edited, reopened, synchronize]
-
-permissions:
-  checks: write # Allow access to checks to write check runs.
+    types: [opened, edited, synchronize, reopened]
 
 jobs:
   verify:
     runs-on: ubuntu-latest
-    name: verify PR contents
+
     steps:
-      - name: Verifier action
-        id: verifier
-        uses: kubernetes-sigs/kubebuilder-release-tools@012269a88fa4c034a0acf1ba84c26b195c0dbab4 # tag=v0.4.3
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
+      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # tag=v4.1.7
+
+      - name: Check if PR title is valid
+        run: |
+          ./hack/verify-pr-title.sh "${{ github.event.pull_request.title }}"
+
diff --git a/hack/verify-pr-title.sh b/hack/verify-pr-title.sh
new file mode 100755
index 0000000000..a556b0172b
--- /dev/null
+++ b/hack/verify-pr-title.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+# Copyright 2024 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define regex patterns
+WIP_REGEX="^\W?WIP\W"
+TAG_REGEX="^\[[[:alnum:]\._-]*\]"
+PR_TITLE="$1"
+
+# Trim WIP and tags from title
+trimmed_title=$(echo "$PR_TITLE" | sed -E "s/$WIP_REGEX//" | sed -E "s/$TAG_REGEX//" | xargs)
+
+# Normalize common emojis in text form to actual emojis
+trimmed_title=$(echo "$trimmed_title" | sed -E "s/:warning:/⚠/g")
+trimmed_title=$(echo "$trimmed_title" | sed -E "s/:sparkles:/✨/g")
+trimmed_title=$(echo "$trimmed_title" | sed -E "s/:bug:/🐛/g")
+trimmed_title=$(echo "$trimmed_title" | sed -E "s/:book:/📖/g")
+trimmed_title=$(echo "$trimmed_title" | sed -E "s/:rocket:/🚀/g")
+trimmed_title=$(echo "$trimmed_title" | sed -E "s/:seedling:/🌱/g")
+
+# Check PR type prefix
+if [[ "$trimmed_title" =~ ^(⚠|✨|🐛|📖|🚀|🌱) ]]; then
+  echo "PR title is valid: $trimmed_title"
+else
+  echo "Error: No matching PR type indicator found in title."
+ echo "You need to have one of these as the prefix of your PR title:" + echo "- Breaking change: ⚠ (:warning:)" + echo "- Non-breaking feature: ✨ (:sparkles:)" + echo "- Patch fix: πŸ› (:bug:)" + echo "- Docs: πŸ“– (:book:)" + echo "- Release: πŸš€ (:rocket:)" + echo "- Infra/Tests/Other: 🌱 (:seedling:)" + exit 1 +fi + +# Check that PR title does not contain Issue or PR number +if [[ "$trimmed_title" =~ \#[0-9]+ ]]; then + echo "Error: PR title should not contain issue or PR number." + echo "Issue numbers belong in the PR body as either \"Fixes #XYZ\" (if it closes the issue or PR), or something like \"Related to #XYZ\" (if it's just related)." + exit 1 +fi + From f0e55afc5bdb093c80e052f3af10db748f462275 Mon Sep 17 00:00:00 2001 From: Kevin McDermott Date: Sun, 15 Sep 2024 20:13:03 +0100 Subject: [PATCH 2/6] Preserve TypeMeta for PartialObjectMeta resources This updates the fake client to retain the PartialObjectMeta TypeMeta when getting resources. --- pkg/client/fake/client.go | 5 ++++- pkg/client/fake/client_test.go | 27 +++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/pkg/client/fake/client.go b/pkg/client/fake/client.go index 7366a18528..10be14a9df 100644 --- a/pkg/client/fake/client.go +++ b/pkg/client/fake/client.go @@ -508,7 +508,10 @@ func (c *fakeClient) Get(ctx context.Context, key client.ObjectKey, obj client.O return err } - if _, isUnstructured := obj.(runtime.Unstructured); isUnstructured { + _, isUnstructured := obj.(runtime.Unstructured) + _, isPartialObject := obj.(*metav1.PartialObjectMetadata) + + if isUnstructured || isPartialObject { gvk, err := apiutil.GVKForObject(obj, c.scheme) if err != nil { return err diff --git a/pkg/client/fake/client_test.go b/pkg/client/fake/client_test.go index e86a64eefc..ae537d5b43 100644 --- a/pkg/client/fake/client_test.go +++ b/pkg/client/fake/client_test.go @@ -339,6 +339,33 @@ var _ = Describe("Fake client", func() { Expect(apierrors.IsNotFound(err)).To(BeTrue()) }) + It("should be able to retrieve objects by PartialObjectMetadata", func() { + By("Creating a Resource") + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "bar", + }, + } + err := cl.Create(context.Background(), secret) + Expect(err).ToNot(HaveOccurred()) + + By("Fetching the resource using a PartialObjectMeta") + partialObjMeta := &metav1.PartialObjectMetadata{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "bar", + }, + } + partialObjMeta.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) + + err = cl.Get(context.Background(), client.ObjectKeyFromObject(partialObjMeta), partialObjMeta) + Expect(err).ToNot(HaveOccurred()) + + Expect(partialObjMeta.Kind).To(Equal("Secret")) + Expect(partialObjMeta.APIVersion).To(Equal("v1")) + }) + It("should support filtering by labels and their values", func() { By("Listing deployments with a particular label and value") list := &appsv1.DeploymentList{} From b40036699edf7c81ad45f2af28be6ac3ebab9528 Mon Sep 17 00:00:00 2001 From: Christian Schlotter Date: Fri, 27 Sep 2024 11:07:59 +0200 Subject: [PATCH 3/6] pr-verify: use env var for passing the PR title Co-Authored-By: Aviv Keller --- .github/workflows/verify.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/verify.yml b/.github/workflows/verify.yml index a5a3e85c7b..dfe953846f 100644 --- a/.github/workflows/verify.yml +++ b/.github/workflows/verify.yml @@ -12,6 +12,7 @@ jobs: - uses: 
       - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # tag=v4.1.7
 
       - name: Check if PR title is valid
-        run: |
-          ./hack/verify-pr-title.sh "${{ github.event.pull_request.title }}"
-
+        env:
+          PR_TITLE: ${{ github.event.pull_request.title }}
+        run: |
+          ./hack/verify-pr-title.sh "${{ github.event.pull_request.title }}"

From 465b62a5b08168851bff62a23f8e634f8e52e1bc Mon Sep 17 00:00:00 2001
From: Christian Schlotter
Date: Fri, 27 Sep 2024 12:05:51 +0200
Subject: [PATCH 4/6] pr-verify: use env var for passing the PR title

Co-Authored-By: Aviv Keller
---
 .github/workflows/verify.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/verify.yml b/.github/workflows/verify.yml
index dfe953846f..a66ba0c43f 100644
--- a/.github/workflows/verify.yml
+++ b/.github/workflows/verify.yml
@@ -15,4 +15,4 @@ jobs:
         env:
           PR_TITLE: ${{ github.event.pull_request.title }}
         run: |
-          ./hack/verify-pr-title.sh "${{ github.event.pull_request.title }}"
+          ./hack/verify-pr-title.sh "${PR_TITLE}"

From f883b25b1b48aebcd4a1dc2aa42090e47d32b51d Mon Sep 17 00:00:00 2001
From: Stefan Bueringer
Date: Fri, 11 Oct 2024 15:55:11 +0200
Subject: [PATCH 5/6] Fix PR verify action
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stefan Büringer buringerst@vmware.com
---
 .github/workflows/verify.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/verify.yml b/.github/workflows/verify.yml
index a66ba0c43f..303c28b9d4 100644
--- a/.github/workflows/verify.yml
+++ b/.github/workflows/verify.yml
@@ -9,9 +9,9 @@ jobs:
     runs-on: ubuntu-latest
 
     steps:
-      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # tag=v4.1.7
+      - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # tag=v4.2.1
 
-      - name: Check if PR title is valid 
+      - name: Check if PR title is valid
         env:
           PR_TITLE: ${{ github.event.pull_request.title }}
         run: |

From 44214256ba36506ed2a0cb0910c687bc172cf78a Mon Sep 17 00:00:00 2001
From: Alvaro Aleman
Date: Sat, 12 Oct 2024 13:42:01 -0400
Subject: [PATCH 6/6] bug: Fakeclient: Fix TOCTOU races

The fake client currently has a number of time-of-check-time-of-use
(TOCTOU) races, where it fetches an object to determine what to do in a
mutating operation. The problem is that the object might change between
fetching it and performing the mutating operation. Most notably, this
happens when:

* Patching is done in parallel.
  Only one of the patches will succeed; the other ones will fail with a
  conflict.
* Updates of objects that allow unconditional updates: All updates will
  succeed, but not all of them will increment the resource version
  (i.e. dirty writes for the RV).
* An update for an object that allows createOnUpdate races with a
  create or delete.
* A DeleteAllOf call races with Delete calls.
* A scale update races with a normal update.

This change:
* Adds tests for all of these cases
* Fixes them by adding a lock around the write operations, including
  their read part, if any
---
 pkg/client/fake/client.go      |  38 ++++-
 pkg/client/fake/client_test.go | 282 ++++++++++++++++++++++++++++++++-
 2 files changed, 309 insertions(+), 11 deletions(-)

diff --git a/pkg/client/fake/client.go b/pkg/client/fake/client.go
index 10be14a9df..54f5b5d258 100644
--- a/pkg/client/fake/client.go
+++ b/pkg/client/fake/client.go
@@ -68,16 +68,21 @@ type versionedTracker struct {
 }
 
 type fakeClient struct {
-	tracker               versionedTracker
-	scheme                *runtime.Scheme
+	// trackerWriteLock must be acquired before writing to
+	// the tracker or performing reads that affect a following
+	// write.
+	trackerWriteLock sync.Mutex
+	tracker          versionedTracker
+
+	schemeWriteLock sync.Mutex
+	scheme          *runtime.Scheme
+
 	restMapper            meta.RESTMapper
 	withStatusSubresource sets.Set[schema.GroupVersionKind]
 
 	// indexes maps each GroupVersionKind (GVK) to the indexes registered for that GVK.
 	// The inner map maps from index name to IndexerFunc.
 	indexes map[schema.GroupVersionKind]map[string]client.IndexerFunc
-
-	schemeWriteLock sync.Mutex
 }
 
 var _ client.WithWatch = &fakeClient{}
@@ -467,6 +472,11 @@ func (t versionedTracker) updateObject(gvr schema.GroupVersionResource, obj runt
 	switch {
 	case allowsUnconditionalUpdate(gvk):
 		accessor.SetResourceVersion(oldAccessor.GetResourceVersion())
+		// This is needed because if the patch explicitly sets the RV to null, the client-go reaction we use
+		// to apply it and whose output we process here will have it unset. It is not clear why the Kubernetes
+		// apiserver accepts such a patch, but it does, so we just copy that behavior.
+		// Kubernetes apiserver behavior can be checked like this:
+		// `kubectl patch configmap foo --patch '{"metadata":{"annotations":{"foo":"bar"},"resourceVersion":null}}' -v=9`
 	case bytes.Contains(debug.Stack(), []byte("sigs.k8s.io/controller-runtime/pkg/client/fake.(*fakeClient).Patch")):
 		// We apply patches using a client-go reaction that ends up calling the trackers Update. As we can't change
@@ -732,6 +742,8 @@ func (c *fakeClient) Create(ctx context.Context, obj client.Object, opts ...clie
 		accessor.SetDeletionTimestamp(nil)
 	}
 
+	c.trackerWriteLock.Lock()
+	defer c.trackerWriteLock.Unlock()
 	return c.tracker.Create(gvr, obj, accessor.GetNamespace())
 }
 
@@ -753,6 +765,8 @@ func (c *fakeClient) Delete(ctx context.Context, obj client.Object, opts ...clie
 		}
 	}
 
+	c.trackerWriteLock.Lock()
+	defer c.trackerWriteLock.Unlock()
 	// Check the ResourceVersion if that Precondition was specified.
 	if delOptions.Preconditions != nil && delOptions.Preconditions.ResourceVersion != nil {
 		name := accessor.GetName()
@@ -775,7 +789,7 @@
 		}
 	}
 
-	return c.deleteObject(gvr, accessor)
+	return c.deleteObjectLocked(gvr, accessor)
 }
 
 func (c *fakeClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error {
@@ -793,6 +807,9 @@ func (c *fakeClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ..
 		}
 	}
 
+	c.trackerWriteLock.Lock()
+	defer c.trackerWriteLock.Unlock()
+
 	gvr, _ := meta.UnsafeGuessKindToResource(gvk)
 	o, err := c.tracker.List(gvr, gvk, dcOptions.Namespace)
 	if err != nil {
@@ -812,7 +829,7 @@
 			if err != nil {
 				return err
 			}
-			err = c.deleteObject(gvr, accessor)
+			err = c.deleteObjectLocked(gvr, accessor)
 			if err != nil {
 				return err
 			}
@@ -842,6 +859,9 @@ func (c *fakeClient) update(obj client.Object, isStatus bool, opts ...client.Upd
 	if err != nil {
 		return err
 	}
+
+	c.trackerWriteLock.Lock()
+	defer c.trackerWriteLock.Unlock()
 	return c.tracker.update(gvr, obj, accessor.GetNamespace(), isStatus, false, *updateOptions.AsUpdateOptions())
 }
 
@@ -877,6 +897,8 @@
 		return err
 	}
 
+	c.trackerWriteLock.Lock()
+	defer c.trackerWriteLock.Unlock()
 	oldObj, err := c.tracker.Get(gvr, accessor.GetNamespace(), accessor.GetName())
 	if err != nil {
 		return err
 	}
@@ -1085,7 +1107,7 @@ func (c *fakeClient) SubResource(subResource string) client.SubResourceClient {
 	return &fakeSubResourceClient{client: c, subResource: subResource}
 }
 
-func (c *fakeClient) deleteObject(gvr schema.GroupVersionResource, accessor metav1.Object) error {
+func (c *fakeClient) deleteObjectLocked(gvr schema.GroupVersionResource, accessor metav1.Object) error {
 	old, err := c.tracker.Get(gvr, accessor.GetNamespace(), accessor.GetName())
 	if err == nil {
 		oldAccessor, err := meta.Accessor(old)
@@ -1167,7 +1189,7 @@
 
 	switch sw.subResource {
 	case subResourceScale:
-		if err := sw.client.Get(ctx, client.ObjectKeyFromObject(obj), obj); err != nil {
+		if err := sw.client.Get(ctx, client.ObjectKeyFromObject(obj), obj.DeepCopyObject().(client.Object)); err != nil {
 			return err
 		}
 		if updateOptions.SubResourceBody == nil {
diff --git a/pkg/client/fake/client_test.go b/pkg/client/fake/client_test.go
index ae537d5b43..2f5392fda7 100644
--- a/pkg/client/fake/client_test.go
+++ b/pkg/client/fake/client_test.go
@@ -21,6 +21,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"strconv"
+	"sync"
 	"time"
 
 	"github.com/google/go-cmp/cmp"
@@ -579,7 +580,7 @@
 		Expect(obj.ObjectMeta.ResourceVersion).To(Equal("1000"))
 	})
 
-	It("should allow patch with non-set ResourceVersion for a resource that doesn't allow unconditional updates", func() {
+	It("should allow patch when the patch sets RV to 'null'", func() {
 		schemeBuilder := &scheme.Builder{GroupVersion: schema.GroupVersion{Group: "test", Version: "v1"}}
 		schemeBuilder.Register(&WithPointerMeta{}, &WithPointerMetaList{})
 
@@ -604,6 +605,7 @@
 				"foo": "bar",
 			},
 		}}
+
 		Expect(cl.Patch(context.Background(), newObj, client.MergeFrom(original))).To(Succeed())
 
 		patched := &WithPointerMeta{}
@@ -2098,6 +2100,280 @@
 		Expect(apierrors.IsNotFound(err)).To(BeTrue())
 	})
 
+	It("should allow concurrent patches to a configMap", func() {
+		scheme := runtime.NewScheme()
+		Expect(corev1.AddToScheme(scheme)).To(Succeed())
+
+		obj := &corev1.ConfigMap{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:            "foo",
+				ResourceVersion: "0",
+			},
+		}
+		cl := NewClientBuilder().WithScheme(scheme).WithObjects(obj).Build()
+
+		const tries = 50
+		wg := sync.WaitGroup{}
+		wg.Add(tries)
+
+		for i := range tries {
+			go func() {
+				defer wg.Done()
+				defer GinkgoRecover()
+
+				newObj := obj.DeepCopy()
+				newObj.Data = map[string]string{"foo": strconv.Itoa(i)}
+				Expect(cl.Patch(context.Background(), newObj, client.MergeFrom(obj))).To(Succeed())
+			}()
+		}
+		wg.Wait()
+
+		// While the order is not deterministic, there must be $tries distinct updates
+		// that each increment the resource version by one
+		Expect(cl.Get(context.Background(), client.ObjectKey{Name: "foo"}, obj)).To(Succeed())
+		Expect(obj.ResourceVersion).To(Equal(strconv.Itoa(tries)))
+	})
+
+	It("should not allow concurrent patches to a configMap if the patch contains a ResourceVersion", func() {
+		scheme := runtime.NewScheme()
+		Expect(corev1.AddToScheme(scheme)).To(Succeed())
+
+		obj := &corev1.ConfigMap{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:            "foo",
+				ResourceVersion: "0",
+			},
+		}
+		cl := NewClientBuilder().WithScheme(scheme).WithObjects(obj).Build()
+		wg := sync.WaitGroup{}
+		wg.Add(5)
+
+		for i := range 5 {
+			go func() {
+				defer wg.Done()
+				defer GinkgoRecover()
+
+				newObj := obj.DeepCopy()
+				newObj.ResourceVersion = "1" // include an invalid RV to cause a conflict
+				newObj.Data = map[string]string{"foo": strconv.Itoa(i)}
+				Expect(apierrors.IsConflict(cl.Patch(context.Background(), newObj, client.MergeFrom(obj)))).To(BeTrue())
+			}()
+		}
+		wg.Wait()
+	})
+
+	It("should allow concurrent updates to an object that allows unconditionalUpdate if the incoming request has no RV", func() {
+		scheme := runtime.NewScheme()
+		Expect(corev1.AddToScheme(scheme)).To(Succeed())
+
+		obj := &corev1.ConfigMap{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:            "foo",
+				ResourceVersion: "0",
+			},
+		}
+		cl := NewClientBuilder().WithScheme(scheme).WithObjects(obj).Build()
+
+		const tries = 50
+		wg := sync.WaitGroup{}
+		wg.Add(tries)
+
+		for i := range tries {
+			go func() {
+				defer wg.Done()
+				defer GinkgoRecover()
+
+				newObj := obj.DeepCopy()
+				newObj.Data = map[string]string{"foo": strconv.Itoa(i)}
+				newObj.ResourceVersion = ""
+				Expect(cl.Update(context.Background(), newObj)).To(Succeed())
+			}()
+		}
+		wg.Wait()
+
+		// While the order is not deterministic, there must be $tries distinct updates
+		// that each increment the resource version by one
+		Expect(cl.Get(context.Background(), client.ObjectKey{Name: "foo"}, obj)).To(Succeed())
+		Expect(obj.ResourceVersion).To(Equal(strconv.Itoa(tries)))
+	})
+
+	It("If a create races with an update for an object that allows createOnUpdate, the update should always succeed", func() {
+		scheme := runtime.NewScheme()
+		Expect(corev1.AddToScheme(scheme)).To(Succeed())
+
+		cl := NewClientBuilder().WithScheme(scheme).Build()
+
+		const tries = 50
+		wg := sync.WaitGroup{}
+		wg.Add(tries * 2)
+
+		for i := range tries {
+			obj := &corev1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: strconv.Itoa(i),
+				},
+			}
+			go func() {
+				defer wg.Done()
+				defer GinkgoRecover()
+
+				// this may or may not succeed depending on if we win the race. Either is acceptable,
+				// but if it fails, it must fail due to an AlreadyExists.
+				err := cl.Create(context.Background(), obj.DeepCopy())
+				if err != nil {
+					Expect(apierrors.IsAlreadyExists(err)).To(BeTrue())
+				}
+			}()
+
+			go func() {
+				defer wg.Done()
+				defer GinkgoRecover()
+
+				// This must always succeed, regardless of the outcome of the create.
+				Expect(cl.Update(context.Background(), obj.DeepCopy())).To(Succeed())
+			}()
+		}
+
+		wg.Wait()
+	})
+
+	It("If a delete races with an update for an object that allows createOnUpdate, the update should always succeed", func() {
+		scheme := runtime.NewScheme()
+		Expect(corev1.AddToScheme(scheme)).To(Succeed())
+
+		cl := NewClientBuilder().WithScheme(scheme).Build()
+
+		const tries = 50
+		wg := sync.WaitGroup{}
+		wg.Add(tries * 2)
+
+		for i := range tries {
+			obj := &corev1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: strconv.Itoa(i),
+				},
+			}
+			Expect(cl.Create(context.Background(), obj.DeepCopy())).To(Succeed())
+
+			go func() {
+				defer wg.Done()
+				defer GinkgoRecover()
+
+				Expect(cl.Delete(context.Background(), obj.DeepCopy())).To(Succeed())
+			}()
+
+			go func() {
+				defer wg.Done()
+				defer GinkgoRecover()
+
+				// This must always succeed, regardless of if the delete came before or
+				// after us.
+				Expect(cl.Update(context.Background(), obj.DeepCopy())).To(Succeed())
+			}()
+		}
+
+		wg.Wait()
+	})
+
+	It("If a DeleteAllOf races with a delete, the DeleteAllOf should always succeed", func() {
+		scheme := runtime.NewScheme()
+		Expect(corev1.AddToScheme(scheme)).To(Succeed())
+
+		cl := NewClientBuilder().WithScheme(scheme).Build()
+
+		const objects = 50
+		wg := sync.WaitGroup{}
+		wg.Add(objects)
+
+		for i := range objects {
+			obj := &corev1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: strconv.Itoa(i),
+				},
+			}
+			Expect(cl.Create(context.Background(), obj.DeepCopy())).To(Succeed())
+		}
+
+		for i := range objects {
+			obj := &corev1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: strconv.Itoa(i),
+				},
+			}
+
+			go func() {
+				defer wg.Done()
+				defer GinkgoRecover()
+
+				// This may or may not succeed depending on if the DeleteAllOf is faster,
+				// but if it fails, it should be a not found.
+				err := cl.Delete(context.Background(), obj)
+				if err != nil {
+					Expect(apierrors.IsNotFound(err)).To(BeTrue())
+				}
+			}()
+		}
+		Expect(cl.DeleteAllOf(context.Background(), &corev1.Service{})).To(Succeed())
+
+		wg.Wait()
+	})
+
+	It("If an update races with a scale update, only one of them succeeds", func() {
+		scheme := runtime.NewScheme()
+		Expect(appsv1.AddToScheme(scheme)).To(Succeed())
+
+		cl := NewClientBuilder().WithScheme(scheme).Build()
+
+		const tries = 5000
+		for i := range tries {
+			dep := &appsv1.Deployment{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: strconv.Itoa(i),
+				},
+			}
+			Expect(cl.Create(context.Background(), dep)).To(Succeed())
+
+			wg := sync.WaitGroup{}
+			wg.Add(2)
+			var updateSucceeded bool
+			var scaleSucceeded bool
+
+			go func() {
+				defer wg.Done()
+				defer GinkgoRecover()
+
+				dep := dep.DeepCopy()
+				dep.Annotations = map[string]string{"foo": "bar"}
+
+				// This may or may not fail. If it does fail, it must be a conflict.
+				err := cl.Update(context.Background(), dep)
+				if err != nil {
+					Expect(apierrors.IsConflict(err)).To(BeTrue())
+				} else {
+					updateSucceeded = true
+				}
+			}()
+
+			go func() {
+				defer wg.Done()
+				defer GinkgoRecover()
+
+				// This may or may not fail. If it does fail, it must be a conflict.
+				scale := &autoscalingv1.Scale{Spec: autoscalingv1.ScaleSpec{Replicas: 10}}
+				err := cl.SubResource("scale").Update(context.Background(), dep.DeepCopy(), client.WithSubResourceBody(scale))
+				if err != nil {
+					Expect(apierrors.IsConflict(err)).To(BeTrue())
+				} else {
+					scaleSucceeded = true
+				}
+			}()
+
+			wg.Wait()
+			Expect(updateSucceeded).ToNot(Equal(scaleSucceeded))
+		}
+
+	})
+
 	It("disallows scale subresources on unsupported built-in types", func() {
 		scheme := runtime.NewScheme()
 		Expect(corev1.AddToScheme(scheme)).To(Succeed())
@@ -2252,8 +2528,8 @@ func (t *WithPointerMetaList) DeepCopyObject() runtime.Object {
 }
 
 type WithPointerMeta struct {
-	*metav1.TypeMeta
-	*metav1.ObjectMeta
+	*metav1.TypeMeta   `json:",inline"`
+	*metav1.ObjectMeta `json:"metadata,omitempty"`
 }
 
 func (t *WithPointerMeta) DeepCopy() *WithPointerMeta {