Repository snapshot of kubernetes/component-helpers at commit 41f945cf5869eeb4fa3c973a61ed4486368b8811.

==== kubernetes-component-helpers-b5afa51/.github/PULL_REQUEST_TEMPLATE.md ====

Sorry, we do not accept changes directly against this repository. Please see
CONTRIBUTING.md for information on where and how to contribute instead.

==== kubernetes-component-helpers-b5afa51/CONTRIBUTING.md ====

# Contributing guidelines

Do not open pull requests directly against this repository; they will be ignored. Instead, please open pull requests against [kubernetes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes.

This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/component-helpers](https://git.k8s.io/kubernetes/staging/src/k8s.io/component-helpers) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot).

Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/sig-architecture/staging.md) for more information.

==== kubernetes-component-helpers-b5afa51/LICENSE ====

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship.
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

==== kubernetes-component-helpers-b5afa51/OWNERS ====

# See the OWNERS docs at https://go.k8s.io/owners

approvers:
  - dims
  - deads2k
  - liggitt
  - soltysh
reviewers:
  - dims
  - deads2k
  - liggitt
  - soltysh

==== kubernetes-component-helpers-b5afa51/README.md ====

# component-helpers

This repository provides helpers primarily for core components (core components as described in [Create a k8s.io/component-base repo](https://github.com/kubernetes/enhancements/blob/master/keps/sig-cluster-lifecycle/wgs/783-component-base/README.md#component-definition)) that are required by at least two separate binaries in the kubernetes org, while still keeping a high level of abstraction.

The `k8s.io/component-base` staging repository was considered as a candidate for hosting the helpers; however, since the helpers are not required by the core components, that repository was deemed unsuitable.
The only allowed kubernetes dependencies are `k8s.io/apimachinery`, `k8s.io/api` and `k8s.io/client-go`.

## Purpose

One of the goals is to provide a better location for helpers currently located under `k8s.io/kubernetes/pkg/apis`. The recent effort of moving the [scheduling framework](https://kubernetes.io/docs/concepts/scheduling-eviction/scheduling-framework/) under `k8s.io/kube-scheduler` requires duplication of many helper functions (see [#91782](https://github.com/kubernetes/kubernetes/issues/91782) for more details). Importing the helpers from this repository makes it possible to minimize or remove the existing duplication.

Another example is shared RBAC code, which is blocking the extraction of kubectl to staging (see https://github.com/kubernetes/enhancements/issues/1020). This problem dates all the way back to December 2018 (see the SIG-CLI call from December 19, 2018: https://docs.google.com/document/d/1r0YElcXt6G5mOWxwZiXgGu_X6he3F--wKwg-9UBc29I/edit?pli=1). More recently the topic came up during a sig-auth call (see https://docs.google.com/document/d/1woLGRoONE3EBVx-wTb4pvp4CI7tmLZ6lS26VTbosLKM/edit?ts=5ef3be6a#heading=h.etc9yylhln8x).

## Compatibility

There are NO compatibility guarantees for this repository. It is in direct support of Kubernetes, so branches will track Kubernetes and be compatible with that repo. As we more cleanly separate the layers, we will review the compatibility guarantee.

## Where does it come from?

This repo is synced from https://github.com/kubernetes/kubernetes/tree/master/staging/src/k8s.io/component-helpers. Code changes are made in that location, merged into `k8s.io/kubernetes`, and later synced here by a bot.

==== kubernetes-component-helpers-b5afa51/SECURITY_CONTACTS ====

# Defined below are the security contacts for this repo.
#
# They are the contact point for the Product Security Committee to reach out
# to for triaging and handling of incoming issues.
#
# The below names agree to abide by the
# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy)
# and will be removed and replaced if they violate that agreement.
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://kubernetes.io/security/

cjcullen
cji
joelsmith
lukehinds
micahhausler
tallclair

==== kubernetes-component-helpers-b5afa51/apimachinery/OWNERS ====

# See the OWNERS docs at https://go.k8s.io/owners

approvers:
  - sig-api-machinery-api-approvers
reviewers:
  - sig-api-machinery-api-reviewers
labels:
  - sig/api-machinery

==== kubernetes-component-helpers-b5afa51/apimachinery/lease/controller.go ====

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package lease

import (
	"context"
	"fmt"
	"time"

	coordinationv1 "k8s.io/api/coordination/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	coordclientset "k8s.io/client-go/kubernetes/typed/coordination/v1"
	"k8s.io/utils/clock"
	"k8s.io/utils/pointer"

	"k8s.io/klog/v2"
)

const (
	// maxUpdateRetries is the number of immediate, successive retries the controller will attempt
	// when renewing the lease before it waits for the renewal interval before trying again,
	// similar to what we do for node status retries
	maxUpdateRetries = 5
	// maxBackoff is the maximum sleep time during backoff (e.g. in backoffEnsureLease)
	maxBackoff = 7 * time.Second
)

// Controller manages creating and renewing the lease for this component (kube-apiserver, kubelet, etc.)
type Controller interface {
	Run(ctx context.Context)
}

// ProcessLeaseFunc processes the given lease in-place
type ProcessLeaseFunc func(*coordinationv1.Lease) error

type controller struct {
	client                     clientset.Interface
	leaseClient                coordclientset.LeaseInterface
	holderIdentity             string
	leaseName                  string
	leaseNamespace             string
	leaseDurationSeconds       int32
	renewInterval              time.Duration
	clock                      clock.Clock
	onRepeatedHeartbeatFailure func()

	// latestLease is the latest lease which the controller updated or created
	latestLease *coordinationv1.Lease

	// newLeasePostProcessFunc allows customizing a lease object (e.g. setting OwnerReference)
	// before every time the lease is created/refreshed(updated).
	// Note that an error will block the lease operation.
	newLeasePostProcessFunc ProcessLeaseFunc
}

// NewController constructs and returns a controller
func NewController(clock clock.Clock, client clientset.Interface, holderIdentity string, leaseDurationSeconds int32, onRepeatedHeartbeatFailure func(), renewInterval time.Duration, leaseName, leaseNamespace string, newLeasePostProcessFunc ProcessLeaseFunc) Controller {
	var leaseClient coordclientset.LeaseInterface
	if client != nil {
		leaseClient = client.CoordinationV1().Leases(leaseNamespace)
	}
	return &controller{
		client:                     client,
		leaseClient:                leaseClient,
		holderIdentity:             holderIdentity,
		leaseName:                  leaseName,
		leaseNamespace:             leaseNamespace,
		leaseDurationSeconds:       leaseDurationSeconds,
		renewInterval:              renewInterval,
		clock:                      clock,
		onRepeatedHeartbeatFailure: onRepeatedHeartbeatFailure,
		newLeasePostProcessFunc:    newLeasePostProcessFunc,
	}
}

// Run runs the controller
func (c *controller) Run(ctx context.Context) {
	if c.leaseClient == nil {
		klog.FromContext(ctx).Info("lease controller has nil lease client, will not claim or renew leases")
		return
	}
	wait.JitterUntilWithContext(ctx, c.sync, c.renewInterval, 0.04, true)
}

func (c *controller) sync(ctx context.Context) {
	if c.latestLease != nil {
		// As long as the lease is not (or very rarely) updated by any other agent than the component itself,
		// we can optimistically assume it didn't change since our last update and try updating
		// based on the version from that time. Thanks to it we avoid GET call and reduce load
		// on etcd and kube-apiserver.
		// If at some point other agents will also be frequently updating the Lease object, this
		// can result in performance degradation, because we will end up with calling additional
		// GET/PUT - at this point this whole "if" should be removed.
		err := c.retryUpdateLease(ctx, c.latestLease)
		if err == nil {
			return
		}
		klog.FromContext(ctx).Info("failed to update lease using latest lease, fallback to ensure lease", "err", err)
	}

	lease, created := c.backoffEnsureLease(ctx)
	c.latestLease = lease
	// we don't need to update the lease if we just created it
	if !created && lease != nil {
		if err := c.retryUpdateLease(ctx, lease); err != nil {
			klog.FromContext(ctx).Error(err, "Will retry updating lease", "interval", c.renewInterval)
		}
	}
}

// backoffEnsureLease attempts to create the lease if it does not exist,
// and uses exponentially increasing waits to prevent overloading the API server
// with retries. Returns the lease, and true if this call created the lease,
// false otherwise.
func (c *controller) backoffEnsureLease(ctx context.Context) (*coordinationv1.Lease, bool) {
	var (
		lease   *coordinationv1.Lease
		created bool
		err     error
	)
	sleep := 100 * time.Millisecond
	for {
		lease, created, err = c.ensureLease(ctx)
		if err == nil {
			break
		}
		sleep = minDuration(2*sleep, maxBackoff)
		klog.FromContext(ctx).Error(err, "Failed to ensure lease exists, will retry", "interval", sleep)
		// backoff wait with early return if the context gets canceled
		select {
		case <-ctx.Done():
			return nil, false
		case <-time.After(sleep):
		}
	}
	return lease, created
}

// ensureLease creates the lease if it does not exist. Returns the lease and
// a bool (true if this call created the lease), or any error that occurs.
func (c *controller) ensureLease(ctx context.Context) (*coordinationv1.Lease, bool, error) {
	lease, err := c.leaseClient.Get(ctx, c.leaseName, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		// lease does not exist, create it.
		leaseToCreate, err := c.newLease(nil)
		// An error occurred during allocating the new lease (likely from newLeasePostProcessFunc).
		// Given that we weren't able to set the lease correctly, we simply
		// do not create it this time - we will retry in the next iteration.
		if err != nil {
			return nil, false, nil
		}
		lease, err := c.leaseClient.Create(ctx, leaseToCreate, metav1.CreateOptions{})
		if err != nil {
			return nil, false, err
		}
		return lease, true, nil
	} else if err != nil {
		// unexpected error getting lease
		return nil, false, err
	}
	// lease already existed
	return lease, false, nil
}

// retryUpdateLease attempts to update the lease for maxUpdateRetries;
// call this once you're sure the lease has been created
func (c *controller) retryUpdateLease(ctx context.Context, base *coordinationv1.Lease) error {
	for i := 0; i < maxUpdateRetries; i++ {
		leaseToUpdate, err := c.newLease(base)
		if err != nil {
			klog.FromContext(ctx).Error(err, "Failed to prepare lease")
		} else {
			lease, err := c.leaseClient.Update(ctx, leaseToUpdate, metav1.UpdateOptions{})
			if err == nil {
				c.latestLease = lease
				return nil
			}
			klog.FromContext(ctx).Error(err, "Failed to update lease")
			// OptimisticLockError requires getting the newer version of lease to proceed.
			if apierrors.IsConflict(err) {
				base, _ = c.backoffEnsureLease(ctx)
				continue
			}
		}
		if i > 0 && c.onRepeatedHeartbeatFailure != nil {
			c.onRepeatedHeartbeatFailure()
		}
	}
	return fmt.Errorf("failed %d attempts to update lease", maxUpdateRetries)
}

// newLease constructs a new lease if base is nil, or returns a copy of base
// with desired state asserted on the copy.
// Note that an error will block lease CREATE, causing the CREATE to be retried in
// the next iteration; but the error won't block lease refresh (UPDATE).
func (c *controller) newLease(base *coordinationv1.Lease) (*coordinationv1.Lease, error) {
	// Use the bare minimum set of fields; other fields exist for debugging/legacy,
	// but we don't need to make component heartbeats more complicated by using them.
	var lease *coordinationv1.Lease
	if base == nil {
		lease = &coordinationv1.Lease{
			ObjectMeta: metav1.ObjectMeta{
				Name:      c.leaseName,
				Namespace: c.leaseNamespace,
			},
			Spec: coordinationv1.LeaseSpec{
				HolderIdentity:       pointer.StringPtr(c.holderIdentity),
				LeaseDurationSeconds: pointer.Int32Ptr(c.leaseDurationSeconds),
			},
		}
	} else {
		lease = base.DeepCopy()
	}
	lease.Spec.RenewTime = &metav1.MicroTime{Time: c.clock.Now()}

	if c.newLeasePostProcessFunc != nil {
		err := c.newLeasePostProcessFunc(lease)
		return lease, err
	}
	return lease, nil
}

func minDuration(a, b time.Duration) time.Duration {
	if a < b {
		return a
	}
	return b
}
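==== illustrative example (editor's addition, not part of the repository): wiring up the lease controller ====

Because NewController takes a long list of positional parameters, a short usage
sketch may help. This is a minimal, hypothetical wiring for a component
heartbeat; the holder identity, lease name/namespace, duration, and renew
interval below are made-up values chosen for the example, not defaults of this
package.

package main

import (
	"context"
	"time"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/component-helpers/apimachinery/lease"
	"k8s.io/utils/clock"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	c := lease.NewController(
		clock.RealClock{}, // real wall clock; tests pass a fake clock instead
		client,
		"my-component",  // holderIdentity written into the Lease spec
		40,              // leaseDurationSeconds
		nil,             // onRepeatedHeartbeatFailure callback (optional)
		10*time.Second,  // renewInterval between sync attempts
		"my-component",  // lease object name
		"kube-system",   // lease object namespace
		nil,             // newLeasePostProcessFunc (optional)
	)

	// Run blocks, creating the Lease if needed and renewing it every
	// renewInterval, until the context is canceled.
	c.Run(context.Background())
}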
==== kubernetes-component-helpers-b5afa51/apimachinery/lease/controller_test.go ====

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package lease

import (
	"context"
	"errors"
	"fmt"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	coordinationv1 "k8s.io/api/coordination/v1"
	corev1 "k8s.io/api/core/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
	clienttesting "k8s.io/client-go/testing"
	testingclock "k8s.io/utils/clock/testing"
	"k8s.io/utils/pointer"

	"k8s.io/klog/v2"
	"k8s.io/klog/v2/ktesting"
)

func TestNewNodeLease(t *testing.T) {
	fakeClock := testingclock.NewFakeClock(time.Now())
	node := &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "foo",
			UID:  types.UID("foo-uid"),
		},
	}
	cases := []struct {
		desc       string
		controller *controller
		base       *coordinationv1.Lease
		expect     *coordinationv1.Lease
	}{
		{
			desc: "nil base without node",
			controller: &controller{
				client:               fake.NewSimpleClientset(),
				leaseName:            node.Name,
				holderIdentity:       node.Name,
				leaseDurationSeconds: 10,
				clock:                fakeClock,
			},
			base: nil,
			expect: &coordinationv1.Lease{
				ObjectMeta: metav1.ObjectMeta{
					Name:      node.Name,
					Namespace: corev1.NamespaceNodeLease,
				},
				Spec: coordinationv1.LeaseSpec{
					HolderIdentity:       pointer.StringPtr(node.Name),
					LeaseDurationSeconds: pointer.Int32Ptr(10),
					RenewTime:            &metav1.MicroTime{Time: fakeClock.Now()},
				},
			},
		},
		{
			desc: "nil base with node",
			controller: &controller{
				client:               fake.NewSimpleClientset(node),
				leaseName:            node.Name,
				holderIdentity:       node.Name,
				leaseDurationSeconds: 10,
				clock:                fakeClock,
			},
			base: nil,
			expect: &coordinationv1.Lease{
				ObjectMeta: metav1.ObjectMeta{
					Name:      node.Name,
					Namespace: corev1.NamespaceNodeLease,
					OwnerReferences: []metav1.OwnerReference{
						{
							APIVersion: corev1.SchemeGroupVersion.WithKind("Node").Version,
							Kind:       corev1.SchemeGroupVersion.WithKind("Node").Kind,
							Name:       node.Name,
							UID:        node.UID,
						},
					},
				},
				Spec: coordinationv1.LeaseSpec{
					HolderIdentity:       pointer.StringPtr(node.Name),
					LeaseDurationSeconds: pointer.Int32Ptr(10),
					RenewTime:            &metav1.MicroTime{Time: fakeClock.Now()},
				},
			},
		},
		{
			desc: "non-nil base without owner ref, renew time is updated",
			controller: &controller{
				client:               fake.NewSimpleClientset(node),
				holderIdentity:       node.Name,
				leaseDurationSeconds: 10,
				clock:                fakeClock,
			},
			base: &coordinationv1.Lease{
				ObjectMeta: metav1.ObjectMeta{
					Name:      node.Name,
					Namespace: corev1.NamespaceNodeLease,
				},
				Spec: coordinationv1.LeaseSpec{
					HolderIdentity:       pointer.StringPtr(node.Name),
					LeaseDurationSeconds: pointer.Int32Ptr(10),
					RenewTime:            &metav1.MicroTime{Time: fakeClock.Now().Add(-10 * time.Second)},
				},
			},
			expect: &coordinationv1.Lease{
				ObjectMeta: metav1.ObjectMeta{
					Name:      node.Name,
					Namespace: corev1.NamespaceNodeLease,
					OwnerReferences: []metav1.OwnerReference{
						{
							APIVersion: corev1.SchemeGroupVersion.WithKind("Node").Version,
							Kind:       corev1.SchemeGroupVersion.WithKind("Node").Kind,
							Name:       node.Name,
							UID:        node.UID,
						},
					},
				},
				Spec: coordinationv1.LeaseSpec{
					HolderIdentity:       pointer.StringPtr(node.Name),
					LeaseDurationSeconds: pointer.Int32Ptr(10),
					RenewTime:            &metav1.MicroTime{Time: fakeClock.Now()},
				},
			},
		},
		{
			desc: "non-nil base with owner ref, renew time is updated",
			controller: &controller{
				client:               fake.NewSimpleClientset(node),
				holderIdentity:       node.Name,
				leaseDurationSeconds: 10,
				clock:                fakeClock,
			},
			base: &coordinationv1.Lease{
				ObjectMeta: metav1.ObjectMeta{
					Name:      node.Name,
					Namespace: corev1.NamespaceNodeLease,
					OwnerReferences: []metav1.OwnerReference{
						{
							APIVersion: corev1.SchemeGroupVersion.WithKind("Node").Version,
							Kind:       corev1.SchemeGroupVersion.WithKind("Node").Kind,
							Name:       node.Name,
							UID:        node.UID,
						},
					},
				},
				Spec: coordinationv1.LeaseSpec{
					HolderIdentity:       pointer.StringPtr(node.Name),
					LeaseDurationSeconds: pointer.Int32Ptr(10),
					RenewTime:            &metav1.MicroTime{Time: fakeClock.Now().Add(-10 * time.Second)},
				},
			},
			expect: &coordinationv1.Lease{
				ObjectMeta: metav1.ObjectMeta{
					Name:      node.Name,
					Namespace: corev1.NamespaceNodeLease,
					OwnerReferences: []metav1.OwnerReference{
						{
							APIVersion: corev1.SchemeGroupVersion.WithKind("Node").Version,
							Kind:       corev1.SchemeGroupVersion.WithKind("Node").Kind,
							Name:       node.Name,
							UID:        node.UID,
						},
					},
				},
				Spec: coordinationv1.LeaseSpec{
					HolderIdentity:       pointer.StringPtr(node.Name),
					LeaseDurationSeconds: pointer.Int32Ptr(10),
					RenewTime:            &metav1.MicroTime{Time: fakeClock.Now()},
				},
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.desc, func(t *testing.T) {
			logger, _ := ktesting.NewTestContext(t)
			tc.controller.newLeasePostProcessFunc = setNodeOwnerFunc(logger, tc.controller.client, node.Name)
			tc.controller.leaseNamespace = corev1.NamespaceNodeLease
			newLease, _ := tc.controller.newLease(tc.base)
			if newLease == tc.base {
				t.Fatalf("the new lease must be newly allocated, but got same address as base")
			}
			if !apiequality.Semantic.DeepEqual(tc.expect, newLease) {
				t.Errorf("unexpected result from newLease: %s", cmp.Diff(tc.expect, newLease))
			}
		})
	}
}

func TestRetryUpdateNodeLease(t *testing.T) {
	node := &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "foo",
			UID:  types.UID("foo-uid"),
		},
	}
	gr := schema.GroupResource{Group: "v1", Resource: "lease"}
	noConnectionUpdateErr := apierrors.NewServerTimeout(gr, "put", 1)
	optimisticLockUpdateErr := apierrors.NewConflict(gr, "lease", fmt.Errorf("conflict"))
	cases := []struct {
		desc          string
		updateReactor func(action clienttesting.Action) (bool, runtime.Object, error)
		getReactor    func(action clienttesting.Action) (bool, runtime.Object, error)
		onRepeatedHeartbeatFailure func()
		expectErr                  bool
		client                     *fake.Clientset
	}{
		{
			desc: "no errors",
			updateReactor: func(action clienttesting.Action) (bool, runtime.Object, error) {
				return true, &coordinationv1.Lease{}, nil
			},
			getReactor:                 nil,
			onRepeatedHeartbeatFailure: nil,
			expectErr:                  false,
			client:                     fake.NewSimpleClientset(node),
		},
		{
			desc: "connection errors",
			updateReactor: func(action clienttesting.Action) (bool, runtime.Object, error) {
				return true, nil, noConnectionUpdateErr
			},
			getReactor:                 nil,
			onRepeatedHeartbeatFailure: nil,
			expectErr:                  true,
			client:                     fake.NewSimpleClientset(node),
		},
		{
			desc: "optimistic lock errors",
			updateReactor: func() func(action clienttesting.Action) (bool, runtime.Object, error) {
				i := 0
				return func(action clienttesting.Action) (bool, runtime.Object, error) {
					i++
					switch i {
					case 1:
						return true, nil, noConnectionUpdateErr
					case 2:
						return true, nil, optimisticLockUpdateErr
					default:
						return true, &coordinationv1.Lease{}, nil
					}
				}
			}(),
			getReactor: func(action clienttesting.Action) (bool, runtime.Object, error) {
				return true, &coordinationv1.Lease{}, nil
			},
			onRepeatedHeartbeatFailure: func() { t.Fatalf("onRepeatedHeartbeatFailure called") },
			expectErr:                  false,
			client:                     fake.NewSimpleClientset(node),
		},
		{
			desc: "node not found errors",
			updateReactor: func(action clienttesting.Action) (bool, runtime.Object, error) {
				t.Fatalf("lease was updated when node does not exist!")
				return true, nil, nil
			},
			getReactor:                 nil,
			onRepeatedHeartbeatFailure: nil,
			expectErr:                  true,
			client:                     fake.NewSimpleClientset(),
		},
	}

	for _, tc := range cases {
		t.Run(tc.desc, func(t *testing.T) {
			logger, ctx := ktesting.NewTestContext(t)
			cl := tc.client
			if tc.updateReactor != nil {
				cl.PrependReactor("update", "leases", tc.updateReactor)
			}
			if tc.getReactor != nil {
				cl.PrependReactor("get", "leases", tc.getReactor)
			}
			c := &controller{
				clock:                      testingclock.NewFakeClock(time.Now()),
				client:                     cl,
				leaseClient:                cl.CoordinationV1().Leases(corev1.NamespaceNodeLease),
				holderIdentity:             node.Name,
				leaseNamespace:             corev1.NamespaceNodeLease,
				leaseDurationSeconds:       10,
				onRepeatedHeartbeatFailure: tc.onRepeatedHeartbeatFailure,
				newLeasePostProcessFunc:    setNodeOwnerFunc(logger, cl, node.Name),
			}
			if err := c.retryUpdateLease(ctx, nil); tc.expectErr != (err != nil) {
				t.Fatalf("got %v, expected %v", err != nil, tc.expectErr)
			}
		})
	}
}

func TestUpdateUsingLatestLease(t *testing.T) {
	nodeName := "foo"
	node := &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: nodeName,
			UID:  types.UID("foo-uid"),
		},
	}
	notFoundErr := apierrors.NewNotFound(coordinationv1.Resource("lease"), nodeName)
	internalErr := apierrors.NewInternalError(errors.New("unreachable code"))

	makeLease := func(name, resourceVersion string) *coordinationv1.Lease {
		return &coordinationv1.Lease{
			ObjectMeta: metav1.ObjectMeta{
				Namespace:       corev1.NamespaceNodeLease,
				Name:            name,
				ResourceVersion: resourceVersion,
			},
		}
	}

	cases := []struct {
		desc                       string
		existingObjs               []runtime.Object
		latestLease                *coordinationv1.Lease
		updateReactor              func(action clienttesting.Action) (bool, runtime.Object, error)
		getReactor                 func(action clienttesting.Action) (bool, runtime.Object, error)
		createReactor              func(action clienttesting.Action) (bool, runtime.Object, error)
		expectLatestLease          bool
		expectLeaseResourceVersion string
	}{
		{
			desc:          "latestLease is nil and need to create",
			existingObjs:  []runtime.Object{node},
			latestLease:   nil,
			updateReactor: nil,
			getReactor: func(action clienttesting.Action) (bool, runtime.Object, error) {
				return true, nil, notFoundErr
			},
			createReactor: func(action clienttesting.Action) (bool,
				runtime.Object, error) {
				return true, makeLease(nodeName, "1"), nil
			},
			expectLatestLease:          true,
			expectLeaseResourceVersion: "1",
		},
		{
			desc:          "latestLease is nil and need to create, node doesn't exist",
			existingObjs:  nil,
			latestLease:   nil,
			updateReactor: nil,
			getReactor: func(action clienttesting.Action) (bool, runtime.Object, error) {
				return true, nil, notFoundErr
			},
			createReactor: func(action clienttesting.Action) (bool, runtime.Object, error) {
				return true, makeLease(nodeName, "1"), nil
			},
			expectLatestLease:          false,
			expectLeaseResourceVersion: "1",
		},
		{
			desc:         "latestLease is nil and need to update",
			existingObjs: []runtime.Object{node},
			latestLease:  nil,
			updateReactor: func(action clienttesting.Action) (bool, runtime.Object, error) {
				return true, makeLease(nodeName, "2"), nil
			},
			getReactor: func(action clienttesting.Action) (bool, runtime.Object, error) {
				return true, makeLease(nodeName, "1"), nil
			},
			expectLatestLease:          true,
			expectLeaseResourceVersion: "2",
		},
		{
			desc:         "latestLease exists and need to update",
			existingObjs: []runtime.Object{node},
			latestLease:  makeLease(nodeName, "1"),
			updateReactor: func(action clienttesting.Action) (bool, runtime.Object, error) {
				return true, makeLease(nodeName, "2"), nil
			},
			expectLatestLease:          true,
			expectLeaseResourceVersion: "2",
		},
		{
			desc:         "update with latest lease failed",
			existingObjs: []runtime.Object{node},
			latestLease:  makeLease(nodeName, "1"),
			updateReactor: func() func(action clienttesting.Action) (bool, runtime.Object, error) {
				i := 0
				return func(action clienttesting.Action) (bool, runtime.Object, error) {
					i++
					switch i {
					case 1:
						return true, nil, notFoundErr
					case 2:
						return true, makeLease(nodeName, "3"), nil
					default:
						t.Fatalf("unexpected call to update lease")
						return true, nil, internalErr
					}
				}
			}(),
			getReactor: func(action clienttesting.Action) (bool, runtime.Object, error) {
				return true, makeLease(nodeName, "2"), nil
			},
			expectLatestLease:          true,
			expectLeaseResourceVersion: "3",
		},
	}
	for _, tc := range cases {
		t.Run(tc.desc, func(t *testing.T) {
			logger, ctx := ktesting.NewTestContext(t)
			cl := fake.NewSimpleClientset(tc.existingObjs...)
			if tc.updateReactor != nil {
				cl.PrependReactor("update", "leases", tc.updateReactor)
			}
			if tc.getReactor != nil {
				cl.PrependReactor("get", "leases", tc.getReactor)
			}
			if tc.createReactor != nil {
				cl.PrependReactor("create", "leases", tc.createReactor)
			}
			c := &controller{
				clock:                   testingclock.NewFakeClock(time.Now()),
				client:                  cl,
				leaseClient:             cl.CoordinationV1().Leases(corev1.NamespaceNodeLease),
				holderIdentity:          node.Name,
				leaseNamespace:          corev1.NamespaceNodeLease,
				leaseDurationSeconds:    10,
				latestLease:             tc.latestLease,
				newLeasePostProcessFunc: setNodeOwnerFunc(logger, cl, node.Name),
			}

			c.sync(ctx)

			if tc.expectLatestLease {
				if tc.expectLeaseResourceVersion != c.latestLease.ResourceVersion {
					t.Fatalf("latestLease RV got %v, expected %v", c.latestLease.ResourceVersion, tc.expectLeaseResourceVersion)
				}
			} else {
				if c.latestLease != nil {
					t.Fatalf("unexpected latestLease: %v", c.latestLease)
				}
			}
		})
	}
}

// setNodeOwnerFunc helps construct a newLeasePostProcessFunc which sets
// a node OwnerReference to the given lease object
func setNodeOwnerFunc(logger klog.Logger, c clientset.Interface, nodeName string) func(lease *coordinationv1.Lease) error {
	return func(lease *coordinationv1.Lease) error {
		// Setting owner reference needs node's UID. Note that it is different from
		// kubelet.nodeRef.UID. When lease is initially created, it is possible that
		// the connection between master and node is not ready yet.
		// So try to set owner reference every time when renewing the lease, until successful.
		if len(lease.OwnerReferences) == 0 {
			if node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}); err == nil {
				lease.OwnerReferences = []metav1.OwnerReference{
					{
						APIVersion: corev1.SchemeGroupVersion.WithKind("Node").Version,
						Kind:       corev1.SchemeGroupVersion.WithKind("Node").Kind,
						Name:       nodeName,
						UID:        node.UID,
					},
				}
			} else {
				logger.Error(err, "failed to get node when trying to set owner ref to the node lease", "node", nodeName)
				return err
			}
		}
		return nil
	}
}

==== kubernetes-component-helpers-b5afa51/apps/OWNERS ====

# See the OWNERS docs at https://go.k8s.io/owners

approvers:
  - sig-apps-api-approvers
reviewers:
  - sig-apps-api-reviewers
labels:
  - sig/apps

==== kubernetes-component-helpers-b5afa51/apps/poddisruptionbudget/helpers.go ====

/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package poddisruptionbudget

import (
	policy "k8s.io/api/policy/v1"
	apimeta "k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// UpdateDisruptionAllowedCondition updates the DisruptionAllowed condition
// on a PodDisruptionBudget based on the value of the DisruptionsAllowed field.
func UpdateDisruptionAllowedCondition(pdb *policy.PodDisruptionBudget) {
	if pdb.Status.Conditions == nil {
		pdb.Status.Conditions = make([]metav1.Condition, 0)
	}
	if pdb.Status.DisruptionsAllowed > 0 {
		apimeta.SetStatusCondition(&pdb.Status.Conditions, metav1.Condition{
			Type:               policy.DisruptionAllowedCondition,
			Reason:             policy.SufficientPodsReason,
			Status:             metav1.ConditionTrue,
			ObservedGeneration: pdb.Status.ObservedGeneration,
		})
	} else {
		apimeta.SetStatusCondition(&pdb.Status.Conditions, metav1.Condition{
			Type:               policy.DisruptionAllowedCondition,
			Reason:             policy.InsufficientPodsReason,
			Status:             metav1.ConditionFalse,
			ObservedGeneration: pdb.Status.ObservedGeneration,
		})
	}
}

// ConditionsAreUpToDate checks whether the status and reason for the
// DisruptionAllowed condition are set to the correct values based on the
// DisruptionsAllowed field.
func ConditionsAreUpToDate(pdb *policy.PodDisruptionBudget) bool {
	cond := apimeta.FindStatusCondition(pdb.Status.Conditions, policy.DisruptionAllowedCondition)
	if cond == nil {
		return false
	}

	if pdb.Status.ObservedGeneration != pdb.Generation {
		return false
	}

	if pdb.Status.DisruptionsAllowed > 0 {
		return cond.Status == metav1.ConditionTrue && cond.Reason == policy.SufficientPodsReason
	}
	return cond.Status == metav1.ConditionFalse && cond.Reason == policy.InsufficientPodsReason
}
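==== illustrative example (editor's addition, not part of the repository): keeping the DisruptionAllowed condition in sync ====

A minimal sketch of the call pattern these two helpers are designed for,
roughly the status-sync step of a disruption controller. The package alias and
function name are hypothetical; persisting the status (e.g. via UpdateStatus on
the policy/v1 client) is left to the caller.

package example

import (
	policy "k8s.io/api/policy/v1"
	pdbhelper "k8s.io/component-helpers/apps/poddisruptionbudget"
)

// syncPDBStatus refreshes the DisruptionAllowed condition and reports whether
// the PodDisruptionBudget status changed and needs to be written back.
func syncPDBStatus(pdb *policy.PodDisruptionBudget) bool {
	// Cheap pre-check: skip the write entirely when the condition already
	// matches Status.DisruptionsAllowed and the observed generation is current.
	if pdbhelper.ConditionsAreUpToDate(pdb) {
		return false
	}
	pdbhelper.UpdateDisruptionAllowedCondition(pdb)
	return true // caller should now persist pdb.Status
}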
==== kubernetes-component-helpers-b5afa51/auth/OWNERS ====

# See the OWNERS docs at https://go.k8s.io/owners

approvers:
  - sig-auth-api-approvers
reviewers:
  - sig-auth-api-reviewers
labels:
  - sig/auth

==== kubernetes-component-helpers-b5afa51/auth/rbac/reconciliation/clusterrole_interfaces.go ====

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package reconciliation

import (
	"context"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1"
)

// +k8s:deepcopy-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/component-helpers/auth/rbac/reconciliation.RuleOwner
// +k8s:deepcopy-gen:nonpointer-interfaces=true
type ClusterRoleRuleOwner struct {
	ClusterRole *rbacv1.ClusterRole
}

func (o ClusterRoleRuleOwner) GetObject() runtime.Object {
	return o.ClusterRole
}

func (o ClusterRoleRuleOwner) GetNamespace() string {
	return o.ClusterRole.Namespace
}

func (o ClusterRoleRuleOwner) GetName() string {
	return o.ClusterRole.Name
}

func (o ClusterRoleRuleOwner) GetLabels() map[string]string {
	return o.ClusterRole.Labels
}

func (o ClusterRoleRuleOwner) SetLabels(in map[string]string) {
	o.ClusterRole.Labels = in
}

func (o ClusterRoleRuleOwner) GetAnnotations() map[string]string {
	return o.ClusterRole.Annotations
}

func (o ClusterRoleRuleOwner) SetAnnotations(in map[string]string) {
	o.ClusterRole.Annotations = in
}

func (o ClusterRoleRuleOwner) GetRules() []rbacv1.PolicyRule {
	return o.ClusterRole.Rules
}

func (o ClusterRoleRuleOwner) SetRules(in []rbacv1.PolicyRule) {
	o.ClusterRole.Rules = in
}

func (o ClusterRoleRuleOwner) GetAggregationRule() *rbacv1.AggregationRule {
	return o.ClusterRole.AggregationRule
}

func (o ClusterRoleRuleOwner) SetAggregationRule(in *rbacv1.AggregationRule) {
	o.ClusterRole.AggregationRule = in
}

type ClusterRoleModifier struct {
	Client rbacv1client.ClusterRoleInterface
}

func (c ClusterRoleModifier) Get(namespace, name string) (RuleOwner, error) {
	ret, err := c.Client.Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	return ClusterRoleRuleOwner{ClusterRole: ret}, err
}

func (c ClusterRoleModifier) Create(in RuleOwner) (RuleOwner, error) {
	ret, err := c.Client.Create(context.TODO(), in.(ClusterRoleRuleOwner).ClusterRole, metav1.CreateOptions{})
	if err != nil {
		return nil, err
	}
	return ClusterRoleRuleOwner{ClusterRole: ret}, err
}

func (c ClusterRoleModifier) Update(in RuleOwner) (RuleOwner, error) {
	ret, err := c.Client.Update(context.TODO(), in.(ClusterRoleRuleOwner).ClusterRole, metav1.UpdateOptions{})
	if err != nil {
		return nil, err
	}
	return ClusterRoleRuleOwner{ClusterRole: ret}, err
}

==== kubernetes-component-helpers-b5afa51/auth/rbac/reconciliation/clusterrolebinding_interfaces.go ====

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package reconciliation

import (
	"context"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1"
)

// +k8s:deepcopy-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/component-helpers/auth/rbac/reconciliation.RoleBinding
// +k8s:deepcopy-gen:nonpointer-interfaces=true
type ClusterRoleBindingAdapter struct {
	ClusterRoleBinding *rbacv1.ClusterRoleBinding
}

func (o ClusterRoleBindingAdapter) GetObject() runtime.Object {
	return o.ClusterRoleBinding
}

func (o ClusterRoleBindingAdapter) GetNamespace() string {
	return o.ClusterRoleBinding.Namespace
}

func (o ClusterRoleBindingAdapter) GetName() string {
	return o.ClusterRoleBinding.Name
}

func (o ClusterRoleBindingAdapter) GetUID() types.UID {
	return o.ClusterRoleBinding.UID
}

func (o ClusterRoleBindingAdapter) GetLabels() map[string]string {
	return o.ClusterRoleBinding.Labels
}

func (o ClusterRoleBindingAdapter) SetLabels(in map[string]string) {
	o.ClusterRoleBinding.Labels = in
}

func (o ClusterRoleBindingAdapter) GetAnnotations() map[string]string {
	return o.ClusterRoleBinding.Annotations
}

func (o ClusterRoleBindingAdapter) SetAnnotations(in map[string]string) {
	o.ClusterRoleBinding.Annotations = in
}

func (o ClusterRoleBindingAdapter) GetRoleRef() rbacv1.RoleRef {
	return o.ClusterRoleBinding.RoleRef
}

func (o ClusterRoleBindingAdapter) GetSubjects() []rbacv1.Subject {
	return o.ClusterRoleBinding.Subjects
}

func (o ClusterRoleBindingAdapter) SetSubjects(in []rbacv1.Subject) {
	o.ClusterRoleBinding.Subjects = in
}

type ClusterRoleBindingClientAdapter struct {
	Client rbacv1client.ClusterRoleBindingInterface
}

func (c ClusterRoleBindingClientAdapter) Get(namespace, name string) (RoleBinding, error) {
	ret, err := c.Client.Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	return ClusterRoleBindingAdapter{ClusterRoleBinding: ret}, err
}

func (c ClusterRoleBindingClientAdapter) Create(in RoleBinding) (RoleBinding, error) {
	ret, err := c.Client.Create(context.TODO(), in.(ClusterRoleBindingAdapter).ClusterRoleBinding, metav1.CreateOptions{})
	if err != nil {
		return nil, err
	}
	return ClusterRoleBindingAdapter{ClusterRoleBinding: ret}, err
}

func (c ClusterRoleBindingClientAdapter) Update(in RoleBinding) (RoleBinding, error) {
	ret, err := c.Client.Update(context.TODO(), in.(ClusterRoleBindingAdapter).ClusterRoleBinding, metav1.UpdateOptions{})
	if err != nil {
		return nil, err
	}
	return ClusterRoleBindingAdapter{ClusterRoleBinding: ret}, err
}

func (c ClusterRoleBindingClientAdapter) Delete(namespace, name string, uid types.UID) error {
	return c.Client.Delete(context.TODO(), name, metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &uid}})
}
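==== illustrative example (editor's addition, not part of the repository): deleting through the adapter with a UID precondition ====

A small sketch of why Delete takes a UID: passing the previously observed UID
as a delete precondition ensures that a binding which was deleted and
re-created under the same name in the meantime is left alone. The helper
function name is hypothetical, and the type assertion to
ClusterRoleBindingAdapter mirrors how the adapters are used within this
package.

package example

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/client-go/kubernetes"
	"k8s.io/component-helpers/auth/rbac/reconciliation"
)

// deleteClusterRoleBinding removes the named binding only if it still has the
// UID we observed when reading it.
func deleteClusterRoleBinding(client kubernetes.Interface, name string) error {
	adapter := reconciliation.ClusterRoleBindingClientAdapter{
		Client: client.RbacV1().ClusterRoleBindings(),
	}
	existing, err := adapter.Get("", name) // namespace is unused for cluster-scoped objects
	if apierrors.IsNotFound(err) {
		return nil // already gone
	}
	if err != nil {
		return err
	}
	uid := existing.(reconciliation.ClusterRoleBindingAdapter).GetUID()
	return adapter.Delete("", name, uid)
}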
==== kubernetes-component-helpers-b5afa51/auth/rbac/reconciliation/namespace.go ====

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package reconciliation

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

// tryEnsureNamespace gets or creates the given namespace while ignoring forbidden errors.
// It is a best effort attempt as the user may not be able to get or create namespaces.
// This allows us to handle flows where the user can only mutate roles and role bindings.
func tryEnsureNamespace(client corev1client.NamespaceInterface, namespace string) error {
	_, getErr := client.Get(context.TODO(), namespace, metav1.GetOptions{})
	if getErr == nil {
		return nil
	}

	if fatalGetErr := utilerrors.FilterOut(getErr, apierrors.IsNotFound, apierrors.IsForbidden); fatalGetErr != nil {
		return fatalGetErr
	}

	ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
	_, createErr := client.Create(context.TODO(), ns, metav1.CreateOptions{})
	return utilerrors.FilterOut(createErr, apierrors.IsAlreadyExists, apierrors.IsForbidden)
}

==== kubernetes-component-helpers-b5afa51/auth/rbac/reconciliation/reconcile_role.go ====

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package reconciliation

import (
	"fmt"
	"reflect"

	rbacv1 "k8s.io/api/rbac/v1"
	"k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/component-helpers/auth/rbac/validation"
)

type ReconcileOperation string

var (
	ReconcileCreate   ReconcileOperation = "create"
	ReconcileUpdate   ReconcileOperation = "update"
	ReconcileRecreate ReconcileOperation = "recreate"
	ReconcileNone     ReconcileOperation = "none"
)

type RuleOwnerModifier interface {
	Get(namespace, name string) (RuleOwner, error)
	Create(RuleOwner) (RuleOwner, error)
	Update(RuleOwner) (RuleOwner, error)
}

type RuleOwner interface {
	GetObject() runtime.Object
	GetNamespace() string
	GetName() string
	GetLabels() map[string]string
	SetLabels(map[string]string)
	GetAnnotations() map[string]string
	SetAnnotations(map[string]string)
	GetRules() []rbacv1.PolicyRule
	SetRules([]rbacv1.PolicyRule)
	GetAggregationRule() *rbacv1.AggregationRule
	SetAggregationRule(*rbacv1.AggregationRule)
	DeepCopyRuleOwner() RuleOwner
}

type ReconcileRoleOptions struct {
	// Role is the expected role that will be reconciled
	Role RuleOwner
	// Confirm indicates writes should be performed. When false, results are returned as a dry-run.
	Confirm bool
	// RemoveExtraPermissions indicates reconciliation should remove extra permissions from an existing role
	RemoveExtraPermissions bool
	// Client is used to look up existing roles, and create/update the role when Confirm=true
	Client RuleOwnerModifier
}

type ReconcileClusterRoleResult struct {
	// Role is the reconciled role from the reconciliation operation.
	// If the reconcile was performed as a dry-run, or the existing role was protected, the reconciled role is not persisted.
	Role RuleOwner

	// MissingRules contains expected rules that were missing from the currently persisted role
	MissingRules []rbacv1.PolicyRule
	// ExtraRules contains extra permissions the currently persisted role had
	ExtraRules []rbacv1.PolicyRule

	// MissingAggregationRuleSelectors contains expected selectors that were missing from the currently persisted role
	MissingAggregationRuleSelectors []metav1.LabelSelector
	// ExtraAggregationRuleSelectors contains extra selectors the currently persisted role had
	ExtraAggregationRuleSelectors []metav1.LabelSelector

	// Operation is the API operation required to reconcile.
	// If no reconciliation was needed, it is set to ReconcileNone.
	// If options.Confirm == false, the reconcile was in dry-run mode, so the operation was not performed.
	// If result.Protected == true, the role opted out of reconciliation, so the operation was not performed.
	// Otherwise, the operation was performed.
	Operation ReconcileOperation
	// Protected indicates an existing role prevented reconciliation
	Protected bool
}

func (o *ReconcileRoleOptions) Run() (*ReconcileClusterRoleResult, error) {
	return o.run(0)
}

func (o *ReconcileRoleOptions) run(attempts int) (*ReconcileClusterRoleResult, error) {
	// This keeps us from retrying forever if a role keeps appearing and disappearing as we reconcile.
	// Conflict errors on update are handled at a higher level.
	if attempts > 2 {
		return nil, fmt.Errorf("exceeded maximum attempts")
	}

	var result *ReconcileClusterRoleResult

	existing, err := o.Client.Get(o.Role.GetNamespace(), o.Role.GetName())
	switch {
	case errors.IsNotFound(err):
		aggregationRule := o.Role.GetAggregationRule()
		if aggregationRule == nil {
			aggregationRule = &rbacv1.AggregationRule{}
		}
		result = &ReconcileClusterRoleResult{
			Role:                            o.Role,
			MissingRules:                    o.Role.GetRules(),
			MissingAggregationRuleSelectors: aggregationRule.ClusterRoleSelectors,
			Operation:                       ReconcileCreate,
		}

	case err != nil:
		return nil, err

	default:
		result, err = computeReconciledRole(existing, o.Role, o.RemoveExtraPermissions)
		if err != nil {
			return nil, err
		}
	}

	// If reconcile-protected, short-circuit
	if result.Protected {
		return result, nil
	}
	// If we're in dry-run mode, short-circuit
	if !o.Confirm {
		return result, nil
	}

	switch result.Operation {
	case ReconcileCreate:
		created, err := o.Client.Create(result.Role)
		// If created since we started this reconcile, re-run
		if errors.IsAlreadyExists(err) {
			return o.run(attempts + 1)
		}
		if err != nil {
			return nil, err
		}
		result.Role = created

	case ReconcileUpdate:
		updated, err := o.Client.Update(result.Role)
		// If deleted since we started this reconcile, re-run
		if errors.IsNotFound(err) {
			return o.run(attempts + 1)
		}
		if err != nil {
			return nil, err
		}
		result.Role = updated

	case ReconcileNone:
		// no-op

	default:
		return nil, fmt.Errorf("invalid operation: %v", result.Operation)
	}

	return result, nil
}

// computeReconciledRole returns the role that must be created and/or updated to make the
// existing role's permissions match the expected role's permissions
func computeReconciledRole(existing, expected RuleOwner, removeExtraPermissions bool) (*ReconcileClusterRoleResult, error) {
	result := &ReconcileClusterRoleResult{Operation: ReconcileNone}

	result.Protected = (existing.GetAnnotations()[rbacv1.AutoUpdateAnnotationKey] == "false")

	// Start with a copy of the existing object
	result.Role = existing.DeepCopyRuleOwner()

	// Merge expected annotations and labels
	result.Role.SetAnnotations(merge(expected.GetAnnotations(), result.Role.GetAnnotations()))
	if !reflect.DeepEqual(result.Role.GetAnnotations(), existing.GetAnnotations()) {
		result.Operation = ReconcileUpdate
	}
	result.Role.SetLabels(merge(expected.GetLabels(), result.Role.GetLabels()))
	if !reflect.DeepEqual(result.Role.GetLabels(), existing.GetLabels()) {
		result.Operation = ReconcileUpdate
	}

	// Compute extra and missing rules
	// Don't compute extra permissions if expected and existing roles are both aggregated
	if expected.GetAggregationRule() == nil || existing.GetAggregationRule() == nil {
		_, result.ExtraRules = validation.Covers(expected.GetRules(), existing.GetRules())
	}
	_, result.MissingRules = validation.Covers(existing.GetRules(), expected.GetRules())

	switch {
	case !removeExtraPermissions && len(result.MissingRules) > 0:
		// add missing rules in the union case
		result.Role.SetRules(append(result.Role.GetRules(), result.MissingRules...))
		result.Operation = ReconcileUpdate

	case removeExtraPermissions && (len(result.MissingRules) > 0 || len(result.ExtraRules) > 0):
		// stomp to expected rules in the non-union case
		result.Role.SetRules(expected.GetRules())
		result.Operation = ReconcileUpdate
	}

	// Compute extra and missing aggregation rule selectors
	_, result.ExtraAggregationRuleSelectors = aggregationRuleCovers(expected.GetAggregationRule(), existing.GetAggregationRule())
	_, result.MissingAggregationRuleSelectors = aggregationRuleCovers(existing.GetAggregationRule(), expected.GetAggregationRule())

	switch {
	case expected.GetAggregationRule() == nil && existing.GetAggregationRule() != nil:
		// we didn't expect this to be an aggregated role at all, remove the existing aggregation
		result.Role.SetAggregationRule(nil)
		result.Operation = ReconcileUpdate

	case !removeExtraPermissions && len(result.MissingAggregationRuleSelectors) > 0:
		// add missing selectors in the union case
		aggregationRule := result.Role.GetAggregationRule()
		if aggregationRule == nil {
			aggregationRule = &rbacv1.AggregationRule{}
		}
		aggregationRule.ClusterRoleSelectors = append(aggregationRule.ClusterRoleSelectors, result.MissingAggregationRuleSelectors...)
		result.Role.SetAggregationRule(aggregationRule)
		result.Operation = ReconcileUpdate

	case removeExtraPermissions && (len(result.MissingAggregationRuleSelectors) > 0 || len(result.ExtraAggregationRuleSelectors) > 0):
		result.Role.SetAggregationRule(expected.GetAggregationRule())
		result.Operation = ReconcileUpdate
	}

	return result, nil
}

// merge combines the given maps with the later annotations having higher precedence
func merge(maps ...map[string]string) map[string]string {
	var output map[string]string = nil
	for _, m := range maps {
		if m != nil && output == nil {
			output = map[string]string{}
		}
		for k, v := range m {
			output[k] = v
		}
	}
	return output
}

// aggregationRuleCovers determines whether the ownerSelectors cover the servantSelectors
// in terms of semantically equal label selectors.
// It returns whether the ownerSelectors cover the servantSelectors, and a list of the
// selectors that the ownerSelectors do not cover.
func aggregationRuleCovers(ownerRule, servantRule *rbacv1.AggregationRule) (bool, []metav1.LabelSelector) { switch { case ownerRule == nil && servantRule == nil: return true, []metav1.LabelSelector{} case ownerRule == nil && servantRule != nil: return false, servantRule.ClusterRoleSelectors case ownerRule != nil && servantRule == nil: return true, []metav1.LabelSelector{} } ownerSelectors := ownerRule.ClusterRoleSelectors servantSelectors := servantRule.ClusterRoleSelectors uncoveredSelectors := []metav1.LabelSelector{} for _, servantSelector := range servantSelectors { covered := false for _, ownerSelector := range ownerSelectors { if equality.Semantic.DeepEqual(ownerSelector, servantSelector) { covered = true break } } if !covered { uncoveredSelectors = append(uncoveredSelectors, servantSelector) } } return (len(uncoveredSelectors) == 0), uncoveredSelectors } kubernetes-component-helpers-b5afa51/auth/rbac/reconciliation/reconcile_role_test.go000066400000000000000000000366761476422250100312600ustar00rootroot00000000000000/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package reconciliation import ( "testing" "github.com/google/go-cmp/cmp" rbacv1 "k8s.io/api/rbac/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func role(rules []rbacv1.PolicyRule, labels map[string]string, annotations map[string]string) *rbacv1.ClusterRole { return &rbacv1.ClusterRole{ Rules: rules, ObjectMeta: metav1.ObjectMeta{Labels: labels, Annotations: annotations}, } } func rules(resources ...string) []rbacv1.PolicyRule { r := []rbacv1.PolicyRule{} for _, resource := range resources { r = append(r, rbacv1.PolicyRule{APIGroups: []string{""}, Verbs: []string{"get"}, Resources: []string{resource}}) } return r } type ss map[string]string func TestComputeReconciledRoleRules(t *testing.T) { tests := map[string]struct { expectedRole *rbacv1.ClusterRole actualRole *rbacv1.ClusterRole removeExtraPermissions bool expectedReconciledRole *rbacv1.ClusterRole expectedReconciliationNeeded bool }{ "empty": { expectedRole: role(rules(), nil, nil), actualRole: role(rules(), nil, nil), removeExtraPermissions: true, expectedReconciledRole: nil, expectedReconciliationNeeded: false, }, "match without union": { expectedRole: role(rules("a"), nil, nil), actualRole: role(rules("a"), nil, nil), removeExtraPermissions: true, expectedReconciledRole: nil, expectedReconciliationNeeded: false, }, "match with union": { expectedRole: role(rules("a"), nil, nil), actualRole: role(rules("a"), nil, nil), removeExtraPermissions: false, expectedReconciledRole: nil, expectedReconciliationNeeded: false, }, "different rules without union": { expectedRole: role(rules("a"), nil, nil), actualRole: role(rules("b"), nil, nil), removeExtraPermissions: true, expectedReconciledRole: role(rules("a"), nil, nil), expectedReconciliationNeeded: true, }, "different rules with union": { expectedRole: role(rules("a"), nil, nil), actualRole: role(rules("b"), nil, nil), removeExtraPermissions: false, 
expectedReconciledRole: role(rules("b", "a"), nil, nil), expectedReconciliationNeeded: true, }, "match labels without union": { expectedRole: role(rules("a"), ss{"1": "a"}, nil), actualRole: role(rules("a"), ss{"1": "a"}, nil), removeExtraPermissions: true, expectedReconciledRole: nil, expectedReconciliationNeeded: false, }, "match labels with union": { expectedRole: role(rules("a"), ss{"1": "a"}, nil), actualRole: role(rules("a"), ss{"1": "a"}, nil), removeExtraPermissions: false, expectedReconciledRole: nil, expectedReconciliationNeeded: false, }, "different labels without union": { expectedRole: role(rules("a"), ss{"1": "a"}, nil), actualRole: role(rules("a"), ss{"2": "b"}, nil), removeExtraPermissions: true, expectedReconciledRole: role(rules("a"), ss{"1": "a", "2": "b"}, nil), expectedReconciliationNeeded: true, }, "different labels with union": { expectedRole: role(rules("a"), ss{"1": "a"}, nil), actualRole: role(rules("a"), ss{"2": "b"}, nil), removeExtraPermissions: false, expectedReconciledRole: role(rules("a"), ss{"1": "a", "2": "b"}, nil), expectedReconciliationNeeded: true, }, "different labels and rules without union": { expectedRole: role(rules("a"), ss{"1": "a"}, nil), actualRole: role(rules("b"), ss{"2": "b"}, nil), removeExtraPermissions: true, expectedReconciledRole: role(rules("a"), ss{"1": "a", "2": "b"}, nil), expectedReconciliationNeeded: true, }, "different labels and rules with union": { expectedRole: role(rules("a"), ss{"1": "a"}, nil), actualRole: role(rules("b"), ss{"2": "b"}, nil), removeExtraPermissions: false, expectedReconciledRole: role(rules("b", "a"), ss{"1": "a", "2": "b"}, nil), expectedReconciliationNeeded: true, }, "conflicting labels and rules without union": { expectedRole: role(rules("a"), ss{"1": "a"}, nil), actualRole: role(rules("b"), ss{"1": "b"}, nil), removeExtraPermissions: true, expectedReconciledRole: role(rules("a"), ss{"1": "b"}, nil), expectedReconciliationNeeded: true, }, "conflicting labels and rules with union": { expectedRole: role(rules("a"), ss{"1": "a"}, nil), actualRole: role(rules("b"), ss{"1": "b"}, nil), removeExtraPermissions: false, expectedReconciledRole: role(rules("b", "a"), ss{"1": "b"}, nil), expectedReconciliationNeeded: true, }, "match annotations without union": { expectedRole: role(rules("a"), nil, ss{"1": "a"}), actualRole: role(rules("a"), nil, ss{"1": "a"}), removeExtraPermissions: true, expectedReconciledRole: nil, expectedReconciliationNeeded: false, }, "match annotations with union": { expectedRole: role(rules("a"), nil, ss{"1": "a"}), actualRole: role(rules("a"), nil, ss{"1": "a"}), removeExtraPermissions: false, expectedReconciledRole: nil, expectedReconciliationNeeded: false, }, "different annotations without union": { expectedRole: role(rules("a"), nil, ss{"1": "a"}), actualRole: role(rules("a"), nil, ss{"2": "b"}), removeExtraPermissions: true, expectedReconciledRole: role(rules("a"), nil, ss{"1": "a", "2": "b"}), expectedReconciliationNeeded: true, }, "different annotations with union": { expectedRole: role(rules("a"), nil, ss{"1": "a"}), actualRole: role(rules("a"), nil, ss{"2": "b"}), removeExtraPermissions: false, expectedReconciledRole: role(rules("a"), nil, ss{"1": "a", "2": "b"}), expectedReconciliationNeeded: true, }, "different annotations and rules without union": { expectedRole: role(rules("a"), nil, ss{"1": "a"}), actualRole: role(rules("b"), nil, ss{"2": "b"}), removeExtraPermissions: true, expectedReconciledRole: role(rules("a"), nil, ss{"1": "a", "2": "b"}), expectedReconciliationNeeded: 
true, }, "different annotations and rules with union": { expectedRole: role(rules("a"), nil, ss{"1": "a"}), actualRole: role(rules("b"), nil, ss{"2": "b"}), removeExtraPermissions: false, expectedReconciledRole: role(rules("b", "a"), nil, ss{"1": "a", "2": "b"}), expectedReconciliationNeeded: true, }, "conflicting annotations and rules without union": { expectedRole: role(rules("a"), nil, ss{"1": "a"}), actualRole: role(rules("b"), nil, ss{"1": "b"}), removeExtraPermissions: true, expectedReconciledRole: role(rules("a"), nil, ss{"1": "b"}), expectedReconciliationNeeded: true, }, "conflicting annotations and rules with union": { expectedRole: role(rules("a"), nil, ss{"1": "a"}), actualRole: role(rules("b"), nil, ss{"1": "b"}), removeExtraPermissions: false, expectedReconciledRole: role(rules("b", "a"), nil, ss{"1": "b"}), expectedReconciliationNeeded: true, }, "conflicting labels/annotations and rules without union": { expectedRole: role(rules("a"), ss{"3": "d"}, ss{"1": "a"}), actualRole: role(rules("b"), ss{"4": "e"}, ss{"1": "b"}), removeExtraPermissions: true, expectedReconciledRole: role(rules("a"), ss{"3": "d", "4": "e"}, ss{"1": "b"}), expectedReconciliationNeeded: true, }, "conflicting labels/annotations and rules with union": { expectedRole: role(rules("a"), ss{"3": "d"}, ss{"1": "a"}), actualRole: role(rules("b"), ss{"4": "e"}, ss{"1": "b"}), removeExtraPermissions: false, expectedReconciledRole: role(rules("b", "a"), ss{"3": "d", "4": "e"}, ss{"1": "b"}), expectedReconciliationNeeded: true, }, "complex labels/annotations and rules without union": { expectedRole: role(rules("pods", "nodes", "secrets"), ss{"env": "prod", "color": "blue"}, ss{"description": "fancy", "system": "true"}), actualRole: role(rules("nodes", "images", "projects"), ss{"color": "red", "team": "pm"}, ss{"system": "false", "owner": "admin", "vip": "yes"}), removeExtraPermissions: true, expectedReconciledRole: role( rules("pods", "nodes", "secrets"), ss{"env": "prod", "color": "red", "team": "pm"}, ss{"description": "fancy", "system": "false", "owner": "admin", "vip": "yes"}), expectedReconciliationNeeded: true, }, "complex labels/annotations and rules with union": { expectedRole: role(rules("pods", "nodes", "secrets"), ss{"env": "prod", "color": "blue", "manager": "randy"}, ss{"description": "fancy", "system": "true", "up": "true"}), actualRole: role(rules("nodes", "images", "projects"), ss{"color": "red", "team": "pm"}, ss{"system": "false", "owner": "admin", "vip": "yes", "rate": "down"}), removeExtraPermissions: false, expectedReconciledRole: role( rules("nodes", "images", "projects", "pods", "secrets"), ss{"env": "prod", "manager": "randy", "color": "red", "team": "pm"}, ss{"description": "fancy", "system": "false", "owner": "admin", "vip": "yes", "rate": "down", "up": "true"}), expectedReconciliationNeeded: true, }, } for k, tc := range tests { actualRole := ClusterRoleRuleOwner{ClusterRole: tc.actualRole} expectedRole := ClusterRoleRuleOwner{ClusterRole: tc.expectedRole} result, err := computeReconciledRole(actualRole, expectedRole, tc.removeExtraPermissions) if err != nil { t.Errorf("%s: %v", k, err) continue } reconciliationNeeded := result.Operation != ReconcileNone if reconciliationNeeded != tc.expectedReconciliationNeeded { t.Errorf("%s: Expected\n\t%v\ngot\n\t%v", k, tc.expectedReconciliationNeeded, reconciliationNeeded) continue } if reconciliationNeeded && !apiequality.Semantic.DeepEqual(result.Role.(ClusterRoleRuleOwner).ClusterRole, tc.expectedReconciledRole) { t.Errorf("%s: 
Expected\n\t%#v\ngot\n\t%#v", k, tc.expectedReconciledRole, result.Role) } } } func aggregatedRole(aggregationRule *rbacv1.AggregationRule) *rbacv1.ClusterRole { return &rbacv1.ClusterRole{ AggregationRule: aggregationRule, } } func aggregationrule(selectors []map[string]string) *rbacv1.AggregationRule { ret := &rbacv1.AggregationRule{} for _, selector := range selectors { ret.ClusterRoleSelectors = append(ret.ClusterRoleSelectors, metav1.LabelSelector{MatchLabels: selector}) } return ret } func TestComputeReconciledRoleAggregationRules(t *testing.T) { tests := map[string]struct { expectedRole *rbacv1.ClusterRole actualRole *rbacv1.ClusterRole removeExtraPermissions bool expectedReconciledRole *rbacv1.ClusterRole expectedReconciliationNeeded bool }{ "empty": { expectedRole: aggregatedRole(&rbacv1.AggregationRule{}), actualRole: aggregatedRole(nil), removeExtraPermissions: true, expectedReconciledRole: nil, expectedReconciliationNeeded: false, }, "empty-2": { expectedRole: aggregatedRole(&rbacv1.AggregationRule{}), actualRole: aggregatedRole(&rbacv1.AggregationRule{}), removeExtraPermissions: true, expectedReconciledRole: nil, expectedReconciliationNeeded: false, }, "match without union": { expectedRole: aggregatedRole(aggregationrule([]map[string]string{{"foo": "bar"}})), actualRole: aggregatedRole(aggregationrule([]map[string]string{{"foo": "bar"}})), removeExtraPermissions: true, expectedReconciledRole: nil, expectedReconciliationNeeded: false, }, "match with union": { expectedRole: aggregatedRole(aggregationrule([]map[string]string{{"foo": "bar"}})), actualRole: aggregatedRole(aggregationrule([]map[string]string{{"foo": "bar"}})), removeExtraPermissions: false, expectedReconciledRole: nil, expectedReconciliationNeeded: false, }, "different rules without union": { expectedRole: aggregatedRole(aggregationrule([]map[string]string{{"foo": "bar"}})), actualRole: aggregatedRole(aggregationrule([]map[string]string{{"alpha": "bravo"}})), removeExtraPermissions: true, expectedReconciledRole: aggregatedRole(aggregationrule([]map[string]string{{"foo": "bar"}})), expectedReconciliationNeeded: true, }, "different rules with union": { expectedRole: aggregatedRole(aggregationrule([]map[string]string{{"foo": "bar"}})), actualRole: aggregatedRole(aggregationrule([]map[string]string{{"alpha": "bravo"}})), removeExtraPermissions: false, expectedReconciledRole: aggregatedRole(aggregationrule([]map[string]string{{"alpha": "bravo"}, {"foo": "bar"}})), expectedReconciliationNeeded: true, }, "unexpected aggregation": { // desired role is not aggregated expectedRole: role(rules("pods", "nodes", "secrets"), nil, nil), // existing role is aggregated actualRole: aggregatedRole(aggregationrule([]map[string]string{{"alpha": "bravo"}})), removeExtraPermissions: false, // reconciled role should have desired permissions and not be aggregated expectedReconciledRole: role(rules("pods", "nodes", "secrets"), nil, nil), expectedReconciliationNeeded: true, }, "unexpected aggregation with differing permissions": { // desired role is not aggregated expectedRole: role(rules("pods", "nodes", "secrets"), nil, nil), // existing role is aggregated and has other permissions actualRole: func() *rbacv1.ClusterRole { r := aggregatedRole(aggregationrule([]map[string]string{{"alpha": "bravo"}})) r.Rules = rules("deployments") return r }(), removeExtraPermissions: false, // reconciled role should have aggregation removed, preserve differing permissions, and include desired permissions expectedReconciledRole: role(rules("deployments", 
"pods", "nodes", "secrets"), nil, nil), expectedReconciliationNeeded: true, }, } for k, tc := range tests { actualRole := ClusterRoleRuleOwner{ClusterRole: tc.actualRole} expectedRole := ClusterRoleRuleOwner{ClusterRole: tc.expectedRole} result, err := computeReconciledRole(actualRole, expectedRole, tc.removeExtraPermissions) if err != nil { t.Errorf("%s: %v", k, err) continue } reconciliationNeeded := result.Operation != ReconcileNone if reconciliationNeeded != tc.expectedReconciliationNeeded { t.Errorf("%s: Expected\n\t%v\ngot\n\t%v", k, tc.expectedReconciliationNeeded, reconciliationNeeded) continue } if reconciliationNeeded && !apiequality.Semantic.DeepEqual(result.Role.(ClusterRoleRuleOwner).ClusterRole, tc.expectedReconciledRole) { t.Errorf("%s: %v", k, cmp.Diff(tc.expectedReconciledRole, result.Role.(ClusterRoleRuleOwner).ClusterRole)) } } } kubernetes-component-helpers-b5afa51/auth/rbac/reconciliation/reconcile_rolebindings.go000066400000000000000000000202571476422250100317230ustar00rootroot00000000000000/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package reconciliation import ( "fmt" "reflect" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" ) type RoleBindingModifier interface { Get(namespace, name string) (RoleBinding, error) Delete(namespace, name string, uid types.UID) error Create(RoleBinding) (RoleBinding, error) Update(RoleBinding) (RoleBinding, error) } type RoleBinding interface { GetObject() runtime.Object GetNamespace() string GetName() string GetUID() types.UID GetLabels() map[string]string SetLabels(map[string]string) GetAnnotations() map[string]string SetAnnotations(map[string]string) GetRoleRef() rbacv1.RoleRef GetSubjects() []rbacv1.Subject SetSubjects([]rbacv1.Subject) DeepCopyRoleBinding() RoleBinding } // ReconcileRoleBindingOptions holds options for running a role binding reconciliation type ReconcileRoleBindingOptions struct { // RoleBinding is the expected rolebinding that will be reconciled RoleBinding RoleBinding // Confirm indicates writes should be performed. When false, results are returned as a dry-run. Confirm bool // RemoveExtraSubjects indicates reconciliation should remove extra subjects from an existing role binding RemoveExtraSubjects bool // Client is used to look up existing rolebindings, and create/update the rolebinding when Confirm=true Client RoleBindingModifier } // ReconcileClusterRoleBindingResult holds the result of a reconciliation operation. type ReconcileClusterRoleBindingResult struct { // RoleBinding is the reconciled rolebinding from the reconciliation operation. // If the reconcile was performed as a dry-run, or the existing rolebinding was protected, the reconciled rolebinding is not persisted. 
RoleBinding RoleBinding // MissingSubjects contains expected subjects that were missing from the currently persisted rolebinding MissingSubjects []rbacv1.Subject // ExtraSubjects contains extra subjects the currently persisted rolebinding had ExtraSubjects []rbacv1.Subject // Operation is the API operation required to reconcile. // If no reconciliation was needed, it is set to ReconcileNone. // If options.Confirm == false, the reconcile was in dry-run mode, so the operation was not performed. // If result.Protected == true, the rolebinding opted out of reconciliation, so the operation was not performed. // Otherwise, the operation was performed. Operation ReconcileOperation // Protected indicates an existing role prevented reconciliation Protected bool } func (o *ReconcileRoleBindingOptions) Run() (*ReconcileClusterRoleBindingResult, error) { return o.run(0) } func (o *ReconcileRoleBindingOptions) run(attempts int) (*ReconcileClusterRoleBindingResult, error) { // This keeps us from retrying forever if a rolebinding keeps appearing and disappearing as we reconcile. // Conflict errors on update are handled at a higher level. if attempts > 3 { return nil, fmt.Errorf("exceeded maximum attempts") } var result *ReconcileClusterRoleBindingResult existingBinding, err := o.Client.Get(o.RoleBinding.GetNamespace(), o.RoleBinding.GetName()) switch { case errors.IsNotFound(err): result = &ReconcileClusterRoleBindingResult{ RoleBinding: o.RoleBinding, MissingSubjects: o.RoleBinding.GetSubjects(), Operation: ReconcileCreate, } case err != nil: return nil, err default: result, err = computeReconciledRoleBinding(existingBinding, o.RoleBinding, o.RemoveExtraSubjects) if err != nil { return nil, err } } // If reconcile-protected, short-circuit if result.Protected { return result, nil } // If we're in dry-run mode, short-circuit if !o.Confirm { return result, nil } switch result.Operation { case ReconcileRecreate: // Try deleting err := o.Client.Delete(existingBinding.GetNamespace(), existingBinding.GetName(), existingBinding.GetUID()) switch { case err == nil, errors.IsNotFound(err): // object no longer exists, as desired case errors.IsConflict(err): // delete failed because our UID precondition conflicted // this could mean another object exists with a different UID, re-run return o.run(attempts + 1) default: // return other errors return nil, err } // continue to create fallthrough case ReconcileCreate: created, err := o.Client.Create(result.RoleBinding) // If created since we started this reconcile, re-run if errors.IsAlreadyExists(err) { return o.run(attempts + 1) } if err != nil { return nil, err } result.RoleBinding = created case ReconcileUpdate: updated, err := o.Client.Update(result.RoleBinding) // If deleted since we started this reconcile, re-run if errors.IsNotFound(err) { return o.run(attempts + 1) } if err != nil { return nil, err } result.RoleBinding = updated case ReconcileNone: // no-op default: return nil, fmt.Errorf("invalid operation: %v", result.Operation) } return result, nil } // computeReconciledRoleBinding returns the rolebinding that must be created and/or updated to make the // existing rolebinding's subjects, roleref, labels, and annotations match the expected rolebinding func computeReconciledRoleBinding(existing, expected RoleBinding, removeExtraSubjects bool) (*ReconcileClusterRoleBindingResult, error) { result := &ReconcileClusterRoleBindingResult{Operation: ReconcileNone} result.Protected = (existing.GetAnnotations()[rbacv1.AutoUpdateAnnotationKey] == "false") // Reset the 
binding completely if the roleRef is different if expected.GetRoleRef() != existing.GetRoleRef() { result.RoleBinding = expected result.Operation = ReconcileRecreate return result, nil } // Start with a copy of the existing object result.RoleBinding = existing.DeepCopyRoleBinding() // Merge expected annotations and labels result.RoleBinding.SetAnnotations(merge(expected.GetAnnotations(), result.RoleBinding.GetAnnotations())) if !reflect.DeepEqual(result.RoleBinding.GetAnnotations(), existing.GetAnnotations()) { result.Operation = ReconcileUpdate } result.RoleBinding.SetLabels(merge(expected.GetLabels(), result.RoleBinding.GetLabels())) if !reflect.DeepEqual(result.RoleBinding.GetLabels(), existing.GetLabels()) { result.Operation = ReconcileUpdate } // Compute extra and missing subjects result.MissingSubjects, result.ExtraSubjects = diffSubjectLists(expected.GetSubjects(), existing.GetSubjects()) switch { case !removeExtraSubjects && len(result.MissingSubjects) > 0: // add missing subjects in the union case result.RoleBinding.SetSubjects(append(result.RoleBinding.GetSubjects(), result.MissingSubjects...)) result.Operation = ReconcileUpdate case removeExtraSubjects && (len(result.MissingSubjects) > 0 || len(result.ExtraSubjects) > 0): // stomp to expected subjects in the non-union case result.RoleBinding.SetSubjects(expected.GetSubjects()) result.Operation = ReconcileUpdate } return result, nil } func contains(list []rbacv1.Subject, item rbacv1.Subject) bool { for _, listItem := range list { if listItem == item { return true } } return false } // diffSubjectLists returns lists containing the items unique to each provided list: // // list1Only = list1 - list2 // list2Only = list2 - list1 // // if both returned lists are empty, the provided lists are equal func diffSubjectLists(list1 []rbacv1.Subject, list2 []rbacv1.Subject) (list1Only []rbacv1.Subject, list2Only []rbacv1.Subject) { for _, list1Item := range list1 { if !contains(list2, list1Item) { if !contains(list1Only, list1Item) { list1Only = append(list1Only, list1Item) } } } for _, list2Item := range list2 { if !contains(list1, list2Item) { if !contains(list2Only, list2Item) { list2Only = append(list2Only, list2Item) } } } return } kubernetes-component-helpers-b5afa51/auth/rbac/reconciliation/reconcile_rolebindings_test.go000066400000000000000000000130211476422250100327510ustar00rootroot00000000000000/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
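A minimal sketch of wiring the reconciler above together, where clientset (a kubernetes.Interface) and desiredBinding (a *rbacv1.RoleBinding) are assumed names rather than part of this package; note that a changed roleRef cannot be updated in place (roleRef is immutable server-side), which is why run() recreates the binding through a UID-preconditioned delete: opts := ReconcileRoleBindingOptions{ RoleBinding: RoleBindingAdapter{RoleBinding: desiredBinding}, Client: RoleBindingClientAdapter{Client: clientset.RbacV1(), NamespaceClient: clientset.CoreV1().Namespaces()}, Confirm: true, // persist changes; false would only report the diff RemoveExtraSubjects: false, // union semantics: keep subjects added out of band } result, err := opts.Run() if err != nil { return err } // result.Operation, result.MissingSubjects and result.ExtraSubjects describe what was done.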
*/ package reconciliation import ( "testing" rbacv1 "k8s.io/api/rbac/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" ) func binding(roleRef rbacv1.RoleRef, subjects []rbacv1.Subject) *rbacv1.ClusterRoleBinding { return &rbacv1.ClusterRoleBinding{RoleRef: roleRef, Subjects: subjects} } func ref(name string) rbacv1.RoleRef { return rbacv1.RoleRef{Name: name} } func subject(name string) rbacv1.Subject { return rbacv1.Subject{Name: name} } func subjects(names ...string) []rbacv1.Subject { r := []rbacv1.Subject{} for _, name := range names { r = append(r, subject(name)) } return r } func TestDiffObjectReferenceLists(t *testing.T) { tests := map[string]struct { A []rbacv1.Subject B []rbacv1.Subject ExpectedOnlyA []rbacv1.Subject ExpectedOnlyB []rbacv1.Subject }{ "empty": {}, "matching, order-independent": { A: subjects("foo", "bar"), B: subjects("bar", "foo"), }, "partial match": { A: subjects("foo", "bar"), B: subjects("foo", "baz"), ExpectedOnlyA: subjects("bar"), ExpectedOnlyB: subjects("baz"), }, "missing": { A: subjects("foo"), B: subjects("bar"), ExpectedOnlyA: subjects("foo"), ExpectedOnlyB: subjects("bar"), }, "remove duplicates": { A: subjects("foo", "foo"), B: subjects("bar", "bar"), ExpectedOnlyA: subjects("foo"), ExpectedOnlyB: subjects("bar"), }, } for k, tc := range tests { onlyA, onlyB := diffSubjectLists(tc.A, tc.B) if !apiequality.Semantic.DeepEqual(onlyA, tc.ExpectedOnlyA) { t.Errorf("%s: Expected %#v, got %#v", k, tc.ExpectedOnlyA, onlyA) } if !apiequality.Semantic.DeepEqual(onlyB, tc.ExpectedOnlyB) { t.Errorf("%s: Expected %#v, got %#v", k, tc.ExpectedOnlyB, onlyB) } } } func TestComputeUpdate(t *testing.T) { tests := map[string]struct { ExpectedBinding *rbacv1.ClusterRoleBinding ActualBinding *rbacv1.ClusterRoleBinding RemoveExtraSubjects bool ExpectedUpdatedBinding *rbacv1.ClusterRoleBinding ExpectedUpdateNeeded bool }{ "match without union": { ExpectedBinding: binding(ref("role"), subjects("a")), ActualBinding: binding(ref("role"), subjects("a")), RemoveExtraSubjects: true, ExpectedUpdatedBinding: nil, ExpectedUpdateNeeded: false, }, "match with union": { ExpectedBinding: binding(ref("role"), subjects("a")), ActualBinding: binding(ref("role"), subjects("a")), RemoveExtraSubjects: false, ExpectedUpdatedBinding: nil, ExpectedUpdateNeeded: false, }, "different roleref with identical subjects": { ExpectedBinding: binding(ref("role"), subjects("a")), ActualBinding: binding(ref("differentRole"), subjects("a")), RemoveExtraSubjects: false, ExpectedUpdatedBinding: binding(ref("role"), subjects("a")), ExpectedUpdateNeeded: true, }, "extra subjects without union": { ExpectedBinding: binding(ref("role"), subjects("a")), ActualBinding: binding(ref("role"), subjects("a", "b")), RemoveExtraSubjects: true, ExpectedUpdatedBinding: binding(ref("role"), subjects("a")), ExpectedUpdateNeeded: true, }, "extra subjects with union": { ExpectedBinding: binding(ref("role"), subjects("a")), ActualBinding: binding(ref("role"), subjects("a", "b")), RemoveExtraSubjects: false, ExpectedUpdatedBinding: nil, ExpectedUpdateNeeded: false, }, "missing subjects without union": { ExpectedBinding: binding(ref("role"), subjects("a", "c")), ActualBinding: binding(ref("role"), subjects("a", "b")), RemoveExtraSubjects: true, ExpectedUpdatedBinding: binding(ref("role"), subjects("a", "c")), ExpectedUpdateNeeded: true, }, "missing subjects with union": { ExpectedBinding: binding(ref("role"), subjects("a", "c")), ActualBinding: binding(ref("role"), subjects("a", "b")), RemoveExtraSubjects: false, 
ExpectedUpdatedBinding: binding(ref("role"), subjects("a", "b", "c")), ExpectedUpdateNeeded: true, }, } for k, tc := range tests { actualRoleBinding := ClusterRoleBindingAdapter{ClusterRoleBinding: tc.ActualBinding} expectedRoleBinding := ClusterRoleBindingAdapter{ClusterRoleBinding: tc.ExpectedBinding} result, err := computeReconciledRoleBinding(actualRoleBinding, expectedRoleBinding, tc.RemoveExtraSubjects) if err != nil { t.Errorf("%s: %v", k, err) continue } updateNeeded := result.Operation != ReconcileNone updatedBinding := result.RoleBinding.(ClusterRoleBindingAdapter).ClusterRoleBinding if updateNeeded != tc.ExpectedUpdateNeeded { t.Errorf("%s: Expected\n\t%v\ngot\n\t%v (%v)", k, tc.ExpectedUpdateNeeded, updateNeeded, result.Operation) continue } if updateNeeded && !apiequality.Semantic.DeepEqual(updatedBinding, tc.ExpectedUpdatedBinding) { t.Errorf("%s: Expected\n\t%v %v\ngot\n\t%v %v", k, tc.ExpectedUpdatedBinding.RoleRef, tc.ExpectedUpdatedBinding.Subjects, updatedBinding.RoleRef, updatedBinding.Subjects) } } } kubernetes-component-helpers-b5afa51/auth/rbac/reconciliation/role_interfaces.go000066400000000000000000000055451476422250100303700ustar00rootroot00000000000000/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package reconciliation import ( "context" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1" ) // +k8s:deepcopy-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/component-helpers/auth/rbac/reconciliation.RuleOwner // +k8s:deepcopy-gen:nonpointer-interfaces=true type RoleRuleOwner struct { Role *rbacv1.Role } func (o RoleRuleOwner) GetObject() runtime.Object { return o.Role } func (o RoleRuleOwner) GetNamespace() string { return o.Role.Namespace } func (o RoleRuleOwner) GetName() string { return o.Role.Name } func (o RoleRuleOwner) GetLabels() map[string]string { return o.Role.Labels } func (o RoleRuleOwner) SetLabels(in map[string]string) { o.Role.Labels = in } func (o RoleRuleOwner) GetAnnotations() map[string]string { return o.Role.Annotations } func (o RoleRuleOwner) SetAnnotations(in map[string]string) { o.Role.Annotations = in } func (o RoleRuleOwner) GetRules() []rbacv1.PolicyRule { return o.Role.Rules } func (o RoleRuleOwner) SetRules(in []rbacv1.PolicyRule) { o.Role.Rules = in } func (o RoleRuleOwner) GetAggregationRule() *rbacv1.AggregationRule { return nil } func (o RoleRuleOwner) SetAggregationRule(in *rbacv1.AggregationRule) { } type RoleModifier struct { Client rbacv1client.RolesGetter NamespaceClient corev1client.NamespaceInterface } func (c RoleModifier) Get(namespace, name string) (RuleOwner, error) { ret, err := c.Client.Roles(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return nil, err } return RoleRuleOwner{Role: ret}, err } func (c RoleModifier) Create(in RuleOwner) (RuleOwner, error) { if err := tryEnsureNamespace(c.NamespaceClient, 
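// tryEnsureNamespace is a helper elsewhere in this package (not shown in this excerpt); it best-effort creates the target namespace when it does not exist yet, so namespaced roles can be reconciled during cluster bootstrap before their namespaces have been created.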
in.GetNamespace()); err != nil { return nil, err } ret, err := c.Client.Roles(in.GetNamespace()).Create(context.TODO(), in.(RoleRuleOwner).Role, metav1.CreateOptions{}) if err != nil { return nil, err } return RoleRuleOwner{Role: ret}, err } func (c RoleModifier) Update(in RuleOwner) (RuleOwner, error) { ret, err := c.Client.Roles(in.GetNamespace()).Update(context.TODO(), in.(RoleRuleOwner).Role, metav1.UpdateOptions{}) if err != nil { return nil, err } return RoleRuleOwner{Role: ret}, err } kubernetes-component-helpers-b5afa51/auth/rbac/reconciliation/rolebinding_interfaces.go000066400000000000000000000066201476422250100317160ustar00rootroot00000000000000/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package reconciliation import ( "context" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1" ) // +k8s:deepcopy-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/component-helpers/auth/rbac/reconciliation.RoleBinding // +k8s:deepcopy-gen:nonpointer-interfaces=true type RoleBindingAdapter struct { RoleBinding *rbacv1.RoleBinding } func (o RoleBindingAdapter) GetObject() runtime.Object { return o.RoleBinding } func (o RoleBindingAdapter) GetNamespace() string { return o.RoleBinding.Namespace } func (o RoleBindingAdapter) GetName() string { return o.RoleBinding.Name } func (o RoleBindingAdapter) GetUID() types.UID { return o.RoleBinding.UID } func (o RoleBindingAdapter) GetLabels() map[string]string { return o.RoleBinding.Labels } func (o RoleBindingAdapter) SetLabels(in map[string]string) { o.RoleBinding.Labels = in } func (o RoleBindingAdapter) GetAnnotations() map[string]string { return o.RoleBinding.Annotations } func (o RoleBindingAdapter) SetAnnotations(in map[string]string) { o.RoleBinding.Annotations = in } func (o RoleBindingAdapter) GetRoleRef() rbacv1.RoleRef { return o.RoleBinding.RoleRef } func (o RoleBindingAdapter) GetSubjects() []rbacv1.Subject { return o.RoleBinding.Subjects } func (o RoleBindingAdapter) SetSubjects(in []rbacv1.Subject) { o.RoleBinding.Subjects = in } type RoleBindingClientAdapter struct { Client rbacv1client.RoleBindingsGetter NamespaceClient corev1client.NamespaceInterface } func (c RoleBindingClientAdapter) Get(namespace, name string) (RoleBinding, error) { ret, err := c.Client.RoleBindings(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return nil, err } return RoleBindingAdapter{RoleBinding: ret}, err } func (c RoleBindingClientAdapter) Create(in RoleBinding) (RoleBinding, error) { if err := tryEnsureNamespace(c.NamespaceClient, in.GetNamespace()); err != nil { return nil, err } ret, err := c.Client.RoleBindings(in.GetNamespace()).Create(context.TODO(), in.(RoleBindingAdapter).RoleBinding, metav1.CreateOptions{}) if err != nil { return nil, err } return RoleBindingAdapter{RoleBinding: ret}, err } func (c 
RoleBindingClientAdapter) Update(in RoleBinding) (RoleBinding, error) { ret, err := c.Client.RoleBindings(in.GetNamespace()).Update(context.TODO(), in.(RoleBindingAdapter).RoleBinding, metav1.UpdateOptions{}) if err != nil { return nil, err } return RoleBindingAdapter{RoleBinding: ret}, err } func (c RoleBindingClientAdapter) Delete(namespace, name string, uid types.UID) error { return c.Client.RoleBindings(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &uid}}) } kubernetes-component-helpers-b5afa51/auth/rbac/reconciliation/zz_generated.deepcopy.go000066400000000000000000000076651476422250100315210ustar00rootroot00000000000000//go:build !ignore_autogenerated // +build !ignore_autogenerated /* Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by deepcopy-gen. DO NOT EDIT. package reconciliation import ( v1 "k8s.io/api/rbac/v1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterRoleBindingAdapter) DeepCopyInto(out *ClusterRoleBindingAdapter) { *out = *in if in.ClusterRoleBinding != nil { in, out := &in.ClusterRoleBinding, &out.ClusterRoleBinding *out = new(v1.ClusterRoleBinding) (*in).DeepCopyInto(*out) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBindingAdapter. func (in *ClusterRoleBindingAdapter) DeepCopy() *ClusterRoleBindingAdapter { if in == nil { return nil } out := new(ClusterRoleBindingAdapter) in.DeepCopyInto(out) return out } // DeepCopyRoleBinding is an autogenerated deepcopy function, copying the receiver, creating a new RoleBinding. func (in ClusterRoleBindingAdapter) DeepCopyRoleBinding() RoleBinding { return *in.DeepCopy() } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterRoleRuleOwner) DeepCopyInto(out *ClusterRoleRuleOwner) { *out = *in if in.ClusterRole != nil { in, out := &in.ClusterRole, &out.ClusterRole *out = new(v1.ClusterRole) (*in).DeepCopyInto(*out) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleRuleOwner. func (in *ClusterRoleRuleOwner) DeepCopy() *ClusterRoleRuleOwner { if in == nil { return nil } out := new(ClusterRoleRuleOwner) in.DeepCopyInto(out) return out } // DeepCopyRuleOwner is an autogenerated deepcopy function, copying the receiver, creating a new RuleOwner. func (in ClusterRoleRuleOwner) DeepCopyRuleOwner() RuleOwner { return *in.DeepCopy() } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RoleBindingAdapter) DeepCopyInto(out *RoleBindingAdapter) { *out = *in if in.RoleBinding != nil { in, out := &in.RoleBinding, &out.RoleBinding *out = new(v1.RoleBinding) (*in).DeepCopyInto(*out) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingAdapter. 
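// These generated value-receiver deep copies are what let the reconcilers diff safely: computeReconciledRole and computeReconciledRoleBinding start from existing.DeepCopyRuleOwner() / existing.DeepCopyRoleBinding(), so the object fetched from the API server is never mutated while the result is being built.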
func (in *RoleBindingAdapter) DeepCopy() *RoleBindingAdapter { if in == nil { return nil } out := new(RoleBindingAdapter) in.DeepCopyInto(out) return out } // DeepCopyRoleBinding is an autogenerated deepcopy function, copying the receiver, creating a new RoleBinding. func (in RoleBindingAdapter) DeepCopyRoleBinding() RoleBinding { return *in.DeepCopy() } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RoleRuleOwner) DeepCopyInto(out *RoleRuleOwner) { *out = *in if in.Role != nil { in, out := &in.Role, &out.Role *out = new(v1.Role) (*in).DeepCopyInto(*out) } return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleRuleOwner. func (in *RoleRuleOwner) DeepCopy() *RoleRuleOwner { if in == nil { return nil } out := new(RoleRuleOwner) in.DeepCopyInto(out) return out } // DeepCopyRuleOwner is an autogenerated deepcopy function, copying the receiver, creating a new RuleOwner. func (in RoleRuleOwner) DeepCopyRuleOwner() RuleOwner { return *in.DeepCopy() } kubernetes-component-helpers-b5afa51/auth/rbac/validation/000077500000000000000000000000001476422250100240145ustar00rootroot00000000000000kubernetes-component-helpers-b5afa51/auth/rbac/validation/policy_comparator.go000066400000000000000000000127321476422250100300760ustar00rootroot00000000000000/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package validation import ( "strings" rbacv1 "k8s.io/api/rbac/v1" ) // Covers determines whether or not the ownerRules cover the servantRules in terms of allowed actions. // It returns whether or not the ownerRules cover and a list of the rules that the ownerRules do not cover. func Covers(ownerRules, servantRules []rbacv1.PolicyRule) (bool, []rbacv1.PolicyRule) { // 1. Break every servantRule into individual rule tuples: group, verb, resource, resourceName // 2. Compare the mini-rules against each owner rule. Because the breakdown is down to the most atomic level, we're guaranteed that each mini-servant rule will be either fully covered or not covered by a single owner rule // 3. Any left over mini-rules means that we are not covered and we have a nice list of them. // TODO: it might be nice to collapse the list down into something more human readable subrules := []rbacv1.PolicyRule{} for _, servantRule := range servantRules { subrules = append(subrules, BreakdownRule(servantRule)...) 
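// For example, a single servant rule {APIGroups: ["apps"], Resources: ["deployments", "statefulsets"], Verbs: ["get", "list"]} breaks down into four atomic subrules: {apps, deployments, get}, {apps, deployments, list}, {apps, statefulsets, get}, {apps, statefulsets, list}; each subrule must then be covered in full by some single owner rule below.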
} uncoveredRules := []rbacv1.PolicyRule{} for _, subrule := range subrules { covered := false for _, ownerRule := range ownerRules { if ruleCovers(ownerRule, subrule) { covered = true break } } if !covered { uncoveredRules = append(uncoveredRules, subrule) } } return (len(uncoveredRules) == 0), uncoveredRules } // BreakdownRule takes a rule and builds an equivalent list of rules that each have at most one verb, one // resource, and one resource name func BreakdownRule(rule rbacv1.PolicyRule) []rbacv1.PolicyRule { subrules := []rbacv1.PolicyRule{} for _, group := range rule.APIGroups { for _, resource := range rule.Resources { for _, verb := range rule.Verbs { if len(rule.ResourceNames) > 0 { for _, resourceName := range rule.ResourceNames { subrules = append(subrules, rbacv1.PolicyRule{APIGroups: []string{group}, Resources: []string{resource}, Verbs: []string{verb}, ResourceNames: []string{resourceName}}) } } else { subrules = append(subrules, rbacv1.PolicyRule{APIGroups: []string{group}, Resources: []string{resource}, Verbs: []string{verb}}) } } } } // Non-resource URLs are unique because they only combine with verbs. for _, nonResourceURL := range rule.NonResourceURLs { for _, verb := range rule.Verbs { subrules = append(subrules, rbacv1.PolicyRule{NonResourceURLs: []string{nonResourceURL}, Verbs: []string{verb}}) } } return subrules } func has(set []string, ele string) bool { for _, s := range set { if s == ele { return true } } return false } func hasAll(set, contains []string) bool { owning := make(map[string]struct{}, len(set)) for _, ele := range set { owning[ele] = struct{}{} } for _, ele := range contains { if _, ok := owning[ele]; !ok { return false } } return true } func resourceCoversAll(setResources, coversResources []string) bool { // if we have a star or an exact match on all resources, then we match if has(setResources, rbacv1.ResourceAll) || hasAll(setResources, coversResources) { return true } for _, path := range coversResources { // if we have an exact match, then we match. if has(setResources, path) { continue } // if we're not a subresource, then we definitely don't match. fail.
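// The remaining way to match is a subresource wildcard: an owner entry of the form "*/<subresource>" covers any "<resource>/<subresource>". For example, an owner rule with Resources: ["*/scale"] covers a servant rule on "deployments/scale", but it never covers the parent resource "deployments" itself.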
if !strings.Contains(path, "/") { return false } tokens := strings.SplitN(path, "/", 2) resourceToCheck := "*/" + tokens[1] if !has(setResources, resourceToCheck) { return false } } return true } func nonResourceURLsCoversAll(set, covers []string) bool { for _, path := range covers { covered := false for _, owner := range set { if nonResourceURLCovers(owner, path) { covered = true break } } if !covered { return false } } return true } func nonResourceURLCovers(ownerPath, subPath string) bool { if ownerPath == subPath { return true } return strings.HasSuffix(ownerPath, "*") && strings.HasPrefix(subPath, strings.TrimRight(ownerPath, "*")) } // ruleCovers determines whether the ownerRule (which may have multiple verbs, resources, and resourceNames) covers // the subrule (which may only contain at most one verb, resource, and resourceName) func ruleCovers(ownerRule, subRule rbacv1.PolicyRule) bool { verbMatches := has(ownerRule.Verbs, rbacv1.VerbAll) || hasAll(ownerRule.Verbs, subRule.Verbs) groupMatches := has(ownerRule.APIGroups, rbacv1.APIGroupAll) || hasAll(ownerRule.APIGroups, subRule.APIGroups) resourceMatches := resourceCoversAll(ownerRule.Resources, subRule.Resources) nonResourceURLMatches := nonResourceURLsCoversAll(ownerRule.NonResourceURLs, subRule.NonResourceURLs) resourceNameMatches := false if len(subRule.ResourceNames) == 0 { resourceNameMatches = (len(ownerRule.ResourceNames) == 0) } else { resourceNameMatches = (len(ownerRule.ResourceNames) == 0) || hasAll(ownerRule.ResourceNames, subRule.ResourceNames) } return verbMatches && groupMatches && resourceMatches && resourceNameMatches && nonResourceURLMatches } kubernetes-component-helpers-b5afa51/auth/rbac/validation/policy_comparator_test.go000066400000000000000000000341741476422250100311410ustar00rootroot00000000000000/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package validation import ( "reflect" "testing" rbacv1 "k8s.io/api/rbac/v1" ) type escalationTest struct { ownerRules []rbacv1.PolicyRule servantRules []rbacv1.PolicyRule expectedCovered bool expectedUncoveredRules []rbacv1.PolicyRule } func TestCoversExactMatch(t *testing.T) { escalationTest{ ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"builds"}}, }, servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"builds"}}, }, expectedCovered: true, expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversSubresourceWildcard(t *testing.T) { escalationTest{ ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"*/scale"}}, }, servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"foo/scale"}}, }, expectedCovered: true, expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversMultipleRulesCoveringSingleRule(t *testing.T) { escalationTest{ ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"delete"}, Resources: []string{"deployments"}}, {APIGroups: []string{"v1"}, Verbs: []string{"delete"}, Resources: []string{"builds"}}, {APIGroups: []string{"v1"}, Verbs: []string{"update"}, Resources: []string{"builds", "deployments"}}, }, servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"delete", "update"}, Resources: []string{"builds", "deployments"}}, }, expectedCovered: true, expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversMultipleAPIGroupsCoveringSingleRule(t *testing.T) { escalationTest{ ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"group1"}, Verbs: []string{"delete"}, Resources: []string{"deployments"}}, {APIGroups: []string{"group1"}, Verbs: []string{"delete"}, Resources: []string{"builds"}}, {APIGroups: []string{"group1"}, Verbs: []string{"update"}, Resources: []string{"builds", "deployments"}}, {APIGroups: []string{"group2"}, Verbs: []string{"delete"}, Resources: []string{"deployments"}}, {APIGroups: []string{"group2"}, Verbs: []string{"delete"}, Resources: []string{"builds"}}, {APIGroups: []string{"group2"}, Verbs: []string{"update"}, Resources: []string{"builds", "deployments"}}, }, servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"group1", "group2"}, Verbs: []string{"delete", "update"}, Resources: []string{"builds", "deployments"}}, }, expectedCovered: true, expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversSingleAPIGroupsCoveringMultiple(t *testing.T) { escalationTest{ ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"group1", "group2"}, Verbs: []string{"delete", "update"}, Resources: []string{"builds", "deployments"}}, }, servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"group1"}, Verbs: []string{"delete"}, Resources: []string{"deployments"}}, {APIGroups: []string{"group1"}, Verbs: []string{"delete"}, Resources: []string{"builds"}}, {APIGroups: []string{"group1"}, Verbs: []string{"update"}, Resources: []string{"builds", "deployments"}}, {APIGroups: []string{"group2"}, Verbs: []string{"delete"}, Resources: []string{"deployments"}}, {APIGroups: []string{"group2"}, Verbs: []string{"delete"}, Resources: []string{"builds"}}, {APIGroups: []string{"group2"}, Verbs: []string{"update"}, Resources: []string{"builds", "deployments"}}, }, expectedCovered: true, expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func 
TestCoversMultipleRulesMissingSingleVerbResourceCombination(t *testing.T) { escalationTest{ ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"delete", "update"}, Resources: []string{"builds", "deployments"}}, {APIGroups: []string{"v1"}, Verbs: []string{"delete"}, Resources: []string{"pods"}}, }, servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"delete", "update"}, Resources: []string{"builds", "deployments", "pods"}}, }, expectedCovered: false, expectedUncoveredRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"update"}, Resources: []string{"pods"}}, }, }.test(t) } func TestCoversAPIGroupStarCoveringMultiple(t *testing.T) { escalationTest{ ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"*"}, Verbs: []string{"get"}, Resources: []string{"roles"}}, }, servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"group1", "group2"}, Verbs: []string{"get"}, Resources: []string{"roles"}}, }, expectedCovered: true, expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversEnumerationNotCoveringAPIGroupStar(t *testing.T) { escalationTest{ ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"dummy-group"}, Verbs: []string{"get"}, Resources: []string{"roles"}}, }, servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"*"}, Verbs: []string{"get"}, Resources: []string{"roles"}}, }, expectedCovered: false, expectedUncoveredRules: []rbacv1.PolicyRule{ {APIGroups: []string{"*"}, Verbs: []string{"get"}, Resources: []string{"roles"}}, }, }.test(t) } func TestCoversAPIGroupStarCoveringStar(t *testing.T) { escalationTest{ ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"*"}, Verbs: []string{"get"}, Resources: []string{"roles"}}, }, servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"*"}, Verbs: []string{"get"}, Resources: []string{"roles"}}, }, expectedCovered: true, expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversVerbStarCoveringMultiple(t *testing.T) { escalationTest{ ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"*"}, Resources: []string{"roles"}}, }, servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"watch", "list"}, Resources: []string{"roles"}}, }, expectedCovered: true, expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversEnumerationNotCoveringVerbStar(t *testing.T) { escalationTest{ ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get", "list", "watch", "create", "update", "delete", "exec"}, Resources: []string{"roles"}}, }, servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"*"}, Resources: []string{"roles"}}, }, expectedCovered: false, expectedUncoveredRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"*"}, Resources: []string{"roles"}}, }, }.test(t) } func TestCoversVerbStarCoveringStar(t *testing.T) { escalationTest{ ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"*"}, Resources: []string{"roles"}}, }, servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"*"}, Resources: []string{"roles"}}, }, expectedCovered: true, expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversResourceStarCoveringMultiple(t *testing.T) { escalationTest{ ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"*"}}, }, servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, 
Resources: []string{"resourcegroup:deployments"}}, }, expectedCovered: true, expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversEnumerationNotCoveringResourceStar(t *testing.T) { escalationTest{ ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"roles", "resourcegroup:deployments"}}, }, servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"*"}}, }, expectedCovered: false, expectedUncoveredRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"*"}}, }, }.test(t) } func TestCoversResourceStarCoveringStar(t *testing.T) { escalationTest{ ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"*"}}, }, servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"*"}}, }, expectedCovered: true, expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversResourceNameEmptyCoveringMultiple(t *testing.T) { escalationTest{ ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"pods"}, ResourceNames: []string{}}, }, servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"pods"}, ResourceNames: []string{"foo", "bar"}}, }, expectedCovered: true, expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversEnumerationNotCoveringResourceNameEmpty(t *testing.T) { escalationTest{ ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"pods"}, ResourceNames: []string{"foo", "bar"}}, }, servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"pods"}, ResourceNames: []string{}}, }, expectedCovered: false, expectedUncoveredRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"pods"}}, }, }.test(t) } func TestCoversNonResourceURLs(t *testing.T) { escalationTest{ ownerRules: []rbacv1.PolicyRule{ {NonResourceURLs: []string{"/apis"}, Verbs: []string{"*"}}, }, servantRules: []rbacv1.PolicyRule{ {NonResourceURLs: []string{"/apis"}, Verbs: []string{"*"}}, }, expectedCovered: true, expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversNonResourceURLsStar(t *testing.T) { escalationTest{ ownerRules: []rbacv1.PolicyRule{ {NonResourceURLs: []string{"*"}, Verbs: []string{"*"}}, }, servantRules: []rbacv1.PolicyRule{ {NonResourceURLs: []string{"/apis", "/apis/v1", "/"}, Verbs: []string{"*"}}, }, expectedCovered: true, expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversNonResourceURLsStarAfterPrefixDoesntCover(t *testing.T) { escalationTest{ ownerRules: []rbacv1.PolicyRule{ {NonResourceURLs: []string{"/apis/*"}, Verbs: []string{"*"}}, }, servantRules: []rbacv1.PolicyRule{ {NonResourceURLs: []string{"/apis", "/apis/v1"}, Verbs: []string{"get"}}, }, expectedCovered: false, expectedUncoveredRules: []rbacv1.PolicyRule{ {NonResourceURLs: []string{"/apis"}, Verbs: []string{"get"}}, }, }.test(t) } func TestCoversNonResourceURLsStarAfterPrefix(t *testing.T) { escalationTest{ ownerRules: []rbacv1.PolicyRule{ {NonResourceURLs: []string{"/apis/*"}, Verbs: []string{"*"}}, }, servantRules: []rbacv1.PolicyRule{ {NonResourceURLs: []string{"/apis/v1/foo", "/apis/v1"}, Verbs: []string{"get"}}, }, expectedCovered: true, expectedUncoveredRules: 
[]rbacv1.PolicyRule{}, }.test(t) } func TestCoversNonResourceURLsWithOtherFields(t *testing.T) { escalationTest{ ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"builds"}, NonResourceURLs: []string{"/apis"}}, }, servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"builds"}, NonResourceURLs: []string{"/apis"}}, }, expectedCovered: true, expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversNonResourceURLsWithOtherFieldsFailure(t *testing.T) { escalationTest{ ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"builds"}}, }, servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"builds"}, NonResourceURLs: []string{"/apis"}}, }, expectedCovered: false, expectedUncoveredRules: []rbacv1.PolicyRule{{NonResourceURLs: []string{"/apis"}, Verbs: []string{"get"}}}, }.test(t) } func (test escalationTest) test(t *testing.T) { actualCovered, actualUncoveredRules := Covers(test.ownerRules, test.servantRules) if actualCovered != test.expectedCovered { t.Errorf("expected %v, but got %v", test.expectedCovered, actualCovered) } if !rulesMatch(test.expectedUncoveredRules, actualUncoveredRules) { t.Errorf("expected %v, but got %v", test.expectedUncoveredRules, actualUncoveredRules) } } func rulesMatch(expectedRules, actualRules []rbacv1.PolicyRule) bool { if len(expectedRules) != len(actualRules) { return false } for _, expectedRule := range expectedRules { found := false for _, actualRule := range actualRules { if reflect.DeepEqual(expectedRule, actualRule) { found = true break } } if !found { return false } } return true } func TestNonResourceURLCovers(t *testing.T) { tests := []struct { owner string requested string want bool }{ {"*", "", true}, {"*", "/", true}, {"*", "/api", true}, {"/*", "", false}, {"/*", "/", true}, {"/*", "/foo", true}, {"/api", "/api", true}, {"/apis", "/api", false}, {"/api/v1", "/api", false}, {"/api/v1", "/api/v1", true}, {"/api/*", "/api/v1", true}, {"/api/*", "/api", false}, {"/api/*/*", "/api/v1", false}, {"/*/v1/*", "/api/v1", false}, } for _, tc := range tests { got := nonResourceURLCovers(tc.owner, tc.requested) if got != tc.want { t.Errorf("nonResourceURLCovers(%q, %q): want=(%t), got=(%t)", tc.owner, tc.requested, tc.want, got) } } } kubernetes-component-helpers-b5afa51/code-of-conduct.md000066400000000000000000000002241476422250100233030ustar00rootroot00000000000000# Kubernetes Community Code of Conduct Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) kubernetes-component-helpers-b5afa51/doc.go000066400000000000000000000011711476422250100211060ustar00rootroot00000000000000/* Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package componenthelpers // import "k8s.io/component-helpers" kubernetes-component-helpers-b5afa51/go.mod000066400000000000000000000041511476422250100211210ustar00rootroot00000000000000// This is a generated file. Do not edit directly. module k8s.io/component-helpers go 1.23.0 godebug default=go1.23 godebug winsymlink=0 require ( github.com/google/go-cmp v0.6.0 github.com/stretchr/testify v1.9.0 k8s.io/api v0.32.3 k8s.io/apimachinery v0.32.3 k8s.io/client-go v0.32.3 k8s.io/klog/v2 v2.130.1 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 ) require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/x448/float16 v0.8.4 // indirect golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sys v0.26.0 // indirect golang.org/x/term v0.25.0 // indirect golang.org/x/text v0.19.0 // indirect golang.org/x/time v0.7.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) kubernetes-component-helpers-b5afa51/go.sum000066400000000000000000000332731476422250100211550ustar00rootroot00000000000000github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= 
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/modern-go/concurrent 
v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net 
v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c 
h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= kubernetes-component-helpers-b5afa51/node/000077500000000000000000000000001476422250100207375ustar00rootroot00000000000000kubernetes-component-helpers-b5afa51/node/OWNERS000066400000000000000000000002141476422250100216740ustar00rootroot00000000000000# See the OWNERS docs at https://go.k8s.io/owners approvers: - sig-node-approvers reviewers: - sig-node-reviewers labels: - sig/node kubernetes-component-helpers-b5afa51/node/topology/000077500000000000000000000000001476422250100226135ustar00rootroot00000000000000kubernetes-component-helpers-b5afa51/node/topology/helpers.go000066400000000000000000000035401476422250100246060ustar00rootroot00000000000000/* Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package topology import ( "k8s.io/api/core/v1" ) // GetZoneKey is a helper function that builds a string identifier that is unique per failure-zone; // it returns empty-string for no zone. // Since there are currently two separate zone keys: // - "failure-domain.beta.kubernetes.io/zone" // - "topology.kubernetes.io/zone" // // GetZoneKey will first check failure-domain.beta.kubernetes.io/zone and if not exists, will then check // topology.kubernetes.io/zone func GetZoneKey(node *v1.Node) string { labels := node.Labels if labels == nil { return "" } // TODO: "failure-domain.beta..." names are deprecated, but will // stick around a long time due to existing on old extant objects like PVs. // Maybe one day we can stop considering them (see #88493). zone, ok := labels[v1.LabelFailureDomainBetaZone] if !ok { zone, _ = labels[v1.LabelTopologyZone] } region, ok := labels[v1.LabelFailureDomainBetaRegion] if !ok { region, _ = labels[v1.LabelTopologyRegion] } if region == "" && zone == "" { return "" } // We include the null character just in case region or failureDomain has a colon // (We do assume there's no null characters in a region or failureDomain) // As a nice side-benefit, the null character is not printed by fmt.Print or glog return region + ":\x00:" + zone } kubernetes-component-helpers-b5afa51/node/topology/helpers_test.go000066400000000000000000000050441476422250100256460ustar00rootroot00000000000000/* Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package topology import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "testing" ) func Test_GetZoneKey(t *testing.T) { tests := []struct { name string node *v1.Node zone string }{ { name: "has no zone or region keys", node: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{}, }, }, zone: "", }, { name: "has beta zone and region keys", node: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ v1.LabelFailureDomainBetaZone: "zone1", v1.LabelFailureDomainBetaRegion: "region1", }, }, }, zone: "region1:\x00:zone1", }, { name: "has GA zone and region keys", node: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ v1.LabelTopologyZone: "zone1", v1.LabelTopologyRegion: "region1", }, }, }, zone: "region1:\x00:zone1", }, { name: "has both beta and GA zone and region keys", node: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ v1.LabelTopologyZone: "zone1", v1.LabelTopologyRegion: "region1", v1.LabelFailureDomainBetaZone: "zone1", v1.LabelFailureDomainBetaRegion: "region1", }, }, }, zone: "region1:\x00:zone1", }, { name: "has both beta and GA zone and region keys, beta labels take precedent", node: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ v1.LabelTopologyZone: "zone1", v1.LabelTopologyRegion: "region1", v1.LabelFailureDomainBetaZone: "zone2", v1.LabelFailureDomainBetaRegion: "region2", }, }, }, zone: "region2:\x00:zone2", }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { zone := GetZoneKey(test.node) if zone != test.zone { t.Logf("actual zone key: %q", zone) t.Logf("expected zone key: %q", test.zone) t.Errorf("unexpected zone key") } }) } } kubernetes-component-helpers-b5afa51/node/util/000077500000000000000000000000001476422250100217145ustar00rootroot00000000000000kubernetes-component-helpers-b5afa51/node/util/cidr.go000066400000000000000000000033561476422250100231730ustar00rootroot00000000000000/* Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package util import ( "context" "encoding/json" "fmt" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" ) type nodeForCIDRMergePatch struct { Spec nodeSpecForMergePatch `json:"spec"` } type nodeSpecForMergePatch struct { PodCIDR string `json:"podCIDR"` PodCIDRs []string `json:"podCIDRs,omitempty"` } // PatchNodeCIDRs patches the specified node.CIDR=cidrs[0] and node.CIDRs to the given value. 
func PatchNodeCIDRs(ctx context.Context, c clientset.Interface, node types.NodeName, cidrs []string) error { // set the pod cidrs list and set the old pod cidr field patch := nodeForCIDRMergePatch{ Spec: nodeSpecForMergePatch{ PodCIDR: cidrs[0], PodCIDRs: cidrs, }, } patchBytes, err := json.Marshal(&patch) if err != nil { return fmt.Errorf("failed to json.Marshal CIDR: %v", err) } klog.FromContext(ctx).V(4).Info("cidrs patch bytes", "patchBytes", string(patchBytes)) if _, err := c.CoreV1().Nodes().Patch(ctx, string(node), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil { return fmt.Errorf("failed to patch node CIDR: %v", err) } return nil } kubernetes-component-helpers-b5afa51/node/util/conditions.go000066400000000000000000000033461476422250100244210ustar00rootroot00000000000000/* Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package util import ( "context" "encoding/json" "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" ) // GetNodeCondition extracts the provided condition from the given status. // It returns the index of the located condition along with a pointer to it, // or -1 and nil if the condition is not present. func GetNodeCondition(status *v1.NodeStatus, conditionType v1.NodeConditionType) (int, *v1.NodeCondition) { if status == nil { return -1, nil } for i := range status.Conditions { if status.Conditions[i].Type == conditionType { return i, &status.Conditions[i] } } return -1, nil } // SetNodeCondition updates the specific node condition with a patch operation. func SetNodeCondition(c clientset.Interface, node types.NodeName, condition v1.NodeCondition) error { condition.LastHeartbeatTime = metav1.NewTime(time.Now()) patch, err := json.Marshal(map[string]interface{}{ "status": map[string]interface{}{ "conditions": []v1.NodeCondition{condition}, }, }) if err != nil { return err } _, err = c.CoreV1().Nodes().PatchStatus(context.TODO(), string(node), patch) return err } kubernetes-component-helpers-b5afa51/node/util/hostname.go000066400000000000000000000025361476422250100240670ustar00rootroot00000000000000/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package util import ( "fmt" "os" "strings" ) // GetHostname returns the OS's hostname if 'hostnameOverride' is empty; otherwise, it returns // 'hostnameOverride'. In either case, the value is canonicalized (trimmed and // lowercased).
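// For example (illustrative): GetHostname(" MyHost ") yields "myhost", while GetHostname("") canonicalizes the hostname reported by the OS.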
func GetHostname(hostnameOverride string) (string, error) { hostName := hostnameOverride if len(hostName) == 0 { nodeName, err := os.Hostname() if err != nil { return "", fmt.Errorf("couldn't determine hostname: %w", err) } hostName = nodeName } // Trim whitespaces first to avoid getting an empty hostname // For linux, the hostname is read from file /proc/sys/kernel/hostname directly hostName = strings.TrimSpace(hostName) if len(hostName) == 0 { return "", fmt.Errorf("empty hostname is invalid") } return strings.ToLower(hostName), nil } kubernetes-component-helpers-b5afa51/node/util/hostname_test.go000066400000000000000000000036171476422250100251270ustar00rootroot00000000000000/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package util import ( "fmt" "os" "strings" "testing" ) func TestGetHostname(t *testing.T) { hostname, err := os.Hostname() testCases := []struct { desc string hostname string result string expectedErr error }{ { desc: "overridden hostname", hostname: "overridden", result: "overridden", expectedErr: nil, }, { desc: "overridden hostname uppercase", hostname: "OVERRIDDEN", result: "overridden", expectedErr: nil, }, { desc: "hostname contains spaces", hostname: " OVERRIDDEN ", result: "overridden", expectedErr: nil, }, { desc: "hostname contains only spaces", hostname: " ", result: "", expectedErr: fmt.Errorf("empty hostname is invalid"), }, { desc: "empty parameter", hostname: "", result: strings.ToLower(hostname), expectedErr: err, }, } for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { result, err := GetHostname(tc.hostname) if err != nil && tc.expectedErr == nil { t.Errorf("unexpected error: %v", err) } if err == nil && tc.expectedErr != nil { t.Errorf("expected error %v, got nil", tc.expectedErr) } if tc.result != result { t.Errorf("unexpected result: %s, expected: %s", result, tc.result) } }) } } kubernetes-component-helpers-b5afa51/node/util/ips.go000066400000000000000000000055041476422250100230420ustar00rootroot00000000000000/* Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package util import ( "fmt" "net" "strings" netutils "k8s.io/utils/net" ) const ( cloudProviderNone = "" cloudProviderExternal = "external" ) // parseNodeIP implements ParseNodeIPArgument and ParseNodeIPAnnotation func parseNodeIP(nodeIP string, allowDual, sloppy bool) ([]net.IP, []string, error) { var nodeIPs []net.IP var invalidIPs []string if nodeIP != "" || !sloppy { for _, ip := range strings.Split(nodeIP, ",") { if sloppy { ip = strings.TrimSpace(ip) } parsedNodeIP := netutils.ParseIPSloppy(ip) if parsedNodeIP == nil { invalidIPs = append(invalidIPs, ip) if !sloppy { return nil, invalidIPs, fmt.Errorf("could not parse %q", ip) } } else { nodeIPs = append(nodeIPs, parsedNodeIP) } } } if len(nodeIPs) > 2 || (len(nodeIPs) == 2 && netutils.IsIPv6(nodeIPs[0]) == netutils.IsIPv6(nodeIPs[1])) { return nil, invalidIPs, fmt.Errorf("must contain either a single IP or a dual-stack pair of IPs") } else if len(nodeIPs) == 2 && !allowDual { return nil, invalidIPs, fmt.Errorf("dual-stack not supported in this configuration") } else if len(nodeIPs) == 2 && (nodeIPs[0].IsUnspecified() || nodeIPs[1].IsUnspecified()) { return nil, invalidIPs, fmt.Errorf("dual-stack node IP cannot include '0.0.0.0' or '::'") } return nodeIPs, invalidIPs, nil } // ParseNodeIPArgument parses kubelet's --node-ip argument. // If nodeIP contains invalid values, they will be returned as strings. // This is done also when an error is returned. // The caller then can decide what to do with the invalid values. // Dual-stack node IPs are allowed if cloudProvider is unset or `"external"`. func ParseNodeIPArgument(nodeIP, cloudProvider string) ([]net.IP, []string, error) { var allowDualStack bool if cloudProvider == cloudProviderNone || cloudProvider == cloudProviderExternal { allowDualStack = true } return parseNodeIP(nodeIP, allowDualStack, true) } // ParseNodeIPAnnotation parses the `alpha.kubernetes.io/provided-node-ip` annotation, // which can be either a single IP address or a comma-separated pair of IP addresses. // Unlike with ParseNodeIPArgument, invalid values are considered an error. func ParseNodeIPAnnotation(nodeIP string) ([]net.IP, error) { nodeIps, _, err := parseNodeIP(nodeIP, true, false) return nodeIps, err } kubernetes-component-helpers-b5afa51/node/util/ips_test.go000066400000000000000000000202411476422250100240740ustar00rootroot00000000000000/* Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package util import ( "fmt" "net" "reflect" "strings" "testing" netutils "k8s.io/utils/net" ) func TestParseNodeIPArgument(t *testing.T) { testCases := []struct { desc string in string out []net.IP invalids []string err string ssErr string }{ { desc: "empty --node-ip", in: "", out: nil, }, { desc: "just whitespace (ignored)", in: " ", out: nil, invalids: []string{""}, }, { desc: "garbage (ignored)", in: "blah", out: nil, invalids: []string{"blah"}, }, { desc: "single IPv4", in: "1.2.3.4", out: []net.IP{ netutils.ParseIPSloppy("1.2.3.4"), }, }, { desc: "single IPv4 with whitespace", in: " 1.2.3.4 ", out: []net.IP{ netutils.ParseIPSloppy("1.2.3.4"), }, }, { desc: "single IPv4 non-canonical", in: "01.2.3.004", out: []net.IP{ netutils.ParseIPSloppy("1.2.3.4"), }, }, { desc: "single IPv4 invalid (ignored)", in: "1.2.3", out: nil, invalids: []string{"1.2.3"}, }, { desc: "single IPv4 CIDR (ignored)", in: "1.2.3.0/24", out: nil, invalids: []string{"1.2.3.0/24"}, }, { desc: "single IPv4 unspecified", in: "0.0.0.0", out: []net.IP{ net.IPv4zero, }, }, { desc: "single IPv4 plus ignored garbage", in: "1.2.3.4,not-an-IPv6-address", out: []net.IP{ netutils.ParseIPSloppy("1.2.3.4"), }, invalids: []string{"not-an-IPv6-address"}, }, { desc: "single IPv6", in: "abcd::ef01", out: []net.IP{ netutils.ParseIPSloppy("abcd::ef01"), }, }, { desc: "single IPv6 non-canonical", in: "abcd:0abc:00ab:0000:0000::1", out: []net.IP{ netutils.ParseIPSloppy("abcd:abc:ab::1"), }, }, { desc: "simple dual-stack", in: "1.2.3.4,abcd::ef01", out: []net.IP{ netutils.ParseIPSloppy("1.2.3.4"), netutils.ParseIPSloppy("abcd::ef01"), }, ssErr: "not supported in this configuration", }, { desc: "dual-stack with whitespace", in: "abcd::ef01 , 1.2.3.4", out: []net.IP{ netutils.ParseIPSloppy("abcd::ef01"), netutils.ParseIPSloppy("1.2.3.4"), }, ssErr: "not supported in this configuration", }, { desc: "double IPv4", in: "1.2.3.4,5.6.7.8", err: "either a single IP or a dual-stack pair of IPs", }, { desc: "double IPv6", in: "abcd::1,abcd::2", err: "either a single IP or a dual-stack pair of IPs", }, { desc: "dual-stack with unspecified", in: "1.2.3.4,::", err: "cannot include '0.0.0.0' or '::'", ssErr: "not supported in this configuration", }, { desc: "dual-stack with unspecified", in: "0.0.0.0,abcd::1", err: "cannot include '0.0.0.0' or '::'", ssErr: "not supported in this configuration", }, { desc: "dual-stack plus ignored garbage", in: "abcd::ef01 , 1.2.3.4, something else", out: []net.IP{ netutils.ParseIPSloppy("abcd::ef01"), netutils.ParseIPSloppy("1.2.3.4"), }, invalids: []string{"something else"}, ssErr: "not supported in this configuration", }, { desc: "triple stack!", in: "1.2.3.4,abcd::1,5.6.7.8", err: "either a single IP or a dual-stack pair of IPs", }, } configurations := []struct { cloudProvider string dualStackSupported bool }{ {cloudProviderNone, true}, {cloudProviderExternal, true}, {"gce", false}, } for _, tc := range testCases { for _, conf := range configurations { desc := fmt.Sprintf("%s, cloudProvider=%q", tc.desc, conf.cloudProvider) t.Run(desc, func(t *testing.T) { parsed, invalidIPs, err := ParseNodeIPArgument(tc.in, conf.cloudProvider) expectedOut := tc.out expectedInvalids := tc.invalids expectedErr := tc.err if !conf.dualStackSupported { if len(tc.out) == 2 { expectedOut = nil } if tc.ssErr != "" { expectedErr = tc.ssErr } } if !reflect.DeepEqual(parsed, expectedOut) { t.Errorf("expected %#v, got %#v", expectedOut, parsed) } if !reflect.DeepEqual(invalidIPs, expectedInvalids) { t.Errorf("[invalidIps] expected %#v, 
got %#v", expectedInvalids, invalidIPs) } if err != nil { if expectedErr == "" { t.Errorf("unexpected error %v", err) } else if !strings.Contains(err.Error(), expectedErr) { t.Errorf("expected error with %q, got %v", expectedErr, err) } } else if expectedErr != "" { t.Errorf("expected error with %q, got no error", expectedErr) } }) } } } func TestParseNodeIPAnnotation(t *testing.T) { testCases := []struct { desc string in string out []net.IP err string ssErr string }{ { desc: "empty --node-ip", in: "", err: "could not parse", }, { desc: "just whitespace", in: " ", err: "could not parse", }, { desc: "garbage", in: "blah", err: "could not parse", }, { desc: "single IPv4", in: "1.2.3.4", out: []net.IP{ netutils.ParseIPSloppy("1.2.3.4"), }, }, { desc: "single IPv4 with whitespace", in: " 1.2.3.4 ", err: "could not parse", }, { desc: "single IPv4 non-canonical", in: "01.2.3.004", out: []net.IP{ netutils.ParseIPSloppy("1.2.3.4"), }, }, { desc: "single IPv4 invalid", in: "1.2.3", err: "could not parse", }, { desc: "single IPv4 CIDR", in: "1.2.3.0/24", err: "could not parse", }, { desc: "single IPv4 unspecified", in: "0.0.0.0", out: []net.IP{ net.IPv4zero, }, }, { desc: "single IPv4 plus garbage", in: "1.2.3.4,not-an-IPv6-address", err: "could not parse", }, { desc: "single IPv6", in: "abcd::ef01", out: []net.IP{ netutils.ParseIPSloppy("abcd::ef01"), }, }, { desc: "single IPv6 non-canonical", in: "abcd:0abc:00ab:0000:0000::1", out: []net.IP{ netutils.ParseIPSloppy("abcd:abc:ab::1"), }, }, { desc: "simple dual-stack", in: "1.2.3.4,abcd::ef01", out: []net.IP{ netutils.ParseIPSloppy("1.2.3.4"), netutils.ParseIPSloppy("abcd::ef01"), }, ssErr: "not supported in this configuration", }, { desc: "dual-stack with whitespace", in: "abcd::ef01 , 1.2.3.4", err: "could not parse", }, { desc: "double IPv4", in: "1.2.3.4,5.6.7.8", err: "either a single IP or a dual-stack pair of IPs", }, { desc: "double IPv6", in: "abcd::1,abcd::2", err: "either a single IP or a dual-stack pair of IPs", }, { desc: "dual-stack with unspecified", in: "1.2.3.4,::", err: "cannot include '0.0.0.0' or '::'", ssErr: "not supported in this configuration", }, { desc: "dual-stack with unspecified", in: "0.0.0.0,abcd::1", err: "cannot include '0.0.0.0' or '::'", ssErr: "not supported in this configuration", }, { desc: "dual-stack plus garbage", in: "abcd::ef01 , 1.2.3.4, something else", err: "could not parse", }, { desc: "triple stack!", in: "1.2.3.4,abcd::1,5.6.7.8", err: "either a single IP or a dual-stack pair of IPs", }, } for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { parsed, err := ParseNodeIPAnnotation(tc.in) expectedOut := tc.out expectedErr := tc.err if !reflect.DeepEqual(parsed, expectedOut) { t.Errorf("expected %#v, got %#v", expectedOut, parsed) } if err != nil { if expectedErr == "" { t.Errorf("unexpected error %v", err) } else if !strings.Contains(err.Error(), expectedErr) { t.Errorf("expected error with %q, got %v", expectedErr, err) } } else if expectedErr != "" { t.Errorf("expected error with %q, got no error", expectedErr) } }) } } kubernetes-component-helpers-b5afa51/node/util/status.go000066400000000000000000000105641476422250100235740ustar00rootroot00000000000000/* Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package util import ( "context" "encoding/json" "fmt" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" v1core "k8s.io/client-go/kubernetes/typed/core/v1" ) // PatchNodeStatus patches node status. func PatchNodeStatus(c v1core.CoreV1Interface, nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) (*v1.Node, []byte, error) { patchBytes, err := preparePatchBytesforNodeStatus(nodeName, oldNode, newNode) if err != nil { return nil, nil, err } updatedNode, err := c.Nodes().Patch(context.TODO(), string(nodeName), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status") if err != nil { return nil, nil, fmt.Errorf("failed to patch status %q for node %q: %v", patchBytes, nodeName, err) } return updatedNode, patchBytes, nil } func preparePatchBytesforNodeStatus(nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) ([]byte, error) { oldData, err := json.Marshal(oldNode) if err != nil { return nil, fmt.Errorf("failed to Marshal oldData for node %q: %v", nodeName, err) } // NodeStatus.Addresses is incorrectly annotated as patchStrategy=merge, which // will cause strategicpatch.CreateTwoWayMergePatch to create an incorrect patch // if it changed. manuallyPatchAddresses := (len(oldNode.Status.Addresses) > 0) && !equality.Semantic.DeepEqual(oldNode.Status.Addresses, newNode.Status.Addresses) // Reset spec to make sure only patch for Status or ObjectMeta is generated. // Note that we don't reset ObjectMeta here, because: // 1. This aligns with Nodes().UpdateStatus(). // 2. Some component does use this to update node annotations. diffNode := newNode.DeepCopy() diffNode.Spec = oldNode.Spec if manuallyPatchAddresses { diffNode.Status.Addresses = oldNode.Status.Addresses } newData, err := json.Marshal(diffNode) if err != nil { return nil, fmt.Errorf("failed to Marshal newData for node %q: %v", nodeName, err) } patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{}) if err != nil { return nil, fmt.Errorf("failed to CreateTwoWayMergePatch for node %q: %v", nodeName, err) } if manuallyPatchAddresses { patchBytes, err = fixupPatchForNodeStatusAddresses(patchBytes, newNode.Status.Addresses) if err != nil { return nil, fmt.Errorf("failed to fix up NodeAddresses in patch for node %q: %v", nodeName, err) } } return patchBytes, nil } // fixupPatchForNodeStatusAddresses adds a replace-strategy patch for Status.Addresses to // the existing patch func fixupPatchForNodeStatusAddresses(patchBytes []byte, addresses []v1.NodeAddress) ([]byte, error) { // Given patchBytes='{"status": {"conditions": [ ... ], "phase": ...}}' and // addresses=[{"type": "InternalIP", "address": "10.0.0.1"}], we need to generate: // // { // "status": { // "conditions": [ ... 
], // "phase": ..., // "addresses": [ // { // "type": "InternalIP", // "address": "10.0.0.1" // }, // { // "$patch": "replace" // } // ] // } // } var patchMap map[string]interface{} if err := json.Unmarshal(patchBytes, &patchMap); err != nil { return nil, err } addrBytes, err := json.Marshal(addresses) if err != nil { return nil, err } var addrArray []interface{} if err := json.Unmarshal(addrBytes, &addrArray); err != nil { return nil, err } addrArray = append(addrArray, map[string]interface{}{"$patch": "replace"}) status := patchMap["status"] if status == nil { status = map[string]interface{}{} patchMap["status"] = status } statusMap, ok := status.(map[string]interface{}) if !ok { return nil, fmt.Errorf("unexpected data in patch") } statusMap["addresses"] = addrArray return json.Marshal(patchMap) } kubernetes-component-helpers-b5afa51/node/util/sysctl/000077500000000000000000000000001476422250100232355ustar00rootroot00000000000000kubernetes-component-helpers-b5afa51/node/util/sysctl/namespace.go000066400000000000000000000071141476422250100255230ustar00rootroot00000000000000/* Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package sysctl import ( "strings" ) // Namespace represents a kernel namespace name. type Namespace string const ( // refer to https://man7.org/linux/man-pages/man7/ipc_namespaces.7.html // the Linux IPC namespace IPCNamespace = Namespace("IPC") // refer to https://man7.org/linux/man-pages/man7/network_namespaces.7.html // the network namespace NetNamespace = Namespace("Net") // the zero value if no namespace is known UnknownNamespace = Namespace("") ) var nameToNamespace = map[string]Namespace{ // kernel semaphore parameters: SEMMSL, SEMMNS, SEMOPM, and SEMMNI. "kernel.sem": IPCNamespace, // kernel shared memory limits include shmall, shmmax, shmmni, and shm_rmid_forced. "kernel.shmall": IPCNamespace, "kernel.shmmax": IPCNamespace, "kernel.shmmni": IPCNamespace, "kernel.shm_rmid_forced": IPCNamespace, // make backward compatibility to know the namespace of kernel.shm* "kernel.shm": IPCNamespace, // kernel messages include msgmni, msgmax and msgmnb. "kernel.msgmax": IPCNamespace, "kernel.msgmnb": IPCNamespace, "kernel.msgmni": IPCNamespace, // make backward compatibility to know the namespace of kernel.msg* "kernel.msg": IPCNamespace, } var prefixToNamespace = map[string]Namespace{ "net": NetNamespace, // mqueue filesystem provides the necessary kernel features to enable the creation // of a user space library that implements the POSIX message queues API. "fs.mqueue": IPCNamespace, } // namespaceOf returns the namespace of the Linux kernel for a sysctl, or // unknownNamespace if the sysctl is not known to be namespaced. // The second return is prefixed bool. 
// (The prefixed bool is surfaced by GetNamespace below; namespaceOf itself returns only the Namespace.) func namespaceOf(val string) Namespace { if ns, found := nameToNamespace[val]; found { return ns } for p, ns := range prefixToNamespace { if strings.HasPrefix(val, p+".") { return ns } } return UnknownNamespace } // GetNamespace extracts information from a sysctl string. It returns: // 1. The sysctl namespace, which can be one of the following: IPC, Net, or unknown. // 2. sysctlOrPrefix: the prefix of the sysctl parameter until the first '*'. // If there is no '*', it will be the original string. // 3. 'prefixed' is set to true if the sysctl parameter contains '*' or it is in the prefixToNamespace key list; in most cases this is a trailing '*'. // // For example, if the input sysctl is 'net.ipv6.neigh.*', GetNamespace will return: // - The Net namespace // - The sysctlOrPrefix as 'net.ipv6.neigh' // - 'prefixed' set to true // // For the input sysctl 'net.ipv6.conf.all.disable_ipv6', GetNamespace will return: // - The Net namespace // - The sysctlOrPrefix as 'net.ipv6.conf.all.disable_ipv6' // - 'prefixed' set to false. func GetNamespace(sysctl string) (ns Namespace, sysctlOrPrefix string, prefixed bool) { sysctlOrPrefix = NormalizeName(sysctl) firstIndex := strings.IndexAny(sysctlOrPrefix, "*") if firstIndex != -1 { sysctlOrPrefix = sysctlOrPrefix[:firstIndex] prefixed = true } ns = namespaceOf(sysctlOrPrefix) return } kubernetes-component-helpers-b5afa51/node/util/sysctl/namespace_test.go000066400000000000000000000017661476422250100265670ustar00rootroot00000000000000/* Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package sysctl import ( "testing" ) func TestNamespacedOf(t *testing.T) { tests := map[string]Namespace{ "kernel.shm_rmid_forced": IPCNamespace, "net.a.b.c": NetNamespace, "fs.mqueue.a.b.c": IPCNamespace, "foo": UnknownNamespace, } for sysctl, ns := range tests { if got := namespaceOf(sysctl); got != ns { t.Errorf("wrong namespace for %q: got=%s want=%s", sysctl, got, ns) } } } kubernetes-component-helpers-b5afa51/node/util/sysctl/sysctl.go000066400000000000000000000111071476422250100251060ustar00rootroot00000000000000/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package sysctl import ( "os" "path" "strconv" "strings" ) const ( sysctlBase = "/proc/sys" // VMOvercommitMemory refers to the sysctl variable responsible for defining // the memory over-commit policy used by the kernel.
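// The kernel accepts three values for this setting: 0 (heuristic overcommit), // 1 (always overcommit; see VMOvercommitMemoryAlways below) and 2 (never overcommit).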
VMOvercommitMemory = "vm/overcommit_memory" // VMPanicOnOOM refers to the sysctl variable responsible for defining // the OOM behavior used by kernel. VMPanicOnOOM = "vm/panic_on_oom" // KernelPanic refers to the sysctl variable responsible for defining // the timeout after a panic for the kernel to reboot. KernelPanic = "kernel/panic" // KernelPanicOnOops refers to the sysctl variable responsible for defining // the kernel behavior when an oops or BUG is encountered. KernelPanicOnOops = "kernel/panic_on_oops" // RootMaxKeys refers to the sysctl variable responsible for defining // the maximum number of keys that the root user (UID 0 in the root user namespace) may own. RootMaxKeys = "kernel/keys/root_maxkeys" // RootMaxBytes refers to the sysctl variable responsible for defining // the maximum number of bytes of data that the root user (UID 0 in the root user namespace) // can hold in the payloads of the keys owned by root. RootMaxBytes = "kernel/keys/root_maxbytes" // VMOvercommitMemoryAlways represents that kernel performs no memory over-commit handling. VMOvercommitMemoryAlways = 1 // VMPanicOnOOMInvokeOOMKiller represents that kernel calls the oom_killer function when OOM occurs. VMPanicOnOOMInvokeOOMKiller = 0 // KernelPanicOnOopsAlways represents that kernel panics on kernel oops. KernelPanicOnOopsAlways = 1 // KernelPanicRebootTimeout is the timeout seconds after a panic for the kernel to reboot. KernelPanicRebootTimeout = 10 // RootMaxKeysSetting is the maximum number of keys that the root user (UID 0 in the root user namespace) may own. // Needed since docker creates a new key per container. RootMaxKeysSetting = 1000000 // RootMaxBytesSetting is the maximum number of bytes of data that the root user (UID 0 in the root user namespace) // can hold in the payloads of the keys owned by root. // Allocate 25 bytes per key * number of MaxKeys. RootMaxBytesSetting = RootMaxKeysSetting * 25 ) // Interface is an injectable interface for running sysctl commands. type Interface interface { // GetSysctl returns the value for the specified sysctl setting GetSysctl(sysctl string) (int, error) // SetSysctl modifies the specified sysctl flag to the new value SetSysctl(sysctl string, newVal int) error } // New returns a new Interface for accessing sysctl func New() Interface { return &procSysctl{} } // procSysctl implements Interface by reading and writing files under /proc/sys type procSysctl struct { } // GetSysctl returns the value for the specified sysctl setting func (*procSysctl) GetSysctl(sysctl string) (int, error) { data, err := os.ReadFile(path.Join(sysctlBase, sysctl)) if err != nil { return -1, err } val, err := strconv.Atoi(strings.Trim(string(data), " \n")) if err != nil { return -1, err } return val, nil } // SetSysctl modifies the specified sysctl flag to the new value func (*procSysctl) SetSysctl(sysctl string, newVal int) error { return os.WriteFile(path.Join(sysctlBase, sysctl), []byte(strconv.Itoa(newVal)), 0640) } // NormalizeName can return sysctl variables in dots separator format. // The '/' separator is also accepted in place of a '.'. // Convert the sysctl variables to dots separator format for validation. // More info: // // https://man7.org/linux/man-pages/man8/sysctl.8.html // https://man7.org/linux/man-pages/man5/sysctl.d.5.html func NormalizeName(val string) string { if val == "" { return val } firstSepIndex := strings.IndexAny(val, "./") // if the first found is `.` like `net.ipv4.conf.eno2/100.rp_filter` if firstSepIndex == -1 || val[firstSepIndex] == '.' 
{ return val } // for `net/ipv4/conf/eno2.100/rp_filter`, swap the use of `.` and `/` // to `net.ipv4.conf.eno2/100.rp_filter` f := func(r rune) rune { switch r { case '.': return '/' case '/': return '.' } return r } return strings.Map(f, val) } kubernetes-component-helpers-b5afa51/node/util/sysctl/sysctl_test.go000066400000000000000000000031151476422250100261440ustar00rootroot00000000000000/* Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package sysctl import ( "testing" "github.com/stretchr/testify/assert" ) // TestConvertSysctlVariableToDotsSeparator tests whether the sysctl variable // can be correctly converted to a dot as a separator. func TestConvertSysctlVariableToDotsSeparator(t *testing.T) { type testCase struct { in string out string } valid := []testCase{ {in: "kernel.shm_rmid_forced", out: "kernel.shm_rmid_forced"}, {in: "kernel/shm_rmid_forced", out: "kernel.shm_rmid_forced"}, {in: "net.ipv4.conf.eno2/100.rp_filter", out: "net.ipv4.conf.eno2/100.rp_filter"}, {in: "net/ipv4/conf/eno2.100/rp_filter", out: "net.ipv4.conf.eno2/100.rp_filter"}, {in: "net/ipv4/ip_local_port_range", out: "net.ipv4.ip_local_port_range"}, {in: "kernel/msgmax", out: "kernel.msgmax"}, {in: "kernel/sem", out: "kernel.sem"}, } for _, test := range valid { convertSysctlVal := NormalizeName(test.in) assert.Equalf(t, test.out, convertSysctlVal, "The sysctl variable was not converted correctly. got: %s, want: %s", convertSysctlVal, test.out) } } kubernetes-component-helpers-b5afa51/node/util/sysctl/testing/000077500000000000000000000000001476422250100247125ustar00rootroot00000000000000kubernetes-component-helpers-b5afa51/node/util/sysctl/testing/fake.go000066400000000000000000000024551476422250100261550ustar00rootroot00000000000000/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package testing import ( "os" "k8s.io/component-helpers/node/util/sysctl" ) // Fake is a map-backed implementation of sysctl.Interface, for testing/mocking. type Fake struct { Settings map[string]int } // NewFake creates a fake sysctl implementation. func NewFake() *Fake { return &Fake{ Settings: make(map[string]int), } } // GetSysctl returns the value for the specified sysctl setting. func (m *Fake) GetSysctl(sysctl string) (int, error) { v, found := m.Settings[sysctl] if !found { return -1, os.ErrNotExist } return v, nil } // SetSysctl modifies the specified sysctl flag to the new value. 
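// A typical use in a test (illustrative): // fake := NewFake() // _ = fake.SetSysctl("vm/overcommit_memory", 1) // v, _ := fake.GetSysctl("vm/overcommit_memory") // v == 1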
func (m *Fake) SetSysctl(sysctl string, newVal int) error { m.Settings[sysctl] = newVal return nil } var _ = sysctl.Interface(&Fake{}) kubernetes-component-helpers-b5afa51/resource/000077500000000000000000000000001476422250100216415ustar00rootroot00000000000000kubernetes-component-helpers-b5afa51/resource/OWNERS000066400000000000000000000003431476422250100226010ustar00rootroot00000000000000# See the OWNERS docs at https://go.k8s.io/owners options: no_parent_owners: true approvers: - api-approvers reviewers: - sig-node-reviewers - sig-scheduling labels: - sig/node - sig/scheduling - kind/api-change kubernetes-component-helpers-b5afa51/resource/helpers.go000066400000000000000000000324611476422250100236400ustar00rootroot00000000000000/* Copyright 2024 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package resource import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" ) // ContainerType signifies container type type ContainerType int const ( // Containers is for normal containers Containers ContainerType = 1 << iota // InitContainers is for init containers InitContainers ) // PodResourcesOptions controls the behavior of PodRequests and PodLimits. type PodResourcesOptions struct { // Reuse, if provided will be reused to accumulate resources and returned by the PodRequests or PodLimits // functions. All existing values in Reuse will be lost. Reuse v1.ResourceList // UseStatusResources indicates whether resources reported by the PodStatus should be considered // when evaluating the pod resources. This MUST be false if the InPlacePodVerticalScaling // feature is not enabled. UseStatusResources bool // ExcludeOverhead controls if pod overhead is excluded from the calculation. ExcludeOverhead bool // ContainerFn is called with the effective resources required for each container within the pod. ContainerFn func(res v1.ResourceList, containerType ContainerType) // NonMissingContainerRequests if provided will replace any missing container level requests for the specified resources // with the given values. If the requests for those resources are explicitly set, even if zero, they will not be modified. NonMissingContainerRequests v1.ResourceList // SkipPodLevelResources controls whether pod-level resources should be skipped // from the calculation. If pod-level resources are not set in PodSpec, // pod-level resources will always be skipped. SkipPodLevelResources bool } var supportedPodLevelResources = sets.New(v1.ResourceCPU, v1.ResourceMemory) func SupportedPodLevelResources() sets.Set[v1.ResourceName] { return supportedPodLevelResources } // IsSupportedPodLevelResource checks if a given resource is supported by pod-level // resource management through the PodLevelResources feature. It returns true if // the resource is supported. func IsSupportedPodLevelResource(name v1.ResourceName) bool { return supportedPodLevelResources.Has(name) } // IsPodLevelResourcesSet checks if pod-level resources are set. It returns true if // either the Requests or Limits map contains at least one supported pod-level resource.
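// For example (illustrative quantity; apiresource here stands for k8s.io/apimachinery/pkg/api/resource): a pod whose spec sets // Resources = &v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: apiresource.MustParse("1")}} // is considered set, since cpu is a supported pod-level resource, while a pod naming only unsupported resources is not.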
func IsPodLevelResourcesSet(pod *v1.Pod) bool { if pod.Spec.Resources == nil { return false } if (len(pod.Spec.Resources.Requests) + len(pod.Spec.Resources.Limits)) == 0 { return false } for resourceName := range pod.Spec.Resources.Requests { if IsSupportedPodLevelResource(resourceName) { return true } } for resourceName := range pod.Spec.Resources.Limits { if IsSupportedPodLevelResource(resourceName) { return true } } return false } // IsPodLevelRequestsSet checks if pod-level requests are set. It returns true only if // the Requests map contains a supported pod-level resource. func IsPodLevelRequestsSet(pod *v1.Pod) bool { if pod.Spec.Resources == nil { return false } if len(pod.Spec.Resources.Requests) == 0 { return false } for resourceName := range pod.Spec.Resources.Requests { if IsSupportedPodLevelResource(resourceName) { return true } } return false } // PodRequests computes the total pod requests per the PodResourcesOptions supplied. // If PodResourcesOptions is zero-valued, then the requests are returned including pod overhead. // If the PodLevelResources feature is enabled AND the pod-level resources are set, // those pod-level values are used in calculating Pod Requests. // The computation is part of the API and must be reviewed as an API change. func PodRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList { reqs := AggregateContainerRequests(pod, opts) if !opts.SkipPodLevelResources && IsPodLevelRequestsSet(pod) { for resourceName, quantity := range pod.Spec.Resources.Requests { if IsSupportedPodLevelResource(resourceName) { reqs[resourceName] = quantity } } } // Add overhead for running a pod to the sum of requests if requested: if !opts.ExcludeOverhead && pod.Spec.Overhead != nil { addResourceList(reqs, pod.Spec.Overhead) } return reqs } // AggregateContainerRequests computes the total resource requests of all the containers // in a pod. This computation follows the formula defined in the KEP for sidecar // containers. See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#resources-calculation-for-scheduling-and-pod-admission // for more details.
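// As a worked sketch of that formula (numbers borrowed from the test cases
// below, not normative): with init containers [init-1: 5 CPU, sidecar-1: 1,
// sidecar-2: 2, init-2: 5, sidecar-3: 3] and one regular container of 1 CPU,
// the peak init-time use is InitContainerUse(init-2) = (1+2) + 5 = 8 CPU,
// which exceeds the steady state (1+2+3) + 1 = 7 CPU, so the aggregate
// request is 8 CPU.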
func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList { // attempt to reuse the maps if passed, or allocate otherwise reqs := reuseOrClearResourceList(opts.Reuse) var containerStatuses map[string]*v1.ContainerStatus if opts.UseStatusResources { containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses)) for i := range pod.Status.ContainerStatuses { containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i] } } for _, container := range pod.Spec.Containers { containerReqs := container.Resources.Requests if opts.UseStatusResources { cs, found := containerStatuses[container.Name] if found && cs.Resources != nil { if pod.Status.Resize == v1.PodResizeStatusInfeasible { containerReqs = cs.Resources.Requests.DeepCopy() } else { containerReqs = max(container.Resources.Requests, cs.Resources.Requests) } } } if len(opts.NonMissingContainerRequests) > 0 { containerReqs = applyNonMissing(containerReqs, opts.NonMissingContainerRequests) } if opts.ContainerFn != nil { opts.ContainerFn(containerReqs, Containers) } addResourceList(reqs, containerReqs) } restartableInitContainerReqs := v1.ResourceList{} initContainerReqs := v1.ResourceList{} // init containers define the minimum of any resource // Note: In-place resize is not allowed for InitContainers, so no need to check for ResizeStatus value // // Let's say `InitContainerUse(i)` is the resource requirements when the i-th // init container is initializing, then // `InitContainerUse(i) = sum(Resources of restartable init containers with index < i) + Resources of i-th init container`. // // See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#exposing-pod-resource-requirements for the detail. for _, container := range pod.Spec.InitContainers { containerReqs := container.Resources.Requests if len(opts.NonMissingContainerRequests) > 0 { containerReqs = applyNonMissing(containerReqs, opts.NonMissingContainerRequests) } if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways { // and add them to the resulting cumulative container requests addResourceList(reqs, containerReqs) // track our cumulative restartable init container resources addResourceList(restartableInitContainerReqs, containerReqs) containerReqs = restartableInitContainerReqs } else { tmp := v1.ResourceList{} addResourceList(tmp, containerReqs) addResourceList(tmp, restartableInitContainerReqs) containerReqs = tmp } if opts.ContainerFn != nil { opts.ContainerFn(containerReqs, InitContainers) } maxResourceList(initContainerReqs, containerReqs) } maxResourceList(reqs, initContainerReqs) return reqs } // applyNonMissing will return a copy of the given resource list with any missing values replaced by the nonMissing values func applyNonMissing(reqs v1.ResourceList, nonMissing v1.ResourceList) v1.ResourceList { cp := v1.ResourceList{} for k, v := range reqs { cp[k] = v.DeepCopy() } for k, v := range nonMissing { if _, found := reqs[k]; !found { rk := cp[k] rk.Add(v) cp[k] = rk } } return cp } // PodLimits computes the pod limits per the PodResourcesOptions supplied. If PodResourcesOptions is nil, then // the limits are returned including pod overhead for any non-zero limits. The computation is part of the API and must be reviewed // as an API change. 
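// For example (illustrative, mirroring the "pod overhead included" limits
// test case below): container cpu limits aggregating to 5 plus a cpu overhead
// of 1 yield a cpu limit of 6, while a 1Gi memory overhead is dropped
// entirely because no container sets a memory limit for it to be added to.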
func PodLimits(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList { // attempt to reuse the maps if passed, or allocate otherwise limits := AggregateContainerLimits(pod, opts) if !opts.SkipPodLevelResources && IsPodLevelResourcesSet(pod) { for resourceName, quantity := range pod.Spec.Resources.Limits { if IsSupportedPodLevelResource(resourceName) { limits[resourceName] = quantity } } } // Add overhead to non-zero limits if requested: if !opts.ExcludeOverhead && pod.Spec.Overhead != nil { for name, quantity := range pod.Spec.Overhead { if value, ok := limits[name]; ok && !value.IsZero() { value.Add(quantity) limits[name] = value } } } return limits } // AggregateContainerLimits computes the aggregated resource limits of all the containers // in a pod. This computation follows the formula defined in the KEP for sidecar // containers. See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#resources-calculation-for-scheduling-and-pod-admission // for more details. func AggregateContainerLimits(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList { // attempt to reuse the maps if passed, or allocate otherwise limits := reuseOrClearResourceList(opts.Reuse) var containerStatuses map[string]*v1.ContainerStatus if opts.UseStatusResources { containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses)) for i := range pod.Status.ContainerStatuses { containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i] } } for _, container := range pod.Spec.Containers { containerLimits := container.Resources.Limits if opts.UseStatusResources { cs, found := containerStatuses[container.Name] if found && cs.Resources != nil { if pod.Status.Resize == v1.PodResizeStatusInfeasible { containerLimits = cs.Resources.Limits.DeepCopy() } else { containerLimits = max(container.Resources.Limits, cs.Resources.Limits) } } } if opts.ContainerFn != nil { opts.ContainerFn(containerLimits, Containers) } addResourceList(limits, containerLimits) } restartableInitContainerLimits := v1.ResourceList{} initContainerLimits := v1.ResourceList{} // init containers define the minimum of any resource // // Let's say `InitContainerUse(i)` is the resource requirements when the i-th // init container is initializing, then // `InitContainerUse(i) = sum(Resources of restartable init containers with index < i) + Resources of i-th init container`. // // See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#exposing-pod-resource-requirements for the detail. for _, container := range pod.Spec.InitContainers { containerLimits := container.Resources.Limits // Is the init container marked as a restartable init container? if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways { addResourceList(limits, containerLimits) // track our cumulative restartable init container resources addResourceList(restartableInitContainerLimits, containerLimits) containerLimits = restartableInitContainerLimits } else { tmp := v1.ResourceList{} addResourceList(tmp, containerLimits) addResourceList(tmp, restartableInitContainerLimits) containerLimits = tmp } if opts.ContainerFn != nil { opts.ContainerFn(containerLimits, InitContainers) } maxResourceList(initContainerLimits, containerLimits) } maxResourceList(limits, initContainerLimits) return limits } // addResourceList adds the resources in newList to list. 
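// Note that list is mutated in place, and each quantity from newList is
// deep-copied on first insertion, so later Add calls never alias the caller's
// values; calling addResourceList(list, newList) twice therefore doubles
// every quantity named in newList.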
func addResourceList(list, newList v1.ResourceList) { for name, quantity := range newList { if value, ok := list[name]; !ok { list[name] = quantity.DeepCopy() } else { value.Add(quantity) list[name] = value } } } // maxResourceList sets list to the greater of list/newList for every resource in newList func maxResourceList(list, newList v1.ResourceList) { for name, quantity := range newList { if value, ok := list[name]; !ok || quantity.Cmp(value) > 0 { list[name] = quantity.DeepCopy() } } } // max returns the result of max(a, b) for each named resource and is only used if we can't // accumulate into an existing resource list func max(a v1.ResourceList, b v1.ResourceList) v1.ResourceList { result := v1.ResourceList{} for key, value := range a { if other, found := b[key]; found { if value.Cmp(other) <= 0 { result[key] = other.DeepCopy() continue } } result[key] = value.DeepCopy() } for key, value := range b { if _, found := result[key]; !found { result[key] = value.DeepCopy() } } return result } // reuseOrClearResourceList is a helper for avoiding excessive allocations of // resource lists within the inner loop of resource calculations. func reuseOrClearResourceList(reuse v1.ResourceList) v1.ResourceList { if reuse == nil { return make(v1.ResourceList, 4) } for k := range reuse { delete(reuse, k) } return reuse } kubernetes-component-helpers-b5afa51/resource/helpers_test.go000066400000000000000000001435731476422250100247060ustar00rootroot00000000000000/* Copyright 2024 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package resource import ( "testing" "github.com/google/go-cmp/cmp" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/resource" ) func TestPodRequestsAndLimits(t *testing.T) { cases := []struct { pod *v1.Pod cName string expectedRequests v1.ResourceList expectedLimits v1.ResourceList }{ { cName: "just-limit-no-overhead", pod: getPod("foo", podResources{cpuLimit: "9"}), expectedRequests: v1.ResourceList{}, expectedLimits: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("9"), }, }, { cName: "just-overhead", pod: getPod("foo", podResources{cpuOverhead: "5", memoryOverhead: "5"}), expectedRequests: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("5"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("5"), }, expectedLimits: v1.ResourceList{}, }, { cName: "req-and-overhead", pod: getPod("foo", podResources{cpuRequest: "1", memoryRequest: "10", cpuOverhead: "5", memoryOverhead: "5"}), expectedRequests: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("6"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("15"), }, expectedLimits: v1.ResourceList{}, }, { cName: "all-req-lim-and-overhead", pod: getPod("foo", podResources{cpuRequest: "1", cpuLimit: "2", memoryRequest: "10", memoryLimit: "12", cpuOverhead: "5", memoryOverhead: "5"}), expectedRequests: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("6"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("15"), }, expectedLimits: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("7"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("17"), }, }, { cName: "req-some-lim-and-overhead", pod: getPod("foo", podResources{cpuRequest: "1", cpuLimit: "2", memoryRequest: "10", cpuOverhead: "5", memoryOverhead: "5"}), expectedRequests: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("6"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("15"), }, expectedLimits: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("7"), }, }, } for idx, tc := range cases { resRequests := PodRequests(tc.pod, PodResourcesOptions{}) resLimits := PodLimits(tc.pod, PodResourcesOptions{}) if !equality.Semantic.DeepEqual(tc.expectedRequests, resRequests) { t.Errorf("test case failure[%d]: %v, requests:\n expected:\t%v\ngot\t\t%v", idx, tc.cName, tc.expectedRequests, resRequests) } if !equality.Semantic.DeepEqual(tc.expectedLimits, resLimits) { t.Errorf("test case failure[%d]: %v, limits:\n expected:\t%v\ngot\t\t%v", idx, tc.cName, tc.expectedLimits, resLimits) } } } func TestPodRequestsAndLimitsWithoutOverhead(t *testing.T) { cases := []struct { pod *v1.Pod name string expectedRequests v1.ResourceList expectedLimits v1.ResourceList }{ { name: "two container no overhead - should just be sum of containers", pod: &v1.Pod{ Spec: v1.PodSpec{ Containers: []v1.Container{ { Name: "foobar", Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("1"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("5"), }, Limits: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("2"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("10"), }, }, }, { Name: "foobar2", Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("4"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("12"), }, Limits: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): 
resource.MustParse("8"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("24"), }, }, }, }, }, }, expectedRequests: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("5"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("17"), }, expectedLimits: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("10"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("34"), }, }, { name: "two container with overhead - shouldn't consider overhead", pod: &v1.Pod{ Spec: v1.PodSpec{ Overhead: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("3"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("8"), }, Containers: []v1.Container{ { Name: "foobar", Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("1"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("5"), }, Limits: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("2"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("10"), }, }, }, { Name: "foobar2", Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("4"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("12"), }, Limits: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("8"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("24"), }, }, }, }, }, }, expectedRequests: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("5"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("17"), }, expectedLimits: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("10"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("34"), }, }, { name: "two container with overhead, massive init - should just be the largest init", pod: &v1.Pod{ Spec: v1.PodSpec{ Overhead: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("3"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("8"), }, Containers: []v1.Container{ { Name: "foobar", Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("1"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("5"), }, Limits: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("2"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("10"), }, }, }, { Name: "foobar2", Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("4"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("12"), }, Limits: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("8"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("24"), }, }, }, }, InitContainers: []v1.Container{ { Name: "small-init", Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("1"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("5"), }, Limits: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("1"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("5"), }, }, }, { Name: "big-init", Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("40"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("120"), }, Limits: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("80"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("240"), }, }, }, }, }, }, expectedRequests: 
v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("40"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("120"), }, expectedLimits: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("80"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("240"), }, }, } for idx, tc := range cases { resRequests := PodRequests(tc.pod, PodResourcesOptions{ExcludeOverhead: true}) resLimits := PodLimits(tc.pod, PodResourcesOptions{ExcludeOverhead: true}) if !equality.Semantic.DeepEqual(tc.expectedRequests, resRequests) { t.Errorf("test case failure[%d]: %v, requests:\n expected:\t%v\ngot\t\t%v", idx, tc.name, tc.expectedRequests, resRequests) } if !equality.Semantic.DeepEqual(tc.expectedLimits, resLimits) { t.Errorf("test case failure[%d]: %v, limits:\n expected:\t%v\ngot\t\t%v", idx, tc.name, tc.expectedLimits, resLimits) } } } func TestPodResourceRequests(t *testing.T) { restartAlways := v1.ContainerRestartPolicyAlways testCases := []struct { description string options PodResourcesOptions overhead v1.ResourceList podResizeStatus v1.PodResizeStatus initContainers []v1.Container containers []v1.Container containerStatus []v1.ContainerStatus expectedRequests v1.ResourceList }{ { description: "nil options, larger init container", expectedRequests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("4"), }, initContainers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("4"), }, }, }, }, containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("1"), }, }, }, }, }, { description: "nil options, larger containers", expectedRequests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("5"), }, initContainers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2"), }, }, }, }, containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2"), }, }, }, { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("3"), }, }, }, }, }, { description: "pod overhead excluded", expectedRequests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("5"), }, options: PodResourcesOptions{ ExcludeOverhead: true, }, overhead: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("1"), }, initContainers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2"), }, }, }, }, containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2"), }, }, }, { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("3"), }, }, }, }, }, { description: "pod overhead included", expectedRequests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("6"), v1.ResourceMemory: resource.MustParse("1Gi"), }, overhead: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi"), }, initContainers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2"), }, Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2"), }, }, }, }, containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2"), }, }, }, { Resources: v1.ResourceRequirements{ Requests: 
v1.ResourceList{ v1.ResourceCPU: resource.MustParse("3"), }, }, }, }, }, { description: "resized, infeasible", expectedRequests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2"), }, podResizeStatus: v1.PodResizeStatusInfeasible, options: PodResourcesOptions{UseStatusResources: true}, containers: []v1.Container{ { Name: "container-1", Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("4"), }, }, }, }, containerStatus: []v1.ContainerStatus{ { Name: "container-1", Resources: &v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2"), }, }, }, }, }, { description: "resized, no resize status", expectedRequests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("4"), }, options: PodResourcesOptions{UseStatusResources: true}, containers: []v1.Container{ { Name: "container-1", Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("4"), }, }, }, }, containerStatus: []v1.ContainerStatus{ { Name: "container-1", Resources: &v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2"), }, }, }, }, }, { description: "resized, infeasible, but don't use status", expectedRequests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("4"), }, podResizeStatus: v1.PodResizeStatusInfeasible, options: PodResourcesOptions{UseStatusResources: false}, containers: []v1.Container{ { Name: "container-1", Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("4"), }, }, }, }, containerStatus: []v1.ContainerStatus{ { Name: "container-1", Resources: &v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2"), }, }, }, }, }, { description: "restartable init container", expectedRequests: v1.ResourceList{ // restartable init + regular container v1.ResourceCPU: resource.MustParse("2"), }, initContainers: []v1.Container{ { Name: "restartable-init-1", RestartPolicy: &restartAlways, Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("1"), }, }, }, }, containers: []v1.Container{ { Name: "container-1", Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("1"), }, }, }, }, }, { description: "multiple restartable init containers", expectedRequests: v1.ResourceList{ // max(5, restartable init containers(3+2+1) + regular(1)) = 7 v1.ResourceCPU: resource.MustParse("7"), }, initContainers: []v1.Container{ { Name: "init-1", Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("5"), }, }, }, { Name: "restartable-init-1", RestartPolicy: &restartAlways, Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("1"), }, }, }, { Name: "restartable-init-2", RestartPolicy: &restartAlways, Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2"), }, }, }, { Name: "restartable-init-3", RestartPolicy: &restartAlways, Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("3"), }, }, }, }, containers: []v1.Container{ { Name: "container-1", Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("1"), }, }, }, }, }, { description: "multiple restartable and regular init containers", expectedRequests: v1.ResourceList{ // init-2 requires 5 + the previously running restartable init // containers(1+2) = 8, the 
restartable init container that starts // after it doesn't count v1.ResourceCPU: resource.MustParse("8"), }, initContainers: []v1.Container{ { Name: "init-1", Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("5"), }, }, }, { Name: "restartable-init-1", RestartPolicy: &restartAlways, Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("1"), }, }, }, { Name: "restartable-init-2", RestartPolicy: &restartAlways, Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2"), }, }, }, { Name: "init-2", Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("5"), }, }, }, { Name: "restartable-init-3", RestartPolicy: &restartAlways, Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("3"), }, }, }, }, containers: []v1.Container{ { Name: "container-1", Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("1"), }, }, }, }, }, { description: "restartable-init, init and regular", expectedRequests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("210"), }, initContainers: []v1.Container{ { Name: "restartable-init-1", RestartPolicy: &restartAlways, Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("10"), }, }, }, { Name: "init-1", Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("200"), }, }, }, }, containers: []v1.Container{ { Name: "container-1", Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("100"), }, }, }, }, }, } for _, tc := range testCases { t.Run(tc.description, func(t *testing.T) { p := &v1.Pod{ Spec: v1.PodSpec{ Containers: tc.containers, InitContainers: tc.initContainers, Overhead: tc.overhead, }, Status: v1.PodStatus{ ContainerStatuses: tc.containerStatus, Resize: tc.podResizeStatus, }, } request := PodRequests(p, tc.options) if diff := cmp.Diff(request, tc.expectedRequests); diff != "" { t.Errorf("got=%v, want=%v, diff=%s", request, tc.expectedRequests, diff) } }) } } func TestPodResourceRequestsReuse(t *testing.T) { expectedRequests := v1.ResourceList{ v1.ResourceCPU: resource.MustParse("1"), } p := &v1.Pod{ Spec: v1.PodSpec{ Containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: expectedRequests, }, }, }, }, } opts := PodResourcesOptions{ Reuse: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("25"), }, } requests := PodRequests(p, opts) if diff := cmp.Diff(requests, expectedRequests); diff != "" { t.Errorf("got=%v, want=%v, diff=%s", requests, expectedRequests, diff) } // should re-use the maps we passed in if diff := cmp.Diff(opts.Reuse, expectedRequests); diff != "" { t.Errorf("got=%v, want=%v, diff=%s", requests, expectedRequests, diff) } } func TestPodResourceLimits(t *testing.T) { restartAlways := v1.ContainerRestartPolicyAlways testCases := []struct { description string options PodResourcesOptions overhead v1.ResourceList initContainers []v1.Container containers []v1.Container containerStatuses []v1.ContainerStatus expectedLimits v1.ResourceList }{ { description: "nil options, larger init container", expectedLimits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("4"), }, initContainers: []v1.Container{ { Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("4"), }, }, }, }, containers: 
[]v1.Container{ { Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("1"), }, }, }, }, }, { description: "nil options, larger containers", expectedLimits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("5"), }, initContainers: []v1.Container{ { Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2"), }, }, }, }, containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2"), }, }, }, { Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("3"), }, }, }, }, }, { description: "pod overhead excluded", expectedLimits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("5"), }, options: PodResourcesOptions{ ExcludeOverhead: true, }, overhead: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("1"), }, initContainers: []v1.Container{ { Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2"), }, }, }, }, containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2"), }, }, }, { Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("3"), }, }, }, }, }, { description: "pod overhead included", overhead: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi"), }, expectedLimits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("6"), // overhead is only added to non-zero limits, so there will be no expected memory limit }, initContainers: []v1.Container{ { Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2"), }, }, }, }, containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2"), }, }, }, { Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("3"), }, }, }, }, }, { description: "no limited containers should result in no limits for the pod", expectedLimits: v1.ResourceList{}, initContainers: []v1.Container{}, containers: []v1.Container{ { // Unlimited container }, }, }, { description: "one limited and one unlimited container should result in the limited container's limits for the pod", expectedLimits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2"), v1.ResourceMemory: resource.MustParse("2Gi"), }, initContainers: []v1.Container{}, containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2"), v1.ResourceMemory: resource.MustParse("2Gi"), }, }, }, { // Unlimited container }, }, }, { description: "one limited and one unlimited init container should result in the limited init container's limits for the pod", expectedLimits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2"), v1.ResourceMemory: resource.MustParse("2Gi"), }, initContainers: []v1.Container{ { Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2"), v1.ResourceMemory: resource.MustParse("2Gi"), }, }, }, { // Unlimited init container }, }, containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi"), }, }, }, }, }, { description: "restartable init container", expectedLimits: v1.ResourceList{ // restartable init + regular container 
v1.ResourceCPU: resource.MustParse("2"), }, initContainers: []v1.Container{ { Name: "restartable-init-1", RestartPolicy: &restartAlways, Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("1"), }, }, }, }, containers: []v1.Container{ { Name: "container-1", Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("1"), }, }, }, }, }, { description: "multiple restartable init containers", expectedLimits: v1.ResourceList{ // max(5, restartable init containers(3+2+1) + regular(1)) = 7 v1.ResourceCPU: resource.MustParse("7"), }, initContainers: []v1.Container{ { Name: "init-1", Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("5"), }, }, }, { Name: "restartable-init-1", RestartPolicy: &restartAlways, Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("1"), }, }, }, { Name: "restartable-init-2", RestartPolicy: &restartAlways, Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2"), }, }, }, { Name: "restartable-init-3", RestartPolicy: &restartAlways, Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("3"), }, }, }, }, containers: []v1.Container{ { Name: "container-1", Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("1"), }, }, }, }, }, { description: "multiple restartable and regular init containers", expectedLimits: v1.ResourceList{ // init-2 requires 5 + the previously running restartable init // containers(1+2) = 8, the restartable init container that starts // after it doesn't count v1.ResourceCPU: resource.MustParse("8"), }, initContainers: []v1.Container{ { Name: "init-1", Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("5"), }, }, }, { Name: "restartable-init-1", RestartPolicy: &restartAlways, Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("1"), }, }, }, { Name: "restartable-init-2", RestartPolicy: &restartAlways, Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("2"), }, }, }, { Name: "init-2", Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("5"), }, }, }, { Name: "restartable-init-3", RestartPolicy: &restartAlways, Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("3"), }, }, }, }, containers: []v1.Container{ { Name: "container-1", Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("1"), }, }, }, }, }, { description: "restartable-init, init and regular", expectedLimits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("210"), }, initContainers: []v1.Container{ { Name: "restartable-init-1", RestartPolicy: &restartAlways, Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("10"), }, }, }, { Name: "init-1", Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("200"), }, }, }, }, containers: []v1.Container{ { Name: "container-1", Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("100"), }, }, }, }, }, { description: "pod scaled up", expectedLimits: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("2Gi"), }, options: PodResourcesOptions{UseStatusResources: true}, 
containers: []v1.Container{ { Name: "container-1", Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("2Gi"), }, }, }, }, containerStatuses: []v1.ContainerStatus{ { Name: "container-1", Resources: &v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("1Gi"), }, }, }, }, }, { description: "pod scaled down", expectedLimits: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("2Gi"), }, options: PodResourcesOptions{UseStatusResources: true}, containers: []v1.Container{ { Name: "container-1", Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("1Gi"), }, }, }, }, containerStatuses: []v1.ContainerStatus{ { Name: "container-1", Resources: &v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("2Gi"), }, }, }, }, }, { description: "pod scaled down, don't use status", expectedLimits: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("1Gi"), }, options: PodResourcesOptions{UseStatusResources: false}, containers: []v1.Container{ { Name: "container-1", Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("1Gi"), }, }, }, }, containerStatuses: []v1.ContainerStatus{ { Name: "container-1", Resources: &v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("2Gi"), }, }, }, }, }, } for _, tc := range testCases { t.Run(tc.description, func(t *testing.T) { p := &v1.Pod{ Spec: v1.PodSpec{ Containers: tc.containers, InitContainers: tc.initContainers, Overhead: tc.overhead, }, Status: v1.PodStatus{ ContainerStatuses: tc.containerStatuses, }, } limits := PodLimits(p, tc.options) if diff := cmp.Diff(limits, tc.expectedLimits); diff != "" { t.Errorf("got=%v, want=%v, diff=%s", limits, tc.expectedLimits, diff) } }) } } func TestIsPodLevelResourcesSet(t *testing.T) { testCases := []struct { name string podResources *v1.ResourceRequirements expected bool }{ { name: "nil resources struct", expected: false, }, { name: "empty resources struct", podResources: &v1.ResourceRequirements{}, expected: false, }, { name: "only unsupported resource requests set", podResources: &v1.ResourceRequirements{ Requests: v1.ResourceList{v1.ResourceEphemeralStorage: resource.MustParse("1Mi")}, }, expected: false, }, { name: "only unsupported resource limits set", podResources: &v1.ResourceRequirements{ Limits: v1.ResourceList{v1.ResourceEphemeralStorage: resource.MustParse("1Mi")}, }, expected: false, }, { name: "unsupported and supported resources requests set", podResources: &v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceEphemeralStorage: resource.MustParse("1Mi"), v1.ResourceCPU: resource.MustParse("1m"), }, }, expected: true, }, { name: "unsupported and supported resources limits set", podResources: &v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceEphemeralStorage: resource.MustParse("1Mi"), v1.ResourceCPU: resource.MustParse("1m"), }, }, expected: true, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { testPod := &v1.Pod{Spec: v1.PodSpec{Resources: tc.podResources}} if got := IsPodLevelResourcesSet(testPod); got != tc.expected { t.Errorf("got=%t, want=%t", got, tc.expected) } }) } } func TestPodLevelResourceRequests(t *testing.T) { restartAlways := v1.ContainerRestartPolicyAlways testCases := []struct { name string opts PodResourcesOptions podResources v1.ResourceRequirements overhead v1.ResourceList initContainers []v1.Container
containers []v1.Container expectedRequests v1.ResourceList }{ { name: "nil", expectedRequests: v1.ResourceList{}, }, { name: "pod level memory resource with SkipPodLevelResources true", podResources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("2Mi")}}, opts: PodResourcesOptions{SkipPodLevelResources: true}, expectedRequests: v1.ResourceList{}, }, { name: "pod level memory resource with SkipPodLevelResources false", podResources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("2Mi")}}, opts: PodResourcesOptions{SkipPodLevelResources: false}, expectedRequests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("2Mi")}, }, { name: "pod level memory and container level cpu resources with SkipPodLevelResources false", podResources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("2Mi")}}, containers: []v1.Container{ { Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2m")}}, }, }, opts: PodResourcesOptions{SkipPodLevelResources: false}, expectedRequests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("2Mi"), v1.ResourceCPU: resource.MustParse("2m")}, }, { name: "pod level unsupported resources set at both pod-level and container-level with SkipPodLevelResources false", podResources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("2Mi")}}, containers: []v1.Container{ { Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("3Mi")}}, }, }, opts: PodResourcesOptions{SkipPodLevelResources: false}, expectedRequests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("3Mi")}, }, { name: "pod level unsupported resources set at pod-level with SkipPodLevelResources false", podResources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("2Mi")}}, containers: []v1.Container{ { Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("3Mi")}}, }, }, opts: PodResourcesOptions{SkipPodLevelResources: false}, expectedRequests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("3Mi")}, }, { name: "only container level resources set with SkipPodLevelResources false", containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("3Mi"), v1.ResourceCPU: resource.MustParse("2m"), }, }, }, }, opts: PodResourcesOptions{SkipPodLevelResources: false}, expectedRequests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("3Mi"), v1.ResourceCPU: resource.MustParse("2m")}, }, { name: "both container-level and pod-level resources set with SkipPodLevelResources false", podResources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("6Mi"), v1.ResourceCPU: resource.MustParse("8m"), }, }, containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("3Mi"), v1.ResourceCPU: resource.MustParse("2m"), }, }, }, }, opts: PodResourcesOptions{SkipPodLevelResources: false}, expectedRequests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("6Mi"), v1.ResourceCPU: resource.MustParse("8m")}, }, { name: "container-level resources and init container set with SkipPodLevelResources false", containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceMemory: 
resource.MustParse("3Mi"), v1.ResourceCPU: resource.MustParse("2m"), }, }, }, }, initContainers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("5Mi"), v1.ResourceCPU: resource.MustParse("4m"), }, }, }, }, opts: PodResourcesOptions{SkipPodLevelResources: false}, expectedRequests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("5Mi"), v1.ResourceCPU: resource.MustParse("4m")}, }, { name: "container-level resources and init container set with SkipPodLevelResources false", containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("3Mi"), v1.ResourceCPU: resource.MustParse("2m"), }, }, }, }, initContainers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("5Mi"), v1.ResourceCPU: resource.MustParse("4m"), }, }, }, }, opts: PodResourcesOptions{SkipPodLevelResources: true}, expectedRequests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("5Mi"), v1.ResourceCPU: resource.MustParse("4m")}, }, { name: "container-level resources and sidecar container set with SkipPodLevelResources false", containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("3Mi"), v1.ResourceCPU: resource.MustParse("2m"), }, }, }, }, initContainers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("5Mi"), v1.ResourceCPU: resource.MustParse("4m"), }, }, RestartPolicy: &restartAlways, }, }, opts: PodResourcesOptions{SkipPodLevelResources: false}, expectedRequests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("8Mi"), v1.ResourceCPU: resource.MustParse("6m")}, }, { name: "container-level resources, init and sidecar container set with SkipPodLevelResources false", containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("3Mi"), v1.ResourceCPU: resource.MustParse("2m"), }, }, }, }, initContainers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("5Mi"), v1.ResourceCPU: resource.MustParse("4m"), }, }, RestartPolicy: &restartAlways, }, { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("6Mi"), v1.ResourceCPU: resource.MustParse("8m"), }, }, }, }, opts: PodResourcesOptions{SkipPodLevelResources: false}, expectedRequests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("11Mi"), v1.ResourceCPU: resource.MustParse("12m")}, }, { name: "pod-level resources, container-level resources, init and sidecar container set with SkipPodLevelResources false", podResources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("15Mi"), v1.ResourceCPU: resource.MustParse("18m"), }, }, containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("3Mi"), v1.ResourceCPU: resource.MustParse("2m"), }, }, }, }, initContainers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("5Mi"), v1.ResourceCPU: resource.MustParse("4m"), }, }, RestartPolicy: &restartAlways, }, { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("6Mi"), v1.ResourceCPU: resource.MustParse("8m"), }, 
}, }, }, opts: PodResourcesOptions{SkipPodLevelResources: false}, expectedRequests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("15Mi"), v1.ResourceCPU: resource.MustParse("18m")}, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { podReqs := PodRequests(getPodLevelResourcesPod(tc.podResources, tc.overhead, tc.containers, tc.initContainers), tc.opts) if diff := cmp.Diff(podReqs, tc.expectedRequests); diff != "" { t.Errorf("got=%v, want=%v, diff=%s", podReqs, tc.expectedRequests, diff) } }) } } func TestAggregateContainerRequestsAndLimits(t *testing.T) { restartAlways := v1.ContainerRestartPolicyAlways cases := []struct { containers []v1.Container initContainers []v1.Container name string expectedRequests v1.ResourceList expectedLimits v1.ResourceList }{ { name: "one container with limits", containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("9")}, }, }, }, expectedRequests: v1.ResourceList{}, expectedLimits: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("9"), }, }, { name: "two containers with limits", containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("9")}, }, }, { Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("9")}, }, }, }, expectedRequests: v1.ResourceList{}, expectedLimits: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("18"), }, }, { name: "one container with requests", containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("9")}, }, }, }, expectedRequests: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("9"), }, expectedLimits: v1.ResourceList{}, }, { name: "two containers with requests", containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("9")}, }, }, { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("9")}, }, }, }, expectedRequests: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("18"), }, expectedLimits: v1.ResourceList{}, }, { name: "regular and init containers with requests", containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("9")}, }, }, }, initContainers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("9")}, }, }, }, expectedRequests: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("9"), }, expectedLimits: v1.ResourceList{}, }, { name: "regular, init and sidecar containers with requests", containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("9")}, }, }, }, initContainers: []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("8")}, }, RestartPolicy: &restartAlways, }, { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("6")}, }, }, }, expectedRequests: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("17"), }, expectedLimits: 
v1.ResourceList{}, }, { name: "regular and init containers with limits", containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("9")}, }, }, }, initContainers: []v1.Container{ { Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("9")}, }, }, }, expectedRequests: v1.ResourceList{}, expectedLimits: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("9"), }, }, { name: "regular, init and sidecar containers with limits", containers: []v1.Container{ { Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("9")}, }, }, }, initContainers: []v1.Container{ { Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("8")}, }, RestartPolicy: &restartAlways, }, { Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{v1.ResourceName(v1.ResourceCPU): resource.MustParse("6")}, }, }, }, expectedRequests: v1.ResourceList{}, expectedLimits: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("17"), }, }, } for idx, tc := range cases { testPod := &v1.Pod{Spec: v1.PodSpec{Containers: tc.containers, InitContainers: tc.initContainers}} resRequests := AggregateContainerRequests(testPod, PodResourcesOptions{}) resLimits := AggregateContainerLimits(testPod, PodResourcesOptions{}) if !equality.Semantic.DeepEqual(tc.expectedRequests, resRequests) { t.Errorf("test case failure[%d]: %v, requests:\n expected:\t%v\ngot\t\t%v", idx, tc.name, tc.expectedRequests, resRequests) } if !equality.Semantic.DeepEqual(tc.expectedLimits, resLimits) { t.Errorf("test case failure[%d]: %v, limits:\n expected:\t%v\ngot\t\t%v", idx, tc.name, tc.expectedLimits, resLimits) } } } type podResources struct { cpuRequest, cpuLimit, memoryRequest, memoryLimit, cpuOverhead, memoryOverhead string } func getPodLevelResourcesPod(podResources v1.ResourceRequirements, overhead v1.ResourceList, containers, initContainers []v1.Container) *v1.Pod { return &v1.Pod{ Spec: v1.PodSpec{ Resources: &podResources, Containers: containers, InitContainers: initContainers, Overhead: overhead, }, } } // TODO(ndixita): refactor to re-use getPodResourcesPod() func getPod(cname string, resources podResources) *v1.Pod { r := v1.ResourceRequirements{ Limits: make(v1.ResourceList), Requests: make(v1.ResourceList), } overhead := make(v1.ResourceList) if resources.cpuLimit != "" { r.Limits[v1.ResourceCPU] = resource.MustParse(resources.cpuLimit) } if resources.memoryLimit != "" { r.Limits[v1.ResourceMemory] = resource.MustParse(resources.memoryLimit) } if resources.cpuRequest != "" { r.Requests[v1.ResourceCPU] = resource.MustParse(resources.cpuRequest) } if resources.memoryRequest != "" { r.Requests[v1.ResourceMemory] = resource.MustParse(resources.memoryRequest) } if resources.cpuOverhead != "" { overhead[v1.ResourceCPU] = resource.MustParse(resources.cpuOverhead) } if resources.memoryOverhead != "" { overhead[v1.ResourceMemory] = resource.MustParse(resources.memoryOverhead) } return &v1.Pod{ Spec: v1.PodSpec{ Containers: []v1.Container{ { Name: cname, Resources: r, }, }, InitContainers: []v1.Container{ { Name: "init-" + cname, Resources: r, }, }, Overhead: overhead, }, } } 
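// A self-contained consumer sketch (hypothetical; not a file in this
// repository). It assumes k8s.io/api and k8s.io/apimachinery are on the
// module path and imports this package as k8s.io/component-helpers/resource:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	helpers "k8s.io/component-helpers/resource"
)

func main() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			// RuntimeClass overhead is added to requests unless ExcludeOverhead is set.
			Overhead: v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m")},
			Containers: []v1.Container{{
				Name: "app",
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("500m")},
					Limits:   v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
				},
			}},
		},
	}

	reqs := helpers.PodRequests(pod, helpers.PodResourcesOptions{})
	cpuReq := reqs[v1.ResourceCPU]
	fmt.Println(cpuReq.String()) // 750m: the 500m request plus 250m overhead

	lims := helpers.PodLimits(pod, helpers.PodResourcesOptions{ExcludeOverhead: true})
	cpuLim := lims[v1.ResourceCPU]
	fmt.Println(cpuLim.String()) // 1: overhead excluded by the option
}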
kubernetes-component-helpers-b5afa51/scheduling/000077500000000000000000000000001476422250100221375ustar00rootroot00000000000000kubernetes-component-helpers-b5afa51/scheduling/OWNERS000066400000000000000000000002261476422250100230770ustar00rootroot00000000000000# See the OWNERS docs at https://go.k8s.io/owners approvers: - sig-scheduling-maintainers reviewers: - sig-scheduling labels: - sig/scheduling kubernetes-component-helpers-b5afa51/scheduling/corev1/000077500000000000000000000000001476422250100233365ustar00rootroot00000000000000kubernetes-component-helpers-b5afa51/scheduling/corev1/doc.go000066400000000000000000000016651476422250100244420ustar00rootroot00000000000000/* Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package corev1 defines functions which should satisfy one of the following: // // - Be used by more than one core component (kube-scheduler, kubelet, kube-apiserver, etc.) // - Be used by a core component and another kubernetes project (cluster-autoscaler, descheduler) // // And be a scheduling feature. package corev1 // import "k8s.io/component-helpers/scheduling/corev1" kubernetes-component-helpers-b5afa51/scheduling/corev1/helpers.go000066400000000000000000000063221476422250100253320ustar00rootroot00000000000000/* Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package corev1 import ( "encoding/json" v1 "k8s.io/api/core/v1" "k8s.io/component-helpers/scheduling/corev1/nodeaffinity" ) // PodPriority returns the priority of the given pod. func PodPriority(pod *v1.Pod) int32 { if pod.Spec.Priority != nil { return *pod.Spec.Priority } // When priority of a running pod is nil, it means it was created at a time // when there was no global default priority class and the priority class // name of the pod was empty. So, we resolve to the static default priority. return 0 } // MatchNodeSelectorTerms checks whether the node's labels and fields match the node selector terms, which are ORed; // a nil or empty term matches no objects.
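// For example (illustrative, mirroring the multi-term test case below): given
// one term requiring the label "label_1" In ["label_1_val"] and a second term
// requiring the field metadata.name In ["host_1"], a node named host_1 whose
// label does not match still matches overall, because terms are ORed while
// the requirements inside a single term are ANDed.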
func MatchNodeSelectorTerms( node *v1.Node, nodeSelector *v1.NodeSelector, ) (bool, error) { if node == nil { return false, nil } return nodeaffinity.NewLazyErrorNodeSelector(nodeSelector).Match(node) } // GetAvoidPodsFromNodeAnnotations scans the list of annotations and // returns the pods that need to be avoided on this node during scheduling. func GetAvoidPodsFromNodeAnnotations(annotations map[string]string) (v1.AvoidPods, error) { var avoidPods v1.AvoidPods if len(annotations) > 0 && annotations[v1.PreferAvoidPodsAnnotationKey] != "" { err := json.Unmarshal([]byte(annotations[v1.PreferAvoidPodsAnnotationKey]), &avoidPods) if err != nil { return avoidPods, err } } return avoidPods, nil } // TolerationsTolerateTaint checks if the taint is tolerated by any of the tolerations. func TolerationsTolerateTaint(tolerations []v1.Toleration, taint *v1.Taint) bool { for i := range tolerations { if tolerations[i].ToleratesTaint(taint) { return true } } return false } type taintsFilterFunc func(*v1.Taint) bool // FindMatchingUntoleratedTaint checks if the given tolerations tolerate // all the filtered taints, and returns the first taint without a toleration. // Returns true if there is an untolerated taint. // Returns false if all taints are tolerated. func FindMatchingUntoleratedTaint(taints []v1.Taint, tolerations []v1.Toleration, inclusionFilter taintsFilterFunc) (v1.Taint, bool) { filteredTaints := getFilteredTaints(taints, inclusionFilter) for _, taint := range filteredTaints { if !TolerationsTolerateTaint(tolerations, &taint) { return taint, true } } return v1.Taint{}, false } // getFilteredTaints returns a list of taints satisfying the filter predicate func getFilteredTaints(taints []v1.Taint, inclusionFilter taintsFilterFunc) []v1.Taint { if inclusionFilter == nil { return taints } filteredTaints := []v1.Taint{} for _, taint := range taints { if !inclusionFilter(&taint) { continue } filteredTaints = append(filteredTaints, taint) } return filteredTaints } kubernetes-component-helpers-b5afa51/scheduling/corev1/helpers_test.go000066400000000000000000000517301476422250100263740ustar00rootroot00000000000000/* Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package corev1 import ( "reflect" "testing" v1 "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // TestPodPriority tests the PodPriority function.
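// A scheduler-style sketch (hypothetical, not taken from a caller in this
// repository) of the taint helpers defined in helpers.go: restrict the check
// to NoSchedule taints and reject the node on the first untolerated one:
//
//	taint, untolerated := FindMatchingUntoleratedTaint(node.Spec.Taints, pod.Spec.Tolerations,
//		func(t *v1.Taint) bool { return t.Effect == v1.TaintEffectNoSchedule })
//	if untolerated {
//		// Filter the node out; taint is the first offending taint.
//		_ = taint
//	}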
func TestPodPriority(t *testing.T) { p := int32(20) tests := []struct { name string pod *v1.Pod expectedPriority int32 }{ { name: "no priority pod resolves to static default priority", pod: &v1.Pod{ Spec: v1.PodSpec{Containers: []v1.Container{ {Name: "container", Image: "image"}}, }, }, expectedPriority: 0, }, { name: "pod with priority resolves correctly", pod: &v1.Pod{ Spec: v1.PodSpec{Containers: []v1.Container{ {Name: "container", Image: "image"}}, Priority: &p, }, }, expectedPriority: p, }, } for _, test := range tests { if PodPriority(test.pod) != test.expectedPriority { t.Errorf("expected pod priority: %v, got %v", test.expectedPriority, PodPriority(test.pod)) } } } func TestMatchNodeSelectorTerms(t *testing.T) { type args struct { nodeSelector *v1.NodeSelector node *v1.Node } tests := []struct { name string args args want bool }{ { name: "nil terms", args: args{ nodeSelector: nil, node: nil, }, want: false, }, { name: "node label matches matchExpressions terms", args: args{ nodeSelector: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{{ Key: "label_1", Operator: v1.NodeSelectorOpIn, Values: []string{"label_1_val"}, }}, }, }}, node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"label_1": "label_1_val"}}}, }, want: true, }, { name: "node field matches matchFields terms", args: args{ nodeSelector: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_1"}, }}, }, }}, node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "host_1"}}, }, want: true, }, { name: "invalid node field requirement", args: args{ nodeSelector: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_1", "host_2"}, }}, }, }}, node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "host_1"}}, }, want: false, }, { name: "fieldSelectorTerm with node labels", args: args{ nodeSelector: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_1"}, }}, }, }}, node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "not_host_1", Labels: map[string]string{ "metadata.name": "host_1", }}}, }, want: false, }, { name: "labelSelectorTerm with node fields", args: args{ nodeSelector: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_1"}, }}, }, }}, node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "host_1"}}, }, want: false, }, { name: "labelSelectorTerm and fieldSelectorTerm was set, but only node fields", args: args{ nodeSelector: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{{ Key: "label_1", Operator: v1.NodeSelectorOpIn, Values: []string{"label_1_val"}, }}, MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_1"}, }}, }, }}, node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "host_1"}}, }, want: false, }, { name: "labelSelectorTerm and fieldSelectorTerm was set, both node fields and labels (both matched)", args: args{ nodeSelector: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: 
[]v1.NodeSelectorRequirement{{ Key: "label_1", Operator: v1.NodeSelectorOpIn, Values: []string{"label_1_val"}, }}, MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_1"}, }}, }}, }, node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "host_1", Labels: map[string]string{ "label_1": "label_1_val", }}}, }, want: true, }, { name: "labelSelectorTerm and fieldSelectorTerm was set, both node fields and labels (one mismatched)", args: args{ nodeSelector: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{{ Key: "label_1", Operator: v1.NodeSelectorOpIn, Values: []string{"label_1_val"}, }}, MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_1"}, }}, }, }}, node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "host_1", Labels: map[string]string{ "label_1": "label_1_val-failed", }}}, }, want: false, }, { name: "multi-selector was set, both node fields and labels (one mismatched)", args: args{ nodeSelector: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{{ Key: "label_1", Operator: v1.NodeSelectorOpIn, Values: []string{"label_1_val"}, }}, }, { MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_1"}, }}, }, }}, node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "host_1", Labels: map[string]string{ "label_1": "label_1_val-failed", }}}, }, want: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got, _ := MatchNodeSelectorTerms(tt.args.node, tt.args.nodeSelector); got != tt.want { t.Errorf("MatchNodeSelectorTermsORed() = %v, want %v", got, tt.want) } }) } } // TestMatchNodeSelectorTermsStateless ensures MatchNodeSelectorTerms() // is invoked in a "stateless" manner, i.e. 
nodeSelector should NOT // be deeply modified after invoking func TestMatchNodeSelectorTermsStateless(t *testing.T) { type args struct { nodeSelector *v1.NodeSelector node *v1.Node } tests := []struct { name string args args want *v1.NodeSelector }{ { name: "nil terms", args: args{ nodeSelector: nil, node: nil, }, want: nil, }, { name: "nodeLabels: preordered matchExpressions and nil matchFields", args: args{ nodeSelector: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{{ Key: "label_1", Operator: v1.NodeSelectorOpIn, Values: []string{"label_1_val", "label_2_val"}, }}, }, }}, node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"label_1": "label_1_val"}}}, }, want: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{{ Key: "label_1", Operator: v1.NodeSelectorOpIn, Values: []string{"label_1_val", "label_2_val"}, }}, }, }}, }, { name: "nodeLabels: unordered matchExpressions and nil matchFields", args: args{ nodeSelector: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{{ Key: "label_1", Operator: v1.NodeSelectorOpIn, Values: []string{"label_2_val", "label_1_val"}, }}, }, }}, node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"label_1": "label_1_val"}}}, }, want: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{{ Key: "label_1", Operator: v1.NodeSelectorOpIn, Values: []string{"label_2_val", "label_1_val"}, }}, }, }}, }, { name: "nodeFields: nil matchExpressions and preordered matchFields", args: args{ nodeSelector: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_1", "host_2"}, }}, }, }}, node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "host_1"}}, }, want: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_1", "host_2"}, }}, }, }}, }, { name: "nodeFields: nil matchExpressions and unordered matchFields", args: args{ nodeSelector: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_2", "host_1"}, }}, }, }}, node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "host_1"}}, }, want: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_2", "host_1"}, }}, }, }}, }, { name: "nodeLabels and nodeFields: ordered matchExpressions and ordered matchFields", args: args{ nodeSelector: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{{ Key: "label_1", Operator: v1.NodeSelectorOpIn, Values: []string{"label_1_val", "label_2_val"}, }}, MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_1", "host_2"}, }}, }, }}, node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "host_1", Labels: map[string]string{ "label_1": "label_1_val", }}}, }, want: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{{ Key: "label_1", Operator: v1.NodeSelectorOpIn, Values: 
[]string{"label_1_val", "label_2_val"}, }}, MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_1", "host_2"}, }}, }, }}, }, { name: "nodeLabels and nodeFields: ordered matchExpressions and unordered matchFields", args: args{ nodeSelector: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{{ Key: "label_1", Operator: v1.NodeSelectorOpIn, Values: []string{"label_1_val", "label_2_val"}, }}, MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_2", "host_1"}, }}, }, }}, node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "host_1", Labels: map[string]string{ "label_1": "label_1_val", }}}, }, want: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{{ Key: "label_1", Operator: v1.NodeSelectorOpIn, Values: []string{"label_1_val", "label_2_val"}, }}, MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_2", "host_1"}, }}, }, }}, }, { name: "nodeLabels and nodeFields: unordered matchExpressions and ordered matchFields", args: args{ nodeSelector: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{{ Key: "label_1", Operator: v1.NodeSelectorOpIn, Values: []string{"label_2_val", "label_1_val"}, }}, MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_1", "host_2"}, }}, }, }}, node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "host_1", Labels: map[string]string{ "label_1": "label_1_val", }}}, }, want: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{{ Key: "label_1", Operator: v1.NodeSelectorOpIn, Values: []string{"label_2_val", "label_1_val"}, }}, MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_1", "host_2"}, }}, }, }}, }, { name: "nodeLabels and nodeFields: unordered matchExpressions and unordered matchFields", args: args{ nodeSelector: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{{ Key: "label_1", Operator: v1.NodeSelectorOpIn, Values: []string{"label_2_val", "label_1_val"}, }}, MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_2", "host_1"}, }}, }, }}, node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "host_1", Labels: map[string]string{ "label_1": "label_1_val", }}}, }, want: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{{ Key: "label_1", Operator: v1.NodeSelectorOpIn, Values: []string{"label_2_val", "label_1_val"}, }}, MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_2", "host_1"}, }}, }, }}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { _, _ = MatchNodeSelectorTerms(tt.args.node, tt.args.nodeSelector) if !apiequality.Semantic.DeepEqual(tt.args.nodeSelector, tt.want) { // fail when tt.args.nodeSelector is deeply modified t.Errorf("MatchNodeSelectorTerms() got = %v, want %v", tt.args.nodeSelector, tt.want) } }) } } func TestGetAvoidPodsFromNode(t *testing.T) { controllerFlag := true testCases := []struct { node *v1.Node expectValue v1.AvoidPods expectErr 
bool }{ { node: &v1.Node{}, expectValue: v1.AvoidPods{}, expectErr: false, }, { node: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ v1.PreferAvoidPodsAnnotationKey: ` { "preferAvoidPods": [ { "podSignature": { "podController": { "apiVersion": "v1", "kind": "ReplicationController", "name": "foo", "uid": "abcdef123456", "controller": true } }, "reason": "some reason", "message": "some message" } ] }`, }, }, }, expectValue: v1.AvoidPods{ PreferAvoidPods: []v1.PreferAvoidPodsEntry{ { PodSignature: v1.PodSignature{ PodController: &metav1.OwnerReference{ APIVersion: "v1", Kind: "ReplicationController", Name: "foo", UID: "abcdef123456", Controller: &controllerFlag, }, }, Reason: "some reason", Message: "some message", }, }, }, expectErr: false, }, { node: &v1.Node{ // Missing end symbol of "podController" and "podSignature" ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ v1.PreferAvoidPodsAnnotationKey: ` { "preferAvoidPods": [ { "podSignature": { "podController": { "kind": "ReplicationController", "apiVersion": "v1" "reason": "some reason", "message": "some message" } ] }`, }, }, }, expectValue: v1.AvoidPods{}, expectErr: true, }, } for i, tc := range testCases { v, err := GetAvoidPodsFromNodeAnnotations(tc.node.Annotations) if err == nil && tc.expectErr { t.Errorf("[%v]expected error but got none.", i) } if err != nil && !tc.expectErr { t.Errorf("[%v]did not expect error but got: %v", i, err) } if !reflect.DeepEqual(tc.expectValue, v) { t.Errorf("[%v]expect value %v but got %v with %v", i, tc.expectValue, v, v.PreferAvoidPods[0].PodSignature.PodController.Controller) } } } func TestFindMatchingUntoleratedTaint(t *testing.T) { testCases := []struct { description string tolerations []v1.Toleration taints []v1.Taint applyFilter taintsFilterFunc expectTolerated bool }{ { description: "empty tolerations tolerate empty taints", tolerations: []v1.Toleration{}, taints: []v1.Taint{}, applyFilter: func(t *v1.Taint) bool { return true }, expectTolerated: true, }, { description: "non-empty tolerations tolerate empty taints", tolerations: []v1.Toleration{ { Key: "foo", Operator: "Exists", Effect: v1.TaintEffectNoSchedule, }, }, taints: []v1.Taint{}, applyFilter: func(t *v1.Taint) bool { return true }, expectTolerated: true, }, { description: "tolerations match all taints, expect tolerated", tolerations: []v1.Toleration{ { Key: "foo", Operator: "Exists", Effect: v1.TaintEffectNoSchedule, }, }, taints: []v1.Taint{ { Key: "foo", Effect: v1.TaintEffectNoSchedule, }, }, applyFilter: func(t *v1.Taint) bool { return true }, expectTolerated: true, }, { description: "tolerations don't match taints, but no taints apply to the filter, expect tolerated", tolerations: []v1.Toleration{ { Key: "foo", Operator: "Exists", Effect: v1.TaintEffectNoSchedule, }, }, taints: []v1.Taint{ { Key: "bar", Effect: v1.TaintEffectNoSchedule, }, }, applyFilter: func(t *v1.Taint) bool { return false }, expectTolerated: true, }, { description: "no filterFunc indicated, means all taints apply to the filter, tolerations don't match taints, expect untolerated", tolerations: []v1.Toleration{ { Key: "foo", Operator: "Exists", Effect: v1.TaintEffectNoSchedule, }, }, taints: []v1.Taint{ { Key: "bar", Effect: v1.TaintEffectNoSchedule, }, }, applyFilter: nil, expectTolerated: false, }, { description: "tolerations match taints, expect tolerated", tolerations: []v1.Toleration{ { Key: "foo", Operator: "Exists", Effect: v1.TaintEffectNoExecute, }, }, taints: []v1.Taint{ { Key: "foo", Effect: 
v1.TaintEffectNoExecute, }, { Key: "bar", Effect: v1.TaintEffectNoSchedule, }, }, applyFilter: func(t *v1.Taint) bool { return t.Effect == v1.TaintEffectNoExecute }, expectTolerated: true, }, } for _, tc := range testCases { _, untolerated := FindMatchingUntoleratedTaint(tc.taints, tc.tolerations, tc.applyFilter) if tc.expectTolerated != !untolerated { filteredTaints := []v1.Taint{} for _, taint := range tc.taints { if tc.applyFilter != nil && !tc.applyFilter(&taint) { continue } filteredTaints = append(filteredTaints, taint) } t.Errorf("[%s] expect tolerations %+v tolerate filtered taints %+v in taints %+v", tc.description, tc.tolerations, filteredTaints, tc.taints) } } } kubernetes-component-helpers-b5afa51/scheduling/corev1/nodeaffinity/000077500000000000000000000000001476422250100260155ustar00rootroot00000000000000kubernetes-component-helpers-b5afa51/scheduling/corev1/nodeaffinity/nodeaffinity.go000066400000000000000000000240241476422250100310250ustar00rootroot00000000000000/* Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package nodeaffinity import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/validation/field" ) // NodeSelector is a runtime representation of v1.NodeSelector. type NodeSelector struct { lazy LazyErrorNodeSelector } // LazyErrorNodeSelector is a runtime representation of v1.NodeSelector that // only reports parse errors when no terms match. type LazyErrorNodeSelector struct { terms []nodeSelectorTerm } // NewNodeSelector returns a NodeSelector or aggregate parsing errors found. func NewNodeSelector(ns *v1.NodeSelector, opts ...field.PathOption) (*NodeSelector, error) { lazy := NewLazyErrorNodeSelector(ns, opts...) var errs []error for _, term := range lazy.terms { if len(term.parseErrs) > 0 { errs = append(errs, term.parseErrs...) } } if len(errs) != 0 { return nil, errors.Flatten(errors.NewAggregate(errs)) } return &NodeSelector{lazy: *lazy}, nil } // NewLazyErrorNodeSelector creates a NodeSelector that only reports parse // errors when no terms match. func NewLazyErrorNodeSelector(ns *v1.NodeSelector, opts ...field.PathOption) *LazyErrorNodeSelector { p := field.ToPath(opts...) parsedTerms := make([]nodeSelectorTerm, 0, len(ns.NodeSelectorTerms)) path := p.Child("nodeSelectorTerms") for i, term := range ns.NodeSelectorTerms { // nil or empty term selects no objects if isEmptyNodeSelectorTerm(&term) { continue } p := path.Index(i) parsedTerms = append(parsedTerms, newNodeSelectorTerm(&term, p)) } return &LazyErrorNodeSelector{ terms: parsedTerms, } } // Match checks whether the node labels and fields match the selector terms, ORed; // nil or empty term matches no objects. func (ns *NodeSelector) Match(node *v1.Node) bool { // parse errors are reported in NewNodeSelector. 
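// By construction a *NodeSelector carries no parse errors (NewNodeSelector
// returns an error instead of constructing one), so the error from the lazy
// matcher can safely be dropped here.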
match, _ := ns.lazy.Match(node) return match } // Match checks whether the node labels and fields match the selector terms, ORed; // nil or empty term matches no objects. // Parse errors are only returned if no terms matched. func (ns *LazyErrorNodeSelector) Match(node *v1.Node) (bool, error) { if node == nil { return false, nil } nodeLabels := labels.Set(node.Labels) nodeFields := extractNodeFields(node) var errs []error for _, term := range ns.terms { match, tErrs := term.match(nodeLabels, nodeFields) if len(tErrs) > 0 { errs = append(errs, tErrs...) continue } if match { return true, nil } } return false, errors.Flatten(errors.NewAggregate(errs)) } // PreferredSchedulingTerms is a runtime representation of []v1.PreferredSchedulingTerms. type PreferredSchedulingTerms struct { terms []preferredSchedulingTerm } // NewPreferredSchedulingTerms returns a PreferredSchedulingTerms or all the parsing errors found. // If a v1.PreferredSchedulingTerm has a 0 weight, its parsing is skipped. func NewPreferredSchedulingTerms(terms []v1.PreferredSchedulingTerm, opts ...field.PathOption) (*PreferredSchedulingTerms, error) { p := field.ToPath(opts...) var errs []error parsedTerms := make([]preferredSchedulingTerm, 0, len(terms)) for i, term := range terms { path := p.Index(i) if term.Weight == 0 || isEmptyNodeSelectorTerm(&term.Preference) { continue } parsedTerm := preferredSchedulingTerm{ nodeSelectorTerm: newNodeSelectorTerm(&term.Preference, path), weight: int(term.Weight), } if len(parsedTerm.parseErrs) > 0 { errs = append(errs, parsedTerm.parseErrs...) } else { parsedTerms = append(parsedTerms, parsedTerm) } } if len(errs) != 0 { return nil, errors.Flatten(errors.NewAggregate(errs)) } return &PreferredSchedulingTerms{terms: parsedTerms}, nil } // Score returns a score for a Node: the sum of the weights of the terms that // match the Node. func (t *PreferredSchedulingTerms) Score(node *v1.Node) int64 { var score int64 nodeLabels := labels.Set(node.Labels) nodeFields := extractNodeFields(node) for _, term := range t.terms { // parse errors are reported in NewPreferredSchedulingTerms. if ok, _ := term.match(nodeLabels, nodeFields); ok { score += int64(term.weight) } } return score } func isEmptyNodeSelectorTerm(term *v1.NodeSelectorTerm) bool { return len(term.MatchExpressions) == 0 && len(term.MatchFields) == 0 } func extractNodeFields(n *v1.Node) fields.Set { f := make(fields.Set) if len(n.Name) > 0 { f["metadata.name"] = n.Name } return f } type nodeSelectorTerm struct { matchLabels labels.Selector matchFields fields.Selector parseErrs []error } func newNodeSelectorTerm(term *v1.NodeSelectorTerm, path *field.Path) nodeSelectorTerm { var parsedTerm nodeSelectorTerm var errs []error if len(term.MatchExpressions) != 0 { p := path.Child("matchExpressions") parsedTerm.matchLabels, errs = nodeSelectorRequirementsAsSelector(term.MatchExpressions, p) if errs != nil { parsedTerm.parseErrs = append(parsedTerm.parseErrs, errs...) } } if len(term.MatchFields) != 0 { p := path.Child("matchFields") parsedTerm.matchFields, errs = nodeSelectorRequirementsAsFieldSelector(term.MatchFields, p) if errs != nil { parsedTerm.parseErrs = append(parsedTerm.parseErrs, errs...) 
} } return parsedTerm } func (t *nodeSelectorTerm) match(nodeLabels labels.Set, nodeFields fields.Set) (bool, []error) { if t.parseErrs != nil { return false, t.parseErrs } if t.matchLabels != nil && !t.matchLabels.Matches(nodeLabels) { return false, nil } if t.matchFields != nil && len(nodeFields) > 0 && !t.matchFields.Matches(nodeFields) { return false, nil } return true, nil } var validSelectorOperators = []v1.NodeSelectorOperator{ v1.NodeSelectorOpIn, v1.NodeSelectorOpNotIn, v1.NodeSelectorOpExists, v1.NodeSelectorOpDoesNotExist, v1.NodeSelectorOpGt, v1.NodeSelectorOpLt, } // nodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements // labels.Selector. func nodeSelectorRequirementsAsSelector(nsm []v1.NodeSelectorRequirement, path *field.Path) (labels.Selector, []error) { if len(nsm) == 0 { return labels.Nothing(), nil } var errs []error selector := labels.NewSelector() for i, expr := range nsm { p := path.Index(i) var op selection.Operator switch expr.Operator { case v1.NodeSelectorOpIn: op = selection.In case v1.NodeSelectorOpNotIn: op = selection.NotIn case v1.NodeSelectorOpExists: op = selection.Exists case v1.NodeSelectorOpDoesNotExist: op = selection.DoesNotExist case v1.NodeSelectorOpGt: op = selection.GreaterThan case v1.NodeSelectorOpLt: op = selection.LessThan default: errs = append(errs, field.NotSupported(p.Child("operator"), expr.Operator, validSelectorOperators)) continue } r, err := labels.NewRequirement(expr.Key, op, expr.Values, field.WithPath(p)) if err != nil { errs = append(errs, err) } else { selector = selector.Add(*r) } } if len(errs) != 0 { return nil, errs } return selector, nil } var validFieldSelectorOperators = []v1.NodeSelectorOperator{ v1.NodeSelectorOpIn, v1.NodeSelectorOpNotIn, } // nodeSelectorRequirementsAsFieldSelector converts the []NodeSelectorRequirement core type into a struct that implements // fields.Selector. func nodeSelectorRequirementsAsFieldSelector(nsr []v1.NodeSelectorRequirement, path *field.Path) (fields.Selector, []error) { if len(nsr) == 0 { return fields.Nothing(), nil } var errs []error var selectors []fields.Selector for i, expr := range nsr { p := path.Index(i) switch expr.Operator { case v1.NodeSelectorOpIn: if len(expr.Values) != 1 { errs = append(errs, field.Invalid(p.Child("values"), expr.Values, "must have one element")) } else { selectors = append(selectors, fields.OneTermEqualSelector(expr.Key, expr.Values[0])) } case v1.NodeSelectorOpNotIn: if len(expr.Values) != 1 { errs = append(errs, field.Invalid(p.Child("values"), expr.Values, "must have one element")) } else { selectors = append(selectors, fields.OneTermNotEqualSelector(expr.Key, expr.Values[0])) } default: errs = append(errs, field.NotSupported(p.Child("operator"), expr.Operator, validFieldSelectorOperators)) } } if len(errs) != 0 { return nil, errs } return fields.AndSelectors(selectors...), nil } type preferredSchedulingTerm struct { nodeSelectorTerm weight int } type RequiredNodeAffinity struct { labelSelector labels.Selector nodeSelector *LazyErrorNodeSelector } // GetRequiredNodeAffinity returns the parsing result of pod's nodeSelector and nodeAffinity. func GetRequiredNodeAffinity(pod *v1.Pod) RequiredNodeAffinity { var selector labels.Selector if len(pod.Spec.NodeSelector) > 0 { selector = labels.SelectorFromSet(pod.Spec.NodeSelector) } // Use LazyErrorNodeSelector for backwards compatibility of parsing errors. 
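// In other words, an affinity term that fails to parse is treated as
// "does not match" at Match() time rather than being rejected up front.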
var affinity *LazyErrorNodeSelector if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { affinity = NewLazyErrorNodeSelector(pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution) } return RequiredNodeAffinity{labelSelector: selector, nodeSelector: affinity} } // Match checks whether the pod is schedulable onto nodes according to // the requirements in both nodeSelector and nodeAffinity. func (s RequiredNodeAffinity) Match(node *v1.Node) (bool, error) { if s.labelSelector != nil { if !s.labelSelector.Matches(labels.Set(node.Labels)) { return false, nil } } if s.nodeSelector != nil { return s.nodeSelector.Match(node) } return true, nil } kubernetes-component-helpers-b5afa51/scheduling/corev1/nodeaffinity/nodeaffinity_test.go000066400000000000000000000656461476422250100321030ustar00rootroot00000000000000/* Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package nodeaffinity import ( "reflect" "testing" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/validation/field" ) var ( ignoreBadValue = cmpopts.IgnoreFields(field.Error{}, "BadValue") ) func TestNodeSelectorMatch(t *testing.T) { tests := []struct { name string nodeSelector v1.NodeSelector node *v1.Node wantErr error wantMatch bool }{ { name: "nil node", wantMatch: false, }, { name: "invalid field selector and label selector", nodeSelector: v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_1", "host_2"}, }}, }, { MatchExpressions: []v1.NodeSelectorRequirement{{ Key: "label_1", Operator: v1.NodeSelectorOpIn, Values: []string{"label_1_val"}, }}, MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_1"}, }}, }, { MatchExpressions: []v1.NodeSelectorRequirement{{ Key: "invalid key", Operator: v1.NodeSelectorOpIn, Values: []string{"label_value"}, }}, }, }}, wantErr: field.ErrorList{ &field.Error{ Type: field.ErrorTypeInvalid, Field: "nodeSelectorTerms[0].matchFields[0].values", Detail: "must have one element", }, &field.Error{ Type: field.ErrorTypeInvalid, Field: "nodeSelectorTerms[2].matchExpressions[0].key", Detail: `name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 
'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')`, }, }.ToAggregate(), }, { name: "node matches field selector, but not labels", nodeSelector: v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{{ Key: "label_1", Operator: v1.NodeSelectorOpIn, Values: []string{"label_1_val"}, }}, MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_1"}, }}, }, }}, node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "host_1"}}, }, { name: "node matches field selector and label selector", nodeSelector: v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{{ Key: "label_1", Operator: v1.NodeSelectorOpIn, Values: []string{"label_1_val"}, }}, MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_1"}, }}, }, }}, node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "host_1", Labels: map[string]string{"label_1": "label_1_val"}}}, wantMatch: true, }, { name: "second term matches", nodeSelector: v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{{ Key: "label_1", Operator: v1.NodeSelectorOpIn, Values: []string{"label_1_val"}, }}, }, { MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_1"}, }}, }, }}, node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "host_1"}}, wantMatch: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { nodeSelector, err := NewNodeSelector(&tt.nodeSelector) if diff := cmp.Diff(tt.wantErr, err, ignoreBadValue); diff != "" { t.Errorf("NewNodeSelector returned unexpected error (-want,+got):\n%s", diff) } if tt.wantErr != nil { return } match := nodeSelector.Match(tt.node) if match != tt.wantMatch { t.Errorf("NodeSelector.Match returned %t, want %t", match, tt.wantMatch) } }) } } func TestPreferredSchedulingTermsScore(t *testing.T) { tests := []struct { name string prefSchedTerms []v1.PreferredSchedulingTerm node *v1.Node wantErr error wantScore int64 }{ { name: "invalid field selector and label selector", prefSchedTerms: []v1.PreferredSchedulingTerm{ { Weight: 1, Preference: v1.NodeSelectorTerm{ MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_1", "host_2"}, }}, }, }, { Weight: 1, Preference: v1.NodeSelectorTerm{ MatchExpressions: []v1.NodeSelectorRequirement{{ Key: "label_1", Operator: v1.NodeSelectorOpIn, Values: []string{"label_1_val"}, }}, MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_1"}, }}, }, }, { Weight: 1, Preference: v1.NodeSelectorTerm{ MatchExpressions: []v1.NodeSelectorRequirement{{ Key: "invalid key", Operator: v1.NodeSelectorOpIn, Values: []string{"label_value"}, }}, }, }, }, wantErr: field.ErrorList{ &field.Error{ Type: field.ErrorTypeInvalid, Field: "[0].matchFields[0].values", Detail: "must have one element", }, &field.Error{ Type: field.ErrorTypeInvalid, Field: "[2].matchExpressions[0].key", Detail: `name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 
'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')`, }, }.ToAggregate(), }, { name: "invalid field selector but no weight, error not reported", prefSchedTerms: []v1.PreferredSchedulingTerm{ { Weight: 0, Preference: v1.NodeSelectorTerm{ MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_1", "host_2"}, }}, }, }, }, node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "host_1"}}, }, { name: "first and third term match", prefSchedTerms: []v1.PreferredSchedulingTerm{ { Weight: 5, Preference: v1.NodeSelectorTerm{ MatchFields: []v1.NodeSelectorRequirement{{ Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{"host_1"}, }}, }, }, { Weight: 7, Preference: v1.NodeSelectorTerm{ MatchExpressions: []v1.NodeSelectorRequirement{{ Key: "unknown_label", Operator: v1.NodeSelectorOpIn, Values: []string{"unknown_label_val"}, }}, }, }, { Weight: 11, Preference: v1.NodeSelectorTerm{ MatchExpressions: []v1.NodeSelectorRequirement{{ Key: "label_1", Operator: v1.NodeSelectorOpIn, Values: []string{"label_1_val"}, }}, }, }, }, node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "host_1", Labels: map[string]string{"label_1": "label_1_val"}}}, wantScore: 16, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { prefSchedTerms, err := NewPreferredSchedulingTerms(tt.prefSchedTerms) if diff := cmp.Diff(tt.wantErr, err, ignoreBadValue); diff != "" { t.Errorf("NewPreferredSchedulingTerms returned unexpected error (-want,+got):\n%s", diff) } if tt.wantErr != nil { return } score := prefSchedTerms.Score(tt.node) if score != tt.wantScore { t.Errorf("PreferredSchedulingTerms.Score returned %d, want %d", score, tt.wantScore) } }) } } func TestNodeSelectorRequirementsAsSelector(t *testing.T) { matchExpressions := []v1.NodeSelectorRequirement{{ Key: "foo", Operator: v1.NodeSelectorOpIn, Values: []string{"bar", "baz"}, }} mustParse := func(s string) labels.Selector { out, e := labels.Parse(s) if e != nil { panic(e) } return out } tc := []struct { in []v1.NodeSelectorRequirement out labels.Selector wantErr []error }{ {in: nil, out: labels.Nothing()}, {in: []v1.NodeSelectorRequirement{}, out: labels.Nothing()}, { in: matchExpressions, out: mustParse("foo in (baz,bar)"), }, { in: []v1.NodeSelectorRequirement{{ Key: "foo", Operator: v1.NodeSelectorOpExists, Values: []string{"bar", "baz"}, }}, wantErr: []error{ field.ErrorList{ field.Invalid(field.NewPath("root").Index(0).Child("values"), nil, "values set must be empty for exists and does not exist"), }.ToAggregate(), }, }, { in: []v1.NodeSelectorRequirement{{ Key: "foo", Operator: v1.NodeSelectorOpGt, Values: []string{"1"}, }}, out: mustParse("foo>1"), }, { in: []v1.NodeSelectorRequirement{{ Key: "bar", Operator: v1.NodeSelectorOpLt, Values: []string{"7"}, }}, out: mustParse("bar<7"), }, { in: []v1.NodeSelectorRequirement{{ Key: "baz", Operator: "invalid", Values: []string{"5"}, }}, wantErr: []error{ field.NotSupported(field.NewPath("root").Index(0).Child("operator"), v1.NodeSelectorOperator("invalid"), validSelectorOperators), }, }, } for i, tc := range tc { out, err := nodeSelectorRequirementsAsSelector(tc.in, field.NewPath("root")) if diff := cmp.Diff(tc.wantErr, err, ignoreBadValue); diff != "" { t.Errorf("nodeSelectorRequirementsAsSelector returned unexpected error (-want,+got):\n%s", diff) } if !reflect.DeepEqual(out, tc.out) { t.Errorf("[%v]expected:\n\t%+v\nbut got:\n\t%+v", i, tc.out, out) } } } func 
TestPodMatchesNodeSelectorAndAffinityTerms(t *testing.T) { tests := []struct { name string pod *v1.Pod labels map[string]string nodeName string want bool }{ { name: "no selector", pod: &v1.Pod{}, want: true, }, { name: "missing labels", pod: &v1.Pod{ Spec: v1.PodSpec{ NodeSelector: map[string]string{ "foo": "bar", }, }, }, want: false, }, { name: "same labels", pod: &v1.Pod{ Spec: v1.PodSpec{ NodeSelector: map[string]string{ "foo": "bar", }, }, }, labels: map[string]string{ "foo": "bar", }, want: true, }, { name: "node labels are superset", pod: &v1.Pod{ Spec: v1.PodSpec{ NodeSelector: map[string]string{ "foo": "bar", }, }, }, labels: map[string]string{ "foo": "bar", "baz": "blah", }, want: true, }, { name: "node labels are subset", pod: &v1.Pod{ Spec: v1.PodSpec{ NodeSelector: map[string]string{ "foo": "bar", "baz": "blah", }, }, }, labels: map[string]string{ "foo": "bar", }, want: false, }, { name: "Pod with matchExpressions using In operator that matches the existing node", pod: &v1.Pod{ Spec: v1.PodSpec{ Affinity: &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{ { Key: "foo", Operator: v1.NodeSelectorOpIn, Values: []string{"bar", "value2"}, }, }, }, }, }, }, }, }, }, labels: map[string]string{ "foo": "bar", }, want: true, }, { name: "Pod with matchExpressions using Gt operator that matches the existing node", pod: &v1.Pod{ Spec: v1.PodSpec{ Affinity: &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{ { Key: "kernel-version", Operator: v1.NodeSelectorOpGt, Values: []string{"0204"}, }, }, }, }, }, }, }, }, }, labels: map[string]string{ // We use two digit to denote major version and two digit for minor version. 
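// The Gt/Lt operators compare values as integers, so "0206" (206) is
// greater than the "0204" (204) required above and the term matches.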
"kernel-version": "0206", }, want: true, }, { name: "Pod with matchExpressions using NotIn operator that matches the existing node", pod: &v1.Pod{ Spec: v1.PodSpec{ Affinity: &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{ { Key: "mem-type", Operator: v1.NodeSelectorOpNotIn, Values: []string{"DDR", "DDR2"}, }, }, }, }, }, }, }, }, }, labels: map[string]string{ "mem-type": "DDR3", }, want: true, }, { name: "Pod with matchExpressions using Exists operator that matches the existing node", pod: &v1.Pod{ Spec: v1.PodSpec{ Affinity: &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{ { Key: "GPU", Operator: v1.NodeSelectorOpExists, }, }, }, }, }, }, }, }, }, labels: map[string]string{ "GPU": "NVIDIA-GRID-K1", }, want: true, }, { pod: &v1.Pod{ Spec: v1.PodSpec{ Affinity: &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{ { Key: "foo", Operator: v1.NodeSelectorOpIn, Values: []string{"value1", "value2"}, }, }, }, }, }, }, }, }, }, labels: map[string]string{ "foo": "bar", }, want: false, name: "Pod with affinity that don't match node's labels won't schedule onto the node", }, { pod: &v1.Pod{ Spec: v1.PodSpec{ Affinity: &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ NodeSelectorTerms: nil, }, }, }, }, }, labels: map[string]string{ "foo": "bar", }, want: false, name: "Pod with a nil []NodeSelectorTerm in affinity, can't match the node's labels and won't schedule onto the node", }, { pod: &v1.Pod{ Spec: v1.PodSpec{ Affinity: &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{}, }, }, }, }, }, labels: map[string]string{ "foo": "bar", }, want: false, name: "Pod with an empty []NodeSelectorTerm in affinity, can't match the node's labels and won't schedule onto the node", }, { name: "Pod with empty MatchExpressions is not a valid value will match no objects and won't schedule onto the node", pod: &v1.Pod{ Spec: v1.PodSpec{ Affinity: &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{}, }, }, }, }, }, }, }, labels: map[string]string{ "foo": "bar", }, want: false, }, { name: "Pod with no Affinity will schedule onto a node", pod: &v1.Pod{}, labels: map[string]string{ "foo": "bar", }, want: true, }, { name: "Pod with Affinity but nil NodeSelector will schedule onto a node", pod: &v1.Pod{ Spec: v1.PodSpec{ Affinity: &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: nil, }, }, }, }, labels: map[string]string{ "foo": "bar", }, want: true, }, { name: "Pod with multiple matchExpressions ANDed that matches the existing node", pod: &v1.Pod{ Spec: v1.PodSpec{ Affinity: &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{ { Key: "GPU", Operator: 
v1.NodeSelectorOpExists, }, { Key: "GPU", Operator: v1.NodeSelectorOpNotIn, Values: []string{"AMD", "INTER"}, }, }, }, }, }, }, }, }, }, labels: map[string]string{ "GPU": "NVIDIA-GRID-K1", }, want: true, }, { name: "Pod with multiple matchExpressions ANDed that doesn't match the existing node", pod: &v1.Pod{ Spec: v1.PodSpec{ Affinity: &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{ { Key: "GPU", Operator: v1.NodeSelectorOpExists, }, { Key: "GPU", Operator: v1.NodeSelectorOpIn, Values: []string{"AMD", "INTER"}, }, }, }, }, }, }, }, }, }, labels: map[string]string{ "GPU": "NVIDIA-GRID-K1", }, want: false, }, { name: "Pod with multiple NodeSelectorTerms ORed in affinity, matches the node's labels and will schedule onto the node", pod: &v1.Pod{ Spec: v1.PodSpec{ Affinity: &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{ { Key: "foo", Operator: v1.NodeSelectorOpIn, Values: []string{"bar", "value2"}, }, }, }, { MatchExpressions: []v1.NodeSelectorRequirement{ { Key: "diffkey", Operator: v1.NodeSelectorOpIn, Values: []string{"wrong", "value2"}, }, }, }, }, }, }, }, }, }, labels: map[string]string{ "foo": "bar", }, want: true, }, { name: "Pod with an Affinity and a PodSpec.NodeSelector(the old thing that we are deprecating) " + "both are satisfied, will schedule onto the node", pod: &v1.Pod{ Spec: v1.PodSpec{ NodeSelector: map[string]string{ "foo": "bar", }, Affinity: &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{ { Key: "foo", Operator: v1.NodeSelectorOpExists, }, }, }, }, }, }, }, }, }, labels: map[string]string{ "foo": "bar", }, want: true, }, { name: "Pod with an Affinity matches node's labels but the PodSpec.NodeSelector(the old thing that we are deprecating) " + "is not satisfied, won't schedule onto the node", pod: &v1.Pod{ Spec: v1.PodSpec{ NodeSelector: map[string]string{ "foo": "bar", }, Affinity: &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{ { Key: "foo", Operator: v1.NodeSelectorOpExists, }, }, }, }, }, }, }, }, }, labels: map[string]string{ "foo": "barrrrrr", }, want: false, }, { name: "Pod with an invalid value in Affinity term won't be scheduled onto the node", pod: &v1.Pod{ Spec: v1.PodSpec{ Affinity: &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{ { Key: "foo", Operator: v1.NodeSelectorOpNotIn, Values: []string{"invalid value: ___@#$%^"}, }, }, }, }, }, }, }, }, }, labels: map[string]string{ "foo": "bar", }, want: false, }, { name: "Pod with matchFields using In operator that matches the existing node", pod: &v1.Pod{ Spec: v1.PodSpec{ Affinity: &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchFields: []v1.NodeSelectorRequirement{ { Key: metav1.ObjectNameField, Operator: v1.NodeSelectorOpIn, Values: 
[]string{"node_1"}, }, }, }, }, }, }, }, }, }, nodeName: "node_1", want: true, }, { name: "Pod with matchFields using In operator that does not match the existing node", pod: &v1.Pod{ Spec: v1.PodSpec{ Affinity: &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchFields: []v1.NodeSelectorRequirement{ { Key: metav1.ObjectNameField, Operator: v1.NodeSelectorOpIn, Values: []string{"node_1"}, }, }, }, }, }, }, }, }, }, nodeName: "node_2", want: false, }, { name: "Pod with two terms: matchFields does not match, but matchExpressions matches", pod: &v1.Pod{ Spec: v1.PodSpec{ Affinity: &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchFields: []v1.NodeSelectorRequirement{ { Key: metav1.ObjectNameField, Operator: v1.NodeSelectorOpIn, Values: []string{"node_1"}, }, }, }, { MatchExpressions: []v1.NodeSelectorRequirement{ { Key: "foo", Operator: v1.NodeSelectorOpIn, Values: []string{"bar"}, }, }, }, }, }, }, }, }, }, nodeName: "node_2", labels: map[string]string{"foo": "bar"}, want: true, }, { name: "Pod with one term: matchFields does not match, but matchExpressions matches", pod: &v1.Pod{ Spec: v1.PodSpec{ Affinity: &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchFields: []v1.NodeSelectorRequirement{ { Key: metav1.ObjectNameField, Operator: v1.NodeSelectorOpIn, Values: []string{"node_1"}, }, }, MatchExpressions: []v1.NodeSelectorRequirement{ { Key: "foo", Operator: v1.NodeSelectorOpIn, Values: []string{"bar"}, }, }, }, }, }, }, }, }, }, nodeName: "node_2", labels: map[string]string{"foo": "bar"}, want: false, }, { name: "Pod with one term: both matchFields and matchExpressions match", pod: &v1.Pod{ Spec: v1.PodSpec{ Affinity: &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchFields: []v1.NodeSelectorRequirement{ { Key: metav1.ObjectNameField, Operator: v1.NodeSelectorOpIn, Values: []string{"node_1"}, }, }, MatchExpressions: []v1.NodeSelectorRequirement{ { Key: "foo", Operator: v1.NodeSelectorOpIn, Values: []string{"bar"}, }, }, }, }, }, }, }, }, }, nodeName: "node_1", labels: map[string]string{"foo": "bar"}, want: true, }, { name: "Pod with two terms: both matchFields and matchExpressions do not match", pod: &v1.Pod{ Spec: v1.PodSpec{ Affinity: &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchFields: []v1.NodeSelectorRequirement{ { Key: metav1.ObjectNameField, Operator: v1.NodeSelectorOpIn, Values: []string{"node_1"}, }, }, }, { MatchExpressions: []v1.NodeSelectorRequirement{ { Key: "foo", Operator: v1.NodeSelectorOpIn, Values: []string{"not-match-to-bar"}, }, }, }, }, }, }, }, }, }, nodeName: "node_2", labels: map[string]string{"foo": "bar"}, want: false, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { node := v1.Node{ObjectMeta: metav1.ObjectMeta{ Name: test.nodeName, Labels: test.labels, }} got, _ := GetRequiredNodeAffinity(test.pod).Match(&node) if test.want != got { t.Errorf("expected: %v got %v", test.want, got) } }) } } 
kubernetes-component-helpers-b5afa51/storage/000077500000000000000000000000001476422250100214565ustar00rootroot00000000000000kubernetes-component-helpers-b5afa51/storage/OWNERS000066400000000000000000000002251476422250100224150ustar00rootroot00000000000000# See the OWNERS docs at https://go.k8s.io/owners approvers: - sig-storage-approvers reviewers: - sig-storage-reviewers labels: - sig/storage kubernetes-component-helpers-b5afa51/storage/ephemeral/000077500000000000000000000000001476422250100234205ustar00rootroot00000000000000kubernetes-component-helpers-b5afa51/storage/ephemeral/ephemeral.go000066400000000000000000000042201476422250100257070ustar00rootroot00000000000000/* Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package ephemeral provides code that supports the usual pattern // for accessing the PVC that provides a generic ephemeral inline volume: // // - determine the PVC name that corresponds to the inline volume source // - retrieve the PVC // - verify that the PVC is owned by the pod // - use the PVC package ephemeral import ( "fmt" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // VolumeClaimName returns the name of the PersistentVolumeClaim // object that gets created for the generic ephemeral inline volume. The // name is deterministic and therefore this function does not need any // additional information besides the Pod name and volume name and it // will never fail. // // Before using the PVC for the Pod, the caller must check that it is // indeed the PVC that was created for the Pod by calling IsUsable. func VolumeClaimName(pod *v1.Pod, volume *v1.Volume) string { return pod.Name + "-" + volume.Name } // VolumeIsForPod checks that the PVC is the ephemeral volume that // was created for the Pod. It returns an error that is informative // enough to be returned by the caller without adding further details // about the Pod or PVC. func VolumeIsForPod(pod *v1.Pod, pvc *v1.PersistentVolumeClaim) error { // Checking the namespaces is just a precaution. The caller should // never pass in a PVC that isn't from the same namespace as the // Pod. if pvc.Namespace != pod.Namespace || !metav1.IsControlledBy(pvc, pod) { return fmt.Errorf("PVC %s/%s was not created for pod %s/%s (pod is not owner)", pvc.Namespace, pvc.Name, pod.Namespace, pod.Name) } return nil } kubernetes-component-helpers-b5afa51/storage/ephemeral/ephemeral_test.go000066400000000000000000000061531476422250100267550ustar00rootroot00000000000000/* Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ package ephemeral import ( "fmt" "testing" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ) func TestVolumeIsForPod(t *testing.T) { uid := 0 newUID := func() types.UID { uid++ return types.UID(fmt.Sprintf("%d", uid)) } isController := true podNotOwner := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Namespace: "kube-system", Name: "podNotOwner", UID: newUID(), }, } podOwner := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Namespace: "kube-system", Name: "podOwner", UID: newUID(), }, } pvcNoOwner := &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Namespace: "kube-system", Name: "pvcNoOwner", UID: newUID(), }, } pvcWithOwner := &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Namespace: "kube-system", Name: "pvcNoOwner", UID: newUID(), OwnerReferences: []metav1.OwnerReference{ { UID: podOwner.UID, Controller: &isController, }, }, }, } userPVCWithOwner := &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Namespace: "user-namespace", Name: "userPVCWithOwner", UID: newUID(), OwnerReferences: []metav1.OwnerReference{ { UID: podOwner.UID, Controller: &isController, }, }, }, } testcases := map[string]struct { pod *v1.Pod pvc *v1.PersistentVolumeClaim expectedError string }{ "owned": { pod: podOwner, pvc: pvcWithOwner, }, "other-pod": { pod: podNotOwner, pvc: pvcWithOwner, expectedError: `PVC kube-system/pvcNoOwner was not created for pod kube-system/podNotOwner (pod is not owner)`, }, "no-owner": { pod: podOwner, pvc: pvcNoOwner, expectedError: `PVC kube-system/pvcNoOwner was not created for pod kube-system/podOwner (pod is not owner)`, }, "different-namespace": { pod: podOwner, pvc: userPVCWithOwner, expectedError: `PVC user-namespace/userPVCWithOwner was not created for pod kube-system/podOwner (pod is not owner)`, }, } for name, tc := range testcases { t.Run(name, func(t *testing.T) { err := VolumeIsForPod(tc.pod, tc.pvc) if tc.expectedError == "" { if err != nil { t.Errorf("expected no error, got %v", err) } } else { if err == nil { t.Errorf("expected error %q, got nil", tc.expectedError) } else if tc.expectedError != err.Error() { t.Errorf("expected error %q, got %v", tc.expectedError, err) } } }) } } kubernetes-component-helpers-b5afa51/storage/volume/000077500000000000000000000000001476422250100227655ustar00rootroot00000000000000kubernetes-component-helpers-b5afa51/storage/volume/helpers.go000066400000000000000000000046371476422250100247700ustar00rootroot00000000000000/* Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package volume import ( "fmt" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/component-helpers/scheduling/corev1" ) // PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field. 
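// The beta annotation (v1.BetaStorageClassAnnotation) counts as setting the
// class and takes precedence over spec.storageClassName, mirroring the
// getters below.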
func PersistentVolumeClaimHasClass(claim *v1.PersistentVolumeClaim) bool { // Use beta annotation first if _, found := claim.Annotations[v1.BetaStorageClassAnnotation]; found { return true } if claim.Spec.StorageClassName != nil { return true } return false } // GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was // requested, it returns "". func GetPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string { // Use beta annotation first if class, found := claim.Annotations[v1.BetaStorageClassAnnotation]; found { return class } if claim.Spec.StorageClassName != nil { return *claim.Spec.StorageClassName } return "" } // GetPersistentVolumeClass returns StorageClassName. func GetPersistentVolumeClass(volume *v1.PersistentVolume) string { // Use beta annotation first if class, found := volume.Annotations[v1.BetaStorageClassAnnotation]; found { return class } return volume.Spec.StorageClassName } // CheckNodeAffinity looks at the PV node affinity, and checks if the node has the same corresponding labels // This ensures that we don't mount a volume that doesn't belong to this node func CheckNodeAffinity(pv *v1.PersistentVolume, nodeLabels map[string]string) error { if pv.Spec.NodeAffinity == nil { return nil } if pv.Spec.NodeAffinity.Required != nil { node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: nodeLabels}} terms := pv.Spec.NodeAffinity.Required if matches, err := corev1.MatchNodeSelectorTerms(node, terms); err != nil { return err } else if !matches { return fmt.Errorf("no matching NodeSelectorTerms") } } return nil } kubernetes-component-helpers-b5afa51/storage/volume/helpers_test.go000066400000000000000000000146271476422250100260270ustar00rootroot00000000000000/* Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package volume import ( "testing" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" ) var nodeLabels = map[string]string{ "test-key1": "test-value1", "test-key2": "test-value2", } func TestCheckVolumeNodeAffinity(t *testing.T) { type affinityTest struct { name string expectSuccess bool pv *v1.PersistentVolume } cases := []affinityTest{ { name: "valid-nil", expectSuccess: true, pv: testVolumeWithNodeAffinity(t, nil), }, { name: "valid-no-constraints", expectSuccess: true, pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{}), }, { name: "select-nothing", expectSuccess: false, pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{Required: &v1.NodeSelector{}}), }, { name: "select-nothing-empty-terms", expectSuccess: false, pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{ Required: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{}, }, }, }, }), }, { name: "valid-multiple-terms", expectSuccess: true, pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{ Required: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{ { Key: "test-key3", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value1", "test-value3"}, }, }, }, { MatchExpressions: []v1.NodeSelectorRequirement{ { Key: "test-key2", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value0", "test-value2"}, }, }, }, }, }, }), }, { name: "valid-multiple-match-expressions", expectSuccess: true, pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{ Required: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{ { Key: "test-key1", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value1", "test-value3"}, }, { Key: "test-key2", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value0", "test-value2"}, }, }, }, }, }, }), }, { name: "invalid-multiple-match-expressions-key", expectSuccess: false, pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{ Required: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{ { Key: "test-key1", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value1", "test-value3"}, }, { Key: "test-key3", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value0", "test-value2"}, }, }, }, }, }, }), }, { name: "invalid-multiple-match-expressions-values", expectSuccess: false, pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{ Required: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{ { Key: "test-key1", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value3", "test-value4"}, }, { Key: "test-key2", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value0", "test-value2"}, }, }, }, }, }, }), }, { name: "invalid-multiple-terms", expectSuccess: false, pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{ Required: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{ { Key: "test-key3", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value1", "test-value3"}, }, }, }, { MatchExpressions: []v1.NodeSelectorRequirement{ { Key: "test-key2", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value0", "test-value1"}, }, }, }, }, }, }), }, } for _, c := range cases { err := CheckNodeAffinity(c.pv, nodeLabels) if err != nil && c.expectSuccess { t.Errorf("CheckNodeAffinity %v
returned error: %v", c.name, err) } if err == nil && !c.expectSuccess { t.Errorf("CheckNodeAffinity %v returned success, expected error", c.name) } } } func testVolumeWithNodeAffinity(t *testing.T, affinity *v1.VolumeNodeAffinity) *v1.PersistentVolume { return &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{Name: "test-constraints"}, Spec: v1.PersistentVolumeSpec{ NodeAffinity: affinity, }, } } func TestPersistentVolumeClaimHasClass(t *testing.T) { testCases := []struct { name string pvc *v1.PersistentVolumeClaim want bool }{ { name: "no storage class", pvc: &v1.PersistentVolumeClaim{}, want: false, }, { name: "storage class set on annotation", pvc: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ v1.BetaStorageClassAnnotation: "", }, }, }, want: true, }, { name: "storage class set on spec", pvc: &v1.PersistentVolumeClaim{ Spec: v1.PersistentVolumeClaimSpec{ StorageClassName: ptr.To(""), }, }, want: true, }, } for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { got := PersistentVolumeClaimHasClass(tc.pvc) if got != tc.want { t.Errorf("PersistentVolumeClaimHasClass() = %v, want %v", got, tc.want) } }) } } kubernetes-component-helpers-b5afa51/storage/volume/pv_helpers.go000066400000000000000000000314261476422250100254710ustar00rootroot00000000000000/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package volume import ( "fmt" v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/kubernetes/scheme" storagelisters "k8s.io/client-go/listers/storage/v1" "k8s.io/client-go/tools/reference" "k8s.io/utils/ptr" ) const ( // AnnBindCompleted annotation applies to PVCs. It indicates that the lifecycle // of the PVC has passed through the initial setup. This information changes how // we interpret some observations of the state of the objects. Value of this // annotation does not matter. AnnBindCompleted = "pv.kubernetes.io/bind-completed" // AnnBoundByController annotation applies to PVs and PVCs. It indicates that // the binding (PV->PVC or PVC->PV) was installed by the controller. The // absence of this annotation means the binding was done by the user (i.e. // pre-bound). Value of this annotation does not matter. // External PV binders must bind PV the same way as PV controller, otherwise PV // controller may not handle it correctly. AnnBoundByController = "pv.kubernetes.io/bound-by-controller" // AnnSelectedNode annotation is added to a PVC that has been triggered by scheduler to // be dynamically provisioned. Its value is the name of the selected node. AnnSelectedNode = "volume.kubernetes.io/selected-node" // NotSupportedProvisioner is a special provisioner name which can be set // in storage class to indicate dynamic provisioning is not supported by // the storage.
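// (A hedged illustration, not a fixture from this repository: StorageClasses
// for statically pre-created local PVs are conventionally declared with
// provisioner: kubernetes.io/no-provisioner, i.e. this constant.)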
NotSupportedProvisioner = "kubernetes.io/no-provisioner" // AnnDynamicallyProvisioned annotation is added to a PV that has been dynamically provisioned by // Kubernetes. Its value is the name of the volume plugin that created the volume. // It serves both the user (to show where a PV comes from) and Kubernetes (to // recognize dynamically provisioned PVs in its decisions). AnnDynamicallyProvisioned = "pv.kubernetes.io/provisioned-by" // AnnMigratedTo annotation is added to a PVC and PV that is supposed to be // dynamically provisioned/deleted by its corresponding CSI driver // through the CSIMigration feature flags. When this annotation is set, the // Kubernetes components will "stand-down" and the external-provisioner will // act on the objects. AnnMigratedTo = "pv.kubernetes.io/migrated-to" // AnnStorageProvisioner annotation is added to a PVC that is supposed to be dynamically // provisioned. Its value is the name of the volume plugin that is supposed to provision // a volume for this PVC. // TODO: remove the beta annotation once the deprecation period ends AnnStorageProvisioner = "volume.kubernetes.io/storage-provisioner" AnnBetaStorageProvisioner = "volume.beta.kubernetes.io/storage-provisioner" // PVDeletionProtectionFinalizer is the finalizer added by the external-provisioner on the PV PVDeletionProtectionFinalizer = "external-provisioner.volume.kubernetes.io/finalizer" // PVDeletionInTreeProtectionFinalizer is the finalizer added to protect PV deletion for in-tree volumes. PVDeletionInTreeProtectionFinalizer = "kubernetes.io/pv-controller" ) // IsDelayBindingProvisioning returns true if the claim carries the selected-node annotation, i.e. dynamic provisioning for it has been triggered by the scheduler. func IsDelayBindingProvisioning(claim *v1.PersistentVolumeClaim) bool { // When the VolumeScheduling feature is enabled, the scheduler signals the // PV controller to start dynamic provisioning by setting the // "AnnSelectedNode" annotation in the PVC _, ok := claim.Annotations[AnnSelectedNode] return ok } // IsDelayBindingMode checks if the claim is in delay binding mode. func IsDelayBindingMode(claim *v1.PersistentVolumeClaim, classLister storagelisters.StorageClassLister) (bool, error) { className := GetPersistentVolumeClaimClass(claim) if className == "" { return false, nil } class, err := classLister.Get(className) if err != nil { if apierrors.IsNotFound(err) { return false, nil } return false, err } if class.VolumeBindingMode == nil { return false, fmt.Errorf("VolumeBindingMode not set for StorageClass %q", className) } return *class.VolumeBindingMode == storage.VolumeBindingWaitForFirstConsumer, nil } // GetBindVolumeToClaim returns a new volume which is bound to the given claim. In // addition, it returns a bool which indicates whether any modification was made to // the original volume. func GetBindVolumeToClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) (*v1.PersistentVolume, bool, error) { dirty := false // Check if the volume was already bound (either by user or by controller) shouldSetBoundByController := false if !IsVolumeBoundToClaim(volume, claim) { shouldSetBoundByController = true } // The volume from the method args may point to the watcher cache. We must not // modify it, therefore we create a copy.
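// (Objects served from an informer/watch cache are shared with every other
// consumer of that cache; mutating them in place would corrupt the cache,
// which is why DeepCopy is used to obtain a private, mutable clone.)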
volumeClone := volume.DeepCopy() // Bind the volume to the claim if it is not bound yet if volume.Spec.ClaimRef == nil || volume.Spec.ClaimRef.Name != claim.Name || volume.Spec.ClaimRef.Namespace != claim.Namespace || volume.Spec.ClaimRef.UID != claim.UID { claimRef, err := reference.GetReference(scheme.Scheme, claim) if err != nil { return nil, false, fmt.Errorf("unexpected error getting claim reference: %w", err) } volumeClone.Spec.ClaimRef = claimRef dirty = true } // Set AnnBoundByController if it is not set yet if shouldSetBoundByController && !metav1.HasAnnotation(volumeClone.ObjectMeta, AnnBoundByController) { metav1.SetMetaDataAnnotation(&volumeClone.ObjectMeta, AnnBoundByController, "yes") dirty = true } return volumeClone, dirty, nil } // IsVolumeBoundToClaim returns true if the given volume is pre-bound or bound // to the specific claim. Both claim.Name and claim.Namespace must be equal. // If claim.UID is present in volume.Spec.ClaimRef, it must be equal too. func IsVolumeBoundToClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) bool { if volume.Spec.ClaimRef == nil { return false } if claim.Name != volume.Spec.ClaimRef.Name || claim.Namespace != volume.Spec.ClaimRef.Namespace { return false } if volume.Spec.ClaimRef.UID != "" && claim.UID != volume.Spec.ClaimRef.UID { return false } return true } // FindMatchingVolume goes through the list of volumes to find the best matching volume // for the claim. // // This function is used by both the PV controller and scheduler. // // delayBinding is true only in the PV controller path. When set, prebound PVs are still returned // as a match for the claim, but unbound PVs are skipped. // // node is set only in the scheduler path. When set, the PV node affinity is checked against // the node's labels. // // excludedVolumes is only used in the scheduler path, and is needed for evaluating multiple // unbound PVCs for a single Pod at one time. As each PVC finds a matching PV, the chosen // PV needs to be excluded from future matching. func FindMatchingVolume( claim *v1.PersistentVolumeClaim, volumes []*v1.PersistentVolume, node *v1.Node, excludedVolumes map[string]*v1.PersistentVolume, delayBinding bool, vacEnabled bool) (*v1.PersistentVolume, error) { if !vacEnabled { claimVAC := ptr.Deref(claim.Spec.VolumeAttributesClassName, "") if claimVAC != "" { return nil, fmt.Errorf("unsupported volumeAttributesClassName is set on claim %s when the feature-gate VolumeAttributesClass is disabled", claimToClaimKey(claim)) } } var smallestVolume *v1.PersistentVolume var smallestVolumeQty resource.Quantity requestedQty := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] requestedClass := GetPersistentVolumeClaimClass(claim) var selector labels.Selector if claim.Spec.Selector != nil { internalSelector, err := metav1.LabelSelectorAsSelector(claim.Spec.Selector) if err != nil { return nil, fmt.Errorf("error creating internal label selector for claim: %v: %v", claimToClaimKey(claim), err) } selector = internalSelector } // Go through all available volumes with two goals: // - find a volume that is either pre-bound by the user or dynamically // provisioned for this claim. Because of this we need to loop through // all volumes. // - find the smallest matching one if there is no volume pre-bound to // the claim.
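// (The "smallest matching" rule is a best-fit heuristic: taking the smallest
// volume that still satisfies the request keeps larger volumes available for
// claims that actually need them.)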
for _, volume := range volumes { if _, ok := excludedVolumes[volume.Name]; ok { // Skip volumes in the excluded list continue } if volume.Spec.ClaimRef != nil && !IsVolumeBoundToClaim(volume, claim) { continue } volumeQty := volume.Spec.Capacity[v1.ResourceStorage] if volumeQty.Cmp(requestedQty) < 0 { continue } // filter out mismatching volumeModes if CheckVolumeModeMismatches(&claim.Spec, &volume.Spec) { continue } claimVAC := ptr.Deref(claim.Spec.VolumeAttributesClassName, "") volumeVAC := ptr.Deref(volume.Spec.VolumeAttributesClassName, "") // filter out mismatching volumeAttributesClassName if vacEnabled && claimVAC != volumeVAC { continue } if !vacEnabled && volumeVAC != "" { // when the feature gate is disabled and the PV object has a VAC set, we should not bind at all. continue } // check if the PV's DeletionTimestamp is set; if so, skip this volume. if volume.ObjectMeta.DeletionTimestamp != nil { continue } nodeAffinityValid := true if node != nil { // Scheduler path, check that the PV NodeAffinity // is satisfied by the node. // CheckNodeAffinity is the most expensive call in this loop. // We should check cheaper conditions first or consider optimizing this function. err := CheckNodeAffinity(volume, node.Labels) if err != nil { nodeAffinityValid = false } } if IsVolumeBoundToClaim(volume, claim) { // If PV node affinity is invalid, return no match. // This means the prebound PV (and therefore PVC) // is not suitable for this node. if !nodeAffinityValid { return nil, nil } return volume, nil } if node == nil && delayBinding { // The PV controller does not bind this claim. // The scheduler will handle binding unbound volumes; // the scheduler path will have node != nil. continue } // filter out: // - volumes in non-available phase // - volumes whose labels don't match the claim's selector, if specified // - volumes in a Class that is not requested // - volumes whose NodeAffinity does not match the node if volume.Status.Phase != v1.VolumeAvailable { // We ignore volumes in non-available phase, because volumes that // satisfy the matching criteria will be updated to available; binding // them now has a high chance of encountering unnecessary failures // due to API conflicts. continue } else if selector != nil && !selector.Matches(labels.Set(volume.Labels)) { continue } if GetPersistentVolumeClass(volume) != requestedClass { continue } if !nodeAffinityValid { continue } if node != nil { // Scheduler path // Check that the access modes match if !CheckAccessModes(claim, volume) { continue } } if smallestVolume == nil || smallestVolumeQty.Cmp(volumeQty) > 0 { smallestVolume = volume smallestVolumeQty = volumeQty } } if smallestVolume != nil { // Found a matching volume return smallestVolume, nil } return nil, nil } // CheckVolumeModeMismatches is a convenience method that checks whether the volumeMode of the // PersistentVolume and the PersistentVolumeClaim mismatch. func CheckVolumeModeMismatches(pvcSpec *v1.PersistentVolumeClaimSpec, pvSpec *v1.PersistentVolumeSpec) bool { // In HA upgrades, we cannot guarantee that the apiserver is on a version >= controller-manager.
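// (An older apiserver may therefore hand back objects that were never
// defaulted and carry no volumeMode at all.)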
// So we default a nil volumeMode to filesystem requestedVolumeMode := v1.PersistentVolumeFilesystem if pvcSpec.VolumeMode != nil { requestedVolumeMode = *pvcSpec.VolumeMode } pvVolumeMode := v1.PersistentVolumeFilesystem if pvSpec.VolumeMode != nil { pvVolumeMode = *pvSpec.VolumeMode } return requestedVolumeMode != pvVolumeMode } // CheckAccessModes returns true if PV satisfies all the PVC's requested AccessModes func CheckAccessModes(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) bool { pvModesMap := map[v1.PersistentVolumeAccessMode]bool{} for _, mode := range volume.Spec.AccessModes { pvModesMap[mode] = true } for _, mode := range claim.Spec.AccessModes { _, ok := pvModesMap[mode] if !ok { return false } } return true } func claimToClaimKey(claim *v1.PersistentVolumeClaim) string { return fmt.Sprintf("%s/%s", claim.Namespace, claim.Name) } kubernetes-component-helpers-b5afa51/storage/volume/pv_helpers_test.go000066400000000000000000000320021476422250100265170ustar00rootroot00000000000000/* Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package volume import ( "fmt" "testing" v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" ) var ( classNotHere = "not-here" classNoMode = "no-mode" classImmediateMode = "immediate-mode" classWaitMode = "wait-mode" classGold = "gold" classSilver = "silver" modeImmediate = storagev1.VolumeBindingImmediate modeWait = storagev1.VolumeBindingWaitForFirstConsumer ) func makePVCClass(scName *string) *v1.PersistentVolumeClaim { claim := &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{}, }, Spec: v1.PersistentVolumeClaimSpec{ StorageClassName: scName, }, } return claim } func makeStorageClass(scName string, mode *storagev1.VolumeBindingMode) *storagev1.StorageClass { return &storagev1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ Name: scName, }, VolumeBindingMode: mode, } } func TestDelayBindingMode(t *testing.T) { tests := map[string]struct { pvc *v1.PersistentVolumeClaim shouldDelay bool shouldFail bool }{ "nil-class": { pvc: makePVCClass(nil), shouldDelay: false, }, "class-not-found": { pvc: makePVCClass(&classNotHere), shouldDelay: false, }, "no-mode-class": { pvc: makePVCClass(&classNoMode), shouldDelay: false, shouldFail: true, }, "immediate-mode-class": { pvc: makePVCClass(&classImmediateMode), shouldDelay: false, }, "wait-mode-class": { pvc: makePVCClass(&classWaitMode), shouldDelay: true, }, } classes := []*storagev1.StorageClass{ makeStorageClass(classNoMode, nil), makeStorageClass(classImmediateMode, &modeImmediate), makeStorageClass(classWaitMode, &modeWait), } client := &fake.Clientset{} informerFactory := informers.NewSharedInformerFactory(client, 0) classInformer := informerFactory.Storage().V1().StorageClasses() for _, class := range classes { if err := classInformer.Informer().GetIndexer().Add(class); 
err != nil { t.Fatalf("Failed to add storage class %q: %v", class.Name, err) } } for name, test := range tests { shouldDelay, err := IsDelayBindingMode(test.pvc, classInformer.Lister()) if err != nil && !test.shouldFail { t.Errorf("Test %q returned error: %v", name, err) } if err == nil && test.shouldFail { t.Errorf("Test %q returned success, expected error", name) } if shouldDelay != test.shouldDelay { t.Errorf("Test %q returned %v, expected %v", name, shouldDelay, test.shouldDelay) } } } // makeNodeAffinity returns a VolumeNodeAffinity for the given key and value. func makeNodeAffinity(key string, value string) *v1.VolumeNodeAffinity { return &v1.VolumeNodeAffinity{ Required: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{ { Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}, }, }, }, }, }, } } func TestFindMatchVolumeWithNode(t *testing.T) { volumes := []*v1.PersistentVolume{ makeTestVolume("local-small", "local001", "5G", true, nil), makeTestVolume("local-pd-very-large", "local002", "200E", true, func(pv *v1.PersistentVolume) { pv.Spec.StorageClassName = "large" }), makeTestVolume("affinity-pv", "affinity001", "100G", true, func(pv *v1.PersistentVolume) { pv.Spec.StorageClassName = "wait" pv.Spec.NodeAffinity = makeNodeAffinity("key1", "value1") }), makeTestVolume("affinity-pv2", "affinity002", "150G", true, func(pv *v1.PersistentVolume) { pv.Spec.StorageClassName = "wait" pv.Spec.NodeAffinity = makeNodeAffinity("key1", "value1") }), makeTestVolume("affinity-prebound", "affinity003", "100G", true, func(pv *v1.PersistentVolume) { pv.Spec.StorageClassName = "wait" pv.Spec.ClaimRef = &v1.ObjectReference{Name: "claim02", Namespace: "myns"} pv.Spec.NodeAffinity = makeNodeAffinity("key1", "value1") }), makeTestVolume("affinity-pv3", "affinity003", "200G", true, func(pv *v1.PersistentVolume) { pv.Spec.StorageClassName = "wait" pv.Spec.NodeAffinity = makeNodeAffinity("key1", "value3") }), makeTestVolume("affinity-pv4", "affinity004", "200G", false, func(pv *v1.PersistentVolume) { pv.Spec.StorageClassName = "wait" pv.Spec.NodeAffinity = makeNodeAffinity("key1", "value4") }), } var volumesWithVAC = func(name string, input []*v1.PersistentVolume) []*v1.PersistentVolume { output := make([]*v1.PersistentVolume, len(input)) for i, volume := range input { output[i] = volume.DeepCopy() output[i].Spec.VolumeAttributesClassName = &name } return output } node1 := &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"key1": "value1"}, }, } node2 := &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"key1": "value2"}, }, } node3 := &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"key1": "value3"}, }, } node4 := &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"key1": "value4"}, }, } scenarios := map[string]struct { expectedMatch string expectErr bool claim *v1.PersistentVolumeClaim node *v1.Node volumes []*v1.PersistentVolume excludedVolumes map[string]*v1.PersistentVolume vacEnabled []bool }{ "success-match": { expectedMatch: "affinity-pv", volumes: volumes, claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, nil), node: node1, vacEnabled: []bool{true, false}, }, "success-prebound": { expectedMatch: "affinity-prebound", volumes: volumes, claim: makeTestPersistentVolumeClaim("claim02", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, nil), node: node1, vacEnabled: []bool{true, false}, }, "success-exclusion": {
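// Excluding affinity001 below should force the matcher past the first
// affinity PV and onto affinity-pv2.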
expectedMatch: "affinity-pv2", volumes: volumes, claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, nil), node: node1, excludedVolumes: map[string]*v1.PersistentVolume{"affinity001": nil}, vacEnabled: []bool{true, false}, }, "fail-exclusion": { expectedMatch: "", volumes: volumes, claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, nil), node: node1, excludedVolumes: map[string]*v1.PersistentVolume{"affinity001": nil, "affinity002": nil, "affinity002-vac": nil}, vacEnabled: []bool{true, false}, }, "fail-accessmode": { expectedMatch: "", volumes: volumes, claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, nil), node: node1, vacEnabled: []bool{true, false}, }, "fail-nodeaffinity": { expectedMatch: "", volumes: volumes, claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, nil), node: node2, vacEnabled: []bool{true, false}, }, "fail-prebound-node-affinity": { expectedMatch: "", volumes: volumes, claim: makeTestPersistentVolumeClaim("claim02", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, nil), node: node3, vacEnabled: []bool{true, false}, }, "fail-nonavaliable": { expectedMatch: "", volumes: volumes, claim: makeTestPersistentVolumeClaim("claim04", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, nil), node: node4, vacEnabled: []bool{true, false}, }, "success-bad-and-good-node-affinity": { expectedMatch: "affinity-pv3", volumes: volumes, claim: makeTestPersistentVolumeClaim("claim03", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, nil), node: node3, vacEnabled: []bool{true, false}, }, "success-match-with-vac": { expectedMatch: "affinity-pv", volumes: volumesWithVAC(classGold, volumes), claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, func(pvc *v1.PersistentVolumeClaim) { pvc.Spec.VolumeAttributesClassName = &classGold }), node: node1, vacEnabled: []bool{true}, }, "fail-vac": { // claim has a given vac and volumes don't have the same vac. expectedMatch: "", volumes: volumes, claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, func(pvc *v1.PersistentVolumeClaim) { pvc.Spec.VolumeAttributesClassName = &classSilver }), node: node1, vacEnabled: []bool{true}, }, "fail-prebound-vac": { // claim has a given vac and volume name but the given volume has a different vac. expectedMatch: "", volumes: volumesWithVAC(classGold, volumes), claim: makeTestPersistentVolumeClaim("claim02", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, func(pvc *v1.PersistentVolumeClaim) { pvc.Spec.VolumeAttributesClassName = &classSilver }), node: node1, vacEnabled: []bool{true}, }, "fail-on-error": { // claim has a given vac when feature-gate is disabled. expectedMatch: "", expectErr: true, volumes: volumes, claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, func(pvc *v1.PersistentVolumeClaim) { pvc.Spec.VolumeAttributesClassName = &classGold }), node: node1, vacEnabled: []bool{false}, }, "fail-volumes-vac": { // claim has no vac and all volumes have vac when feature-gate is disabled. 
expectedMatch: "", volumes: volumesWithVAC(classGold, volumes), claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, nil), node: node1, vacEnabled: []bool{false}, }, } for name, scenario := range scenarios { for _, enabled := range scenario.vacEnabled { name := fmt.Sprintf("[VolumeAttributiesClass: %v] %s", enabled, name) volume, err := FindMatchingVolume(scenario.claim, scenario.volumes, scenario.node, scenario.excludedVolumes, true, enabled) if scenario.expectErr && err == nil { t.Errorf("Expected error for scenario: %s", name) } if !scenario.expectErr && err != nil { t.Errorf("Unexpected error matching volume by claim: %v", err) } if len(scenario.expectedMatch) != 0 && volume == nil { t.Errorf("Expected match but received nil volume for scenario: %s", name) } if len(scenario.expectedMatch) != 0 && volume != nil && string(volume.UID) != scenario.expectedMatch { t.Errorf("Expected %s but got volume %s in scenario %s", scenario.expectedMatch, volume.UID, name) } if len(scenario.expectedMatch) == 0 && volume != nil { t.Errorf("Unexpected match for scenario: %s, matched with %s instead", name, volume.UID) } } } } func makeTestPersistentVolumeClaim(name string, size string, accessMode []v1.PersistentVolumeAccessMode, modfn func(*v1.PersistentVolumeClaim)) *v1.PersistentVolumeClaim { fs := v1.PersistentVolumeFilesystem sc := "wait" pvc := &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: "myns", }, Spec: v1.PersistentVolumeClaimSpec{ AccessModes: accessMode, Resources: v1.VolumeResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceName(v1.ResourceStorage): resource.MustParse(size), }, }, StorageClassName: &sc, VolumeMode: &fs, }, } if modfn != nil { modfn(pvc) } return pvc } func makeTestVolume(uid types.UID, name string, capacity string, available bool, modfn func(*v1.PersistentVolume)) *v1.PersistentVolume { var status v1.PersistentVolumeStatus if available { status = v1.PersistentVolumeStatus{ Phase: v1.VolumeAvailable, } } fs := v1.PersistentVolumeFilesystem pv := v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ UID: uid, Name: name, }, Spec: v1.PersistentVolumeSpec{ Capacity: v1.ResourceList{ v1.ResourceName(v1.ResourceStorage): resource.MustParse(capacity), }, PersistentVolumeSource: v1.PersistentVolumeSource{ Local: &v1.LocalVolumeSource{}, }, AccessModes: []v1.PersistentVolumeAccessMode{ v1.ReadWriteOnce, v1.ReadOnlyMany, }, VolumeMode: &fs, }, Status: status, } if modfn != nil { modfn(&pv) } return &pv }