diff --git a/Makefile b/Makefile index 45bfe80c8..4b7b159de 100644 --- a/Makefile +++ b/Makefile @@ -173,7 +173,7 @@ manifests: controller-gen ${KUSTOMIZE} edit add resource ../config/crd ${KUSTOMIZE} build manifests/ -o manifests/crds.yaml @cd manifests && \ - for variant in default-cc default-cm nowebhooks-cc ha-webhooks-cc ; do \ + for variant in default-cc default-cm nowebhooks-cc ha-webhooks-cc hrq ; do \ echo "Building $${variant} manifest"; \ rm kustomization.yaml; \ touch kustomization.yaml && \ @@ -242,9 +242,13 @@ deploy-ha: docker-push kubectl manifests -kubectl -n hnc-system delete deployment --all kubectl apply -f manifests/ha.yaml -ha-deploy-watch-ha: +deploy-watch-ha: kubectl logs -n hnc-system --follow deployment/hnc-controller-manager-ha manager +deploy-hrq: docker-push kubectl manifests + -kubectl -n hnc-system delete deployment --all + kubectl apply -f manifests/hrq.yaml + # No need to delete the HA configuration here - everything "extra" that it # installs is in hnc-system, which gets deleted by the default manifest. undeploy: manifests diff --git a/api/v1alpha2/hierarchicalresourcequota_types.go b/api/v1alpha2/hierarchicalresourcequota_types.go new file mode 100644 index 000000000..a2d0dea3c --- /dev/null +++ b/api/v1alpha2/hierarchicalresourcequota_types.go @@ -0,0 +1,69 @@ +package v1alpha2 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // HRQLabelCleanup is added to resources created by HRQ (specifically the RQ + // singletons) for easier cleanup later by a selector. + HRQLabelCleanup = MetaGroup + "/hrq" + + // NonPropagateAnnotation is added to RQ singletons so that they are not + // overwritten by ancestors. + NonPropagateAnnotation = AnnotationPropagatePrefix + "/none" + + // EventCannotWriteResourceQuota is for events when the reconcilers cannot + // write ResourceQuota from an HRQ. Usually it means the HRQ has invalid + // resource quota types. The error message will point to the HRQ object. + EventCannotWriteResourceQuota string = "CannotWriteResourceQuota" +) + +// HierarchicalResourceQuotaSpec defines the desired hard limits to enforce for +// a namespace and descendant namespaces +type HierarchicalResourceQuotaSpec struct { + // Hard is the set of desired hard limits for each named resource + // +optional + Hard corev1.ResourceList `json:"hard,omitempty"` +} + +// HierarchicalResourceQuotaStatus defines the enforced hard limits and observed +// use for a namespace and descendant namespaces +type HierarchicalResourceQuotaStatus struct { + // Hard is the set of enforced hard limits for each named resource + // +optional + Hard corev1.ResourceList `json:"hard,omitempty"` + // Used is the current observed total usage of the resource in the namespace + // and its descendant namespaces. 
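+	// For example (illustrative only): if secrets are limited by this HRQ and the +	// namespace and its two descendants use 2, 1 and 1 secrets respectively, Used +	// would report secrets: 4.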
+ // +optional + Used corev1.ResourceList `json:"used,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=hierarchicalresourcequotas,shortName=hrq,scope=Namespaced + +// HierarchicalResourceQuota sets aggregate quota restrictions enforced for a +// namespace and descendant namespaces +type HierarchicalResourceQuota struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the desired quota + Spec HierarchicalResourceQuotaSpec `json:"spec,omitempty"` + // Status defines the actual enforced quota and its current usage + Status HierarchicalResourceQuotaStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// HierarchicalResourceQuotaList contains a list of HierarchicalResourceQuota +type HierarchicalResourceQuotaList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HierarchicalResourceQuota `json:"items"` +} + +func init() { + SchemeBuilder.Register(&HierarchicalResourceQuota{}, &HierarchicalResourceQuotaList{}) +} diff --git a/api/v1alpha2/zz_generated.deepcopy.go b/api/v1alpha2/zz_generated.deepcopy.go index c9df1c31d..7fa5c6e46 100644 --- a/api/v1alpha2/zz_generated.deepcopy.go +++ b/api/v1alpha2/zz_generated.deepcopy.go @@ -21,6 +21,7 @@ limitations under the License. package v1alpha2 import ( + "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -148,6 +149,116 @@ func (in *HNCConfigurationStatus) DeepCopy() *HNCConfigurationStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HierarchicalResourceQuota) DeepCopyInto(out *HierarchicalResourceQuota) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HierarchicalResourceQuota. +func (in *HierarchicalResourceQuota) DeepCopy() *HierarchicalResourceQuota { + if in == nil { + return nil + } + out := new(HierarchicalResourceQuota) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HierarchicalResourceQuota) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HierarchicalResourceQuotaList) DeepCopyInto(out *HierarchicalResourceQuotaList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HierarchicalResourceQuota, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HierarchicalResourceQuotaList. +func (in *HierarchicalResourceQuotaList) DeepCopy() *HierarchicalResourceQuotaList { + if in == nil { + return nil + } + out := new(HierarchicalResourceQuotaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *HierarchicalResourceQuotaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HierarchicalResourceQuotaSpec) DeepCopyInto(out *HierarchicalResourceQuotaSpec) { + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HierarchicalResourceQuotaSpec. +func (in *HierarchicalResourceQuotaSpec) DeepCopy() *HierarchicalResourceQuotaSpec { + if in == nil { + return nil + } + out := new(HierarchicalResourceQuotaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HierarchicalResourceQuotaStatus) DeepCopyInto(out *HierarchicalResourceQuotaStatus) { + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Used != nil { + in, out := &in.Used, &out.Used + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HierarchicalResourceQuotaStatus. +func (in *HierarchicalResourceQuotaStatus) DeepCopy() *HierarchicalResourceQuotaStatus { + if in == nil { + return nil + } + out := new(HierarchicalResourceQuotaStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HierarchyConfiguration) DeepCopyInto(out *HierarchyConfiguration) { *out = *in diff --git a/cmd/manager/main.go b/cmd/manager/main.go index f74a6c245..4fe4a6692 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -73,6 +73,7 @@ var ( managedNamespaceAnnots arrayArg includedNamespacesRegex string webhooksOnly bool + enableHRQ bool ) // init preloads some global vars before main() starts. Since this is the top-level module, I'm not @@ -149,6 +150,7 @@ func parseFlags() { flag.Var(&managedNamespaceLabels, "managed-namespace-label", "A regex indicating the labels on namespaces that are managed by HNC. These labels may only be set via the HierarchyConfiguration object. All regexes are implictly wrapped by \"^...$\". This argument can be specified multiple times. See the user guide for more information.") flag.Var(&managedNamespaceAnnots, "managed-namespace-annotation", "A regex indicating the annotations on namespaces that are managed by HNC. These annotations may only be set via the HierarchyConfiguration object. All regexes are implictly wrapped by \"^...$\". This argument can be specified multiple times. See the user guide for more information.") flag.BoolVar(&webhooksOnly, "webhooks-only", false, "Disables the controllers so HNC can be run in HA webhook mode") + flag.BoolVar(&enableHRQ, "enable-hrq", false, "Enables hierarchical resource quotas") flag.Parse() // Assign the array args to the configuration variables after the args are parsed. 
@@ -303,6 +305,7 @@ func startControllers(mgr ctrl.Manager, certsReady chan struct{}) { NoWebhooks: noWebhooks, MaxReconciles: maxReconciles, ReadOnly: webhooksOnly, + HRQ: enableHRQ, } setup.Create(setupLog, mgr, f, opts) diff --git a/config/crd/bases/hnc.x-k8s.io_hierarchicalresourcequotas.yaml b/config/crd/bases/hnc.x-k8s.io_hierarchicalresourcequotas.yaml new file mode 100644 index 000000000..9206d0812 --- /dev/null +++ b/config/crd/bases/hnc.x-k8s.io_hierarchicalresourcequotas.yaml @@ -0,0 +1,85 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: hierarchicalresourcequotas.hnc.x-k8s.io +spec: + group: hnc.x-k8s.io + names: + kind: HierarchicalResourceQuota + listKind: HierarchicalResourceQuotaList + plural: hierarchicalresourcequotas + shortNames: + - hrq + singular: hierarchicalresourcequota + scope: Namespaced + versions: + - name: v1alpha2 + schema: + openAPIV3Schema: + description: HierarchicalResourceQuota sets aggregate quota restrictions enforced + for a namespace and descendant namespaces + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the desired quota + properties: + hard: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Hard is the set of desired hard limits for each named + resource + type: object + type: object + status: + description: Status defines the actual enforced quota and its current + usage + properties: + hard: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Hard is the set of enforced hard limits for each named + resource + type: object + used: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Used is the current observed total usage of the resource + in the namespace and its descendant namespaces. 
+ type: object + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index dc1722081..6b2fcc3d2 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -5,6 +5,7 @@ resources: - bases/hnc.x-k8s.io_hierarchyconfigurations.yaml - bases/hnc.x-k8s.io_hncconfigurations.yaml - bases/hnc.x-k8s.io_subnamespaceanchors.yaml +- bases/hnc.x-k8s.io_hierarchicalresourcequotas.yaml # +kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: @@ -16,11 +17,6 @@ patchesStrategicMerge: # - patches/webhook_in_subnamespaceanchors.yaml # +kubebuilder:scaffold:crdkustomizewebhookpatch -# [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. -# patches here are for enabling the CA injection for each CRD -#- patches/cainjection_in_hierarchies.yaml -# +kubebuilder:scaffold:crdkustomizecainjectionpatch - # the following config is for teaching kustomize how to do kustomization for CRDs. configurations: - kustomizeconfig.yaml diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index bc6413574..23f0963f2 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -5,6 +5,18 @@ metadata: creationTimestamp: null name: manager-role rules: +- apiGroups: + - "" + resources: + - resourcequotas + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - '*' resources: @@ -21,6 +33,26 @@ rules: - patch - update - watch +- apiGroups: + - hnc.x-k8s.io + resources: + - hierarchicalresourcequotas + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - hnc.x-k8s.io + resources: + - hierarchicalresourcequotas/status + verbs: + - get + - patch + - update - apiGroups: - hnc.x-k8s.io resources: diff --git a/config/variants/hrq/README b/config/variants/hrq/README new file mode 100644 index 000000000..658aea23f --- /dev/null +++ b/config/variants/hrq/README @@ -0,0 +1,2 @@ +This is identical to default_cc (and should probably use that as a base in the +future) except that it enables hierarchical resource quotas (HRQ). diff --git a/config/variants/hrq/kustomization.yaml b/config/variants/hrq/kustomization.yaml new file mode 100644 index 000000000..656e84e76 --- /dev/null +++ b/config/variants/hrq/kustomization.yaml @@ -0,0 +1,36 @@ +# Adds namespace to all resources. +namespace: hnc-system + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. 
+namePrefix: hnc- + +bases: +- ../../crd +- ../../internalcert +- ../../manager +- ../../rbac +- ../../webhook + +patchesStrategicMerge: +- webhook_patch.yaml + +patches: +- patch: |- + - op: add + path: /spec/template/spec/containers/0/args/- + value: --enable-internal-cert-management + - op: add + path: /spec/template/spec/containers/0/args/- + value: --cert-restart-on-secret-refresh + - op: add + path: /spec/template/spec/containers/0/args/- + value: --enable-hrq + target: + group: apps + version: v1 + kind: Deployment + name: controller-manager diff --git a/config/variants/hrq/webhook_patch.yaml b/config/variants/hrq/webhook_patch.yaml new file mode 100644 index 000000000..b54e64e2b --- /dev/null +++ b/config/variants/hrq/webhook_patch.yaml @@ -0,0 +1,46 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: validating-webhook-configuration +webhooks: +- name: hrq.hnc.x-k8s.io + admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-hnc-x-k8s-io-v1alpha2-hrq + failurePolicy: Fail + rules: + - apiGroups: + - hnc.x-k8s.io + apiVersions: + - v1alpha2 + operations: + - CREATE + - UPDATE + resources: + - hierarchicalresourcequotas + sideEffects: None +- name: resourcesquotasstatus.hnc.x-k8s.io + admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-hnc-x-k8s-io-v1alpha2-resourcequotasstatus + failurePolicy: Ignore + rules: + - apiGroups: + - "" + apiVersions: + - v1 + operations: + - UPDATE + resources: + - resourcequotas/status + sideEffects: None diff --git a/internal/forest/forest.go b/internal/forest/forest.go index 9027c58fb..8a196a9dc 100644 --- a/internal/forest/forest.go +++ b/internal/forest/forest.go @@ -92,6 +92,7 @@ func (f *Forest) Get(nm string) *Namespace { name: nm, children: namedNamespaces{}, sourceObjects: objects{}, + quotas: quotas{limits: limits{}, used: usage{}}, } f.namespaces[nm] = ns return ns diff --git a/internal/forest/namespace.go b/internal/forest/namespace.go index 47287fb05..dfdbd5694 100644 --- a/internal/forest/namespace.go +++ b/internal/forest/namespace.go @@ -59,6 +59,9 @@ type Namespace struct { // managed by HNC. Any other value means that the namespace is an "external" namespace, whose // metadata (e.g. labels) are set outside of HNC. Manager string + + // quotas stores information about the hierarchical quotas and resource usage in this namespace + quotas quotas } // Name returns the name of the namespace, of "" if the namespace is nil. diff --git a/internal/forest/namespacehrq.go b/internal/forest/namespacehrq.go new file mode 100644 index 000000000..5ea086ff0 --- /dev/null +++ b/internal/forest/namespacehrq.go @@ -0,0 +1,270 @@ +package forest + +import ( + "fmt" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + "sigs.k8s.io/hierarchical-namespaces/internal/hrq/utils" +) + +type quotas struct { + // limits stores the resource limits specified by the HRQs in this namespace + limits limits + + // used stores the resource usage in the subtree rooted in this namespace + used usage +} + +// limits maps from the name of the HRQ to the limits it specifies +type limits map[string]v1.ResourceList + +// usage stores local and subtree resource usages of a namespace. +type usage struct { + // local stores resource usages in the namespace. 
The resource types in `local` +	// are a union of types specified in quotas.limits of the namespace and its ancestors. +	local v1.ResourceList + +	// subtree stores the aggregate resource usages in the namespace and its descendants. +	// The resource types in `subtree` are the types specified in `local` usages +	// of the namespace and its descendants. +	// +	// We need to keep track of all types in `local` usages so that when adding a +	// new limit for a type that exists only in HierarchicalResourceQuota objects +	// in descendants of the current namespace, we still have aggregate resource +	// usage for the type. +	subtree v1.ResourceList +} + +// TryUseResources checks resource limits in the namespace and its ancestors +// when given proposed absolute (not delta) resource usages in the namespace. If +// the proposed resource usages are the same as the current usages, they are +// allowed and we do nothing. If any of the usages change, we only check whether +// the changed usages are allowed. If any of them +// exceed resource limits, it returns an error; otherwise, it returns nil. +// Callers of this method are responsible for updating the resource usage status of +// the HierarchicalResourceQuota objects. +// +// If the proposed resource usage changes are allowed, the method also updates +// the in-memory local resource usages of the current namespace and the subtree +// resource usages of its ancestors (including itself). +// +// TryUseResources is called by the HRQ admission controller to decide if a ResourceQuota.Status +// update issued by the K8s ResourceQuota admission controller is allowed. Since UseResources() +// modifies resource usages in the in-memory forest, the forest lock should be held while calling +// the method. +// +// Normally, admission controllers shouldn't assume that a change they allow will actually be +// performed, since another admission controller can be called later and deny it. +// Uniquely for Resource Quotas, this isn't true - the K8s apiserver only attempts to update the RQ +// status when _all_ other admission controllers have passed and the resources are about to be +// consumed. In rare cases, of course, the resources may not be consumed (e.g. due to an error in +// etcd) but the apiserver runs a cleanup process that occasionally syncs up actual usage with the +// usage recorded in RQs. When the RQs are changed, we'll be updated too. +// +// Based on observations, the K8s ResourceQuota admission controller is called only +// when a resource is consumed, not when a resource is released. Therefore, in most cases, +// the proposed resource usages that the HRQ admission controller receives should +// be larger than the in-memory resource usages. +// +// The proposed usage can be smaller when in-memory resource usages are out of sync +// with ResourceQuota.Status.Used. In this case, TryUseResources will still update +// in-memory usages. +// +// For example, when a user deletes a pod that consumes +// 100m cpu and immediately creates another pod that consumes 1m cpu in a namespace, +// the HRQ ResourceQuota reconciler will be triggered by ResourceQuota.Status changes twice: +// 1) when the pod is deleted. +// 2) when the pod is created. +// The HRQ ResourceQuota reconciler will compare `local` with ResourceQuota.Status.Used +// and will update `local` (and `subtree` in the namespace and its ancestors) if it +// sees that `local` is different from ResourceQuota.Status.Used.
+// +// The HRQ admission controller will be triggered once when the pod is created. +// If the HRQ admission controller locks the in-memory forest before step 1) +// of the HRQ ResourceQuota reconciler, the HRQ admission controller will see that the proposed +// usages are smaller than the in-memory usages and will update the in-memory usages. +// When the ResourceQuota reconciler acquires the lock later, it will notice that +// ResourceQuota.Status.Used is the same as the in-memory usages and will not +// update them. +func (n *Namespace) TryUseResources(rl v1.ResourceList) error { +	// The proposed resource usages are allowed if they are the same as the current +	// resource usages in the namespace. +	if utils.Equals(rl, n.quotas.used.local) { +		return nil +	} + +	if err := n.canUseResources(rl); err != nil { +		// At least one of the proposed usages exceeds resource limits. +		return err +	} + +	// At this point we are confident that no proposed resource usage exceeds +	// resource limits because the forest lock is held by the caller of this method. +	n.UseResources(rl) +	return nil +} + +// canUseResources checks if subtree resource usages would exceed resource limits +// in the namespace and its ancestors if the proposed resource usages were consumed. +// The method returns an error if any *changing* subtree resource usages exceed +// the corresponding limits; otherwise, it returns nil. Note: if there's no +// *change* to a subtree resource usage and it already exceeds limits, we will +// ignore it because we don't want to block other valid resource usages. +func (n *Namespace) canUseResources(u v1.ResourceList) error { +	// For each resource, delta = proposed usage - current usage. +	delta := utils.Subtract(u, n.quotas.used.local) +	delta = utils.OmitZeroQuantity(delta) + +	for _, nsnm := range n.AncestryNames() { +		ns := n.forest.Get(nsnm) +		r := utils.Copy(ns.quotas.used.subtree) +		r = utils.AddIfExists(delta, r) +		allowed, nm, exceeded := checkLimits(ns.quotas.limits, r) +		if !allowed { +			// Construct the error message similar to the RQ exceeded quota error message - +			// "exceeded quota: gke-hc-hrq, requested: configmaps=1, used: configmaps=2, limited: configmaps=2" +			msg := fmt.Sprintf("exceeded hierarchical quota in namespace %q: %q", ns.name, nm) +			for _, er := range exceeded { +				rnm := er.String() +				// Get the requested, used, limited quantity of the exceeded resource. +				rq := delta[er] +				uq := ns.quotas.used.subtree[er] +				lq := ns.quotas.limits[nm][er] +				msg += fmt.Sprintf(", requested: %s=%v, used: %s=%v, limited: %s=%v", +					rnm, &rq, rnm, &uq, rnm, &lq) +			} +			return fmt.Errorf(msg) +		} +	} + +	return nil +} + +// UseResources sets the absolute resource usage in this namespace, and should +// be called when we're being informed of a new set of resource usage. It also +// updates the subtree usage in this namespace and all its ancestors. +// +// The callers will typically then enqueue all ancestor HRQs to update their +// usages with the apiserver. +// +// UseResources can be called in the following scenarios: +// - Called by the HRQ admission controller when a request is allowed +// - Called by the HRQ ResourceQuota reconciler when it observes `local` +//   usages are different from ResourceQuota.Status.Used +// - Called by the HRQ Namespace reconciler to remove `local` usages of a +//   namespace from the subtree usages of the previous ancestors of the namespace. +func (n *Namespace) UseResources(newUsage v1.ResourceList) { +	oldUsage := n.quotas.used.local +	// We only consider limited usages.
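+	// For example (illustrative): if only "secrets" is limited by the HRQs in this +	// namespace and its ancestors, a "pods" entry in newUsage is dropped by the +	// cleanup below.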
+	newUsage = utils.CleanupUnneeded(newUsage, n.Limits()) +	// Early exit if there's no usage change. It's safe because the forest would +	// remain unchanged and the caller would always enqueue all ancestor HRQs. +	if utils.Equals(oldUsage, newUsage) { +		return +	} +	n.quotas.used.local = newUsage + +	// Determine the delta in resource usage as this now needs to be applied to each ancestor. +	delta := utils.Subtract(newUsage, oldUsage) + +	// Update subtree usages in the ancestors (including itself). The incremental +	// change here is safe because there's a goroutine periodically calculating +	// subtree usages from-scratch to make sure the forest is not out-of-sync. If +	// all goes well, the periodic sync isn't needed - it's *purely* there in case +	// there's a bug. +	for _, nsnm := range n.AncestryNames() { +		ns := n.forest.Get(nsnm) + +		// Get the new subtree usage and remove no longer limited usages. +		newSubUsg := utils.Add(delta, ns.quotas.used.subtree) +		ns.UpdateSubtreeUsages(newSubUsg) +	} +} + +// checkLimits checks if resource usages exceed resource limits specified in +// HierarchicalResourceQuota objects of a namespace. If resource usages exceed +// resource limits in a HierarchicalResourceQuota object, it will return false, the +// name of the HierarchicalResourceQuota object that defines the resource limit, and +// the name(s) of the resource(s) that exceed the limits; otherwise, it will return +// true. +func checkLimits(l limits, u v1.ResourceList) (bool, string, []v1.ResourceName) { +	for nm, rl := range l { +		if allowed, exceeded := utils.LessThanOrEqual(u, rl); !allowed { +			return allowed, nm, exceeded +		} +	} +	return true, "", nil +} + +// HRQNames returns the names of every HRQ object in this namespace. +func (n *Namespace) HRQNames() []string { +	names := []string{} +	for nm := range n.quotas.limits { +		names = append(names, nm) +	} +	return names +} + +// Limits returns the limits specified in quotas.limits of the current namespace and +// its ancestors. If there is more than one limit for a resource type, the +// strictest limit will be returned. +func (n *Namespace) Limits() v1.ResourceList { +	rs := v1.ResourceList{} +	for _, nsnm := range n.AncestryNames() { +		ns := n.forest.Get(nsnm) +		for _, l := range ns.quotas.limits { +			rs = utils.Min(rs, l) +		} +	} + +	return rs +} + +// GetLocalUsages returns a copy of local resource usages. +func (n *Namespace) GetLocalUsages() v1.ResourceList { +	u := n.quotas.used.local.DeepCopy() +	return u +} + +// GetSubtreeUsages returns a copy of subtree resource usages. +func (n *Namespace) GetSubtreeUsages() v1.ResourceList { +	u := n.quotas.used.subtree.DeepCopy() +	return u +} + +// UpdateSubtreeUsages sets the subtree resource usages. +func (n *Namespace) UpdateSubtreeUsages(rl v1.ResourceList) { +	n.quotas.used.subtree = utils.CleanupUnneeded(rl, n.Limits()) +} + +// RemoveLimits removes limits specified by the HierarchicalResourceQuota object +// of the given name. +func (n *Namespace) RemoveLimits(nm string) { +	delete(n.quotas.limits, nm) +} + +// UpdateLimits updates in-memory limits of the HierarchicalResourceQuota +// object of the given name. Returns true if there's a difference. +func (n *Namespace) UpdateLimits(nm string, l v1.ResourceList) bool { +	if n.quotas.limits == nil { +		n.quotas.limits = limits{} +	} +	if utils.Equals(n.quotas.limits[nm], l) { +		return false +	} +	n.quotas.limits[nm] = l +	return true +} + +// DeleteUsages removes the local usages of a namespace.
The usages are also +// removed from the subtree usages of the ancestors of the namespace. The caller +// should enqueue the ancestor HRQs to update the usages. +func (n *Namespace) DeleteUsages() { +	local := v1.ResourceList{} +	for r := range n.quotas.used.local { +		local[r] = resource.MustParse("0") +	} +	n.UseResources(local) +} diff --git a/internal/forest/namespacehrq_test.go b/internal/forest/namespacehrq_test.go new file mode 100644 index 000000000..42ad1d551 --- /dev/null +++ b/internal/forest/namespacehrq_test.go @@ -0,0 +1,743 @@ +package forest + +import ( +	"fmt" +	"reflect" +	"sort" +	"strings" +	"testing" + +	corev1 "k8s.io/api/core/v1" +	"k8s.io/apimachinery/pkg/api/resource" + +	"sigs.k8s.io/hierarchical-namespaces/internal/hrq/utils" +) + +// testNS represents a namespace and how it looks in the forest, with ancestors, +// one aggregate hrq, etc. +type testNS struct { +	nm string +	// There may be multiple ancestors. The names are separated by spaces. +	ancs string +	// There is at most one hrq in each namespace in this test. Omitting the field +	// means there's no hrq in the namespace. +	limits  string +	local   string +	subtree string +} + +// testReq represents a request with namespace name and resource usages. Note: +// the usages are absolute usage numbers. +type testReq struct { +	ns  string +	use string +} + +// usages maps a namespace name to the resource usages as a string, +// e.g. "foo": "secrets 1", "bar": "secrets 2 pods 1". +type usages map[string]string + +// hrqs maps an hrq name to the resource limits as a string, +// e.g. "H1": "secrets 1", "H2": "secrets 2 pods 1". +type hrqs map[string]string + +// wantError represents an error result. +type wantError struct { +	// The namespace that contains the HierarchicalResourceQuota +	// object whose limits are exceeded. +	ns string +	// The name of a HierarchicalResourceQuota object. +	nm string +	// Resources that exceed HRQ limits, with requested, used, limited quota. +	exceeded []exceeded +} + +// exceeded represents one exceeded resource with requested, used, limited quota. +// Please note "exceeded" here means exceeding HRQ, not regular RQ. The quota +// string must be in the form that can be parsed by resource.MustParse, e.g. +// "5", "100m", etc. +type exceeded struct { +	name string +	// requested is the additional quantity to use in the namespace, which is equal +	// to (proposed usage - local usage). +	requested string +	// used is the quantity used for HRQ, not a local usage. +	used string +	// limited is the quota limited by HRQ.
+ limited string +} + +func TestTryUseResources(t *testing.T) { + tests := []struct { + name string + setup []testNS + req testReq + expected usages + error []wantError + }{{ + name: "allow nil change when under limit in single namespace", + setup: []testNS{{nm: "foo", limits: "secrets 3", local: "secrets 2", subtree: "secrets 2"}}, + req: testReq{ns: "foo", use: "secrets 2"}, + expected: usages{"foo": "secrets 2"}, + }, { + name: "allow increase in child when under limit in both parent and child", + setup: []testNS{ + {nm: "foo", limits: "secrets 3", local: "secrets 1", subtree: "secrets 2"}, + {nm: "bar", ancs: "foo", limits: "secrets 100", local: "secrets 1", subtree: "secrets 1"}, + }, + req: testReq{ns: "bar", use: "secrets 2"}, + expected: usages{"foo": "secrets 3", "bar": "secrets 2"}, + }, { + name: "allow increase in child when under limit in both parent and child, while not affecting other resource usages", + setup: []testNS{ + {nm: "foo", limits: "secrets 3", local: "secrets 1", subtree: "secrets 2"}, + {nm: "bar", ancs: "foo", limits: "secrets 100 pods 2", local: "secrets 1 pods 1", subtree: "secrets 1 pods 1"}, + }, + req: testReq{ns: "bar", use: "secrets 2 pods 1"}, + expected: usages{"foo": "secrets 3", "bar": "secrets 2 pods 1"}, + }, { + name: "allow underused resources to increase even though other resources are over their limits", + setup: []testNS{ + {nm: "foo", limits: "secrets 3", local: "secrets 1", subtree: "secrets 2"}, + {nm: "bar", ancs: "foo", limits: "secrets 100 pods 2", local: "secrets 1 pods 4", subtree: "secrets 1 pods 4"}, + }, + req: testReq{ns: "bar", use: "secrets 2 pods 4"}, + expected: usages{"foo": "secrets 3", "bar": "secrets 2 pods 4"}, + }, { + name: "deny increase when it would exceed limit in parent but not child", + setup: []testNS{ + {nm: "foo", limits: "secrets 2", local: "secrets 1", subtree: "secrets 2"}, + {nm: "bar", ancs: "foo", limits: "secrets 100", local: "secrets 1", subtree: "secrets 1"}, + }, + req: testReq{ns: "bar", use: "secrets 2"}, + error: []wantError{{ns: "foo", nm: "fooHrq", exceeded: []exceeded{{name: "secrets", requested: "1", used: "2", limited: "2"}}}}, + // Usages should not be updated. + expected: usages{"foo": "secrets 2", "bar": "secrets 1"}, + }, { + name: "deny increase in sibling when it would exceed limit in parent but not in children", + setup: []testNS{ + {nm: "foo", limits: "secrets 2", local: "secrets 1", subtree: "secrets 2"}, + {nm: "bar", ancs: "foo", limits: "secrets 100", local: "secrets 1", subtree: "secrets 1"}, + {nm: "baz", ancs: "foo"}, + }, + req: testReq{ns: "baz", use: "secrets 1"}, + error: []wantError{{ns: "foo", nm: "fooHrq", exceeded: []exceeded{{name: "secrets", requested: "1", used: "2", limited: "2"}}}}, + // Usages should not be updated. Note "bar" is not here because the expected + // usages here are for the request namespace and its ancestors. + expected: usages{"foo": "secrets 2"}, + }, { + name: "deny increase when it would exceed limit in parent but not child, while not affecting other resource usages", + setup: []testNS{ + {nm: "foo", limits: "secrets 2", local: "secrets 1", subtree: "secrets 2"}, + {nm: "bar", ancs: "foo", limits: "secrets 100 pods 2", local: "secrets 1 pods 1", subtree: "secrets 1 pods 1"}, + }, + req: testReq{ns: "bar", use: "secrets 2 pods 1"}, + error: []wantError{{ns: "foo", nm: "fooHrq", exceeded: []exceeded{{name: "secrets", requested: "1", used: "2", limited: "2"}}}}, + // Usages should not be updated. 
+ expected: usages{"foo": "secrets 2", "bar": "secrets 1 pods 1"}, + }, { + name: "allow decrease under limit in both parent and child", + setup: []testNS{ + {nm: "foo", limits: "secrets 3", local: "secrets 1", subtree: "secrets 3"}, + {nm: "bar", ancs: "foo", limits: "secrets 100", local: "secrets 2", subtree: "secrets 2"}, + }, + req: testReq{ns: "bar", use: "secrets 1"}, + expected: usages{"foo": "secrets 2", "bar": "secrets 1"}, + }, { + name: "allow decrease under limit in both parent and child, while not affecting other resource usages", + setup: []testNS{ + {nm: "foo", limits: "secrets 3", local: "secrets 1", subtree: "secrets 3"}, + {nm: "bar", ancs: "foo", limits: "secrets 100 pods 2", local: "secrets 2 pods 1", subtree: "secrets 2 pods 1"}, + }, + req: testReq{ns: "bar", use: "secrets 1 pods 1"}, + expected: usages{"foo": "secrets 2", "bar": "secrets 1 pods 1"}, + }, { + // Note: we are expecting an error here because this is a unit test for + // the `TryUseResources` function. In reality, negative resource usage + // changes won't get an error since K8s resource quota admission + // controller is not called, thus neither our webhook nor this function + // will be called to prevent users from deleting exceeded resources. + name: "deny decrease if it still exceeds limits", + setup: []testNS{ + {nm: "foo", limits: "secrets 2", local: "secrets 1", subtree: "secrets 5"}, + {nm: "bar", ancs: "foo", limits: "secrets 100", local: "secrets 4", subtree: "secrets 4"}, + }, + req: testReq{ns: "bar", use: "secrets 2"}, + // In reality it won't show negative number since neither K8s rq validator + // nor our rq validator will be called to output this message. + error: []wantError{{ns: "foo", nm: "fooHrq", exceeded: []exceeded{{name: "secrets", requested: "-2", used: "5", limited: "2"}}}}, + // Usages should not be updated. + expected: usages{"foo": "secrets 5", "bar": "secrets 4"}, + }, { + // Note: we are expecting an error here for the same reason in the above test. + name: "deny decrease if it still exceeds limits, while not affecting other resource usages", + setup: []testNS{ + {nm: "foo", limits: "secrets 2", local: "secrets 1", subtree: "secrets 5"}, + {nm: "bar", ancs: "foo", limits: "secrets 100 pods 2", local: "secrets 4 pods 1", subtree: "secrets 4 pods 1"}, + }, + req: testReq{ns: "bar", use: "secrets 2 pods 1"}, + // In reality it won't show negative number since neither K8s rq validator + // nor our rq validator will be called to output this message. + error: []wantError{{ns: "foo", nm: "fooHrq", exceeded: []exceeded{{name: "secrets", requested: "-2", used: "5", limited: "2"}}}}, + // Usages should not be updated. + expected: usages{"foo": "secrets 5", "bar": "secrets 4 pods 1"}, + }} + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + forest := buildForest(t, tc.setup) + n := forest.Get(tc.req.ns) + gotError := n.TryUseResources(stringToResourceList(t, tc.req.use)) + errMsg := checkError(gotError, tc.error) + if errMsg != "" { + t.Error(errMsg) + } + msg := checkUsages(t, n, tc.expected) + if msg != "" { + t.Error(msg) + } + }) + } +} + +func TestCanUseResource(t *testing.T) { + tests := []struct { + name string + setup []testNS + req testReq + want []wantError + }{{ + // `req.use` (i.e., ResourceQuota.Status.Used updates) is nil in this case + // because when there is no parent and no limit, the corresponding + // ResourceQuota object should not have any hard limit in ResourceQuota.Hard; + // therefore, there is no ResourceQuota.Status.Used updates. 
+ name: "allow nil change when no parent no limit", + setup: []testNS{{nm: "foo"}}, + req: testReq{ns: "foo"}, + }, { + name: "allow change when no parent no limit", + setup: []testNS{{nm: "foo"}}, + req: testReq{ns: "foo", use: "secrets 2"}, + }, { + name: "allow change under limit when no parent", + setup: []testNS{{nm: "foo", limits: "secrets 3", local: "secrets 1", subtree: "secrets 1"}}, + req: testReq{ns: "foo", use: "secrets 1"}, + }, { + name: "deny increase exceeding local limit when no parent", + setup: []testNS{{nm: "foo", limits: "secrets 3", local: "secrets 1", subtree: "secrets 1"}}, + req: testReq{ns: "foo", use: "secrets 4"}, + want: []wantError{{ns: "foo", nm: "fooHrq", exceeded: []exceeded{{name: "secrets", requested: "3", used: "1", limited: "3"}}}}, + }, { + name: "deny increase exceeding parent limit", + setup: []testNS{ + {nm: "foo", limits: "secrets 2", local: "secrets 1", subtree: "secrets 2"}, + {nm: "bar", ancs: "foo", limits: "secrets 100", local: "secrets 1", subtree: "secrets 1"}, + }, + req: testReq{ns: "bar", use: "secrets 3"}, + want: []wantError{{ns: "foo", nm: "fooHrq", exceeded: []exceeded{{name: "secrets", requested: "2", used: "2", limited: "2"}}}}, + }, { + name: "deny increase in sibling exceeding parent limit", + setup: []testNS{ + {nm: "foo", limits: "secrets 2", local: "secrets 1", subtree: "secrets 2"}, + {nm: "bar", ancs: "foo", limits: "secrets 100", local: "secrets 1", subtree: "secrets 1"}, + {nm: "baz", ancs: "foo"}, + }, + req: testReq{ns: "baz", use: "secrets 1"}, + want: []wantError{{ns: "foo", nm: "fooHrq", exceeded: []exceeded{{name: "secrets", requested: "1", used: "2", limited: "2"}}}}, + }, { + name: "deny increase exceeding parent limit, while not affecting other resource usages", + setup: []testNS{ + {nm: "foo", limits: "secrets 2", local: "secrets 1", subtree: "secrets 2"}, + {nm: "bar", ancs: "foo", limits: "secrets 100 pods 2", local: "secrets 1 pods 1", subtree: "secrets 1 pods 1"}, + }, + req: testReq{ns: "bar", use: "secrets 3 pods 1"}, + want: []wantError{{ns: "foo", nm: "fooHrq", exceeded: []exceeded{{name: "secrets", requested: "2", used: "2", limited: "2"}}}}, + }, { + // Note: we are expecting an error here because this is a unit test for + // the `CanUseResources` function. In reality, negative resource usage + // changes won't get an error since K8s resource quota admission + // controller is not called, thus neither our webhook nor this function + // will be called to prevent users from deleting exceeded resources. + name: "deny decrease exceeding local limit when has parent", + setup: []testNS{ + {nm: "foo", limits: "secrets 2", local: "secrets 1", subtree: "secrets 1"}, + {nm: "bar", ancs: "foo", limits: "pods 2", local: "pods 4", subtree: "pods 4"}, + }, + req: testReq{ns: "bar", use: "pods 3"}, + // In reality it won't show negative number since neither K8s rq validator + // nor our rq validator will be called to output this message. + want: []wantError{{ns: "bar", nm: "barHrq", exceeded: []exceeded{{name: "pods", requested: "-1", used: "4", limited: "2"}}}}, + }, { + // Note: we are expecting an error here for the same reason in the above test. 
+ name: "deny decrease exceeding local limit when has parent, while not affecting other resource usages", + setup: []testNS{ + {nm: "foo", limits: "secrets 2", local: "secrets 1", subtree: "secrets 2"}, + {nm: "bar", ancs: "foo", limits: "secrets 100 pods 2", local: "secrets 1 pods 4", subtree: "secrets 1 pods 4"}, + }, + req: testReq{ns: "bar", use: "secrets 1 pods 3"}, + // In reality it won't show negative number since neither K8s rq validator + // nor our rq validator will be called to output this message. + want: []wantError{{ns: "bar", nm: "barHrq", exceeded: []exceeded{{name: "pods", requested: "-1", used: "4", limited: "2"}}}}, + }, { + // Note: we are expecting an error here for the same reason in the above test. + name: "deny decrease exceeding parent limit when has parent", + setup: []testNS{ + {nm: "foo", limits: "secrets 2", local: "secrets 1", subtree: "secrets 5"}, + {nm: "bar", ancs: "foo", limits: "secrets 100", local: "secrets 4", subtree: "secrets 4"}, + }, + req: testReq{ns: "bar", use: "secrets 3"}, + // In reality it won't show negative number since neither K8s rq validator + // nor our rq validator will be called to output this message. + want: []wantError{{ns: "foo", nm: "fooHrq", exceeded: []exceeded{{name: "secrets", requested: "-1", used: "5", limited: "2"}}}}, + }, { + // Note: we are expecting an error here for the same reason in the above test. + name: "deny decrease exceeding parent limit when has parent, while not affecting other resource usages", + setup: []testNS{ + {nm: "foo", limits: "secrets 2", local: "secrets 1", subtree: "secrets 5"}, + {nm: "bar", ancs: "foo", limits: "secrets 100 pods 2", local: "secrets 4 pods 2", subtree: "secrets 4 pods 2"}, + }, + req: testReq{ns: "bar", use: "secrets 3 pods 2"}, + // In reality it won't show negative number since neither K8s rq validator + // nor our rq validator will be called to output this message. + want: []wantError{{ns: "foo", nm: "fooHrq", exceeded: []exceeded{{name: "secrets", requested: "-1", used: "5", limited: "2"}}}}, + }, { + name: "deny multiple resources exceeding limits", + setup: []testNS{ + {nm: "foo", limits: "secrets 2", local: "secrets 1", subtree: "secrets 2"}, + {nm: "bar", ancs: "foo", limits: "secrets 100 pods 2", local: "secrets 1 pods 1", subtree: "secrets 1 pods 1"}, + }, + req: testReq{ns: "bar", use: "secrets 3 pods 4"}, + want: []wantError{ + {ns: "foo", nm: "fooHrq", exceeded: []exceeded{{name: "secrets", requested: "2", used: "2", limited: "2"}}}, + {ns: "bar", nm: "barHrq", exceeded: []exceeded{{name: "pods", requested: "3", used: "1", limited: "2"}}}, + }, + }} + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + f := buildForest(t, tc.setup) + got := f.Get(tc.req.ns).canUseResources(stringToResourceList(t, tc.req.use)) + errMsg := checkError(got, tc.want) + if errMsg != "" { + t.Error(errMsg) + } + }) + } +} + +func TestUseResource(t *testing.T) { + tests := []struct { + name string + setup []testNS + req testReq + expected usages + }{{ + // `req.use` (i.e., ResourceQuota.Status.Used updates) is nil in this case + // because when there is no parent and no limit, the corresponding + // ResourceQuota object should not have any hard limit in ResourceQuota.Hard; + // therefore there is no ResourceQuota.Status.Used updates. 
+ name: "nil change when no parent no limit", + setup: []testNS{{nm: "foo"}}, + req: testReq{ns: "foo"}, + }, { + name: "increase when no parent", + setup: []testNS{{nm: "foo", limits: "secrets 3", local: "secrets 1", subtree: "secrets 1"}}, + req: testReq{ns: "foo", use: "secrets 2"}, + expected: usages{"foo": "secrets 2"}, + }, { + name: "increase in child when has parent", + setup: []testNS{ + {nm: "foo", limits: "secrets 3", local: "secrets 1", subtree: "secrets 2"}, + {nm: "bar", ancs: "foo", limits: "secrets 100", local: "secrets 1", subtree: "secrets 1"}, + }, + req: testReq{ns: "bar", use: "secrets 2"}, + expected: usages{"foo": "secrets 3", "bar": "secrets 2"}, + }, { + name: "increase in sibling when has parent", + setup: []testNS{ + {nm: "foo", limits: "secrets 3", local: "secrets 1", subtree: "secrets 2"}, + {nm: "bar", ancs: "foo", limits: "secrets 100", local: "secrets 1", subtree: "secrets 1"}, + {nm: "baz", ancs: "foo"}, + }, + req: testReq{ns: "baz", use: "secrets 1"}, + // Note: "bar" is not listed here because the expected usages are for + // requested namespace and its ancestors. + expected: usages{"foo": "secrets 3", "baz": "secrets 1"}, + }, { + name: "increase in child when has parent, while not affecting other resource usages", + setup: []testNS{ + {nm: "foo", limits: "secrets 3", local: "secrets 1", subtree: "secrets 2"}, + {nm: "bar", ancs: "foo", limits: "secrets 100 pods 2", local: "secrets 1 pods 4", subtree: "secrets 1 pods 4"}, + }, + req: testReq{ns: "bar", use: "secrets 2 pods 4"}, + expected: usages{"foo": "secrets 3", "bar": "secrets 2 pods 4"}, + }, { + name: "decrease in child when has parent", + setup: []testNS{ + {nm: "foo", limits: "secrets 3", local: "secrets 1", subtree: "secrets 3"}, + {nm: "bar", ancs: "foo", limits: "secrets 100", local: "secrets 2", subtree: "secrets 2"}, + }, + req: testReq{ns: "bar", use: "secrets 1"}, + expected: usages{"foo": "secrets 2", "bar": "secrets 1"}, + }, { + name: "decrease in child when has parent, while not affecting other resource usages", + setup: []testNS{ + {nm: "foo", limits: "secrets 3", local: "secrets 1", subtree: "secrets 3"}, + {nm: "bar", ancs: "foo", limits: "secrets 100 pods 2", local: "secrets 2 pods 1", subtree: "secrets 2 pods 1"}, + }, + req: testReq{ns: "bar", use: "secrets 1 pods 1"}, + expected: usages{"foo": "secrets 2", "bar": "secrets 1 pods 1"}, + }} + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + f := buildForest(t, tc.setup) + n := f.Get(tc.req.ns) + n.UseResources(stringToResourceList(t, tc.req.use)) + msg := checkUsages(t, n, tc.expected) + if msg != "" { + t.Error(msg) + } + }) + } +} + +func TestCheckLimits(t *testing.T) { + type wantError struct { + nm string + exceeded []corev1.ResourceName + } + tests := []struct { + name string + limit hrqs + usage string + wantError wantError + }{{ + name: "no limits no usages", + limit: hrqs{}, + usage: "", + }, { + name: "no limits has usages", + limit: hrqs{}, + usage: "secrets 1 pods 2", + }, { + name: "usage not in the limits", + limit: hrqs{ + "H1": "cpu 100m", + }, + usage: "secrets 1 pods 2", + }, { + name: "usage not exceeds limits", + limit: hrqs{"H1": "cpu 100m"}, + usage: "cpu 10m", + }, { + name: "usages exceeds limits", + limit: hrqs{"H1": "cpu 100m"}, + usage: "cpu 200m", + wantError: wantError{ + nm: "H1", + exceeded: []corev1.ResourceName{corev1.ResourceCPU}, + }, + }, { + name: "multiple usages exceed limits", + limit: hrqs{"H1": "cpu 100m secrets 10"}, + usage: "cpu 200m secrets 100", + wantError: 
wantError{ + nm: "H1", + exceeded: []corev1.ResourceName{corev1.ResourceCPU, corev1.ResourceSecrets}, + }, + }, { + name: "multiple limits; usages exceed one limit", + limit: hrqs{ + "H1": "cpu 100m secrets 10", + "H2": "configmaps 3", + }, + usage: "cpu 10m secrets 8 configmaps 5", + wantError: wantError{ + nm: "H2", + exceeded: []corev1.ResourceName{corev1.ResourceConfigMaps}, + }, + }} + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + limits := limits{} + for nm, l := range tc.limit { + limits[nm] = stringToResourceList(t, l) + } + allowed, nm, exceeded := checkLimits(limits, stringToResourceList(t, tc.usage)) + expectAllowed := tc.wantError.nm == "" + if allowed != expectAllowed || + nm != tc.wantError.nm || + !resourceEquals(exceeded, tc.wantError.exceeded) { + t.Errorf("%s \n"+ + "wantError:\n"+ + "allowed: %t\n"+ + "nm: %s\n"+ + "exceeded: %v\n"+ + "got:\n"+ + "allowed: %t\n"+ + "nm: %s\n"+ + "exceeded: %v\n", + tc.name, + expectAllowed, tc.wantError.nm, tc.wantError.exceeded, + allowed, nm, exceeded) + } + }) + } +} + +func TestLimits(t *testing.T) { + tests := []struct { + name string + setup []testNS + ns string + want string + }{{ + name: "no parent no limit", + setup: []testNS{{nm: "foo"}}, + ns: "foo", + want: "", + }, { + name: "no parent", + setup: []testNS{{nm: "foo", limits: "secrets 3", local: "secrets 1", subtree: "secrets 1"}}, + ns: "foo", + want: "secrets 3", + }, { + name: "has parent", + setup: []testNS{ + {nm: "foo", limits: "secrets 2", local: "secrets 1", subtree: "secrets 2"}, + {nm: "bar", ancs: "foo", limits: "secrets 100 pods 2", local: "secrets 1 pods 1", subtree: "secrets 1 pods 1"}, + }, + ns: "bar", + want: "secrets 2 pods 2", + }} + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + f := buildForest(t, tc.setup) + n := f.Get(tc.ns) + got := n.Limits() + if result := utils.Equals(stringToResourceList(t, tc.want), got); !result { + t.Errorf("%s wantError: %v, got: %v", tc.name, tc.want, got) + } + }) + } +} + +// buildForest builds a forest from a list of testNS. +func buildForest(t *testing.T, nss []testNS) *Forest { + t.Helper() + f := &Forest{namespaces: namedNamespaces{}} + // Create all namespaces in the forest, with their hrqs if there's one. + for _, ns := range nss { + if ns.nm == "" { + t.Fatal("namespace has an empty name") + } + cur := f.Get(ns.nm) + if ns.limits == "" { + continue + } + cur.quotas = quotas{ + limits: limits{ns.nm + "Hrq": stringToResourceList(t, ns.limits)}, + used: usage{ + local: stringToResourceList(t, ns.local), + subtree: stringToResourceList(t, ns.subtree), + }, + } + } + // Set ancestors from existing namespaces. + for _, ns := range nss { + cur := f.Get(ns.nm) + for _, anc := range strings.Fields(ns.ancs) { + next := f.Get(anc) + cur.SetParent(next) + cur = next + } + } + return f +} + +// checkError checks if an error is expected. It returns a message with details +// if the error is not ONE of the expected; otherwise, it returns an empty string. +func checkError(err error, wes []wantError) string { + if err == nil { + if len(wes) > 0 { + return "expects error while actual run does " + + "not return any error" + } + } else { + if len(wes) > 0 { + expected := "" + matches := false + for _, we := range wes { + expected += fmt.Sprintf("%s, or ", we) + if errorMatches(err, we) { + matches = true + } + } + if !matches { + // The output would look like this, e.g. 
+				// error does not match +				// wantError error: contains substring: foo, H1, requested: secrets=8, used: secrets=4, limited: secrets=6, requested: pods=9, used: pods=3, limited: pods=4 +				// got error: exceeded hierarchical quota in namespace "foo": "H1", requested: secrets=8, used: secrets=5, limited: secrets=6, requested: pods=9, used: pods=3, limited: pods=4 +				return fmt.Sprintf("error does not match\n"+ +					"wantError error: contains substring: %s\n"+ +					"got error: %s", +					strings.TrimSuffix(expected, ", or "), err.Error()) +			} +		} else { +			return fmt.Sprintf("expects no error while actual run "+ +				"returns error: %s", err.Error()) +		} +	} + +	return "" +} + +// errorMatches returns true if the actual error is consistent with the expected +// error result; otherwise, it returns false. +func errorMatches(err error, r wantError) bool { +	if !strings.Contains(err.Error(), r.nm) || +		!strings.Contains(err.Error(), r.ns) { +		return false +	} +	for _, rs := range r.exceeded { +		if !strings.Contains(err.Error(), rs.String()) { +			return false +		} +	} +	return true +} + +// checkUsages checks if the local resource usages of a namespace and the subtree +// resource usages of the namespace and its ancestors are as expected. If any of the +// resource usages is not as expected, it returns a message with details; +// otherwise, it returns an empty string. +func checkUsages(t *testing.T, n *Namespace, u usages) string { +	t.Helper() +	local := n.quotas.used.local +	if !utils.Equals(local, stringToResourceList(t, u[n.name])) { +		return fmt.Sprintf("want local resource usages: %v\n"+ +			"got local resource usages: %v\n", +			u[n.name], local) +	} + +	subtree := getSubtreeUsages(n) +	if !usagesEquals(subtree, parseUsages(t, u)) { +		return fmt.Sprintf("want subtree resource usages:\n%s"+ +			"got subtree resource usages:\n%s\n", +			parseUsages(t, u), +			subtree) +	} + +	return "" +} + +// getSubtreeUsages returns subtree usages of the namespace and its ancestors. +func getSubtreeUsages(ns *Namespace) parsedUsages { +	var t parsedUsages +	for ns != nil { +		if len(ns.quotas.used.subtree) != 0 { +			if t == nil { +				t = parsedUsages{} +			} +			t[ns.Name()] = ns.quotas.used.subtree +		} +		ns = ns.Parent() +	} +	return t +} + +// usagesEquals returns true if two usages are equal; otherwise, it returns false. +func usagesEquals(a parsedUsages, b parsedUsages) bool { +	if len(a) != len(b) { +		return false +	} + +	if len(a) == 0 && len(b) == 0 { +		return true +	} + +	for n, u := range a { +		o, ok := b[n] +		if !ok || !utils.Equals(o, u) { +			return false +		} +	} +	for n, u := range b { +		o, ok := a[n] +		if !ok || !utils.Equals(o, u) { +			return false +		} +	} +	return true +} + +func parseUsages(t *testing.T, inString usages) parsedUsages { +	t.Helper() +	su := parsedUsages{} +	for nm, s := range inString { +		su[nm] = stringToResourceList(t, s) +	} +	return su +} + +// resourceEquals returns true if two corev1.ResourceName slices contain the +// same elements, regardless of the order; otherwise, it returns false. +func resourceEquals(a []corev1.ResourceName, b []corev1.ResourceName) bool { +	sort.Slice(a, func(i, j int) bool { +		return a[i] < a[j] +	}) +	sort.Slice(b, func(i, j int) bool { +		return b[i] < b[j] +	}) +	return reflect.DeepEqual(a, b) +} + +// stringToResourceList converts a string to a resource list. The input string is +// a list of alternating resource names and quantities separated by spaces, +// e.g. "secrets 2 pods 1".
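+// For example (illustrative): stringToResourceList(t, "secrets 2 pods 1") returns a +// corev1.ResourceList with "secrets" set to resource.MustParse("2") and "pods" set +// to resource.MustParse("1").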
+func stringToResourceList(t *testing.T, s string) corev1.ResourceList { + t.Helper() + if s == "" { + return corev1.ResourceList{} + } + args := strings.Fields(s) + list := map[corev1.ResourceName]resource.Quantity{} + if len(args)%2 != 0 { + t.Error("Need even number of arguments") + } + for i := 0; i < len(args); i += 2 { + list[corev1.ResourceName(args[i])] = resource.MustParse(args[i+1]) + } + return list +} + +// parsedUsages maps a namespace name to the resource usages in the namespace. +// This type is not used in the tests, but used in the comparison functions. +type parsedUsages map[string]corev1.ResourceList + +// E.g. namespace: foo; subtree: map[secrets:{{2 0} {} DecimalSI}] +func (a parsedUsages) String() string { + s := "" + for n, u := range a { + s += fmt.Sprintf("namespace: %s; subtree: %v\n", n, u) + } + return s +} + +// For example, if the wantError is exceeding "secrets" and "pods" limits of hrq +// called "H1" in namespace "foo", it would get something like: +// foo, H1, requested: secrets=8, used: secrets=4, limited: secrets=6, requested: pods=9, used: pods=3, limited: pods=4 +func (we wantError) String() string { + msg := we.ns + ", " + we.nm + ", " + for _, e := range we.exceeded { + msg += e.String() + ", " + } + return strings.TrimSuffix(msg, ", ") +} + +// For example, if the exceeded resource is "secrets", it would look like: +// requested: secrets=8, used: secrets=5, limited: secrets=6 +func (e exceeded) String() string { + return fmt.Sprintf("requested: %s=%s, used: %s=%s, limited: %s=%s", + e.name, e.requested, e.name, e.used, e.name, e.limited) +} diff --git a/internal/hrq/hrqreconciler.go b/internal/hrq/hrqreconciler.go new file mode 100644 index 000000000..f9cb8ad69 --- /dev/null +++ b/internal/hrq/hrqreconciler.go @@ -0,0 +1,182 @@ +package hrq + +import ( + "context" + "fmt" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/source" + + api "sigs.k8s.io/hierarchical-namespaces/api/v1alpha2" + "sigs.k8s.io/hierarchical-namespaces/internal/forest" + "sigs.k8s.io/hierarchical-namespaces/internal/hrq/utils" + "sigs.k8s.io/hierarchical-namespaces/internal/logutils" +) + +// RQEnqueuer enqueues ResourceQuota objects in a namespace and its descendants. +// ResourceQuotaReconciler implements the interface so that it can be called by +// the HierarchicalResourceQuotaReconciler when HierarchicalResourceQuota objects +// change. +type RQEnqueuer interface { + // EnqueueSubtree enqueues ResourceQuota objects in a namespace and its descendants. It's used by + // the HRQ reconciler when an HRQ has been updated and all the RQs that implement it need to be + // updated too. + EnqueueSubtree(log logr.Logger, nsnm string) +} + +// HierarchicalResourceQuotaReconciler reconciles a HierarchicalResourceQuota object. It has three key +// purposes: +// 1. Update the in-memory forest with all the limits defined in the HRQ spec, so that they can be +// used during admission control to reject requests. +// 2. Write all the usages from the in-memory forest back to the HRQ status. +// 3. Enqueue all relevant RQs when an HRQ changes. 
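+// +// For illustration only (hypothetical name and values), an HRQ handled by this +// reconciler might look like: +// +//   apiVersion: hnc.x-k8s.io/v1alpha2 +//   kind: HierarchicalResourceQuota +//   metadata: +//     name: team-quota +//     namespace: team-a +//   spec: +//     hard: +//       secrets: "10" +//       pods: "20"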
+type HierarchicalResourceQuotaReconciler struct { + client.Client + Log logr.Logger + + // Forest is the in-memory data structure that is shared with all other reconcilers. + Forest *forest.Forest + // trigger is a channel of event.GenericEvent (see "Watching Channels" in + // https://book-v1.book.kubebuilder.io/beyond_basics/controller_watches.html) + // that is used to enqueue the singleton to trigger reconciliation. + trigger chan event.GenericEvent + // RQEnqueuer enqueues ResourceQuota objects when HierarchicalResourceQuota + // objects change. + RQR RQEnqueuer +} + +// +kubebuilder:rbac:groups=hnc.x-k8s.io,resources=hierarchicalresourcequotas,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=hnc.x-k8s.io,resources=hierarchicalresourcequotas/status,verbs=get;update;patch + +func (r *HierarchicalResourceQuotaReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := logutils.WithRID(r.Log).WithValues("trigger", req.NamespacedName) + + inst := &api.HierarchicalResourceQuota{} + err := r.Get(ctx, req.NamespacedName, inst) + if err != nil && !errors.IsNotFound(err) { + log.Error(err, "Couldn't read the object") + return ctrl.Result{}, err + } + + // If an object is deleted, assign the name and namespace of the request to + // the object so that they can be used to sync with the forest. + if isDeleted(inst) { + inst.ObjectMeta.Name = req.NamespacedName.Name + inst.ObjectMeta.Namespace = req.NamespacedName.Namespace + } + oldUsages := inst.Status.Used + + // Sync with forest to update the HRQ or the in-memory forest. + updatedInst, updatedForest := r.syncWithForest(inst) + + // If an object is deleted, we will not write anything back to the api server. + if !isDeleted(inst) && updatedInst { + log.V(1).Info("Updating HRQ", "oldUsages", oldUsages, "newUsages", inst.Status.Used, "limits", inst.Spec.Hard) + if err := r.Update(ctx, inst); err != nil { + log.Error(err, "Couldn't write the object") + return ctrl.Result{}, err + } + } + + // Enqueue ResourceQuota objects in the current namespace and its descendants + // if hard limits of the HRQ object are changed or first-time synced. This + // includes the case when the object is deleted and all its hard limits are + // removed as a result. + if updatedForest { + log.Info("HRQ hard limits updated", "limits", inst.Spec.Hard) + reason := fmt.Sprintf("Updated hard limits in subtree %q", inst.GetNamespace()) + r.RQR.EnqueueSubtree(log.WithValues("reason", reason), inst.GetNamespace()) + } + + return ctrl.Result{}, nil +} + +// syncWithForest syncs resource limits and resource usages with the in-memory +// forest. The first return value is true if the HRQ object is updated; the +// second return value is true if the forest is updated. +func (r *HierarchicalResourceQuotaReconciler) syncWithForest(inst *api.HierarchicalResourceQuota) (bool, bool) { + r.Forest.Lock() + defer r.Forest.Unlock() + + updatedInst := false + // Update HRQ limits if they are different in the spec and status. + if !utils.Equals(inst.Spec.Hard, inst.Status.Hard) { + inst.Status.Hard = inst.Spec.Hard + updatedInst = true + } + oldUsages := inst.Status.Used + + // Update the forest if the HRQ limits are changed or first-time synced. + updatedForest := r.syncLimits(inst) + + // Update HRQ usages if they are changed. 
+ r.syncUsages(inst) + updatedInst = updatedInst || !utils.Equals(oldUsages, inst.Status.Used) + + return updatedInst, updatedForest +} + +// syncLimits syncs in-memory resource limits with the limits specified in the +// spec. Returns true if there's a difference. +func (r *HierarchicalResourceQuotaReconciler) syncLimits(inst *api.HierarchicalResourceQuota) bool { + ns := r.Forest.Get(inst.GetNamespace()) + if isDeleted(inst) { + ns.RemoveLimits(inst.GetName()) + return true + } + return ns.UpdateLimits(inst.GetName(), inst.Spec.Hard) +} + +// syncUsages updates resource usage status based on in-memory resource usages. +func (r *HierarchicalResourceQuotaReconciler) syncUsages(inst *api.HierarchicalResourceQuota) { + // If the object is deleted, there is no need to update its usage status. + if isDeleted(inst) { + return + } + ns := r.Forest.Get(inst.GetNamespace()) + + // Get all usage counts in this namespace + usages := ns.GetSubtreeUsages() + + // Get the names of all resource types being limited by this particular HRQ + nms := utils.ResourceNames(inst.Spec.Hard) + + // Filter the usages to only include the resource types being limited by this HRQ and write those + // usages back to the HRQ status. + inst.Status.Used = utils.Mask(usages, nms) +} + +func isDeleted(inst *api.HierarchicalResourceQuota) bool { + return inst.GetCreationTimestamp() == (metav1.Time{}) +} + +// Enqueue enqueues a specific HierarchicalResourceQuota object to trigger the reconciliation of the +// object for a given reason. This occurs in a goroutine so the caller doesn't block; since the +// reconciler is never garbage-collected, this is safe. +// +// It's called by the RQ reconciler when an RQ's status has changed, which might indicate that the +// HRQ's status (specifically its usage) needs to be changed as well. +func (r *HierarchicalResourceQuotaReconciler) Enqueue(log logr.Logger, reason, ns, nm string) { + go func() { + log.V(1).Info("Enqueuing for reconciliation", "reason", reason, "enqueuedName", nm, "enqueuedNamespace", ns) + // The watch handler doesn't care about anything except the metadata. + inst := &api.HierarchicalResourceQuota{} + inst.ObjectMeta.Name = nm + inst.ObjectMeta.Namespace = ns + r.trigger <- event.GenericEvent{Object: inst} + }() +} + +func (r *HierarchicalResourceQuotaReconciler) SetupWithManager(mgr ctrl.Manager) error { + r.trigger = make(chan event.GenericEvent) + return ctrl.NewControllerManagedBy(mgr). + For(&api.HierarchicalResourceQuota{}). + Watches(&source.Channel{Source: r.trigger}, &handler.EnqueueRequestForObject{}). + Complete(r) +} diff --git a/internal/hrq/hrqreconciler_test.go b/internal/hrq/hrqreconciler_test.go new file mode 100644 index 000000000..3c996590e --- /dev/null +++ b/internal/hrq/hrqreconciler_test.go @@ -0,0 +1,248 @@ +package hrq_test + +import ( + "context" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + api "sigs.k8s.io/hierarchical-namespaces/api/v1alpha2" + "sigs.k8s.io/hierarchical-namespaces/internal/hrq" + . 
"sigs.k8s.io/hierarchical-namespaces/internal/integtest" +) + +func TestHRQReconciler(t *testing.T) { + HNCRun(t, "HRQ Suite") +} + +var _ = BeforeSuite(HNCBeforeSuite) +var _ = AfterSuite(HNCAfterSuite) + +const ( + fooHRQName = "foo-quota" + barHRQName = "bar-quota" + bazHRQName = "baz-quota" +) + +var _ = Describe("HRQ reconciler tests", func() { + ctx := context.Background() + + var ( + fooName string + barName string + bazName string + ) + + BeforeEach(func() { + fooName = CreateNS(ctx, "foo") + barName = CreateNS(ctx, "bar") + bazName = CreateNS(ctx, "baz") + + barHier := NewHierarchy(barName) + barHier.Spec.Parent = fooName + UpdateHierarchy(ctx, barHier) + + bazHier := NewHierarchy(bazName) + bazHier.Spec.Parent = fooName + UpdateHierarchy(ctx, bazHier) + + Eventually(HasChild(ctx, fooName, barName)).Should(Equal(true)) + Eventually(HasChild(ctx, fooName, bazName)).Should(Equal(true)) + }) + + AfterEach(func() { + cleanupHRQObjects(ctx) + }) + + It("should set limits in status correctly after creating an HRQ object", func() { + setHRQ(ctx, fooHRQName, fooName, "secrets", "6", "pods", "3") + + Eventually(getHRQStatus(ctx, fooName, fooHRQName)).Should(equalRL("secrets", "6", "pods", "3")) + }) + + It("should update limits in status correctly after updating limits in spec", func() { + setHRQ(ctx, fooHRQName, fooName, "secrets", "6", "pods", "3") + + Eventually(getHRQStatus(ctx, fooName, fooHRQName)).Should(equalRL("secrets", "6", "pods", "3")) + + // Change limits for secrets from 6 to 50 + setHRQ(ctx, fooHRQName, fooName, "secrets", "50", "pods", "3") + + Eventually(getHRQStatus(ctx, fooName, fooHRQName)).Should(equalRL("secrets", "50", "pods", "3")) + }) + + It("should update usages in status correctly after consuming a resource", func() { + setHRQ(ctx, fooHRQName, fooName, "secrets", "6", "pods", "3") + setHRQ(ctx, barHRQName, barName, "secrets", "100", "cpu", "50") + setHRQ(ctx, bazHRQName, bazName, "pods", "1") + // Simulate the K8s ResourceQuota controller to update usages. + updateRQUsage(ctx, fooName, "secrets", "0", "pods", "0") + updateRQUsage(ctx, barName, "secrets", "0", "cpu", "0", "pods", "0") + updateRQUsage(ctx, bazName, "secrets", "0", "pods", "0") + + // Increase secret counts from 0 to 10 in baz and verify that the usage is + // increased in foo's HRQ but not bar's (not an ancestor of baz) or baz' + // (secrets is not limited in baz' HRQ). + updateRQUsage(ctx, bazName, "secrets", "10") + Eventually(getHRQUsed(ctx, fooName, fooHRQName)).Should(equalRL("secrets", "10", "pods", "0")) + Eventually(getHRQUsed(ctx, barName, barHRQName)).Should(equalRL("secrets", "0", "cpu", "0")) + Eventually(getHRQUsed(ctx, bazName, bazHRQName)).Should(equalRL("pods", "0")) + + // Increase secret counts from 10 to 11 in baz and verify that the usage is + // increased in foo's HRQ. bar's (not an ancestor of baz) and baz' (secrets + // is not limited in baz' HRQ) remain unchanged. + updateRQUsage(ctx, bazName, "secrets", "11") + Eventually(getHRQUsed(ctx, fooName, fooHRQName)).Should(equalRL("secrets", "11", "pods", "0")) + Eventually(getHRQUsed(ctx, barName, barHRQName)).Should(equalRL("secrets", "0", "cpu", "0")) + Eventually(getHRQUsed(ctx, bazName, bazHRQName)).Should(equalRL("pods", "0")) + + // Decrease secret counts from 10 to 0 in baz and ensure all usages are gone. 
+ updateRQUsage(ctx, bazName, "secrets", "0") + Eventually(getHRQUsed(ctx, fooName, fooHRQName)).Should(equalRL("secrets", "0", "pods", "0")) + Eventually(getHRQUsed(ctx, barName, barHRQName)).Should(equalRL("secrets", "0", "cpu", "0")) + Eventually(getHRQUsed(ctx, bazName, bazHRQName)).Should(equalRL("pods", "0")) + }) + + It("should update limits in ResourceQuota objects correctly after deleting an HRQ object", func() { + setHRQ(ctx, fooHRQName, fooName, "secrets", "6", "pods", "3") + setHRQ(ctx, barHRQName, barName, "secrets", "100", "cpu", "50") + + // The RQ in foo should equal its HRQ, while in bar it should be the intersection + Eventually(getRQSpec(ctx, fooName)).Should(equalRL("secrets", "6", "pods", "3")) + Eventually(getRQSpec(ctx, barName)).Should(equalRL("secrets", "6", "pods", "3", "cpu", "50")) + + // After deleting foo's HRQ, bar's RQ should be the same as its HRQ on its own + deleteHierarchicalResourceQuota(ctx, fooName, fooHRQName) + Eventually(getRQSpec(ctx, fooName)).Should(equalRL()) + Eventually(getRQSpec(ctx, barName)).Should(equalRL("secrets", "100", "cpu", "50")) + }) + + XIt("should recover if the subtree usages are out of sync in the forest and in reality", func() { + setHRQ(ctx, fooHRQName, fooName, "secrets", "6", "pods", "3") + setHRQ(ctx, barHRQName, barName, "secrets", "100", "cpu", "50") + // Simulate the K8s ResourceQuota controller to update usages. + updateRQUsage(ctx, fooName, "secrets", "0", "pods", "0") + updateRQUsage(ctx, barName, "secrets", "0", "cpu", "0", "pods", "0") + + // Consume 10 secrets in bar. + updateRQUsage(ctx, barName, "secrets", "10") + + // Get the expected bar local usages and foo subtree usages in the forest. + Eventually(forestGetLocalUsages(barName)).Should(equalRL("secrets", "10", "cpu", "0", "pods", "0")) + Eventually(forestGetSubtreeUsages(fooName)).Should(equalRL("secrets", "10", "pods", "0")) + + // Introduce a bug to make the foo subtree usages in the forest out-of-sync. + // 10 secrets are consumed in reality while the forest has 9. + forestSetSubtreeUsages(fooName, "secrets", "9", "pods", "0") + // Verify the subtree secret usages is updated from 10 to 9. + Eventually(forestGetSubtreeUsages(fooName)).Should(equalRL("secrets", "9", "pods", "0")) + + // Now consume 1 more secret (11 compared with previous 10). + updateRQUsage(ctx, barName, "secrets", "11") + + // We should eventually get 11 secrets subtree usages in the forest. + // Note: without the recalculation of subtree usages from scratch in the HRQ + // reconciler, we would get an incremental update to 10 according to the + // calculation in "UseResources()": 9 + (11 - 10), which is still out-of-sync. + Eventually(forestGetSubtreeUsages(fooName), HRQSyncInterval).Should(equalRL("secrets", "11", "pods", "0")) + Eventually(getHRQUsed(ctx, fooName, fooHRQName)).Should(equalRL("secrets", "11", "pods", "0")) + }) + + It("should enqueue subtree even if the newly created HRQ has the same `spec.hard` and `status.hard`", func() { + // Verify RQ singletons in the subtree are not created before the test. + Eventually(getRQSpec(ctx, barName)).Should(equalRL()) + Eventually(getRQSpec(ctx, bazName)).Should(equalRL()) + // Create an HRQ with the same `spec.hard` and `status.hard`. + setHRQwithSameStatus(ctx, fooHRQName, fooName, "secrets", "6", "pods", "3") + // Verify the subtree is enqueued to update. 
+ Eventually(getRQSpec(ctx, barName)).Should(equalRL("secrets", "6", "pods", "3")) + Eventually(getRQSpec(ctx, bazName)).Should(equalRL("secrets", "6", "pods", "3")) + }) +}) + +func forestSetSubtreeUsages(ns string, args ...string) { + TestForest.Lock() + defer TestForest.Unlock() + TestForest.Get(ns).UpdateSubtreeUsages(argsToResourceList(0, args...)) +} + +func getHRQStatus(ctx context.Context, ns, nm string) func() v1.ResourceList { + return func() v1.ResourceList { + nsn := types.NamespacedName{Namespace: ns, Name: nm} + inst := &api.HierarchicalResourceQuota{} + if err := K8sClient.Get(ctx, nsn, inst); err != nil { + return nil + } + return inst.Status.Hard + } +} + +func getHRQUsed(ctx context.Context, ns, nm string) func() v1.ResourceList { + return func() v1.ResourceList { + nsn := types.NamespacedName{Namespace: ns, Name: nm} + inst := &api.HierarchicalResourceQuota{} + if err := K8sClient.Get(ctx, nsn, inst); err != nil { + return nil + } + return inst.Status.Used + } +} + +func getRQSpec(ctx context.Context, ns string) func() v1.ResourceList { + return func() v1.ResourceList { + nsn := types.NamespacedName{Namespace: ns, Name: hrq.ResourceQuotaSingleton} + inst := &v1.ResourceQuota{} + if err := K8sClient.Get(ctx, nsn, inst); err != nil { + return nil + } + return inst.Spec.Hard + } +} + +func deleteHierarchicalResourceQuota(ctx context.Context, ns, nm string) { + inst := &api.HierarchicalResourceQuota{} + inst.SetNamespace(ns) + inst.SetName(nm) + EventuallyWithOffset(1, func() error { + return K8sClient.Delete(ctx, inst) + }).Should(Succeed()) +} + +// setHRQwithSameStatus creates or replaces an existing HRQ with the given +// resource limits. Any existing spec and status are replaced by this function. +func setHRQwithSameStatus(ctx context.Context, nm, ns string, args ...string) { + nsn := types.NamespacedName{Namespace: ns, Name: nm} + hrq := &api.HierarchicalResourceQuota{ + ObjectMeta: metav1.ObjectMeta{ + Name: nm, + Namespace: ns, + }, + } + EventuallyWithOffset(1, func() error { + err := K8sClient.Get(ctx, nsn, hrq) + if errors.IsNotFound(err) { + return nil + } + return err + }).Should(Succeed(), "While checking if HRQ %s/%s already exists", ns, nm) + + hrq.Spec.Hard = argsToResourceList(1, args...) + hrq.Status.Hard = argsToResourceList(1, args...) + + EventuallyWithOffset(1, func() error { + if hrq.CreationTimestamp.IsZero() { + err := K8sClient.Create(ctx, hrq) + if err == nil { + createdHRQs = append(createdHRQs, hrq) + } + return err + } else { + return K8sClient.Update(ctx, hrq) + } + }).Should(Succeed(), "While updating HRQ; %+v", hrq) +} diff --git a/internal/hrq/hrqvalidator.go b/internal/hrq/hrqvalidator.go new file mode 100644 index 000000000..4447d0a68 --- /dev/null +++ b/internal/hrq/hrqvalidator.go @@ -0,0 +1,128 @@ +package hrq + +import ( + "context" + "fmt" + "math/rand" + "strings" + + "github.com/go-logr/logr" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + api "sigs.k8s.io/hierarchical-namespaces/api/v1alpha2" + "sigs.k8s.io/hierarchical-namespaces/internal/logutils" +) + +const ( + // HRQServingPath is where the validator will run. Must be kept in sync with the + // kubebuilder markers below. + HRQServingPath = "/validate-hnc-x-k8s-io-v1alpha2-hrq" + + // validatingResourceQuotaPrefix is the prefix of the name of the resource + // quota that is created in the dry-run to validate the resource types. 
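+	// The generated name is this prefix plus a random hex suffix (see
+	// createRQName), e.g. something like
+	// "hnc-x-k8s-io-hrq-validating-rq-0a1b2c3d4e5f60718293".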
+ validatingResourceQuotaPrefix = "hnc-x-k8s-io-hrq-validating-rq-" + + // fieldInfo is the field that has invalid resource types. + fieldInfo = ".spec.hard" +) + +// Note: the validating webhook FAILS CLOSE. This means that if the webhook goes +// down, no further changes are allowed. +// +// +kubebuilder:webhook:admissionReviewVersions=v1;v1beta1,path=/validate-hnc-x-k8s-io-v1alpha2-hrq,mutating=false,failurePolicy=fail,groups="hnc.x-k8s.io",resources=hierarchicalresourcequotas,sideEffects=None,verbs=create;update,versions=v1alpha2,name=hrq.hnc.x-k8s.io + +type HRQ struct { + server serverClient + Log logr.Logger + decoder *admission.Decoder +} + +// serverClient represents the checks that should typically be performed against +// the apiserver, but need to be stubbed out during unit testing. +type serverClient interface { + // validate validates if the resource types in the RQ are valid quota types. + validate(ctx context.Context, inst *v1.ResourceQuota) error +} + +func (v *HRQ) Handle(ctx context.Context, req admission.Request) admission.Response { + log := logutils.WithRID(v.Log).WithValues("nm", req.Name, "nnm", req.Namespace) + + inst := &api.HierarchicalResourceQuota{} + if err := v.decoder.Decode(req, inst); err != nil { + log.Error(err, "Couldn't decode request") + return deny(metav1.StatusReasonBadRequest, err.Error()) + } + + resp := v.handle(ctx, log, inst) + if resp.Allowed { + log.V(1).Info("Allowed", "message", resp.Result.Message) + } else { + log.Info("Denied", "code", resp.Result.Code, "reason", resp.Result.Reason, "message", resp.Result.Message) + } + return resp +} + +// Validate if the resources in the spec are valid resource types for quota. We +// will dry-run writing the HRQ resources to an RQ to see if we get an error +// from the apiserver or not. If yes, we will deny the HRQ request. +func (v *HRQ) handle(ctx context.Context, log logr.Logger, inst *api.HierarchicalResourceQuota) admission.Response { + rq := &v1.ResourceQuota{} + rq.Namespace = inst.Namespace + rq.Name = createRQName() + rq.Spec.Hard = inst.Spec.Hard + log.V(1).Info("Validating resource types in the HRQ spec by writing them to a resource quota on apiserver", "limits", inst.Spec.Hard) + if err := v.server.validate(ctx, rq); err != nil { + return denyInvalidField(fieldInfo, ignoreRQErr(err.Error())) + } + + return allow("") +} + +func createRQName() string { + suffix := make([]byte, 10) + rand.Read(suffix) + return fmt.Sprintf("%s%x", validatingResourceQuotaPrefix, suffix) +} + +// ignoreRQErr ignores the error message on the resource quota object and only +// keeps the field info, e.g. "[spec.hard[roles]: Invalid value ..." instead of +// "ResourceQuota "xxx" is invalid: [spec.hard[roles]: Invalid value ...". +func ignoreRQErr(err string) string { + return strings.TrimPrefix(err, strings.Split(err, ":")[0]+": ") +} + +// realClient implements serverClient, and is not used during unit tests. +type realClient struct { + client client.Client +} + +// validate creates a resource quota on the apiserver if it does not exist. +// Otherwise, it updates existing one on the apiserver. Since our real client is +// a dry-run client, this operation will be a dry-run too. 
+func (c *realClient) validate(ctx context.Context, inst *v1.ResourceQuota) error { + if inst.CreationTimestamp.IsZero() { + if err := c.client.Create(ctx, inst); err != nil { + return err + } + return nil + } + + if err := c.client.Update(ctx, inst); err != nil { + return err + } + return nil +} + +func (v *HRQ) InjectClient(c client.Client) error { + // Create a dry-run client. + v.server = &realClient{client: client.NewDryRunClient(c)} + return nil +} + +func (r *HRQ) InjectDecoder(d *admission.Decoder) error { + r.decoder = d + return nil +} diff --git a/internal/hrq/hrqvalidator_test.go b/internal/hrq/hrqvalidator_test.go new file mode 100644 index 000000000..b74a93515 --- /dev/null +++ b/internal/hrq/hrqvalidator_test.go @@ -0,0 +1,63 @@ +package hrq + +import ( + "context" + "errors" + "testing" + + v1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + api "sigs.k8s.io/hierarchical-namespaces/api/v1alpha2" +) + +func TestHRQSpec(t *testing.T) { + l := zap.New() + tests := []struct { + name string + hrqLimit []string + fail bool + msg string + }{ + { + name: "allow one standard resource", + hrqLimit: []string{"configmaps", "1"}, + }, { + name: "allow multiple standard resources", + hrqLimit: []string{"configmaps", "1", "secrets", "1"}, + }, { + name: "deny non-standard resource", + hrqLimit: []string{"foobar", "1"}, + fail: true, + msg: "'foobar' is not a valid quota type", + }, { + name: "deny a mix of standard and non-standard resources", + hrqLimit: []string{"foobar", "1", "configmaps", "1"}, + fail: true, + msg: "'foobar' is not a valid quota type", + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + hrq := &api.HierarchicalResourceQuota{} + hrq.Spec.Hard = argsToResourceList(tc.hrqLimit...) + v := HRQ{server: fakeServer("")} + got := v.handle(context.Background(), l, hrq) + if got.AdmissionResponse.Allowed == tc.fail || got.Result.Message != tc.msg { + t.Errorf("unexpected admission response. Expected: %t, %s; Got: %t, %s", !tc.fail, tc.msg, got.AdmissionResponse.Allowed, got.Result.Message) + } + }) + } +} + +// fakeServer implements serverClient. +type fakeServer string + +// validate returns an error if the RQ has invalid quota types, "foobar" +// specifically in this test. Otherwise, return no error. +func (f fakeServer) validate(ctx context.Context, inst *v1.ResourceQuota) error { + if _, ok := inst.Spec.Hard["foobar"]; ok { + return errors.New("'foobar' is not a valid quota type") + } + return nil +} diff --git a/internal/hrq/rqreconciler.go b/internal/hrq/rqreconciler.go new file mode 100644 index 000000000..6cfceea69 --- /dev/null +++ b/internal/hrq/rqreconciler.go @@ -0,0 +1,296 @@ +package hrq + +import ( + "context" + "fmt" + + "github.com/go-logr/logr" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/source" + + api "sigs.k8s.io/hierarchical-namespaces/api/v1alpha2" + "sigs.k8s.io/hierarchical-namespaces/internal/forest" + "sigs.k8s.io/hierarchical-namespaces/internal/hrq/utils" + "sigs.k8s.io/hierarchical-namespaces/internal/logutils" + "sigs.k8s.io/hierarchical-namespaces/internal/metadata" +) + +const ( + // The name of the ResourceQuota object created by the + // ResourceQuotaReconciler in a namespace. 
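+	// With HNC's default MetaGroup this is expected to resolve to
+	// "hrq.hnc.x-k8s.io".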
+ ResourceQuotaSingleton = "hrq." + api.MetaGroup +) + +// HRQEnqueuer enqueues HierarchicalResourceQuota objects. +// HierarchicalResourceQuotaReconciler implements the interface so that it can +// be called to update HierarchicalResourceQuota objects. +type HRQEnqueuer interface { + // Enqueue enqueues HierarchicalResourceQuota objects of the given names in + // the give namespaces. + Enqueue(log logr.Logger, reason, nsnm, qnm string) +} + +// ResourceQuotaReconciler reconciles singleton RQ per namespace, which represents the HRQ in this +// and any ancestor namespaces. The reconciler is called on two occasions: +// 1. The HRQ in this or an ancestor namespace has changed. This can either be because an HRQ has +// been modified (in which case, the HRQR will call EnqueueSubtree) or because the ancestors of +// this namespace have changed (in which case, the NSR will call OnChangeNamespace). Either way, +// this will typically result in the limits being updated. +// 2. The K8s apiserver has modified the usage of this RQ, typically in response to a resource being +// _released_ but it's also possible to observe increasing usage here as well (see go/hnc-hrq for +// details). In such cases, we basically just need to enqueue the HRQs in all ancestor namespaces +// so that they can update their usages as well. +type ResourceQuotaReconciler struct { + client.Client + eventRecorder record.EventRecorder + Log logr.Logger + + // Forest is the in-memory data structure that is shared with all other reconcilers. + Forest *forest.Forest + + // trigger is a channel of event.GenericEvent (see "Watching Channels" in + // https://book-v1.book.kubebuilder.io/beyond_basics/controller_watches.html) + // that is used to enqueue the singleton to trigger reconciliation. + trigger chan event.GenericEvent + HRQR HRQEnqueuer +} + +// +kubebuilder:rbac:groups="",resources=resourcequotas,verbs=get;list;watch;create;update;patch;delete + +func (r *ResourceQuotaReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + // The reconciler only reconciles ResourceQuota objects created by itself. + if req.NamespacedName.Name != ResourceQuotaSingleton { + return ctrl.Result{}, nil + } + + ns := req.NamespacedName.Namespace + log := logutils.WithRID(r.Log).WithValues("trigger", req.NamespacedName) + + inst, err := r.getSingleton(ctx, ns) + if err != nil { + log.Error(err, "Couldn't read singleton") + return ctrl.Result{}, err + } + + // Update our limits, and enqueue any related HRQs if our usage has changed. + updated := r.syncWithForest(log, inst) + + // Delete the obsolete singleton and early exit if the new limits are empty. + if inst.Spec.Hard == nil { + return ctrl.Result{}, r.deleteSingleton(ctx, log, inst) + } + + // We only need to write back to the apiserver if the spec has changed + if updated { + if err := r.writeSingleton(ctx, log, inst); err != nil { + return ctrl.Result{}, err + } + } + + return ctrl.Result{}, nil +} + +// getSingleton returns the singleton if it exists, or creates a default one if it doesn't. +func (r *ResourceQuotaReconciler) getSingleton(ctx context.Context, + ns string) (*v1.ResourceQuota, error) { + nnm := types.NamespacedName{Namespace: ns, Name: ResourceQuotaSingleton} + inst := &v1.ResourceQuota{} + if err := r.Get(ctx, nnm, inst); err != nil { + if !errors.IsNotFound(err) { + return nil, err + } + // It doesn't exist - initialize it to a reasonable initial value. 
+		inst.ObjectMeta.Name = ResourceQuotaSingleton
+		inst.ObjectMeta.Namespace = ns
+	}
+
+	return inst, nil
+}
+
+// writeSingleton creates the singleton on the apiserver if it does not exist.
+// Otherwise, it updates the existing singleton on the apiserver.
+func (r *ResourceQuotaReconciler) writeSingleton(ctx context.Context,
+	log logr.Logger, inst *v1.ResourceQuota) error {
+	if inst.CreationTimestamp.IsZero() {
+		// Set the cleanup label so the singleton can be deleted by selector later.
+		metadata.SetLabel(inst, api.HRQLabelCleanup, "true")
+		// Add a non-propagate exception annotation to the instance so that it won't
+		// be overwritten by ancestors when the ResourceQuota type is configured in
+		// Propagate mode in HNCConfig.
+		metadata.SetAnnotation(inst, api.NonPropagateAnnotation, "true")
+		log.V(1).Info("Creating a singleton on apiserver", "limits", inst.Spec.Hard)
+		if err := r.Create(ctx, inst); err != nil {
+			r.reportHRQEvent(ctx, log, inst, err)
+			return fmt.Errorf("while creating rq: %w", err)
+		}
+		return nil
+	}
+
+	log.V(1).Info("Updating the singleton on apiserver", "oldLimits", inst.Status.Hard, "newLimits", inst.Spec.Hard)
+	if err := r.Update(ctx, inst); err != nil {
+		r.reportHRQEvent(ctx, log, inst, err)
+		return fmt.Errorf("while updating rq: %w", err)
+	}
+	return nil
+}
+
+// reportHRQEvent reports events on all ancestor HRQs if the error is not nil.
+func (r *ResourceQuotaReconciler) reportHRQEvent(ctx context.Context, log logr.Logger, inst *v1.ResourceQuota, err error) {
+	if err == nil {
+		return
+	}
+
+	for _, nnm := range r.getAncestorHRQs(inst) {
+		hrq := &api.HierarchicalResourceQuota{}
+		// Since the Event() func requires the full object as an input, we will
+		// have to get the HRQ from the apiserver here.
+		if err := r.Get(ctx, nnm, hrq); err != nil {
+			// We just log the error and continue here because this function is only
+			// called when the reconciler has already decided to return an error and retry.
+			log.Error(err, "While trying to generate an event on the instance")
+			continue
+		}
+		msg := fmt.Sprintf("could not create/update lower-level ResourceQuota in namespace %q: %s", inst.Namespace, ignoreRQErr(err.Error()))
+		r.eventRecorder.Event(hrq, "Warning", api.EventCannotWriteResourceQuota, msg)
+	}
+}
+
+// There may be a race condition here since the forest lock is not held across
+// the whole reconcile, so the forest may have changed by the time the caller
+// uses this result. However, this should be fine: the caller only uses the
+// result to generate events and will return an error for this reconciler to
+// retry, so we will eventually get the right ancestor HRQs.
+func (r *ResourceQuotaReconciler) getAncestorHRQs(inst *v1.ResourceQuota) []types.NamespacedName {
+	r.Forest.Lock()
+	defer r.Forest.Unlock()
+
+	names := []types.NamespacedName{}
+	for _, nsnm := range r.Forest.Get(inst.Namespace).AncestryNames() {
+		for _, hrqnm := range r.Forest.Get(nsnm).HRQNames() {
+			names = append(names, types.NamespacedName{Namespace: nsnm, Name: hrqnm})
+		}
+	}
+
+	return names
+}
+
+// deleteSingleton deletes the singleton on the apiserver if it exists.
+// Otherwise, it does nothing.
+func (r *ResourceQuotaReconciler) deleteSingleton(ctx context.Context, log logr.Logger, inst *v1.ResourceQuota) error {
+	// Early exit if the singleton doesn't already exist.
+	if inst.CreationTimestamp.IsZero() {
+		return nil
+	}
+
+	log.V(1).Info("Deleting obsolete empty singleton on apiserver")
+	if err := r.Delete(ctx, inst); err != nil {
+		return fmt.Errorf("while deleting rq: %w", err)
+	}
+	return nil
+}
+
+// syncWithForest syncs the limits and resource usages of the in-memory `hrq`
+// objects of the current namespace and its ancestors with the ResourceQuota
+// object of the namespace. Specifically, it performs the following tasks:
+//   - Syncs `ResourceQuota.Spec.Hard` with `hrq.hard` of the current namespace
+//     and its ancestors.
+//   - Updates `hrq.used.local` of the current namespace and `hrq.used.subtree`
+//     of the current namespace and its ancestors based on `ResourceQuota.Status.Used`.
+func (r *ResourceQuotaReconciler) syncWithForest(log logr.Logger,
+	inst *v1.ResourceQuota) bool {
+	r.Forest.Lock()
+	defer r.Forest.Unlock()
+	ns := r.Forest.Get(inst.ObjectMeta.Namespace)
+
+	// Determine if the RQ's spec needs to be updated
+	updated := r.syncResourceLimits(ns, inst)
+
+	// Since all ResourceQuota usage changes will be caught by this reconciler (no
+	// matter whether they come from the K8s ResourceQuota admission controller or
+	// the K8s ResourceQuota controller), we consolidate all the affected HRQ
+	// enqueuing here.
+	//
+	// Note that we say UseResources, and not TryUseResources, which means that even if we're over
+	// quota, the resource counts will still be increased. This is because by the time the
+	// reconciler's running, the resources truly have been consumed so we just need to (accurately)
+	// reflect that we're over quota.
+	log.V(1).Info("RQ usages may have updated", "oldUsages", ns.GetLocalUsages(), "newUsages", inst.Status.Used)
+	ns.UseResources(inst.Status.Used)
+	for _, nsnm := range ns.AncestryNames() {
+		for _, qnm := range r.Forest.Get(nsnm).HRQNames() {
+			r.HRQR.Enqueue(log, "subtree resource usages may have changed", nsnm, qnm)
+		}
+	}
+
+	return updated
+}
+
+// syncResourceLimits updates `ResourceQuota.Spec.Hard` to be the union of the resource types in
+// `hrq.hard` of the namespace and its ancestors. If there is more than one limit for a resource
+// type in that union, only the strictest limit is set in `ResourceQuota.Spec.Hard`.
+//
+// Returns true if any changes were made, false otherwise.
+func (r *ResourceQuotaReconciler) syncResourceLimits(ns *forest.Namespace, inst *v1.ResourceQuota) bool {
+	// Get the list of all resources that need to be restricted in this namespace, as well as the
+	// maximum possible limit for each resource.
+	l := ns.Limits()
+
+	// Check to see if there's been any change and update if so.
+	if utils.Equals(l, inst.Spec.Hard) {
+		return false
+	}
+	inst.Spec.Hard = l
+	return true
+}
+
+// OnChangeNamespace enqueues the singleton in a specific namespace to trigger the reconciliation of
+// the singleton for a given reason. This occurs in a goroutine so the caller doesn't block; since
+// the reconciler is never garbage-collected, this is safe.
+func (r *ResourceQuotaReconciler) OnChangeNamespace(log logr.Logger, ns *forest.Namespace) {
+	nsnm := ns.Name()
+	go func() {
+		// The watch handler doesn't care about anything except the metadata.
+		inst := &v1.ResourceQuota{}
+		inst.ObjectMeta.Name = ResourceQuotaSingleton
+		inst.ObjectMeta.Namespace = nsnm
+		r.trigger <- event.GenericEvent{Object: inst}
+	}()
+}
+
+// EnqueueSubtree enqueues ResourceQuota objects of the given namespace and its descendants.
+//
+// The method is robust against race conditions.
The method holds the forest lock so that the +// in-memory forest (specifically the descendants of the namespace that we record in-memory) cannot +// be changed while enqueueing ResourceQuota objects in the namespace and its descendants. +// +// If a new namespace becomes a descendant just after we acquire the lock, the ResourceQuota object +// in the new namespace will be enqueued by the NamespaceReconciler, instead of the +// ResourceQuotaReconciler. By contrast, if a namespace is *removed* as a descendant, we'll still +// call the reconciler but it will have no effect (reconcilers can safely be called multiple times, +// even if the object has been deleted). +func (r *ResourceQuotaReconciler) EnqueueSubtree(log logr.Logger, nsnm string) { + r.Forest.Lock() + defer r.Forest.Unlock() + + nsnms := r.Forest.Get(nsnm).DescendantNames() + nsnms = append(nsnms, nsnm) + for _, nsnm := range nsnms { + r.OnChangeNamespace(log, r.Forest.Get(nsnm)) + } +} + +func (r *ResourceQuotaReconciler) SetupWithManager(mgr ctrl.Manager) error { + // This field will be shown as source.component=hnc.x-k8s.io in events. + r.eventRecorder = mgr.GetEventRecorderFor(api.MetaGroup) + r.trigger = make(chan event.GenericEvent) + + return ctrl.NewControllerManagedBy(mgr). + For(&v1.ResourceQuota{}). + Watches(&source.Channel{Source: r.trigger}, &handler.EnqueueRequestForObject{}). + Complete(r) +} diff --git a/internal/hrq/rqreconciler_test.go b/internal/hrq/rqreconciler_test.go new file mode 100644 index 000000000..e4204f648 --- /dev/null +++ b/internal/hrq/rqreconciler_test.go @@ -0,0 +1,333 @@ +package hrq_test + +import ( + "context" + "fmt" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + api "sigs.k8s.io/hierarchical-namespaces/api/v1alpha2" + "sigs.k8s.io/hierarchical-namespaces/internal/hrq" + "sigs.k8s.io/hierarchical-namespaces/internal/hrq/utils" + . "sigs.k8s.io/hierarchical-namespaces/internal/integtest" + "sigs.k8s.io/hierarchical-namespaces/internal/metadata" +) + +// createdHRQs keeps track of HierarchicalResourceQuota objects created in a +// test case so that we can clean them up properly after the test case. 
+var createdHRQs = []*api.HierarchicalResourceQuota{} + +var _ = Describe("RQ reconciler tests", func() { + ctx := context.Background() + + var ( + fooName string + barName string + bazName string + ) + + BeforeEach(func() { + fooName = CreateNS(ctx, "foo") + barName = CreateNS(ctx, "bar") + bazName = CreateNS(ctx, "baz") + + barHier := NewHierarchy(barName) + barHier.Spec.Parent = fooName + UpdateHierarchy(ctx, barHier) + + bazHier := NewHierarchy(bazName) + bazHier.Spec.Parent = fooName + UpdateHierarchy(ctx, bazHier) + + Eventually(HasChild(ctx, fooName, barName)).Should(Equal(true)) + Eventually(HasChild(ctx, fooName, bazName)).Should(Equal(true)) + }) + + AfterEach(func() { + cleanupHRQObjects(ctx) + }) + + It("should set hard limits for each namespace", func() { + setHRQ(ctx, fooHRQName, fooName, "secrets", "6", "pods", "3") + setHRQ(ctx, barHRQName, barName, "secrets", "100", "cpu", "50") + setHRQ(ctx, bazHRQName, bazName, "pods", "1") + + Eventually(getRQSpec(ctx, fooName)).Should(equalRL("secrets", "6", "pods", "3")) + // Even though bar allows 100 secrets, the secret limit in the ResourceQuota object in bar + // should be 6, which is the strictest limit among bar and its ancestor (i.e., foo). + Eventually(getRQSpec(ctx, barName)).Should(equalRL("secrets", "6", "pods", "3", "cpu", "50")) + Eventually(getRQSpec(ctx, bazName)).Should(equalRL("secrets", "6", "pods", "1")) + }) + + It("should update in-memory resource usages after ResourceQuota status changes", func() { + setHRQ(ctx, fooHRQName, fooName, "secrets", "6", "pods", "3") + setHRQ(ctx, barHRQName, barName, "secrets", "100", "cpu", "50") + setHRQ(ctx, bazHRQName, bazName, "pods", "1") + // Simulate the K8s ResourceQuota controller to update usages. bar and baz + // are foo's children, so they have "secrets" and "pods" in their RQ singleton. + updateRQUsage(ctx, fooName, "secrets", "0", "pods", "0") + updateRQUsage(ctx, barName, "secrets", "0", "cpu", "0", "pods", "0") + updateRQUsage(ctx, bazName, "secrets", "0", "pods", "0") + + // Increase secret counts from 0 to 10 in baz. Note that the number of secrets is theoretically + // limited to 6 (since baz is a child of foo), but the webhooks aren't actually running in this + // test so we can exceed those limits. + updateRQUsage(ctx, bazName, "secrets", "10") + + // Check in-memory resource usages for baz (should have increased). + Eventually(forestGetLocalUsages(bazName)).Should(equalRL("secrets", "10", "pods", "0")) + Eventually(forestGetSubtreeUsages(bazName)).Should(equalRL("secrets", "10", "pods", "0")) + + // Check in-memory resource usages for bar (unaffected by what's going on in baz). + Eventually(forestGetLocalUsages(barName)).Should(equalRL("secrets", "0", "cpu", "0", "pods", "0")) + Eventually(forestGetSubtreeUsages(barName)).Should(equalRL("secrets", "0", "cpu", "0", "pods", "0")) + + // Check in-memory resource usages for foo (local usage is unaffected; + // subtree usage has gone up since baz is a child). + Eventually(forestGetLocalUsages(fooName)).Should(equalRL("secrets", "0", "pods", "0")) + Eventually(forestGetSubtreeUsages(fooName)).Should(equalRL("secrets", "10", "pods", "0")) + + // Decrease secret counts from 10 to 0 in baz. + updateRQUsage(ctx, bazName, "secrets", "0") + + // Check in-memory resource usages for baz. + Eventually(forestGetLocalUsages(bazName)).Should(equalRL("secrets", "0", "pods", "0")) + Eventually(forestGetSubtreeUsages(bazName)).Should(equalRL("secrets", "0", "pods", "0")) + + // Check in-memory resource usages for bar. 
+ Eventually(forestGetLocalUsages(barName)).Should(equalRL("secrets", "0", "cpu", "0", "pods", "0")) + Eventually(forestGetSubtreeUsages(barName)).Should(equalRL("secrets", "0", "cpu", "0", "pods", "0")) + + // Check in-memory resource usages for foo. + Eventually(forestGetLocalUsages(fooName)).Should(equalRL("secrets", "0", "pods", "0")) + Eventually(forestGetSubtreeUsages(fooName)).Should(equalRL("secrets", "0", "pods", "0")) + + // Additionally testing not limited resource consumption: increase cpu usage + // in baz, which is not limited in foo or baz itself. Make sure the in-memory + // subtree usage for foo and the local and subtree usage for baz are zero. + updateRQUsage(ctx, bazName, "cpu", "1") + Eventually(forestGetLocalUsages(fooName)).Should(equalRL("secrets", "0", "pods", "0")) + Eventually(forestGetSubtreeUsages(fooName)).Should(equalRL("secrets", "0", "pods", "0")) + Eventually(forestGetLocalUsages(bazName)).Should(equalRL("secrets", "0", "pods", "0")) + Eventually(forestGetSubtreeUsages(bazName)).Should(equalRL("secrets", "0", "pods", "0")) + }) + + // This is simulating the RQ status change happened in the RQStatusValidator, + // where the forest is updated while the HRQs are not yet enqueued. + It("should enqueue HRQs even if the forest usages (updated by RQStatusValidator) is the same as the RQ usages", func() { + setHRQ(ctx, fooHRQName, fooName, "secrets", "6", "pods", "3") + setHRQ(ctx, barHRQName, barName, "secrets", "100", "cpu", "50") + setHRQ(ctx, bazHRQName, bazName, "pods", "1") + // Simulate the K8s ResourceQuota controller to update usages. bar and baz + // are foo's children, so they have "secrets" and "pods" in their RQ singleton. + updateRQUsage(ctx, fooName, "secrets", "0", "pods", "0") + updateRQUsage(ctx, barName, "secrets", "0", "cpu", "0", "pods", "0") + updateRQUsage(ctx, bazName, "secrets", "0", "pods", "0") + // Make sure the above RQ reconciliations are finished before proceeding. + Eventually(getHRQUsed(ctx, fooName, fooHRQName)).Should(equalRL("secrets", "0", "pods", "0")) + Eventually(getHRQUsed(ctx, barName, barHRQName)).Should(equalRL("secrets", "0", "cpu", "0")) + Eventually(getHRQUsed(ctx, bazName, bazHRQName)).Should(equalRL("pods", "0")) + + // Simulate RQStatusValidator - update usages in the forest and the RQ + // object. Since updating RQ usages triggers the RQ reconciliation that + // enqueues HRQs to update usages, let's check the foo HRQ before the RQ + // update to make sure it's not updated before the RQ reconciliation. + forestUseResource(bazName, "secrets", "10", "pods", "0") + Eventually(forestGetSubtreeUsages(fooName)).Should(equalRL("secrets", "10", "pods", "0")) + // Use Consistently here because the HRQs should not be enqueued to update + // usages yet. + Consistently(getHRQUsed(ctx, fooName, fooHRQName)).Should(equalRL("secrets", "0", "pods", "0")) + // Simulate the K8s ResourceQuota admission controller to update usages + // after validating the request. + updateRQUsage(ctx, bazName, "secrets", "10") + + // Verify the HRQ is enqueued to update the usage. 
+ Eventually(getHRQUsed(ctx, fooName, fooHRQName)).Should(equalRL("secrets", "10", "pods", "0")) + }) + + It("should set cleanup label on the singletons", func() { + setHRQ(ctx, fooHRQName, fooName, "secrets", "6", "pods", "3") + + Eventually(rqSingletonHasCleanupLabel(ctx, fooName)).Should(Equal(true)) + Eventually(rqSingletonHasCleanupLabel(ctx, barName)).Should(Equal(true)) + Eventually(rqSingletonHasCleanupLabel(ctx, bazName)).Should(Equal(true)) + }) + + It("should add non-propagate exception annotation to all the RQ singletons", func() { + // Since foo is the ancestor of both bar and baz, we just need to create one + // hrq in the foo namespace for this test. + setHRQ(ctx, fooHRQName, fooName, "secrets", "6", "pods", "3") + + // All RQ singletons in foo, bar, baz should have the non-propagate annotation. + Eventually(rqSingletonHasNonPropagateAnnotation(ctx, fooName)).Should(Equal(true)) + Eventually(rqSingletonHasNonPropagateAnnotation(ctx, barName)).Should(Equal(true)) + Eventually(rqSingletonHasNonPropagateAnnotation(ctx, bazName)).Should(Equal(true)) + }) +}) + +// rlMatcher implements GomegaMatcher: https://onsi.github.io/gomega/#adding-your-own-matchers +type rlMatcher struct { + want v1.ResourceList + got v1.ResourceList +} + +// equalRL is a replacement for the Equal() Gomega matcher but automatically constructs a resource +// list (RL) from its arguments, which are interpretted by argsToResourceList. +func equalRL(args ...string) *rlMatcher { + return &rlMatcher{want: argsToResourceList(1, args...)} +} + +func (r *rlMatcher) Match(actual interface{}) (bool, error) { + got, ok := actual.(v1.ResourceList) + if !ok { + return false, fmt.Errorf("Not a resource list: %+v", actual) + } + r.got = got + return utils.Equals(got, r.want), nil +} + +func (r *rlMatcher) FailureMessage(_ interface{}) string { + return fmt.Sprintf("Got: %+v\nWant: %+v", r.got, r.want) +} + +func (r *rlMatcher) NegatedFailureMessage(_ interface{}) string { + return fmt.Sprintf("Did not want: %+v", r.want) +} + +func forestGetLocalUsages(ns string) func() v1.ResourceList { + TestForest.Lock() + defer TestForest.Unlock() + return func() v1.ResourceList { + return TestForest.Get(ns).GetLocalUsages() + } +} + +func forestGetSubtreeUsages(ns string) func() v1.ResourceList { + TestForest.Lock() + defer TestForest.Unlock() + return func() v1.ResourceList { + return TestForest.Get(ns).GetSubtreeUsages() + } +} + +func forestUseResource(ns string, args ...string) { + TestForest.Lock() + defer TestForest.Unlock() + TestForest.Get(ns).UseResources(argsToResourceList(0, args...)) +} + +// updateRQUsages updates the resource usages on the per-namespace RQ in this +// test environment. It can simulate the usage changes coming from the K8s +// ResourceQuota controller or K8s ResourceQuota admission controller. If a +// resource is limited in the RQ but not mentioned when calling this function, +// it is left unchanged (i.e. this is a merge operation at the overall status +// level, not a replacement of the entire status). +func updateRQUsage(ctx context.Context, ns string, args ...string) { + nsn := types.NamespacedName{Namespace: ns, Name: hrq.ResourceQuotaSingleton} + EventuallyWithOffset(1, func() error { + rq := &v1.ResourceQuota{} + if err := K8sClient.Get(ctx, nsn, rq); err != nil { + return err + } + if rq.Status.Used == nil { + rq.Status.Used = v1.ResourceList{} + } + for k, v := range argsToResourceList(1, args...) 
{ + rq.Status.Used[k] = v + } + return K8sClient.Status().Update(ctx, rq) + }).Should(Succeed(), "Updating usage in %s to %v", ns, args) +} + +// setHRQ creates or replaces an existing HRQ with the given resource limits. Any existing spec is +// replaced by this function. +func setHRQ(ctx context.Context, nm, ns string, args ...string) { + nsn := types.NamespacedName{Namespace: ns, Name: nm} + hrq := &api.HierarchicalResourceQuota{ + ObjectMeta: metav1.ObjectMeta{ + Name: nm, + Namespace: ns, + }, + } + EventuallyWithOffset(1, func() error { + err := K8sClient.Get(ctx, nsn, hrq) + if errors.IsNotFound(err) { + return nil + } + return err + }).Should(Succeed(), "While checking if HRQ %s/%s already exists", ns, nm) + + hrq.Spec.Hard = argsToResourceList(1, args...) + + EventuallyWithOffset(1, func() error { + if hrq.CreationTimestamp.IsZero() { + err := K8sClient.Create(ctx, hrq) + if err == nil { + createdHRQs = append(createdHRQs, hrq) + } + return err + } else { + return K8sClient.Update(ctx, hrq) + } + }).Should(Succeed(), "While updating HRQ; %+v", hrq) +} + +// argsToResourceList provides a convenient way to specify a resource list in these tests by +// interpretting even-numbered args as resource names (e.g. "secrets") and odd-valued args as +// quantities (e.g. "5", "1Gb", etc). +func argsToResourceList(offset int, args ...string) v1.ResourceList { + list := map[v1.ResourceName]resource.Quantity{} + ExpectWithOffset(offset+1, len(args)%2).Should(Equal(0), "Need even number of arguments, not %d", len(args)) + for i := 0; i < len(args); i += 2 { + list[v1.ResourceName(args[i])] = resource.MustParse(args[i+1]) + } + return list +} + +func cleanupHRQObjects(ctx context.Context) { + for _, obj := range createdHRQs { + GinkgoT().Logf("Deleting HRQ object: %s/%s", obj.Namespace, obj.Name) + EventuallyWithOffset(1, func() error { + err := K8sClient.Delete(ctx, obj) + if errors.IsNotFound(err) { + return nil + } + return err + }).Should(Succeed(), "While deleting object %s/%s", obj.GetNamespace(), obj.GetName()) + } + createdHRQs = []*api.HierarchicalResourceQuota{} +} + +func rqSingletonHasCleanupLabel(ctx context.Context, ns string) func() bool { + return func() bool { + nsn := types.NamespacedName{Namespace: ns, Name: hrq.ResourceQuotaSingleton} + inst := &v1.ResourceQuota{} + if err := K8sClient.Get(ctx, nsn, inst); err != nil { + return false + } + if set, ok := metadata.GetLabel(inst, api.HRQLabelCleanup); ok { + return set == "true" + } + return false + } +} + +func rqSingletonHasNonPropagateAnnotation(ctx context.Context, ns string) func() bool { + return func() bool { + nsn := types.NamespacedName{Namespace: ns, Name: hrq.ResourceQuotaSingleton} + inst := &v1.ResourceQuota{} + if err := K8sClient.Get(ctx, nsn, inst); err != nil { + return false + } + if set, ok := metadata.GetAnnotation(inst, api.NonPropagateAnnotation); ok { + return set == "true" + } + return false + } +} diff --git a/internal/hrq/rqstatusvalidator.go b/internal/hrq/rqstatusvalidator.go new file mode 100644 index 000000000..28d753e74 --- /dev/null +++ b/internal/hrq/rqstatusvalidator.go @@ -0,0 +1,185 @@ +package hrq + +import ( + "context" + + "github.com/go-logr/logr" + k8sadm "k8s.io/api/admission/v1" + authnv1 "k8s.io/api/authentication/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "sigs.k8s.io/hierarchical-namespaces/internal/forest" + 
"sigs.k8s.io/hierarchical-namespaces/internal/logutils" +) + +const ( + // ResourceQuotasStatusServingPath is where the validator will run. Must be kept in sync with the + // kubebuilder markers below. + ResourceQuotasStatusServingPath = "/validate-hnc-x-k8s-io-v1alpha2-resourcequotasstatus" + ResourceQuotaAdmissionControllerUsername = "system:apiserver" +) + +// TODO: Change to fail closed after we add specific labels on the excluded namespaces so there +// won't be a deadlock of our pod not responding while it is also not allowed to be created in the +// "hnc-system" namespace. Note: the validating webhook FAILS OPEN. This means that if the webhook +// goes down, all further changes are allowed. +// +// +kubebuilder:webhook:admissionReviewVersions=v1;v1beta1,path=/validate-hnc-x-k8s-io-v1alpha2-resourcequotasstatus,mutating=false,failurePolicy=ignore,groups="",resources=resourcequotas/status,sideEffects=None,verbs=update,versions=v1,name=resourcesquotasstatus.hnc.x-k8s.io + +type ResourceQuotaStatus struct { + Log logr.Logger + Forest *forest.Forest + + client client.Client + decoder *admission.Decoder +} + +func (r *ResourceQuotaStatus) Handle(ctx context.Context, req admission.Request) admission.Response { + log := logutils.WithRID(r.Log).WithValues("nm", req.Name, "nnm", req.Namespace) + + // We only intercept and validate the HRQ-singleton RQ status changes from the + // K8s Resource Quota Admission Controller. Once the admission controller + // allows a request, it will issue a request to update the RQ status. We can + // control whether a resource should (or should not) be consumed by allowing + // (or denying) status update requests from the admission controller. + // + // Thus we will allow any non-HRQ-singleton RQ updates first. + if req.Name != ResourceQuotaSingleton { + return allow("non-HRQ-singleton RQ status change ignored") + } + + // Then for HRQ-singleton RQ status updates, we will allow any request + // attempted by other than K8s Resource Quota Admission Controller. + if !isResourceQuotaAdmissionControllerServiceAccount(&req.UserInfo) { + log.V(1).Info("Request is not from Kubernetes Resource Quota Admission Controller; Allowed") + return allow("") + } + + inst := &v1.ResourceQuota{} + if err := r.decoder.Decode(req, inst); err != nil { + log.Error(err, "Couldn't decode request") + return deny(metav1.StatusReasonBadRequest, err.Error()) + } + + resp := r.handle(inst) + if resp.Allowed { + log.V(1).Info("Allowed", "message", resp.Result.Message, "usages", inst.Status.Used) + } else { + log.Info("Denied", "code", resp.Result.Code, "reason", resp.Result.Reason, "message", resp.Result.Message, "usages", inst.Status.Used) + } + return resp +} + +func (r *ResourceQuotaStatus) handle(inst *v1.ResourceQuota) admission.Response { + r.Forest.Lock() + defer r.Forest.Unlock() + + ns := r.Forest.Get(inst.Namespace) + if err := ns.TryUseResources(inst.Status.Used); err != nil { + return deny(metav1.StatusReasonForbidden, err.Error()) + } + + return allow("the usage update is in compliance with the ancestors' (including itself) HRQs") +} + +func (r *ResourceQuotaStatus) InjectClient(c client.Client) error { + r.client = c + return nil +} + +func (r *ResourceQuotaStatus) InjectDecoder(d *admission.Decoder) error { + r.decoder = d + return nil +} + +// allow is a replacement for controller-runtime's admission.Allowed() that allows you to set the +// message (human-readable) as opposed to the reason (machine-readable). 
+func allow(msg string) admission.Response { + return admission.Response{AdmissionResponse: k8sadm.AdmissionResponse{ + Allowed: true, + Result: &metav1.Status{ + Code: 0, + Message: msg, + }, + }} +} + +func isResourceQuotaAdmissionControllerServiceAccount(user *authnv1.UserInfo) bool { + // Treat nil user same as admission controller SA so that unit tests do not need to + // specify admission controller SA. + return user == nil || user.Username == ResourceQuotaAdmissionControllerUsername +} + +// deny is a replacement for controller-runtime's admission.Denied() that allows you to set _both_ a +// human-readable message _and_ a machine-readable reason, and also sets the code correctly instead +// of hardcoding it to 403 Forbidden. +func deny(reason metav1.StatusReason, msg string) admission.Response { + return admission.Response{AdmissionResponse: k8sadm.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Code: codeFromReason(reason), + Message: msg, + Reason: reason, + }, + }} +} + +// denyInvalidField sets "invalid" as the reason and allows you to set which +// field is invalid and a human-readable message. +// +// Note: this code piece is first copied from OSS HNC, so that the denial +// message can show up for invalid requests. Then a "field" field is added so +// that the denial message is shown properly with the culprit field instead of +// double colons (see https://github.com/kubernetes-sigs/multi-tenancy/issues/1218). +func denyInvalidField(field, msg string) admission.Response { + // We need to set the custom message in both Details and Message fields. + // + // When manipulating the HRQ object via kubectl directly, kubectl + // ignores the Message field and displays the Details field if an error is + // StatusReasonInvalid (see implementation here: https://github.com/kubernetes/kubectl/blob/master/pkg/cmd/util/helpers.go#L145-L160). + // + // When manipulating the HRQ object via the hns kubectl plugin, + // if an error is StatusReasonInvalid, only the Message field will be displayed. This is because + // the Error method (https://github.com/kubernetes/client-go/blob/cb664d40f84c27bee45c193e4acb0fcd549b0305/rest/request.go#L1273) + // calls FromObject (https://github.com/kubernetes/apimachinery/blob/7e441e0f246a2db6cf1855e4110892d1623a80cf/pkg/api/errors/errors.go#L100), + // which generates a StatusError (https://github.com/kubernetes/apimachinery/blob/7e441e0f246a2db6cf1855e4110892d1623a80cf/pkg/api/errors/errors.go#L35) object. + // *StatusError implements the Error interface using only the Message + // field (https://github.com/kubernetes/apimachinery/blob/7e441e0f246a2db6cf1855e4110892d1623a80cf/pkg/api/errors/errors.go#L49)). + // Therefore, when displaying the error, only the Message field will be available. 
+ resp := deny(metav1.StatusReasonInvalid, msg) + resp.Result.Details = &metav1.StatusDetails{ + Causes: []metav1.StatusCause{ + { + Message: msg, + Field: field, + }, + }, + } + return resp +} + +// codeFromReason implements the needed subset of +// https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#StatusReason +func codeFromReason(reason metav1.StatusReason) int32 { + switch reason { + case metav1.StatusReasonUnknown: + return 500 + case metav1.StatusReasonUnauthorized: + return 401 + case metav1.StatusReasonForbidden: + return 403 + case metav1.StatusReasonConflict: + return 409 + case metav1.StatusReasonBadRequest: + return 400 + case metav1.StatusReasonInvalid: + return 422 + case metav1.StatusReasonInternalError: + return 500 + default: + return 500 + } +} diff --git a/internal/hrq/rqstatusvalidator_test.go b/internal/hrq/rqstatusvalidator_test.go new file mode 100644 index 000000000..e4e8ae5bf --- /dev/null +++ b/internal/hrq/rqstatusvalidator_test.go @@ -0,0 +1,71 @@ +package hrq + +import ( + "testing" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "sigs.k8s.io/hierarchical-namespaces/internal/forest" + "sigs.k8s.io/hierarchical-namespaces/internal/hrq/utils" +) + +func TestRQStatusChange(t *testing.T) { + tests := []struct { + name string + namespace string + consume []string + fail bool + }{ + {name: "allow 1 more configmap in parent", namespace: "a", consume: []string{"configmaps", "1"}}, + {name: "allow 1 configmap in child", namespace: "b", consume: []string{"configmaps", "1"}}, + {name: "deny 2 more configmaps in parent (violating a-hrq)", namespace: "a", consume: []string{"configmaps", "2"}, fail: true}, + {name: "deny 2 configmaps in child (violating a-hrq)", namespace: "b", consume: []string{"configmaps", "2"}, fail: true}, + {name: "allow 1 more secret in child", namespace: "b", consume: []string{"secrets", "1"}}, + {name: "allow 1 secret in parent", namespace: "a", consume: []string{"secrets", "1"}}, + {name: "deny 2 more secrets in child (violating a-hrq)", namespace: "b", consume: []string{"secrets", "2"}, fail: true}, + {name: "deny 2 secrets in parent (violating a-hrq)", namespace: "a", consume: []string{"secrets", "2"}, fail: true}, + {name: "allow any other resources", namespace: "a", consume: []string{"pods", "100"}}, + {name: "allow 1 more configmap in parent and other resources", namespace: "a", consume: []string{"configmaps", "1", "pods", "100"}}, + {name: "deny 2 more configmaps in parent (violating a-hrq) together with other resources", namespace: "a", consume: []string{"configmaps", "2", "pods", "100"}, fail: true}, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + // Create a forest with namespace 'a' as the root and 'b' as a child. + f := forest.NewForest() + nsA := f.Get("a") + nsB := f.Get("b") + nsB.SetParent(nsA) + // Add an HRQ with limits of 2 secrets and 2 configmaps in 'a'. + nsA.UpdateLimits("a-hrq", argsToResourceList("configmaps", "2", "secrets", "2")) + // Add a looser HRQ with limits of 10 secrets and 10 configmaps in 'b'. + nsB.UpdateLimits("b-hrq", argsToResourceList("configmaps", "10", "secrets", "10")) + // Consume 1 configmap in 'a' and 1 secret in 'b'. + nsA.UseResources(argsToResourceList("configmaps", "1")) + nsB.UseResources(argsToResourceList("secrets", "1")) + rqs := &ResourceQuotaStatus{Forest: f} + + // Construct the requested instance from the delta usages specified in the + // test case and what's in forest. 
+ rqInst := &v1.ResourceQuota{} + rqInst.Namespace = tc.namespace + delta := argsToResourceList(tc.consume...) + rqInst.Status.Used = utils.Add(delta, f.Get(tc.namespace).GetLocalUsages()) + + got := rqs.handle(rqInst) + if got.AdmissionResponse.Allowed == tc.fail { + t.Errorf("unexpected admission response") + } + }) + } +} + +// argsToResourceList provides a convenient way to specify a resource list by +// interpreting even-numbered args as resource names (e.g. "secrets") and +// odd-valued args as quantities (e.g. "5", "1Gb", etc). +func argsToResourceList(args ...string) v1.ResourceList { + list := map[v1.ResourceName]resource.Quantity{} + for i := 0; i < len(args); i += 2 { + list[v1.ResourceName(args[i])] = resource.MustParse(args[i+1]) + } + return list +} diff --git a/internal/hrq/utils/resources.go b/internal/hrq/utils/resources.go new file mode 100644 index 000000000..6537c6115 --- /dev/null +++ b/internal/hrq/utils/resources.go @@ -0,0 +1,182 @@ +// This file is a modified copy of k8s.io/kubernetes/pkg/quota/v1/resources.go +package utils + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" +) + +// Resources maps a ResourceName to a string representation of a quantity. +type Resources map[v1.ResourceName]string + +// Equals returns true if the two lists are equivalent +func Equals(a v1.ResourceList, b v1.ResourceList) bool { + if len(a) != len(b) { + return false + } + + for key, value1 := range a { + value2, found := b[key] + if !found { + return false + } + if value1.Cmp(value2) != 0 { + return false + } + } + + return true +} + +// LessThanOrEqual returns true if a < b for each key in b +// If false, it returns the keys in a that exceeded b +func LessThanOrEqual(a v1.ResourceList, b v1.ResourceList) (bool, []v1.ResourceName) { + result := true + resourceNames := []v1.ResourceName{} + for key, value := range b { + if other, found := a[key]; found { + if other.Cmp(value) > 0 { + result = false + resourceNames = append(resourceNames, key) + } + } + } + return result, resourceNames +} + +// Add returns the result of a + b for each named resource that exists in both a and b. +func AddIfExists(a v1.ResourceList, b v1.ResourceList) v1.ResourceList { + result := v1.ResourceList{} + for key, value := range a { + quantity := value.DeepCopy() + if other, found := b[key]; found { + quantity.Add(other) + result[key] = quantity + } + } + return result +} + +// Add returns the result of a + b for each named resource +func Add(a v1.ResourceList, b v1.ResourceList) v1.ResourceList { + result := v1.ResourceList{} + for key, value := range a { + quantity := value.DeepCopy() + if other, found := b[key]; found { + quantity.Add(other) + } + result[key] = quantity + } + for key, value := range b { + if _, found := result[key]; !found { + result[key] = value.DeepCopy() + } + } + return result +} + +// Subtract returns the result of a - b for each named resource +func Subtract(a v1.ResourceList, b v1.ResourceList) v1.ResourceList { + result := v1.ResourceList{} + for key, value := range a { + quantity := value.DeepCopy() + if other, found := b[key]; found { + quantity.Sub(other) + } + result[key] = quantity + } + for key, value := range b { + if _, found := result[key]; !found { + quantity := value.DeepCopy() + quantity.Neg() + result[key] = quantity + } + } + return result +} + +// OmitZeroQuantity returns a list omitting zero quantity resources. 
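+// Note that the input list is modified in place: zero-quantity entries are deleted
+// from it and the same, filtered list is returned.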
+func OmitZeroQuantity(a v1.ResourceList) v1.ResourceList { + for n, q := range a { + if q.IsZero() { + delete(a, n) + } + } + return a +} + +// Copy returns a deep copy of a ResourceList. +func Copy(rl v1.ResourceList) v1.ResourceList { + rs := make(v1.ResourceList) + for k, v := range rl { + rs[k] = v.DeepCopy() + } + return rs +} + +// Min returns the result of Min(a, b) (i.e., smallest resource quantity) for +// each named resource. If a resource only exists in one but not all ResourceLists +// inputs, it will be returned. +func Min(a v1.ResourceList, b v1.ResourceList) v1.ResourceList { + result := v1.ResourceList{} + for key, value := range a { + if other, found := b[key]; found { + if value.Cmp(other) > 0 { + result[key] = other.DeepCopy() + continue + } + } + result[key] = value.DeepCopy() + } + for key, value := range b { + if _, found := result[key]; !found { + result[key] = value.DeepCopy() + } + } + return result +} + +// Contains returns true if the specified item is in the list of items +func Contains(items []v1.ResourceName, item v1.ResourceName) bool { + for _, i := range items { + if i == item { + return true + } + } + return false +} + +// CleanupUnneeded cleans up the raw usages by omitting not limited usages. +func CleanupUnneeded(rawUsages v1.ResourceList, limits v1.ResourceList) v1.ResourceList { + return Mask(rawUsages, ResourceNames(limits)) +} + +// ResourceNames returns a list of all resource names in the ResourceList +func ResourceNames(resources v1.ResourceList) []v1.ResourceName { + result := []v1.ResourceName{} + for resourceName := range resources { + result = append(result, resourceName) + } + return result +} + +// Mask returns a new resource list that only has the values with the specified names +func Mask(resources v1.ResourceList, names []v1.ResourceName) v1.ResourceList { + nameSet := toSet(names) + result := v1.ResourceList{} + for key, value := range resources { + if nameSet.Has(string(key)) { + result[key] = value.DeepCopy() + } + } + return result +} + +// toSet takes a list of resource names and converts to a string set +func toSet(resourceNames []v1.ResourceName) sets.String { + result := sets.NewString() + for _, resourceName := range resourceNames { + result.Insert(string(resourceName)) + } + return result +} diff --git a/internal/hrq/utils/resources_test.go b/internal/hrq/utils/resources_test.go new file mode 100644 index 000000000..4e7a7c04d --- /dev/null +++ b/internal/hrq/utils/resources_test.go @@ -0,0 +1,577 @@ +// This file is a modified copy of k8s.io/kubernetes/pkg/quota/v1/resources_test.go +package utils + +import ( + "reflect" + "sort" + "testing" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/util/sets" +) + +func TestEquals(t *testing.T) { + type inputs struct { + a v1.ResourceList + b v1.ResourceList + } + tests := []struct { + name string + in inputs + want bool + }{ + { + name: "isEqual", + in: inputs{ + a: v1.ResourceList{}, + b: v1.ResourceList{}, + }, + want: true, + }, + { + name: "isEqualWithKeys", + in: inputs{ + a: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("1Gi"), + }, + b: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + want: true, + }, + { + name: "isNotEqualSameKeys", + in: inputs{ + a: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("200m"), + v1.ResourceMemory: resource.MustParse("1Gi"), + }, + b: v1.ResourceList{ + 
v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + want: false, + }, + { + name: "isNotEqualDiffKeys", + in: inputs{ + a: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("1Gi"), + }, + b: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("1Gi"), + v1.ResourcePods: resource.MustParse("1"), + }, + }, + want: false, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + if got := Equals(tc.in.a, tc.in.b); got != tc.want { + t.Errorf("%s want: %v, got: %v, a=%v, b=%v", tc.name, + tc.want, got, tc.in.a, tc.in.b) + } + }) + } +} + +func TestAdd(t *testing.T) { + type inputs struct { + a v1.ResourceList + b v1.ResourceList + } + tests := []struct { + name string + in inputs + want v1.ResourceList + }{ + { + name: "empty", + in: inputs{ + a: v1.ResourceList{}, + b: v1.ResourceList{}, + }, + want: v1.ResourceList{}, + }, + { + name: "one input empty", + in: inputs{ + a: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")}, + b: v1.ResourceList{}}, + want: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")}, + }, + { + name: "both inputs non-empty", + in: inputs{ + a: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")}, + b: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")}}, + want: v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m")}, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := Add(tc.in.a, tc.in.b) + if result := Equals(tc.want, got); !result { + t.Errorf("%s want: %v, got: %v, a=%v, b=%v", tc.name, + tc.want, got, tc.in.a, tc.in.b) + } + }) + } +} + +func TestAddIfExists(t *testing.T) { + type inputs struct { + a v1.ResourceList + b v1.ResourceList + } + tests := []struct { + name string + in inputs + want v1.ResourceList + }{ + { + name: "noKeys", + in: inputs{ + a: v1.ResourceList{}, + b: v1.ResourceList{}, + }, + want: v1.ResourceList{}, + }, + { + name: "toEmpty", + in: inputs{ + a: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")}, + b: v1.ResourceList{}, + }, + want: v1.ResourceList{}, + }, + { + name: "matching", + in: inputs{ + a: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")}, + b: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")}, + }, + want: v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m")}, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := AddIfExists(tc.in.a, tc.in.b) + if result := Equals(tc.want, got); !result { + t.Errorf("%s want: %v, got: %v", tc.name, tc.want, got) + } + }) + } +} + +func TestSubtract(t *testing.T) { + type inputs struct { + a v1.ResourceList + b v1.ResourceList + } + tests := []struct { + name string + in inputs + want v1.ResourceList + }{ + { + name: "noKeys", + in: inputs{ + a: v1.ResourceList{}, + b: v1.ResourceList{}, + }, + want: v1.ResourceList{}, + }, + { + name: "value-empty", + in: inputs{ + a: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")}, + b: v1.ResourceList{}, + }, + want: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")}, + }, + { + name: "empty-value", + in: inputs{ + a: v1.ResourceList{}, + b: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")}, + }, + want: v1.ResourceList{v1.ResourceCPU: resource.MustParse("-100m")}, + }, + { + name: "value-value", + in: inputs{ + a: v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m")}, + b: v1.ResourceList{v1.ResourceCPU: 
resource.MustParse("100m")}, + }, + want: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")}, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := Subtract(tc.in.a, tc.in.b) + if result := Equals(tc.want, got); !result { + t.Errorf("%s want: %v, got: %v", tc.name, tc.want, got) + } + }) + } +} + +func TestCopy(t *testing.T) { + tests := []struct { + name string + in v1.ResourceList + want v1.ResourceList + }{ + { + name: "noKeys", + in: v1.ResourceList{}, + want: v1.ResourceList{}, + }, + { + name: "hasKeys", + in: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("1Gi"), + v1.ResourcePods: resource.MustParse("1")}, + want: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("1Gi"), + v1.ResourcePods: resource.MustParse("1")}, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := Copy(tc.in) + if result := Equals(tc.want, got); !result { + t.Errorf("%s want: %v, got: %v", tc.name, tc.want, got) + } + }) + } +} + +func TestMin(t *testing.T) { + type inputs struct { + a v1.ResourceList + b v1.ResourceList + } + tests := []struct { + name string + in inputs + want v1.ResourceList + }{ + { + name: "two empty lists", + in: inputs{ + a: v1.ResourceList{}, + b: v1.ResourceList{}, + }, + want: v1.ResourceList{}, + }, + { + name: "one empty list", + in: inputs{ + a: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceSecrets: resource.MustParse("10")}, + b: v1.ResourceList{}, + }, + want: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceSecrets: resource.MustParse("10")}, + }, + { + name: "two lists have no common resource types", + in: inputs{ + a: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceSecrets: resource.MustParse("10")}, + b: v1.ResourceList{v1.ResourcePods: resource.MustParse("6")}, + }, + want: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceSecrets: resource.MustParse("10"), + v1.ResourcePods: resource.MustParse("6")}, + }, + { + name: "two lists have common resource types", + in: inputs{ + a: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("25m"), + v1.ResourceSecrets: resource.MustParse("20")}, + b: v1.ResourceList{ + v1.ResourcePods: resource.MustParse("6"), + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceSecrets: resource.MustParse("10")}, + }, + want: v1.ResourceList{ + v1.ResourcePods: resource.MustParse("6"), + v1.ResourceCPU: resource.MustParse("25m"), + v1.ResourceSecrets: resource.MustParse("10")}, + }, + { + name: "some resource types have same quantities", + in: inputs{ + a: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("25m"), + v1.ResourceSecrets: resource.MustParse("20")}, + b: v1.ResourceList{ + v1.ResourcePods: resource.MustParse("6"), + v1.ResourceCPU: resource.MustParse("25m"), + v1.ResourceSecrets: resource.MustParse("10")}, + }, + want: v1.ResourceList{ + v1.ResourcePods: resource.MustParse("6"), + v1.ResourceCPU: resource.MustParse("25m"), + v1.ResourceSecrets: resource.MustParse("10")}, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := Min(tc.in.a, tc.in.b) + if result := Equals(tc.want, got); !result { + t.Errorf("%s want: %v, got: %v", tc.name, tc.want, got) + } + }) + } +} + +func TestResourceNames(t *testing.T) { + tests := []struct { + name string + in v1.ResourceList + want []v1.ResourceName + }{ + { + name: "empty", + 
in: v1.ResourceList{}, + want: []v1.ResourceName{}, + }, + { + name: "non-empty", + in: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("1Gi"), + }, + want: []v1.ResourceName{v1.ResourceMemory, v1.ResourceCPU}, + }, + } + for _, tc := range tests { + got := ResourceNames(tc.in) + sort.Slice(got, func(i, j int) bool { return got[i] < got[j] }) + sort.Slice(tc.want, func(i, j int) bool { return tc.want[i] < tc.want[j] }) + + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%s expected: %v, actual: %v", tc.name, tc.want, got) + } + } +} + +func TestContains(t *testing.T) { + type inputs struct { + a []v1.ResourceName + b v1.ResourceName + } + tests := []struct { + name string + in inputs + want bool + }{ + { + name: "empty slice", + in: inputs{ + a: []v1.ResourceName{}, + b: v1.ResourceCPU, + }, + want: false, + }, + { + name: "does not contain", + in: inputs{ + a: []v1.ResourceName{v1.ResourceMemory}, + b: v1.ResourceCPU, + }, + want: false, + }, + { + name: "contains", + in: inputs{ + a: []v1.ResourceName{v1.ResourceMemory, v1.ResourceCPU}, + b: v1.ResourceCPU, + }, + want: true, + }, + } + for _, tc := range tests { + if got := Contains(tc.in.a, tc.in.b); got != tc.want { + t.Errorf("%s expected: %v, actual: %v", tc.name, tc.want, got) + } + } +} + +func TestMask(t *testing.T) { + type inputs struct { + a v1.ResourceList + b []v1.ResourceName + } + tests := []struct { + name string + in inputs + want v1.ResourceList + }{ + { + name: "empty list empty names", + }, + { + name: "empty list", + in: inputs{ + b: []v1.ResourceName{ + v1.ResourceSecrets, + }, + }, + }, + { + name: "empty name", + in: inputs{ + a: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + { + name: "list does not contain resources of given names", + in: inputs{ + a: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("1Gi"), + }, + b: []v1.ResourceName{ + v1.ResourceSecrets, + }, + }, + }, + { + name: "list contains resources of given names", + in: inputs{ + a: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("1Gi"), + }, + b: []v1.ResourceName{ + v1.ResourceCPU, + }, + }, + want: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m")}, + }, + } + for _, tc := range tests { + if got := Mask(tc.in.a, tc.in.b); !Equals(tc.want, got) { + t.Errorf("%s expected: %v, actual: %v", tc.name, tc.want, got) + } + } +} + +func TestToSet(t *testing.T) { + tests := []struct { + name string + in []v1.ResourceName + want sets.String + }{ + { + name: "empty resource names", + }, + { + name: "no duplicate resource name", + in: []v1.ResourceName{ + v1.ResourceCPU, + v1.ResourceMemory, + }, + want: sets.NewString( + v1.ResourceCPU.String(), + v1.ResourceMemory.String()), + }, + { + name: "has duplicate resource name", + in: []v1.ResourceName{ + v1.ResourceCPU, + v1.ResourceCPU, + v1.ResourceMemory, + }, + want: sets.NewString( + v1.ResourceCPU.String(), + v1.ResourceMemory.String()), + }, + } + for _, tc := range tests { + got := toSet(tc.in) + if !got.Equal(tc.want) { + t.Errorf("%s expected: %v, actual: %v", tc.name, tc.want, got) + } + } +} + +func TestCleanupUnneeded(t *testing.T) { + tests := []struct { + name string + usages v1.ResourceList + limits v1.ResourceList + want v1.ResourceList + }{ + { + name: "usages with empty limits; should get empty list", + usages: v1.ResourceList{ + 
v1.ResourceCPU: resource.MustParse("0"), + v1.ResourceMemory: resource.MustParse("1Gi"), + v1.ResourcePods: resource.MustParse("1"), + }, + }, + { + name: "one usage matching the limits; should list the matching usage", + usages: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("1Gi"), + v1.ResourcePods: resource.MustParse("1"), + }, + limits: v1.ResourceList{ + // Add a random resource from the usage list to the limits. + v1.ResourceCPU: resource.MustParse("100m"), + }, + want: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + }, + }, + { + name: "matching usage is zero and no other matching usages; should list the matching usage even if it's empty", + usages: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("0"), + v1.ResourceMemory: resource.MustParse("1Gi"), + v1.ResourcePods: resource.MustParse("0"), + }, + limits: v1.ResourceList{ + // Add one of the zero-quantity resource in limits. + v1.ResourcePods: resource.MustParse("1"), + }, + want: v1.ResourceList{ + v1.ResourcePods: resource.MustParse("0"), + }, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := CleanupUnneeded(tc.usages, tc.limits) + if result := Equals(tc.want, got); !result { + t.Errorf("%s want: %v, got: %v", tc.name, tc.want, got) + } + }) + } +} diff --git a/internal/integtest/setup.go b/internal/integtest/setup.go index b46493bb0..28995062c 100644 --- a/internal/integtest/setup.go +++ b/internal/integtest/setup.go @@ -48,6 +48,11 @@ var ( K8sClient client.Client testEnv *envtest.Environment k8sManagerCancelFn context.CancelFunc + TestForest *forest.Forest +) + +const ( + HRQSyncInterval = 5 * time.Second ) func HNCRun(t *testing.T, title string) { @@ -102,12 +107,14 @@ func HNCBeforeSuite() { By("creating reconcilers") opts := setup.Options{ - MaxReconciles: 100, - UseFakeClient: true, - HNCCfgRefresh: 1 * time.Second, // so we don't have to wait as long + MaxReconciles: 100, + UseFakeClient: true, + HNCCfgRefresh: 1 * time.Second, // so we don't have to wait as long + HRQ: true, + HRQSyncInterval: HRQSyncInterval, } - - err = setup.CreateReconcilers(k8sManager, forest.NewForest(), opts) + TestForest = forest.NewForest() + err = setup.CreateReconcilers(k8sManager, TestForest, opts) Expect(err).ToNot(HaveOccurred()) By("Creating clients") @@ -127,6 +134,7 @@ func HNCAfterSuite() { if k8sManagerCancelFn != nil { k8sManagerCancelFn() } + k8sManagerCancelFn = nil By("tearing down the test environment") Expect(testEnv).ToNot(BeNil()) err := testEnv.Stop() diff --git a/internal/metadata/metav1.go b/internal/metadata/metav1.go index 02c434b8b..cd4336b47 100644 --- a/internal/metadata/metav1.go +++ b/internal/metadata/metav1.go @@ -22,6 +22,15 @@ func SetLabel(inst metav1.Object, label string, value string) { inst.SetLabels(labels) } +func GetAnnotation(inst metav1.Object, annot string) (string, bool) { + annots := inst.GetAnnotations() + if annots == nil { + return "", false + } + value, ok := annots[annot] + return value, ok +} + func SetAnnotation(inst metav1.Object, annotation string, value string) { annotations := inst.GetAnnotations() if annotations == nil { diff --git a/internal/setup/reconcilers.go b/internal/setup/reconcilers.go index e93d64d01..f516faa0b 100644 --- a/internal/setup/reconcilers.go +++ b/internal/setup/reconcilers.go @@ -13,14 +13,17 @@ import ( "sigs.k8s.io/hierarchical-namespaces/internal/forest" "sigs.k8s.io/hierarchical-namespaces/internal/hierarchyconfig" 
"sigs.k8s.io/hierarchical-namespaces/internal/hncconfig" + "sigs.k8s.io/hierarchical-namespaces/internal/hrq" ) type Options struct { - MaxReconciles int - ReadOnly bool - UseFakeClient bool - NoWebhooks bool - HNCCfgRefresh time.Duration + MaxReconciles int + ReadOnly bool + UseFakeClient bool + NoWebhooks bool + HNCCfgRefresh time.Duration + HRQ bool + HRQSyncInterval time.Duration } func Create(log logr.Logger, mgr ctrl.Manager, f *forest.Forest, opts Options) { @@ -73,6 +76,38 @@ func CreateReconcilers(mgr ctrl.Manager, f *forest.Forest, opts Options) error { ReadOnly: opts.ReadOnly, } + if opts.HRQ { + // Create resource quota reconciler + rqr := &hrq.ResourceQuotaReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("reconcilers").WithName("ResourceQuota"), + Forest: f, + } + f.AddListener(rqr) + + // Create hierarchical resource quota reconciler + hrqr := &hrq.HierarchicalResourceQuotaReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("reconcilers").WithName("HierarchicalResourceQuota"), + Forest: f, + RQR: rqr, + } + rqr.HRQR = hrqr + + if err := rqr.SetupWithManager(mgr); err != nil { + return fmt.Errorf("cannot create resource quota reconciler: %s", err.Error()) + } + if err := hrqr.SetupWithManager(mgr); err != nil { + return fmt.Errorf("cannot create hierarchical resource quota reconciler: %s", err.Error()) + } + rqr.HRQR = hrqr + + // Create a periodic checker to make sure the forest is not out-of-sync. + if opts.HRQSyncInterval != 0 { + go detectIncrementalHRQUsageDrift(f, opts.HRQSyncInterval, hrqr) + } + } + if err := ar.SetupWithManager(mgr); err != nil { return fmt.Errorf("cannot create anchor reconciler: %s", err.Error()) } @@ -85,3 +120,14 @@ func CreateReconcilers(mgr ctrl.Manager, f *forest.Forest, opts Options) error { return nil } + +func detectIncrementalHRQUsageDrift(f *forest.Forest, forestSyncInterval time.Duration, hrqr *hrq.HierarchicalResourceQuotaReconciler) { + syncTicker := time.NewTicker(forestSyncInterval) + for { + <-syncTicker.C + // If there's any out-of-sync, enqueue the affected HRQs to update usages. + // If not, nothing will be enqueued. + //hrqr.Enqueue(hrqr.Log, "recover out-of-sync usages", f.CheckSubtreeUsages()) + hrqr.Log.Info("TODO: recover out-of-sync usages") + } +} diff --git a/internal/setup/webhooks.go b/internal/setup/webhooks.go index 40f62973c..c418eea50 100644 --- a/internal/setup/webhooks.go +++ b/internal/setup/webhooks.go @@ -12,6 +12,7 @@ import ( "sigs.k8s.io/hierarchical-namespaces/internal/forest" "sigs.k8s.io/hierarchical-namespaces/internal/hierarchyconfig" "sigs.k8s.io/hierarchical-namespaces/internal/hncconfig" + "sigs.k8s.io/hierarchical-namespaces/internal/hrq" ns "sigs.k8s.io/hierarchical-namespaces/internal/namespace" // for some reason, by default this gets imported as "namespace*s*" "sigs.k8s.io/hierarchical-namespaces/internal/objects" ) @@ -53,7 +54,7 @@ func ManageCerts(mgr ctrl.Manager, setupFinished chan struct{}, restartOnSecretR }) } -// createWebhooks creates all mutators and validators. This function is called from main.go. +// createWebhooks creates all mutators and validators. 
func createWebhooks(mgr ctrl.Manager, f *forest.Forest, opts Options) { // Create webhook for Hierarchy mgr.GetWebhookServer().Register(hierarchyconfig.ServingPath, &webhook.Admission{Handler: &hierarchyconfig.Validator{ @@ -89,4 +90,17 @@ func createWebhooks(mgr ctrl.Manager, f *forest.Forest, opts Options) { mgr.GetWebhookServer().Register(ns.MutatorServingPath, &webhook.Admission{Handler: &ns.Mutator{ Log: ctrl.Log.WithName("namespace").WithName("mutate"), }}) + + if opts.HRQ { + // Create webhook for ResourceQuota status. + mgr.GetWebhookServer().Register(hrq.ResourceQuotasStatusServingPath, &webhook.Admission{Handler: &hrq.ResourceQuotaStatus{ + Log: ctrl.Log.WithName("validators").WithName("ResourceQuota"), + Forest: f, + }}) + + // Create webhook for HierarchicalResourceQuota spec. + mgr.GetWebhookServer().Register(hrq.HRQServingPath, &webhook.Admission{Handler: &hrq.HRQ{ + Log: ctrl.Log.WithName("validators").WithName("HierarchicalResourceQuota"), + }}) + } } diff --git a/test/e2e/delete_crd_test.go b/test/e2e/delete_crd_test.go index a8fdfe08a..b40931fb7 100644 --- a/test/e2e/delete_crd_test.go +++ b/test/e2e/delete_crd_test.go @@ -69,6 +69,7 @@ var _ = Describe("When deleting CRDs", func() { time.Sleep(1 * time.Second) MustRun("kubectl delete crd subnamespaceanchors.hnc.x-k8s.io") MustRun("kubectl delete crd hncconfigurations.hnc.x-k8s.io") + MustRun("kubectl delete crd hierarchicalresourcequotas.hnc.x-k8s.io") // Give HNC 10s to have the chance to fully delete everything (5s wasn't enough). // Verify that the HNC CRDs are gone (if nothing's printed, then they are). RunShouldNotContain("hnc", cleanupTimeout, "kubectl get crd") diff --git a/test/e2e/hrq_test.go b/test/e2e/hrq_test.go new file mode 100644 index 000000000..e958b11a1 --- /dev/null +++ b/test/e2e/hrq_test.go @@ -0,0 +1,356 @@ +package e2e + +import ( + "io/ioutil" + "math/rand" + "os" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + . "sigs.k8s.io/hierarchical-namespaces/pkg/testutils" +) + +// This test primarily uses PVCs to test the quota system (with a few tests of pods). We used to use +// Secrets and ConfigMaps, but K8s and GKE keep on changing the number of default objects of these +// types that it puts in every namespace. PVCs seem to be more stable and are well-documented in +// https://kubernetes.io/docs/tasks/administer-cluster/quota-api-object/. 
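+//
+// Unless noted otherwise, each test builds a small tree rooted at hrq-test-a, with
+// hrq-test-b (and sometimes hrq-test-c) as its children.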
+
+const (
+  prefix = "hrq-test-"
+  nsA = prefix+"a"
+  nsB = prefix+"b"
+  nsC = prefix+"c"
+
+  propagationTime = 5
+  resourceQuotaSingleton = "hrq.hnc.x-k8s.io"
+)
+
+var _ = Describe("Hierarchical Resource Quota", func() {
+  BeforeEach(func() {
+    rand.Seed(time.Now().UnixNano())
+    CleanupTestNamespaces()
+  })
+
+  AfterEach(func() {
+    CleanupTestNamespaces()
+  })
+
+  It("should create RQs with correct limits in the descendants (including itself) for HRQs", func() {
+    // set up
+    CreateNamespace(nsA)
+    CreateSubnamespace(nsB, nsA)
+
+    // test
+    setHRQ("a-hrq", nsA, "persistentvolumeclaims", "3")
+    setHRQ("b-hrq", nsB, "secrets", "2")
+
+    // verify
+    FieldShouldContain("resourcequota", nsA, resourceQuotaSingleton, ".spec.hard", "persistentvolumeclaims:3")
+    FieldShouldContain("resourcequota", nsB, resourceQuotaSingleton, ".spec.hard", "persistentvolumeclaims:3 secrets:2")
+  })
+
+  It("should deny the request if the HRQ has invalid quota types", func() {
+    // set up
+    CreateNamespace(nsA)
+
+    // test and verify ("foobar" is an invalid quota type)
+    shouldNotSetHRQ("a-hrq", nsA, "persistentvolumeclaims", "3", "foobar", "1")
+  })
+
+  It("should remove obsolete (empty) RQ singletons if there's no longer an HRQ in the ancestor", func() {
+    // set up namespaces
+    CreateNamespace(nsA)
+    CreateSubnamespace(nsB, nsA)
+    // set up HRQs and verify descendant RQs
+    setHRQ("a-hrq", nsA, "persistentvolumeclaims", "3")
+    setHRQ("b-hrq", nsB, "secrets", "2")
+    FieldShouldContain("resourcequota", nsA, resourceQuotaSingleton, ".spec.hard", "persistentvolumeclaims:3")
+    FieldShouldContain("resourcequota", nsB, resourceQuotaSingleton, ".spec.hard", "persistentvolumeclaims:3 secrets:2")
+
+    // test deleting the HRQ in namespace 'a'
+    MustRun("kubectl delete hrq -n", nsA, "a-hrq")
+
+    // verify the RQ singleton in namespace 'a' is gone
+    RunShouldNotContain(resourceQuotaSingleton, propagationTime, "kubectl get resourcequota -n", nsA)
+    // verify the RQ singleton in namespace 'b' still exists but the limits are
+    // updated
+    FieldShouldContain("resourcequota", nsB, resourceQuotaSingleton, ".spec.hard", "secrets:2")
+    FieldShouldNotContain("resourcequota", nsB, resourceQuotaSingleton, ".spec.hard", "persistentvolumeclaims")
+  })
+
+  It("should update descendants' (including its own) RQ limits if an ancestor HRQ changes", func() {
+    // set up namespaces
+    CreateNamespace(nsA)
+    CreateSubnamespace(nsB, nsA)
+    // set up an HRQ and verify descendant RQs
+    setHRQ("a-hrq", nsA, "persistentvolumeclaims", "3")
+    FieldShouldContain("resourcequota", nsA, resourceQuotaSingleton, ".spec.hard", "persistentvolumeclaims:3")
+    FieldShouldContain("resourcequota", nsB, resourceQuotaSingleton, ".spec.hard", "persistentvolumeclaims:3")
+
+    // test updating the HRQ
+    setHRQ("a-hrq", nsA, "persistentvolumeclaims", "2", "secrets", "2")
+
+    // verify the RQ limits are updated
+    FieldShouldContain("resourcequota", nsA, resourceQuotaSingleton, ".spec.hard", "persistentvolumeclaims:2 secrets:2")
+    FieldShouldContain("resourcequota", nsB, resourceQuotaSingleton, ".spec.hard", "persistentvolumeclaims:2 secrets:2")
+  })
+
+  It("should update ancestor HRQ usages if a limited resource is consumed/deleted in a descendant (including itself)", func() {
+    // set up namespaces
+    CreateNamespace(nsA)
+    CreateSubnamespace(nsB, nsA)
+    // set up an HRQ
+    setHRQ("a-hrq-persistentvolumeclaims", nsA, "persistentvolumeclaims", "3")
+
+    // test consuming a resource in a descendant
+    Eventually(createPVC("b-pvc", nsB)).Should(Succeed())
+    // verify the HRQ
usages are updated + FieldShouldContain("hrq", nsA, "a-hrq-persistentvolumeclaims", ".status.used", "persistentvolumeclaims:1") + + // test consuming a resource in the same namespace (itself) with HRQ + Eventually(createPVC("a-pvc", nsA)).Should(Succeed()) + // verify the HRQ usages are updated + FieldShouldContain("hrq", nsA, "a-hrq-persistentvolumeclaims", ".status.used", "persistentvolumeclaims:2") + + // test deleting a resource in a descendant + MustRun("kubectl delete persistentvolumeclaim b-pvc -n", nsB) + // verify the HRQ usages are updated + FieldShouldContain("hrq", nsA, "a-hrq-persistentvolumeclaims", ".status.used", "persistentvolumeclaims:1") + }) + + It("should update ancestor HRQ usages if a descendant namespace with limited resources is deleted", func() { + // set up namespaces + CreateNamespace(nsA) + CreateSubnamespace(nsB, nsA) + // set up an HRQ + setHRQ("a-hrq-persistentvolumeclaims", nsA, "persistentvolumeclaims", "3") + // verify usage updates after consuming limited resources in the descendants + Eventually(createPVC("b-pvc", nsB)).Should(Succeed()) + FieldShouldContain("hrq", nsA, "a-hrq-persistentvolumeclaims", ".status.used", "persistentvolumeclaims:1") + + // test deleting the namespace with usages + MustRun("kubectl delete subns", nsB, "-n", nsA) + + // verify the HRQ usages are updated (restored) + FieldShouldContain("hrq", nsA, "a-hrq-persistentvolumeclaims", ".status.used", "persistentvolumeclaims:0") + }) + + It("should update descendant RQ limits or delete them if a namespace with HRQs is deleted", func() { + // set up namespaces - 'a' as the parent of 'b' and 'c'. + CreateNamespace(nsA) + CreateNamespace(nsB) + CreateNamespace(nsC) + MustRun("kubectl hns set -p", nsA, nsB) + MustRun("kubectl hns set -p", nsA, nsC) + + // set up an HRQ in namespace 'a' and 'b' + setHRQ("a-hrq-persistentvolumeclaims", nsA, "persistentvolumeclaims", "3") + setHRQ("b-hrq-secrets", nsB, "secrets", "2") + // verify RQ singletons in the descendants 'b' and 'c' + FieldShouldContain("resourcequota", nsB, resourceQuotaSingleton, ".spec.hard", "persistentvolumeclaims:3 secrets:2") + FieldShouldContain("resourcequota", nsC, resourceQuotaSingleton, ".spec.hard", "persistentvolumeclaims:3") + + // Make the child namespaces roots + MustRun("kubectl hns set -r", nsB) + MustRun("kubectl hns set -r", nsC) + + // verify the RQ is updated in namespace 'b' and removed in namespace 'c' + FieldShouldNotContain("resourcequota", nsB, resourceQuotaSingleton, ".spec.hard", "persistentvolumeclaims") + FieldShouldContain("resourcequota", nsB, resourceQuotaSingleton, ".spec.hard", "secrets:2") + MustNotRun("kubectl get resourcequota -n", nsC, resourceQuotaSingleton) + }) + + It("should recreate RQ singleton if user deletes it", func() { + // set up namespaces + CreateNamespace(nsA) + CreateSubnamespace(nsB, nsA) + // set up ancestor HRQ and verify descendant RQs + setHRQ("a-hrq-persistentvolumeclaims", nsA, "persistentvolumeclaims", "3") + MustRun("kubectl get resourcequota -n", nsA, resourceQuotaSingleton) + MustRun("kubectl get resourcequota -n", nsB, resourceQuotaSingleton) + + // test deleting the RQ singleton in the descendant + MustRun("kubectl delete resourcequota -n", nsB, resourceQuotaSingleton) + + // verify the RQ singleton is recreated + MustRun("kubectl get resourcequota -n", nsB, resourceQuotaSingleton) + }) + + It("shouldn't allow consuming resources violating HRQs", func() { + // set up namespaces a <- b (a is the parent) + CreateNamespace(nsA) + CreateSubnamespace(nsB, nsA) + // set 
up an HRQ with limits of 1 persistentvolumeclaims in namespace a + setHRQ("a-hrq-persistentvolumeclaims", nsA, "persistentvolumeclaims", "1") + + // verify allowing creating 1 persistentvolumeclaims in namespace b + Eventually(createPVC("b-pvc", nsB)).Should(Succeed()) + // verify denying creating any more persistentvolumeclaims + Expect(createPVC("b-pvc-2", nsB)()).ShouldNot(Succeed()) + Expect(createPVC("a-pvc", nsA)()).ShouldNot(Succeed()) + + // test deleting the only persistentvolumeclaims + MustRun("kubectl delete persistentvolumeclaim b-pvc -n", nsB) + // verify allowing creating 1 persistentvolumeclaims in namespace a + Eventually(createPVC("a-pvc", nsA)).Should(Succeed()) + // verify denying creating any more persistentvolumeclaims again + Expect(createPVC("a-pvc-2", nsA)()).ShouldNot(Succeed()) + Expect(createPVC("b-pvc", nsB)()).ShouldNot(Succeed()) + }) + + It("should leave the resource consumption as is if it violates HRQs before HRQs are created", func() { + // set up namespaces a <- b (a is the parent) + CreateNamespace(nsA) + CreateSubnamespace(nsB, nsA) + // set up 1 persistentvolumeclaims in each namespace + Eventually(createPVC("b-pvc", nsB)).Should(Succeed()) + Eventually(createPVC("a-pvc", nsA)).Should(Succeed()) + // set up an HRQ with limits of 1 persistentvolumeclaims in namespace a + setHRQ("a-hrq-persistentvolumeclaims", nsA, "persistentvolumeclaims", "1") + + // verify two existing persistentvolumeclaims still exist + MustRun("kubectl get persistentvolumeclaim b-pvc -n", nsB) + MustRun("kubectl get persistentvolumeclaim a-pvc -n", nsA) + // verify the HRQ usage is 2 even if the limit is 1 + FieldShouldContain("hrq", nsA, "a-hrq-persistentvolumeclaims", ".status.used", "persistentvolumeclaims:2") + FieldShouldContain("hrq", nsA, "a-hrq-persistentvolumeclaims", ".spec.hard", "persistentvolumeclaims:1") + }) + + It("should not allow creating a pod if the pod cpu/memory usages violate HRQs", func() { + // Set up namespaces - 'a' as the parent of 'b' and 'c'. + CreateNamespace(nsA) + CreateSubnamespace(nsB, nsA) + CreateSubnamespace(nsC, nsA) + // Set up an HRQ with limits of 200Mi memory and 300m cpu in namespace a. + setHRQ("a-hrq", nsA, "memory", "200Mi", "cpu", "300m") + + // Should allow creating a pod under limit in namespace b. + createPod("pod1", nsB, "memory 200Mi cpu 300m", "memory 100Mi cpu 150m") + + // Verify the HRQ usages are updated. + FieldShouldContain("hrq", nsA, "a-hrq", ".status.used", "memory:100Mi") + FieldShouldContain("hrq", nsA, "a-hrq", ".status.used", "cpu:150m") + + // Try creating a pod in namespace c (b's sibling who is also subject to + // the HRQ in namespace a). + // Should reject creating a pod exceeding cpu limits (250m + 150m exceeds 300m). + shouldNotCreatePod("pod2", nsC, "memory 200Mi cpu 500m", "memory 100Mi cpu 250m") + // Should reject creating a pod exceeding memory limits (150Mi + 100Mi exceeds 200Mi). + shouldNotCreatePod("pod2", nsC, "memory 300Mi cpu 300m", "memory 150Mi cpu 150m") + // Should reject creating a pod exceeding both memory and cpu limits. + shouldNotCreatePod("pod2", nsC, "memory 300Mi cpu 500m", "memory 150Mi cpu 250m") + // Should allow creating another pod under limit. 
+ createPod("pod2", nsC, "memory 200Mi cpu 300m", "memory 100Mi cpu 150m") + }) +}) + +func generateHRQManifest(nm, nsnm string, args ...string) string { + return `# temp file created by hrq_test.go +apiVersion: hnc.x-k8s.io/v1alpha2 +kind: HierarchicalResourceQuota +metadata: + name: ` + nm + ` + namespace: ` + nsnm + ` +spec: + hard:` + argsToResourceListString(2, args...) +} + +func generatePodManifest(nm, nsnm, limits, requests string) string { + l := argsToResourceListString(4, strings.Split(limits, " ")...) + r := argsToResourceListString(4, strings.Split(requests, " ")...) + return `# temp file created by hrq_test.go +apiVersion: v1 +kind: Pod +metadata: + name: ` + nm + ` + namespace: ` + nsnm + ` +spec: + containers: + - name: quota-mem-cpu + image: nginx + resources: + limits:` + l + ` + requests:` + r +} + +// createPod creates a pod with the given container memory and cpu limits and +// requests. +func createPod(nm, nsnm, limits, requests string) { + pod := generatePodManifest(nm, nsnm, limits, requests) + MustApplyYAML(pod) + RunShouldContain(nm, propagationTime, "kubectl get pod -n", nsnm) +} + +func createPVC(nm, nsnm string) func() error { + return func() error { + pvc := `# temp file created by hrq_test.go +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: `+nm+` + namespace: `+nsnm+` +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi` + fn := writeTempFile(pvc) + GinkgoT().Log("Wrote "+fn+":\n"+pvc) + defer removeFile(fn) + return TryRun("kubectl apply -f", fn) + } +} + +func writeTempFile(cxt string) string { + f, err := ioutil.TempFile(os.TempDir(), "e2e-test-*.yaml") + Expect(err).Should(BeNil()) + defer f.Close() + f.WriteString(cxt) + return f.Name() +} + +func removeFile(path string) { + Expect(os.Remove(path)).Should(BeNil()) +} + +// shouldNotCreatePod should not be able to create the pod with the given +// container memory and cpu limits and requests. +func shouldNotCreatePod(nm, nsnm, limits, requests string) { + pod := generatePodManifest(nm, nsnm, limits, requests) + MustNotApplyYAML(pod) + RunShouldNotContain(nm, propagationTime, "kubectl get pod -n", nsnm) +} + +// setHRQ creates/updates an HRQ with a given name in the given namespace with +// hard limits. +func setHRQ(nm, nsnm string, args ...string) { + hrq := generateHRQManifest(nm, nsnm, args...) + MustApplyYAML(hrq) + RunShouldContain(nm, propagationTime, "kubectl get hrq -n", nsnm) +} + +// shouldNotSetHRQ should not be able to create the HRQ with the given resources. +func shouldNotSetHRQ(nm, nsnm string, args ...string) { + hrq := generateHRQManifest(nm, nsnm, args...) + MustNotApplyYAML(hrq) + RunShouldNotContain(nm, propagationTime, "kubectl get hrq -n", nsnm) +} + +// argsToResourceListString provides a convenient way to specify a resource list +// in hard limits/usages for HRQ or RQ instances, or limits/requests for pod +// containers by interpreting even-numbered args as resource names (e.g. +// "secrets") and odd-valued args as quantities (e.g. "5", "1Gb", etc). The +// level controls how much indention in the output (0 indicates no indention). +func argsToResourceListString(level int, args ...string) string { + Expect(len(args)%2).Should(Equal(0), "Need even number of arguments, not %d", len(args)) + rl := `` + for i := 0; i < len(args); i += 2 { + rl += ` +` + strings.Repeat(" ", level) + args[i] + `: "` + args[i+1] + `"` + } + return rl +}
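
As a quick illustration of how the resource-list helpers in internal/hrq/utils compose, here is a minimal sketch (not part of the change above; it assumes it is compiled inside this module, since the utils package is internal):

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/resource"

        "sigs.k8s.io/hierarchical-namespaces/internal/hrq/utils"
    )

    func main() {
        limits := v1.ResourceList{"configmaps": resource.MustParse("2")}
        rawUsage := v1.ResourceList{
            "configmaps": resource.MustParse("1"),
            "pods":       resource.MustParse("7"),
        }

        // Keep only the usages that are actually limited ("pods" is dropped here).
        tracked := utils.CleanupUnneeded(rawUsage, limits)

        // Simulate consuming one more configmap and check the result against the limits.
        proposed := utils.Add(tracked, v1.ResourceList{"configmaps": resource.MustParse("1")})
        ok, exceeded := utils.LessThanOrEqual(proposed, limits)

        fmt.Println(tracked, proposed, ok, exceeded) // 2 configmaps <= 2, so ok is true and nothing is exceeded
    }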