diff --git a/resource_customizations/karpenter.k8s.aws/EC2NodeClass/health.lua b/resource_customizations/karpenter.k8s.aws/EC2NodeClass/health.lua
new file mode 100644
index 0000000000..df9039e4e2
--- /dev/null
+++ b/resource_customizations/karpenter.k8s.aws/EC2NodeClass/health.lua
@@ -0,0 +1,19 @@
+local hs = {}
+if obj.status ~= nil and obj.status.conditions ~= nil then
+  for _, condition in ipairs(obj.status.conditions) do
+    if condition.type == "Ready" then
+      if condition.status == "False" then
+        hs.status = "Degraded"
+        hs.message = condition.message
+        return hs
+      elseif condition.status == "True" then
+        hs.status = "Healthy"
+        hs.message = "EC2NodeClass is ready"
+        return hs
+      end
+    end
+  end
+end
+hs.status = "Progressing"
+hs.message = "Waiting for EC2NodeClass to be ready"
+return hs
diff --git a/resource_customizations/karpenter.k8s.aws/EC2NodeClass/health_test.yaml b/resource_customizations/karpenter.k8s.aws/EC2NodeClass/health_test.yaml
new file mode 100644
index 0000000000..765a470746
--- /dev/null
+++ b/resource_customizations/karpenter.k8s.aws/EC2NodeClass/health_test.yaml
@@ -0,0 +1,9 @@
+tests:
+  - healthStatus:
+      status: Degraded
+      message: 'EC2NodeClass is not ready'
+    inputPath: testdata/degraded.yaml
+  - healthStatus:
+      status: Healthy
+      message: 'EC2NodeClass is ready'
+    inputPath: testdata/healthy.yaml
diff --git a/resource_customizations/karpenter.k8s.aws/EC2NodeClass/testdata/degraded.yaml b/resource_customizations/karpenter.k8s.aws/EC2NodeClass/testdata/degraded.yaml
new file mode 100644
index 0000000000..48ee0477c4
--- /dev/null
+++ b/resource_customizations/karpenter.k8s.aws/EC2NodeClass/testdata/degraded.yaml
@@ -0,0 +1,40 @@
+apiVersion: karpenter.k8s.aws/v1
+kind: EC2NodeClass
+metadata:
+  finalizers:
+    - karpenter.k8s.aws/termination
+  name: ec2nodeclass
+spec:
+  amiFamily: AL2023
+  amiSelectorTerms:
+    - alias: al2023@latest
+  blockDeviceMappings:
+    - deviceName: /dev/xvda
+      ebs:
+        deleteOnTermination: true
+        encrypted: false
+        volumeSize: 50Gi
+        volumeType: gp3
+  kubelet:
+    imageGCHighThresholdPercent: 85
+    imageGCLowThresholdPercent: 80
+  metadataOptions:
+    httpEndpoint: enabled
+    httpProtocolIPv6: disabled
+    httpPutResponseHopLimit: 1
+    httpTokens: required
+  role: KarpenterInstanceProfile1111
+  securityGroupSelectorTerms:
+    - id: sg-1111
+  subnetSelectorTerms:
+    - id: subnet-1111
+    - id: subnet-2222
+  tags:
+    Name: karpenter.sh/default
+    karpenter.sh/discovery: cluster
+status:
+  conditions:
+    - message: EC2NodeClass is not ready
+      reason: NotReady
+      status: "False"
+      type: Ready
diff --git a/resource_customizations/karpenter.k8s.aws/EC2NodeClass/testdata/healthy.yaml b/resource_customizations/karpenter.k8s.aws/EC2NodeClass/testdata/healthy.yaml
new file mode 100644
index 0000000000..0a262520b1
--- /dev/null
+++ b/resource_customizations/karpenter.k8s.aws/EC2NodeClass/testdata/healthy.yaml
@@ -0,0 +1,52 @@
+apiVersion: karpenter.k8s.aws/v1
+kind: EC2NodeClass
+metadata:
+  finalizers:
+    - karpenter.k8s.aws/termination
+  name: ec2nodeclass
+spec:
+  amiFamily: AL2023
+  amiSelectorTerms:
+    - alias: al2023@latest
+  blockDeviceMappings:
+    - deviceName: /dev/xvda
+      ebs:
+        deleteOnTermination: true
+        encrypted: false
+        volumeSize: 50Gi
+        volumeType: gp3
+  kubelet:
+    imageGCHighThresholdPercent: 85
+    imageGCLowThresholdPercent: 80
+  metadataOptions:
+    httpEndpoint: enabled
+    httpProtocolIPv6: disabled
+    httpPutResponseHopLimit: 1
+    httpTokens: required
+  role: KarpenterInstanceProfile
+  securityGroupSelectorTerms:
+    - id: sg-002e1110bcbd37f8f1
+  subnetSelectorTerms:
+    - id: subnet-0b01fc471111fd661
+    - id: subnet-0af6c9817111edd26
+  tags:
+    Name: karpenter.sh/default
+    karpenter.sh/discovery: cluster
+status:
+  conditions:
+    - message: EC2NodeClass is ready
+      reason: Ready
+      status: "True"
+      type: Ready
+    - message: ""
+      reason: SubnetsReady
+      status: "True"
+      type: SubnetsReady
+    - message: ""
+      reason: SecurityGroupsReady
+      status: "True"
+      type: SecurityGroupsReady
+    - message: ""
+      reason: ValidationSucceeded
+      status: "True"
+      type: ValidationSucceeded
diff --git a/resource_customizations/karpenter.sh/NodePool/health.lua b/resource_customizations/karpenter.sh/NodePool/health.lua
new file mode 100644
index 0000000000..820d754796
--- /dev/null
+++ b/resource_customizations/karpenter.sh/NodePool/health.lua
@@ -0,0 +1,19 @@
+local hs = {}
+if obj.status ~= nil and obj.status.conditions ~= nil then
+  for _, condition in ipairs(obj.status.conditions) do
+    if condition.type == "Ready" then
+      if condition.status == "False" then
+        hs.status = "Degraded"
+        hs.message = condition.message
+        return hs
+      elseif condition.status == "True" then
+        hs.status = "Healthy"
+        hs.message = condition.message
+        return hs
+      end
+    end
+  end
+end
+hs.status = "Progressing"
+hs.message = "Waiting for NodePool to be ready"
+return hs
diff --git a/resource_customizations/karpenter.sh/NodePool/health_test.yaml b/resource_customizations/karpenter.sh/NodePool/health_test.yaml
new file mode 100644
index 0000000000..bb6005d74e
--- /dev/null
+++ b/resource_customizations/karpenter.sh/NodePool/health_test.yaml
@@ -0,0 +1,9 @@
+tests:
+  - healthStatus:
+      status: Degraded
+      message: 'NodePool is not ready'
+    inputPath: testdata/degraded.yaml
+  - healthStatus:
+      status: Healthy
+      message: 'NodePool is ready'
+    inputPath: testdata/healthy.yaml
diff --git a/resource_customizations/karpenter.sh/NodePool/testdata/degraded.yaml b/resource_customizations/karpenter.sh/NodePool/testdata/degraded.yaml
new file mode 100644
index 0000000000..b96f049cc0
--- /dev/null
+++ b/resource_customizations/karpenter.sh/NodePool/testdata/degraded.yaml
@@ -0,0 +1,36 @@
+apiVersion: karpenter.sh/v1
+kind: NodePool
+metadata:
+  name: nodepool
+spec:
+  disruption:
+    budgets:
+      - nodes: 100%
+    consolidateAfter: 10m
+    consolidationPolicy: WhenEmptyOrUnderutilized
+  limits:
+    cpu: 10001
+  template:
+    spec:
+      expireAfter: 720h
+      nodeClassRef:
+        group: karpenter.k8s.aws
+        kind: EC2NodeClass
+        name: default1
+      requirements:
+        - key: karpenter.k8s.aws/instance-family
+          operator: In
+          values:
+            - m51
+        - key: karpenter.k8s.aws/instance-size
+          operator: In
+          values:
+            - large1
+            - xlarge1
+            - 2xlarge1
+status:
+  conditions:
+    - message: NodePool is not ready
+      reason: NotReady
+      status: "False"
+      type: Ready
diff --git a/resource_customizations/karpenter.sh/NodePool/testdata/healthy.yaml b/resource_customizations/karpenter.sh/NodePool/testdata/healthy.yaml
new file mode 100644
index 0000000000..8eac40b417
--- /dev/null
+++ b/resource_customizations/karpenter.sh/NodePool/testdata/healthy.yaml
@@ -0,0 +1,40 @@
+apiVersion: karpenter.sh/v1
+kind: NodePool
+metadata:
+  name: nodepool
+spec:
+  disruption:
+    budgets:
+      - nodes: 10%
+    consolidateAfter: 10m
+    consolidationPolicy: WhenEmptyOrUnderutilized
+  limits:
+    cpu: 1000
+  template:
+    spec:
+      expireAfter: 720h
+      nodeClassRef:
+        group: karpenter.k8s.aws
+        kind: EC2NodeClass
+        name: default
+      requirements:
+        - key: karpenter.k8s.aws/instance-family
+          operator: In
+          values:
+            - m5
+        - key: karpenter.k8s.aws/instance-size
+          operator: In
+          values:
+            - large
+            - xlarge
+            - 2xlarge
+status:
+  conditions:
+    - message: NodePool is ready
+      reason: Ready
+      status: "True"
+      type: Ready
+    - message: ""
+      reason: NodeClassReady
+      status: "True"
+      type: NodeClassReady