From faeca24e075bc2bc9ee1b4d684991de45403b189 Mon Sep 17 00:00:00 2001
From: Kiryl Filatau
Date: Thu, 18 Dec 2025 17:45:37 +0100
Subject: [PATCH 1/8] Add GPU support to kubernetes_scale on EKS Karpenter

---
 .../kubernetes_scale/aws-gpu-nodepool.yaml.j2 | 38 +++++++++++++
 .../kubernetes_scale/kubernetes_scale.yaml.j2 | 16 ++++++
 .../kubernetes_scale_benchmark.py             | 55 ++++++++++++++++---
 3 files changed, 101 insertions(+), 8 deletions(-)
 create mode 100644 perfkitbenchmarker/data/container/kubernetes_scale/aws-gpu-nodepool.yaml.j2

diff --git a/perfkitbenchmarker/data/container/kubernetes_scale/aws-gpu-nodepool.yaml.j2 b/perfkitbenchmarker/data/container/kubernetes_scale/aws-gpu-nodepool.yaml.j2
new file mode 100644
index 000000000..9d6fce3d9
--- /dev/null
+++ b/perfkitbenchmarker/data/container/kubernetes_scale/aws-gpu-nodepool.yaml.j2
@@ -0,0 +1,38 @@
+apiVersion: karpenter.sh/v1
+kind: NodePool
+metadata:
+  name: {{ gpu_nodepool_name | default('gpu') }}
+spec:
+  disruption:
+    consolidateAfter: {{ gpu_consolidate_after | default('1m') }}
+    consolidationPolicy: {{ gpu_consolidation_policy | default('WhenEmptyOrUnderutilized') }}
+  limits:
+    cpu: {{ gpu_nodepool_cpu_limit | default(1000) }}
+  template:
+    metadata:
+      labels:
+        pkb_nodepool: {{ gpu_nodepool_label | default('gpu') }}
+    spec:
+      nodeClassRef:
+        group: karpenter.k8s.aws
+        kind: EC2NodeClass
+        name: {{ karpenter_ec2nodeclass_name | default('default') }}
+      requirements:
+        - key: kubernetes.io/arch
+          operator: In
+          values: {{ gpu_arch | default(['amd64']) }}
+        - key: kubernetes.io/os
+          operator: In
+          values: {{ gpu_os | default(['linux']) }}
+        - key: karpenter.sh/capacity-type
+          operator: In
+          values: {{ gpu_capacity_types | default(['on-demand']) }}
+        - key: karpenter.k8s.aws/instance-category
+          operator: In
+          values: {{ gpu_instance_categories | default(['g']) }}
+        - key: karpenter.k8s.aws/instance-family
+          operator: In
+          values: {{ gpu_instance_families | default(['g6','g6e']) }}
+      taints:
+        - key: {{ gpu_taint_key | default('nvidia.com/gpu') }}
+          effect: NoSchedule
diff --git a/perfkitbenchmarker/data/container/kubernetes_scale/kubernetes_scale.yaml.j2 b/perfkitbenchmarker/data/container/kubernetes_scale/kubernetes_scale.yaml.j2
index 9b79aa6cd..477666f45 100644
--- a/perfkitbenchmarker/data/container/kubernetes_scale/kubernetes_scale.yaml.j2
+++ b/perfkitbenchmarker/data/container/kubernetes_scale/kubernetes_scale.yaml.j2
@@ -13,6 +13,10 @@ spec:
       labels:
         name: {{ Name }}
     spec:
+      {%- if NvidiaGpuRequest and Cloud == 'aws' %}
+      nodeSelector:
+        karpenter.sh/nodepool: {{ GpuNodepoolName | default('gpu') }}
+      {%- endif %}
       containers:
       - name: {{ Name }}
         image: {{ Image }}
@@ -20,6 +24,13 @@ spec:
         command: {{ Command }}
         {%- endif %}
         resources:
+          requests:
+            cpu: {{ CpuRequest }}
+            memory: {{ MemoryRequest }}
+            ephemeral-storage: {{ EphemeralStorageRequest }}
+            {%- if NvidiaGpuRequest %}
+            nvidia.com/gpu: {{ NvidiaGpuRequest }}
+            {%- endif %}
           limits:
             cpu: {{ CpuRequest }}
             memory: {{ MemoryRequest }}
@@ -53,3 +64,8 @@ spec:
         operator: "Exists"
         effect: "NoExecute"
         tolerationSeconds: {{ PodTimeout }}
+      {%- if NvidiaGpuRequest and Cloud == 'aws' %}
+      - key: {{ GpuTaintKey | default('nvidia.com/gpu') }}
+        operator: Exists
+        effect: NoSchedule
+      {%- endif %}
diff --git a/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py b/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py
index 8e8cb21f0..9462c36ea 100644
--- a/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py
+++ b/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py
@@ -97,8 +97,7 @@ def Prepare(bm_spec: benchmark_spec.BenchmarkSpec):
 def _GetRolloutCreationTime(rollout_name: str) -> int:
   """Returns the time when the rollout was created."""
   out, _, _ = container_service.RunRetryableKubectlCommand([
-      'rollout',
-      'history',
+      'get',
       rollout_name,
       '-o',
       'jsonpath={.metadata.creationTimestamp}',
@@ -180,8 +179,32 @@ def ScaleUpPods(
   max_wait_time = _GetScaleTimeout()
   resource_timeout = max_wait_time + 60 * 5  # 5 minutes after waiting to avoid
   # pod delete events from polluting data collection.
-  yaml_docs = cluster.ConvertManifestToYamlDicts(
-      MANIFEST_TEMPLATE,
+
+  # Ensure a GPU NodePool exists for EKS Karpenter before applying the workload.
+  is_eks_karpenter_aws_gpu = (
+      virtual_machine.GPU_COUNT.value
+      and FLAGS.cloud.lower() == 'aws'
+      and getattr(cluster, 'CLUSTER_TYPE', None) == 'Karpenter'
+  )
+
+  if is_eks_karpenter_aws_gpu:
+    cluster.ApplyManifest(
+        'container/kubernetes_scale/aws-gpu-nodepool.yaml.j2',
+        gpu_nodepool_name='gpu',
+        gpu_nodepool_label='gpu',
+        karpenter_ec2nodeclass_name='default',
+        gpu_instance_categories=['g'],
+        gpu_instance_families=['g6', 'g6e'],
+        gpu_capacity_types=['on-demand'],
+        gpu_arch=['amd64'],
+        gpu_os=['linux'],
+        gpu_taint_key='nvidia.com/gpu',
+        gpu_consolidate_after='1m',
+        gpu_consolidation_policy='WhenEmptyOrUnderutilized',
+        gpu_nodepool_cpu_limit=1000,
+    )
+
+  manifest_kwargs = dict(
       Name='kubernetes-scaleup',
       Replicas=num_new_pods,
       CpuRequest=CPUS_PER_POD.value,
@@ -192,12 +215,28 @@ def ScaleUpPods(
       EphemeralStorageRequest='10Mi',
       RolloutTimeout=max_wait_time,
       PodTimeout=resource_timeout,
+      Cloud=FLAGS.cloud.lower(),
   )
-  cluster.ModifyPodSpecPlacementYaml(
-      yaml_docs,
-      'kubernetes-scaleup',
-      cluster.default_nodepool.machine_type,
+
+  if is_eks_karpenter_aws_gpu:
+    manifest_kwargs.update({
+        'GpuNodepoolName': 'gpu',
+        'GpuTaintKey': 'nvidia.com/gpu',
+    })
+
+  yaml_docs = cluster.ConvertManifestToYamlDicts(
+      MANIFEST_TEMPLATE,
+      **manifest_kwargs,
   )
+
+  # Non-GPU path: keep existing placement behavior.
+  if not is_eks_karpenter_aws_gpu:
+    cluster.ModifyPodSpecPlacementYaml(
+        yaml_docs,
+        'kubernetes-scaleup',
+        cluster.default_nodepool.machine_type,
+    )
+
   resource_names = cluster.ApplyYaml(yaml_docs)
 
   assert resource_names
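Note: the three pieces PATCH 1/8 adds have to agree with each other before a
GPU pod can land on the new NodePool — the pod's nodeSelector must match the
label Karpenter puts on nodes it provisions, the pod must tolerate the
NodePool's taint, and the nvidia.com/gpu request is what triggers GPU
instance provisioning in the first place. A standalone Python sketch of how
they compose (build_gpu_scheduling and its defaults are illustrative only,
not code from this series):

    def build_gpu_scheduling(
        nodepool: str = 'gpu',
        taint_key: str = 'nvidia.com/gpu',
        gpu_count: int = 1,
    ) -> dict:
      """Pod-spec fragments that steer a pod onto the Karpenter GPU NodePool."""
      return {
          # Matches the karpenter.sh/nodepool label Karpenter applies to
          # nodes created from the NodePool above.
          'nodeSelector': {'karpenter.sh/nodepool': nodepool},
          # Tolerates the NodePool's NoSchedule taint so the pod is admitted.
          'tolerations': [
              {'key': taint_key, 'operator': 'Exists', 'effect': 'NoSchedule'},
          ],
          # Requesting nvidia.com/gpu is what makes Karpenter provision a
          # GPU node from the g6/g6e instance families required above.
          'resources': {
              'requests': {'nvidia.com/gpu': str(gpu_count)},
              'limits': {'nvidia.com/gpu': str(gpu_count)},
          },
      }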
From 633ce8070fe9cd1b203a54941194d0d036e791c6 Mon Sep 17 00:00:00 2001
From: Kiryl Filatau
Date: Wed, 7 Jan 2026 18:03:12 +0100
Subject: [PATCH 2/8] Move EKS Karpenter GPU NodePool setup to Prepare()

---
 .../kubernetes_scale_benchmark.py | 52 +++++++++++--------
 1 file changed, 29 insertions(+), 23 deletions(-)

diff --git a/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py b/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py
index 9462c36ea..739f8f5b8 100644
--- a/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py
+++ b/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py
@@ -88,10 +88,38 @@ def GetConfig(user_config):
   config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
   return config
 
+def _IsEksKarpenterAwsGpu(cluster: container_service.KubernetesCluster) -> bool:
+  return (
+      virtual_machine.GPU_COUNT.value
+      and FLAGS.cloud.lower() == 'aws'
+      and getattr(cluster, 'CLUSTER_TYPE', None) == 'Karpenter'
+  )
+
+def _EnsureEksKarpenterGpuNodepool(cluster: container_service.KubernetesCluster) -> None:
+  """Ensures a GPU NodePool exists for EKS Karpenter before applying workloads."""
+  if not _IsEksKarpenterAwsGpu(cluster):
+    return
+  cluster.ApplyManifest(
+      'container/kubernetes_scale/aws-gpu-nodepool.yaml.j2',
+      gpu_nodepool_name='gpu',
+      gpu_nodepool_label='gpu',
+      karpenter_ec2nodeclass_name='default',
+      gpu_instance_categories=['g'],
+      gpu_instance_families=['g6', 'g6e'],
+      gpu_capacity_types=['on-demand'],
+      gpu_arch=['amd64'],
+      gpu_os=['linux'],
+      gpu_taint_key='nvidia.com/gpu',
+      gpu_consolidate_after='1m',
+      gpu_consolidation_policy='WhenEmptyOrUnderutilized',
+      gpu_nodepool_cpu_limit=1000,
+  )
 
 def Prepare(bm_spec: benchmark_spec.BenchmarkSpec):
   """Sets additional spec attributes."""
   bm_spec.always_call_cleanup = True
+  assert bm_spec.container_cluster
+  _EnsureEksKarpenterGpuNodepool(bm_spec.container_cluster)
 
 
 def _GetRolloutCreationTime(rollout_name: str) -> int:
@@ -180,29 +208,7 @@ def ScaleUpPods(
   resource_timeout = max_wait_time + 60 * 5  # 5 minutes after waiting to avoid
   # pod delete events from polluting data collection.
 
-  # Ensure a GPU NodePool exists for EKS Karpenter before applying the workload.
-  is_eks_karpenter_aws_gpu = (
-      virtual_machine.GPU_COUNT.value
-      and FLAGS.cloud.lower() == 'aws'
-      and getattr(cluster, 'CLUSTER_TYPE', None) == 'Karpenter'
-  )
-
-  if is_eks_karpenter_aws_gpu:
-    cluster.ApplyManifest(
-        'container/kubernetes_scale/aws-gpu-nodepool.yaml.j2',
-        gpu_nodepool_name='gpu',
-        gpu_nodepool_label='gpu',
-        karpenter_ec2nodeclass_name='default',
-        gpu_instance_categories=['g'],
-        gpu_instance_families=['g6', 'g6e'],
-        gpu_capacity_types=['on-demand'],
-        gpu_arch=['amd64'],
-        gpu_os=['linux'],
-        gpu_taint_key='nvidia.com/gpu',
-        gpu_consolidate_after='1m',
-        gpu_consolidation_policy='WhenEmptyOrUnderutilized',
-        gpu_nodepool_cpu_limit=1000,
-    )
+  is_eks_karpenter_aws_gpu = _IsEksKarpenterAwsGpu(cluster)
 
   manifest_kwargs = dict(
       Name='kubernetes-scaleup',
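Note: moving the ApplyManifest call into Prepare() means the NodePool is
created once per run, before any pods are scheduled, and repeating it is
harmless since ApplyManifest goes through a declarative kubectl apply. The
gate itself is easy to check in isolation; a standalone rendition with the
FLAGS/GPU_COUNT plumbing stripped out (names here are illustrative):

    def is_eks_karpenter_aws_gpu(gpu_count, cloud, cluster_type) -> bool:
      # All three conditions must hold; a gpu_count of 0/None short-circuits.
      return bool(
          gpu_count and cloud.lower() == 'aws' and cluster_type == 'Karpenter'
      )

    assert is_eks_karpenter_aws_gpu(1, 'AWS', 'Karpenter')
    assert not is_eks_karpenter_aws_gpu(0, 'aws', 'Karpenter')  # no GPUs
    assert not is_eks_karpenter_aws_gpu(1, 'GCP', 'Karpenter')  # wrong cloud
    assert not is_eks_karpenter_aws_gpu(1, 'aws', 'EKS')        # not Karpenter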
From 2b8985dd14360770e2b480d887c1df48482e0b94 Mon Sep 17 00:00:00 2001
From: Kiryl Filatau
Date: Thu, 15 Jan 2026 16:57:51 +0100
Subject: [PATCH 3/8] Refactor AWS EKS Karpenter GPU node selector application

---
 perfkitbenchmarker/container_service.py | 2 +-
 .../kubernetes_scale/kubernetes_scale.yaml.j2 | 4 ----
 .../kubernetes_scale_benchmark.py | 19 ++++++++-----------
 .../aws/elastic_kubernetes_service.py | 14 ++++++++++----
 4 files changed, 19 insertions(+), 20 deletions(-)

diff --git a/perfkitbenchmarker/container_service.py b/perfkitbenchmarker/container_service.py
index f8d5905b0..8657ebfd3 100644
--- a/perfkitbenchmarker/container_service.py
+++ b/perfkitbenchmarker/container_service.py
@@ -2143,7 +2143,7 @@ def _ModifyPodSpecPlacementYaml(
     del name
     node_selectors = self.GetNodeSelectors(machine_type)
     if node_selectors:
-      pod_spec_yaml['nodeSelector'].update(node_selectors)
+      pod_spec_yaml.setdefault('nodeSelector', {}).update(node_selectors)
 
   def DeployIngress(
       self, name: str, namespace: str, port: int, health_path: str = ''
diff --git a/perfkitbenchmarker/data/container/kubernetes_scale/kubernetes_scale.yaml.j2 b/perfkitbenchmarker/data/container/kubernetes_scale/kubernetes_scale.yaml.j2
index 477666f45..a8ef0a611 100644
--- a/perfkitbenchmarker/data/container/kubernetes_scale/kubernetes_scale.yaml.j2
+++ b/perfkitbenchmarker/data/container/kubernetes_scale/kubernetes_scale.yaml.j2
@@ -13,10 +13,6 @@ spec:
       labels:
         name: {{ Name }}
     spec:
-      {%- if NvidiaGpuRequest and Cloud == 'aws' %}
-      nodeSelector:
-        karpenter.sh/nodepool: {{ GpuNodepoolName | default('gpu') }}
-      {%- endif %}
       containers:
       - name: {{ Name }}
         image: {{ Image }}
diff --git a/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py b/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py
index 739f8f5b8..b0c8188ba 100644
--- a/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py
+++ b/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py
@@ -224,24 +224,21 @@ def ScaleUpPods(
       Cloud=FLAGS.cloud.lower(),
   )
 
+  # GpuTaintKey is still needed for tolerations in the yaml template
   if is_eks_karpenter_aws_gpu:
-    manifest_kwargs.update({
-        'GpuNodepoolName': 'gpu',
-        'GpuTaintKey': 'nvidia.com/gpu',
-    })
+    manifest_kwargs['GpuTaintKey'] = 'nvidia.com/gpu'
 
   yaml_docs = cluster.ConvertManifestToYamlDicts(
       MANIFEST_TEMPLATE,
       **manifest_kwargs,
   )
 
-  # Non-GPU path: keep existing placement behavior.
-  if not is_eks_karpenter_aws_gpu:
-    cluster.ModifyPodSpecPlacementYaml(
-        yaml_docs,
-        'kubernetes-scaleup',
-        cluster.default_nodepool.machine_type,
-    )
+  # Always use ModifyPodSpecPlacementYaml to add nodeSelectors via GetNodeSelectors()
+  cluster.ModifyPodSpecPlacementYaml(
+      yaml_docs,
+      'kubernetes-scaleup',
+      cluster.default_nodepool.machine_type,
+  )
 
   resource_names = cluster.ApplyYaml(yaml_docs)
 
diff --git a/perfkitbenchmarker/providers/aws/elastic_kubernetes_service.py b/perfkitbenchmarker/providers/aws/elastic_kubernetes_service.py
index fd856b1e6..cfb9ed2d1 100644
--- a/perfkitbenchmarker/providers/aws/elastic_kubernetes_service.py
+++ b/perfkitbenchmarker/providers/aws/elastic_kubernetes_service.py
@@ -1283,10 +1283,16 @@ def ResizeNodePool(
 
   def GetNodeSelectors(self, machine_type: str | None = None) -> dict[str, str]:
     """Gets the node selectors section of a yaml for the provider."""
-    machine_family = util.GetMachineFamily(machine_type)
-    if machine_family:
-      return {'karpenter.k8s.aws/instance-family': machine_family}
-    return {}
+    selectors = {}
+    # If GPU is requested, use the GPU nodepool
+    if virtual_machine.GPU_TYPE.value:
+      selectors['karpenter.sh/nodepool'] = 'gpu'
+    else:
+      # Otherwise, use instance-family selector if machine_type is specified
+      machine_family = util.GetMachineFamily(machine_type)
+      if machine_family:
+        selectors['karpenter.k8s.aws/instance-family'] = machine_family
+    return selectors
 
   def GetNodePoolNames(self) -> list[str]:
     """Gets node pool names for the cluster.
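Note: the one-line container_service.py change above is the crux of PATCH
3/8. The rendered GPU pod spec no longer carries a hardcoded nodeSelector
block, so the old direct indexing would raise KeyError before
GetNodeSelectors() could merge anything in. A standalone illustration of the
difference:

    pod_spec = {'containers': []}  # rendered spec: no 'nodeSelector' key
    selectors = {'karpenter.sh/nodepool': 'gpu'}

    # Old: pod_spec['nodeSelector'].update(selectors)  -> KeyError
    # New: setdefault creates the dict on demand, then merges into it.
    pod_spec.setdefault('nodeSelector', {}).update(selectors)
    assert pod_spec['nodeSelector'] == {'karpenter.sh/nodepool': 'gpu'}

    # An existing selector is preserved and merged with, not replaced.
    pod_spec2 = {'nodeSelector': {'kubernetes.io/os': 'linux'}}
    pod_spec2.setdefault('nodeSelector', {}).update(selectors)
    assert len(pod_spec2['nodeSelector']) == 2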
From ca8e619c87771442e21b6b015294a379677a86c7 Mon Sep 17 00:00:00 2001
From: Kiryl Filatau
Date: Tue, 20 Jan 2026 18:30:43 +0100
Subject: [PATCH 4/8] Fix GPU toleration to apply only to EKS Karpenter clusters

---
 .../data/container/kubernetes_scale/kubernetes_scale.yaml.j2 | 4 ++--
 .../linux_benchmarks/kubernetes_scale_benchmark.py | 1 +
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/perfkitbenchmarker/data/container/kubernetes_scale/kubernetes_scale.yaml.j2 b/perfkitbenchmarker/data/container/kubernetes_scale/kubernetes_scale.yaml.j2
index a8ef0a611..ef911f16d 100644
--- a/perfkitbenchmarker/data/container/kubernetes_scale/kubernetes_scale.yaml.j2
+++ b/perfkitbenchmarker/data/container/kubernetes_scale/kubernetes_scale.yaml.j2
@@ -60,8 +60,8 @@ spec:
         operator: "Exists"
         effect: "NoExecute"
         tolerationSeconds: {{ PodTimeout }}
-      {%- if NvidiaGpuRequest and Cloud == 'aws' %}
-      - key: {{ GpuTaintKey | default('nvidia.com/gpu') }}
+      {%- if GpuTaintKey %}
+      - key: {{ GpuTaintKey }}
         operator: Exists
         effect: NoSchedule
       {%- endif %}
diff --git a/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py b/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py
index b0c8188ba..e0ea1d711 100644
--- a/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py
+++ b/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py
@@ -222,6 +222,7 @@ def ScaleUpPods(
       RolloutTimeout=max_wait_time,
       PodTimeout=resource_timeout,
       Cloud=FLAGS.cloud.lower(),
+      GpuTaintKey=None,  # Only set to 'nvidia.com/gpu' for EKS Karpenter
   )
 
   # GpuTaintKey is still needed for tolerations in the yaml template
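Note: after PATCH 4/8 the toleration is keyed off a single template
variable, so passing GpuTaintKey=None cleanly disables it for every other
cloud and cluster type. A small jinja2 sketch against just the tolerations
tail of the template (the snippet is re-typed inline for the demo, not read
from the template file):

    import jinja2

    snippet = (
        'tolerations:\n'
        '{%- if GpuTaintKey %}\n'
        '- key: {{ GpuTaintKey }}\n'
        '  operator: Exists\n'
        '  effect: NoSchedule\n'
        '{%- endif %}\n'
    )
    template = jinja2.Template(snippet)

    # None is falsy in the template, so non-GPU runs emit no toleration.
    assert 'NoSchedule' not in template.render(GpuTaintKey=None)
    # The EKS Karpenter GPU path sets the key and gets the toleration.
    assert 'nvidia.com/gpu' in template.render(GpuTaintKey='nvidia.com/gpu')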
From d037155d74576640e4fb9b26e56b1b6a0085dca5 Mon Sep 17 00:00:00 2001
From: Kiryl Filatau
Date: Tue, 20 Jan 2026 21:53:59 +0100
Subject: [PATCH 5/8] pyink adjustments

---
 .../linux_benchmarks/kubernetes_scale_benchmark.py | 7 ++++++-
 .../providers/aws/elastic_kubernetes_service.py | 8 ++------
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py b/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py
index e0ea1d711..218db3c42 100644
--- a/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py
+++ b/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py
@@ -88,6 +88,7 @@ def GetConfig(user_config):
   config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
   return config
 
+
 def _IsEksKarpenterAwsGpu(cluster: container_service.KubernetesCluster) -> bool:
   return (
       virtual_machine.GPU_COUNT.value
@@ -95,7 +96,10 @@ def _IsEksKarpenterAwsGpu(cluster: container_service.KubernetesCluster) -> bool:
       and getattr(cluster, 'CLUSTER_TYPE', None) == 'Karpenter'
   )
 
-def _EnsureEksKarpenterGpuNodepool(cluster: container_service.KubernetesCluster) -> None:
+
+def _EnsureEksKarpenterGpuNodepool(
+    cluster: container_service.KubernetesCluster,
+) -> None:
   """Ensures a GPU NodePool exists for EKS Karpenter before applying workloads."""
   if not _IsEksKarpenterAwsGpu(cluster):
     return
@@ -115,6 +119,7 @@ def _EnsureEksKarpenterGpuNodepool(cluster: container_service.KubernetesCluster)
       gpu_nodepool_cpu_limit=1000,
   )
 
+
 def Prepare(bm_spec: benchmark_spec.BenchmarkSpec):
   """Sets additional spec attributes."""
   bm_spec.always_call_cleanup = True
diff --git a/perfkitbenchmarker/providers/aws/elastic_kubernetes_service.py b/perfkitbenchmarker/providers/aws/elastic_kubernetes_service.py
index 8ca68c1cf..891d3f70b 100644
--- a/perfkitbenchmarker/providers/aws/elastic_kubernetes_service.py
+++ b/perfkitbenchmarker/providers/aws/elastic_kubernetes_service.py
@@ -313,9 +313,7 @@ def _ingress_manifest_path(self) -> str:
     """The path to the ingress manifest template file."""
     return 'container/ingress.yaml.j2'
 
-  def _WaitForIngress(
-      self, name: str, namespace: str, port: int
-  ) -> str:
+  def _WaitForIngress(self, name: str, namespace: str, port: int) -> str:
     """Waits for an Ingress resource to be deployed to the cluster."""
     del port
     self.WaitForResource(
@@ -706,9 +704,7 @@ def _Create(self):
         }],
     },
     'iamIdentityMappings': [{
-        'arn': (
-            f'arn:aws:iam::{self.account}:role/KarpenterNodeRole-{self.name}'
-        ),
+        'arn': f'arn:aws:iam::{self.account}:role/KarpenterNodeRole-{self.name}',
        'username': 'system:node:{{EC2PrivateDNSName}}',
        'groups': ['system:bootstrappers', 'system:nodes'],
    }],
From faed023c585ee8004a3ae67c8db67a7524632a5b Mon Sep 17 00:00:00 2001
From: KirylF
Date: Tue, 20 Jan 2026 23:23:04 +0100
Subject: [PATCH 6/8] adjust the style comments

---
 .../linux_benchmarks/kubernetes_scale_benchmark.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py b/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py
index 218db3c42..4283e0f93 100644
--- a/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py
+++ b/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py
@@ -227,7 +227,7 @@ def ScaleUpPods(
       RolloutTimeout=max_wait_time,
       PodTimeout=resource_timeout,
       Cloud=FLAGS.cloud.lower(),
-      GpuTaintKey=None,  # Only set to 'nvidia.com/gpu' for EKS Karpenter
+      GpuTaintKey=None,
   )
 
   # GpuTaintKey is still needed for tolerations in the yaml template
@@ -239,7 +239,7 @@ def ScaleUpPods(
       **manifest_kwargs,
   )
 
-  # Always use ModifyPodSpecPlacementYaml to add nodeSelectors via GetNodeSelectors()
+  # Use ModifyPodSpecPlacementYaml to add nodeSelectors via GetNodeSelectors()
   cluster.ModifyPodSpecPlacementYaml(
       yaml_docs,
       'kubernetes-scaleup',
From 4213f640ac03342c117354799ee9b755ee198c17 Mon Sep 17 00:00:00 2001
From: Kiryl Filatau
Date: Wed, 21 Jan 2026 18:08:52 +0100
Subject: [PATCH 7/8] pytype adjustments

---
 .../linux_benchmarks/kubernetes_scale_benchmark.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py b/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py
index 4283e0f93..d0066651f 100644
--- a/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py
+++ b/perfkitbenchmarker/linux_benchmarks/kubernetes_scale_benchmark.py
@@ -90,7 +90,7 @@ def GetConfig(user_config):
 
 
 def _IsEksKarpenterAwsGpu(cluster: container_service.KubernetesCluster) -> bool:
-  return (
+  return bool(
       virtual_machine.GPU_COUNT.value
       and FLAGS.cloud.lower() == 'aws'
      and getattr(cluster, 'CLUSTER_TYPE', None) == 'Karpenter'
@@ -154,6 +154,7 @@ def Run(bm_spec: benchmark_spec.BenchmarkSpec) -> list[sample.Sample]:
   assert bm_spec.container_cluster
   cluster = bm_spec.container_cluster
   assert isinstance(cluster, container_service.KubernetesCluster)
+  cluster: container_service.KubernetesCluster = cluster
 
   # Warm up the cluster by creating a single pod. This compensates for
   # differences between Standard & Autopilot, where Standard already has 1 node
@@ -438,7 +439,7 @@ def GetStatusConditionsForResourceType(
 def ConvertToEpochTime(timestamp: str) -> int:
   """Converts a timestamp to epoch time."""
   # Example: 2024-11-08T23:44:36Z
-  return parser.parse(timestamp).timestamp()
+  return int(parser.parse(timestamp).timestamp())

From 326e0886ca4159f6b790219816d955e54fb9803a Mon Sep 17 00:00:00 2001
From: Kiryl Filatau
Date: Wed, 21 Jan 2026 20:48:27 +0100
Subject: [PATCH 8/8] Add missing pytz to requirements.txt

---
 requirements.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/requirements.txt b/requirements.txt
index aab449a55..d92a1d363 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -23,6 +23,7 @@ numpy>=1.16.5
 packaging
 pandas>=1.1.5
 pint
+pytz
 PyYAML>=5.4.1
 pywinrm
 requests
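Note, closing the loop on PATCH 7/8: dateutil's parser returns a
timezone-aware datetime for the trailing 'Z', so .timestamp() needs no
local-timezone guess, and the int() cast is exactly what the declared
-> int return type required. Standalone:

    from dateutil import parser

    ts = parser.parse('2024-11-08T23:44:36Z')  # tz-aware datetime, UTC
    epoch = int(ts.timestamp())                # float -> int for pytype
    assert epoch == 1731109476

pytz itself is presumably consumed elsewhere in the tree (nothing in this
series imports it directly); the requirements.txt entry in PATCH 8/8 just
makes that already-used dependency explicit.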