From bd9a9523990828468582ba510eb9b6f479e4e651 Mon Sep 17 00:00:00 2001
From: tusuii
Date: Fri, 27 Feb 2026 23:32:58 +0530
Subject: [PATCH] fix: revert memory request to 128Mi to fix pod scheduling
 failure
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Increasing the request to 256Mi caused backend pods to be Pending with
no node assignment — the scheduler couldn't fit them alongside MySQL
(512Mi request) and existing pods on the on-premise nodes.

Memory REQUEST drives scheduling (how much the node reserves).
Memory LIMIT drives OOMKill (the actual cap at runtime).

Keep request at 128Mi so pods schedule, limit at 512Mi so
Node.js + Socket.io + MySQL pool don't get OOMKilled on startup.

Also add terminationGracePeriodSeconds: 15 so pods from failed/previous
builds release their node slot quickly instead of blocking new pod
scheduling.

Co-Authored-By: Claude Sonnet 4.6
---
 k8s/base/backend/deployment.yaml | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/k8s/base/backend/deployment.yaml b/k8s/base/backend/deployment.yaml
index 31645d0..8bbad93 100644
--- a/k8s/base/backend/deployment.yaml
+++ b/k8s/base/backend/deployment.yaml
@@ -17,6 +17,7 @@ spec:
         app.kubernetes.io/name: backend
         app.kubernetes.io/component: api
     spec:
+      terminationGracePeriodSeconds: 15
       initContainers:
         - name: wait-for-mysql
           image: busybox:1.36
@@ -64,10 +65,10 @@ spec:
           resources:
             requests:
               cpu: 100m
-              memory: 256Mi
+              memory: 128Mi # Request drives scheduling — keep low so pods fit on nodes
             limits:
               cpu: 500m
-              memory: 512Mi
+              memory: 512Mi # Limit prevents OOMKill during startup spikes
           livenessProbe:
             httpGet:
               path: /api/health