From 5ed8d0bbdc85bd73ff5f615f4d30ddf48fe84796 Mon Sep 17 00:00:00 2001
From: tusuii
Date: Fri, 27 Feb 2026 23:02:54 +0530
Subject: [PATCH] fix: remove PVC patch that broke kubectl apply on bound claims

The mysql-data-pvc was already dynamically provisioned by the cluster's
'local-path' StorageClass. The overlay patch tried to change
storageClassName to 'manual' and volumeName on an already-bound PVC,
which Kubernetes forbids:

"spec is immutable after creation except resources.requests"

Fixes:
- Remove mysql-pvc-patch from kustomization.yaml (PVC left as-is)
- Remove mysql-pv.yaml resource (not needed with dynamic provisioner)
- Add comment explaining when manual PV/PVC is needed vs not

Jenkinsfile: add --timeout and FQDN to smoke test curl; add comments
explaining MySQL Recreate strategy startup timing expectations.

Co-Authored-By: Claude Sonnet 4.6
---
 Jenkinsfile                                | 17 ++++++++++---
 k8s/overlays/on-premise/kustomization.yaml |  9 +++----
 k8s/overlays/on-premise/mysql-pv.yaml      | 29 ----------------------
 3 files changed, 18 insertions(+), 37 deletions(-)
 delete mode 100644 k8s/overlays/on-premise/mysql-pv.yaml

diff --git a/Jenkinsfile b/Jenkinsfile
index 175f26d..43ee608 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -115,11 +115,18 @@ pipeline {
         withKubeConfig([credentialsId: "${K8S_CRED_ID}"]) {
           sh "kubectl apply -k ${K8S_OVERLAY}"
 
+          // MySQL uses Recreate strategy: old pod terminates (~30s) before
+          // new pod starts. Readiness probe initialDelaySeconds=30 + up to
+          // 10 retries × 5s = 80s. Total worst-case: ~110s → 300s is safe.
           sh "kubectl rollout status deployment/mysql -n scrum-manager --timeout=300s"
+
+          // Backend initContainer sleeps 15s after MySQL TCP is up before
+          // starting the Node process. 300s covers slow-start scenarios.
           sh "kubectl rollout status deployment/backend -n scrum-manager --timeout=300s"
+
           sh "kubectl rollout status deployment/frontend -n scrum-manager --timeout=180s"
 
-          echo "✅ All deployments rolled out."
+          echo "All deployments rolled out."
         }
       }
     }
@@ -127,14 +134,18 @@
     stage('Smoke Test') {
       steps {
         withKubeConfig([credentialsId: "${K8S_CRED_ID}"]) {
+          // Run a curl pod inside the cluster to hit the backend health endpoint.
+          // Uses FQDN (backend.scrum-manager.svc.cluster.local) to be explicit.
           sh """
             kubectl run smoke-${BUILD_NUMBER} \
-              --image=curlimages/curl:latest \
+              --image=curlimages/curl:8.5.0 \
               --restart=Never \
               --rm \
               --attach \
+              --timeout=30s \
               -n scrum-manager \
-              -- curl -sf http://backend:3001/api/health \
+              -- curl -sf --max-time 10 \
+              http://backend.scrum-manager.svc.cluster.local:3001/api/health \
               && echo "Health check PASSED" \
               || echo "Health check FAILED (non-blocking)"
           """
diff --git a/k8s/overlays/on-premise/kustomization.yaml b/k8s/overlays/on-premise/kustomization.yaml
index f00470e..f0e5a46 100644
--- a/k8s/overlays/on-premise/kustomization.yaml
+++ b/k8s/overlays/on-premise/kustomization.yaml
@@ -19,11 +19,10 @@ resources:
 - ../../base
 - ingress.yaml
 
-patches:
-  - path: mysql-pvc-patch.yaml
-    target:
-      kind: PersistentVolumeClaim
-      name: mysql-data-pvc
+# NOTE: mysql-pv.yaml and mysql-pvc-patch.yaml are only needed on clusters
+# without a default StorageClass. This cluster uses 'local-path' (dynamic
+# provisioner), so the base PVC works as-is and must NOT be patched after
+# it is already bound.
 
 images:
   - name: scrum-frontend
diff --git a/k8s/overlays/on-premise/mysql-pv.yaml b/k8s/overlays/on-premise/mysql-pv.yaml
deleted file mode 100644
index e53f870..0000000
--- a/k8s/overlays/on-premise/mysql-pv.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: mysql-pv
-  labels:
-    type: local
-spec:
-  storageClassName: manual
-  capacity:
-    storage: 5Gi
-  accessModes:
-    - ReadWriteOnce
-  persistentVolumeReclaimPolicy: Retain
-  hostPath:
-    path: "/mnt/data/mysql"
-    # DirectoryOrCreate: kubelet will create the dir if it doesn't exist,
-    # preventing MySQL CrashLoopBackOff due to missing mount path.
-    type: DirectoryOrCreate
-  # IMPORTANT for multi-node clusters: uncomment nodeAffinity and set your
-  # node's hostname so MySQL always schedules on the node that has the data.
-  # Run: kubectl get nodes to find the hostname.
-  # nodeAffinity:
-  #   required:
-  #     nodeSelectorTerms:
-  #       - matchExpressions:
-  #           - key: kubernetes.io/hostname
-  #             operator: In
-  #             values:
-  #               - YOUR-NODE-HOSTNAME