fix: remove PVC patch that broke kubectl apply on bound claims
Some checks failed
scrum-manager/pipeline/head There was a failure building this commit

The mysql-data-pvc was already dynamically provisioned by the cluster's
'local-path' StorageClass. The overlay patch tried to change storageClassName
to 'manual' and volumeName on an already-bound PVC, which Kubernetes forbids:
  "spec is immutable after creation except resources.requests"

Fixes:
- Remove mysql-pvc-patch from kustomization.yaml (PVC left as-is)
- Remove mysql-pv.yaml resource (not needed with dynamic provisioner)
- Add comment explaining when manual PV/PVC is needed vs not

Jenkinsfile: add --timeout and FQDN to smoke test curl; add comments
explaining MySQL Recreate strategy startup timing expectations.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
tusuii
2026-02-27 23:02:54 +05:30
parent 73bd35173c
commit 5ed8d0bbdc
3 changed files with 18 additions and 37 deletions

17
Jenkinsfile vendored
View File

@@ -115,11 +115,18 @@ pipeline {
             withKubeConfig([credentialsId: "${K8S_CRED_ID}"]) {
                 sh "kubectl apply -k ${K8S_OVERLAY}"
+                // MySQL uses Recreate strategy: old pod terminates (~30s) before
+                // new pod starts. Readiness probe initialDelaySeconds=30 + up to
+                // 10 retries × 5s = 80s. Total worst-case: ~110s → 300s is safe.
                 sh "kubectl rollout status deployment/mysql -n scrum-manager --timeout=300s"
+                // Backend initContainer sleeps 15s after MySQL TCP is up before
+                // starting the Node process. 300s covers slow-start scenarios.
                 sh "kubectl rollout status deployment/backend -n scrum-manager --timeout=300s"
                 sh "kubectl rollout status deployment/frontend -n scrum-manager --timeout=180s"
                 echo "All deployments rolled out."
             }
         }
     }
@@ -127,14 +134,18 @@ pipeline {
         stage('Smoke Test') {
             steps {
                 withKubeConfig([credentialsId: "${K8S_CRED_ID}"]) {
+                    // Run a curl pod inside the cluster to hit the backend health endpoint.
+                    // Uses FQDN (backend.scrum-manager.svc.cluster.local) to be explicit.
                     sh """
                         kubectl run smoke-${BUILD_NUMBER} \
-                            --image=curlimages/curl:latest \
+                            --image=curlimages/curl:8.5.0 \
                             --restart=Never \
                             --rm \
                             --attach \
+                            --timeout=30s \
                             -n scrum-manager \
-                            -- curl -sf http://backend:3001/api/health \
+                            -- curl -sf --max-time 10 \
+                                http://backend.scrum-manager.svc.cluster.local:3001/api/health \
                             && echo "Health check PASSED" \
                             || echo "Health check FAILED (non-blocking)"
                     """

View File

@@ -19,11 +19,10 @@ resources:
 - ../../base
 - ingress.yaml
-patches:
-  - path: mysql-pvc-patch.yaml
-    target:
-      kind: PersistentVolumeClaim
-      name: mysql-data-pvc
+# NOTE: mysql-pv.yaml and mysql-pvc-patch.yaml are only needed on clusters
+# without a default StorageClass. This cluster uses 'local-path' (dynamic
+# provisioner), so the base PVC works as-is and must NOT be patched after
+# it is already bound.
 images:
 - name: scrum-frontend

View File

@@ -1,29 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: mysql-pv
labels:
type: local
spec:
storageClassName: manual
capacity:
storage: 5Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
hostPath:
path: "/mnt/data/mysql"
# DirectoryOrCreate: kubelet will create the dir if it doesn't exist,
# preventing MySQL CrashLoopBackOff due to missing mount path.
type: DirectoryOrCreate
# IMPORTANT for multi-node clusters: uncomment nodeAffinity and set your
# node's hostname so MySQL always schedules on the node that has the data.
# Run: kubectl get nodes to find the hostname.
# nodeAffinity:
# required:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/hostname
# operator: In
# values:
# - YOUR-NODE-HOSTNAME