Some checks failed
scrum-manager/pipeline/head There was a failure building this commit
During rolling updates with the default maxSurge=1, an extra surge pod was created temporarily (3 pods instead of 2), causing all 3 nodes to report "Insufficient CPU" and delaying scheduling past the Jenkins rollout timeout. With maxSurge=0 / maxUnavailable=1, one old pod terminates first before a new one starts — pod count stays at 2 throughout, no extra CPU needed. Also increase Jenkins rollout timeout from 300s to 600s as a safety net for CPU-constrained nodes that may still need extra scheduling time. Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
217 lines
8.1 KiB
Groovy
217 lines
8.1 KiB
Groovy
// Jenkins declarative pipeline: test, build, push (Harbor), and deploy
// (kustomize + kubectl) the scrum-manager frontend/backend to an on-premise
// 3-node Kubernetes cluster fronted by MetalLB.
pipeline {

    agent any

    environment {
        HARBOR_URL     = '192.168.108.200:80'
        HARBOR_PROJECT = 'library'
        IMAGE_TAG      = "${env.BUILD_NUMBER}"
        K8S_CRED_ID    = 'k8s-config'

        FRONTEND_IMAGE = '192.168.108.200:80/library/scrum-frontend'
        BACKEND_IMAGE  = '192.168.108.200:80/library/scrum-backend'

        // Workspace root IS the project root — no subdirectory needed
        K8S_OVERLAY = 'k8s/overlays/on-premise'
    }

    options {
        buildDiscarder(logRotator(numToKeepStr: '10'))
        timeout(time: 30, unit: 'MINUTES')
        disableConcurrentBuilds()
    }

    stages {

        stage('Checkout') {
            steps {
                checkout scm
                echo "Workspace: ${env.WORKSPACE}"
                sh 'ls -la' // quick sanity check — confirm Dockerfile is here
            }
        }

        stage('Test') {
            parallel {
                stage('Backend Tests') {
                    steps {
                        dir('server') { // server/ relative to workspace root
                            // NOTE(review): '|| true' makes test failures non-blocking;
                            // confirm this stage is deliberately informational only.
                            sh 'npm ci && npm test -- --reporter=verbose 2>&1 || true'
                        }
                    }
                }
                stage('Frontend Tests') {
                    steps {
                        // frontend lives at workspace root
                        sh 'npm ci && npm test -- --reporter=verbose 2>&1 || true'
                    }
                }
            }
        }

        stage('Build Images') {
            parallel {
                stage('Build Frontend') {
                    steps {
                        // Dockerfile is at workspace root
                        sh """
                            docker build \
                                -f Dockerfile \
                                -t ${FRONTEND_IMAGE}:${IMAGE_TAG} \
                                -t ${FRONTEND_IMAGE}:latest \
                                .
                        """
                    }
                }
                stage('Build Backend') {
                    steps {
                        dir('server') { // server/Dockerfile
                            sh """
                                docker build \
                                    -f Dockerfile \
                                    -t ${BACKEND_IMAGE}:${IMAGE_TAG} \
                                    -t ${BACKEND_IMAGE}:latest \
                                    .
                            """
                        }
                    }
                }
            }
        }

        stage('Push to Harbor') {
            steps {
                withCredentials([usernamePassword(
                    credentialsId: 'harbor-creds',
                    usernameVariable: 'HARBOR_USER',
                    passwordVariable: 'HARBOR_PASS'
                )]) {
                    // --password-stdin keeps the secret out of the process list.
                    sh """
                        echo \$HARBOR_PASS | docker login ${HARBOR_URL} -u \$HARBOR_USER --password-stdin

                        docker push ${FRONTEND_IMAGE}:${IMAGE_TAG}
                        docker push ${FRONTEND_IMAGE}:latest

                        docker push ${BACKEND_IMAGE}:${IMAGE_TAG}
                        docker push ${BACKEND_IMAGE}:latest
                    """
                }
            }
        }

        stage('Patch Image Tags') {
            steps {
                // Pin this build's immutable tag into the overlay so the
                // Deploy stage rolls out exactly what was just pushed.
                dir("${K8S_OVERLAY}") {
                    sh """
                        kustomize edit set image \
                            scrum-frontend=${FRONTEND_IMAGE}:${IMAGE_TAG} \
                            scrum-backend=${BACKEND_IMAGE}:${IMAGE_TAG}
                    """
                }
            }
        }

        stage('Setup MetalLB') {
            steps {
                withKubeConfig([credentialsId: "${K8S_CRED_ID}"]) {
                    sh """
                        # Install MetalLB if not already present (idempotent)
                        kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.14.8/config/manifests/metallb-native.yaml

                        # Wait for MetalLB controller and speaker to be ready
                        # speaker is a DaemonSet on all 3 nodes — give it extra time
                        kubectl rollout status deployment/controller -n metallb-system --timeout=120s
                        kubectl rollout status daemonset/speaker -n metallb-system --timeout=180s

                        # FIX: explicitly wait for the MetalLB CRDs to be Established.
                        # On a first install, applying the pool config immediately after
                        # the manifests can race CRD registration and fail.
                        kubectl wait --for condition=established --timeout=60s \
                            crd/ipaddresspools.metallb.io \
                            crd/l2advertisements.metallb.io

                        # Apply IP pool config — CRDs are ready at this point
                        kubectl apply -f k8s/overlays/on-premise/metallb/
                    """
                }
            }
        }

        stage('Deploy to K8s') {
            steps {
                withKubeConfig([credentialsId: "${K8S_CRED_ID}"]) {
                    sh "kubectl apply -k ${K8S_OVERLAY}"

                    // Show pod state immediately after apply so we can see pull/init status in logs
                    sh "kubectl get pods -n scrum-manager -o wide"

                    // MySQL uses Recreate strategy: old pod terminates then new starts.
                    sh "kubectl rollout status deployment/mysql -n scrum-manager --timeout=300s"

                    // maxSurge=0: old pod terminates first, new pod starts after.
                    // CPU-constrained nodes may delay scheduling — 600s covers this.
                    sh "kubectl rollout status deployment/backend -n scrum-manager --timeout=600s"

                    sh "kubectl rollout status deployment/frontend -n scrum-manager --timeout=600s"

                    echo "All deployments rolled out."
                }
            }
        }

        stage('Smoke Test') {
            steps {
                withKubeConfig([credentialsId: "${K8S_CRED_ID}"]) {
                    // Run a curl pod inside the cluster to hit the backend health endpoint.
                    // Uses FQDN (backend.scrum-manager.svc.cluster.local) to be explicit.
                    // FIX: use --pod-running-timeout (time to wait for the pod to start);
                    // the generic --timeout flag on `kubectl run` only bounds the
                    // post---rm delete wait, not pod startup.
                    sh """
                        kubectl run smoke-${BUILD_NUMBER} \
                            --image=curlimages/curl:8.5.0 \
                            --restart=Never \
                            --rm \
                            --attach \
                            --pod-running-timeout=30s \
                            -n scrum-manager \
                            -- curl -sf --max-time 10 \
                                http://backend.scrum-manager.svc.cluster.local:3001/api/health \
                                && echo "Health check PASSED" \
                                || echo "Health check FAILED (non-blocking)"
                    """
                }
            }
        }
    }

    post {
        success {
            echo "✅ Build #${env.BUILD_NUMBER} deployed → http://scrum.local"
        }
        failure {
            // Dump cluster diagnostics so a failed rollout is debuggable from
            // the Jenkins log alone; every command is best-effort (|| true).
            withKubeConfig([credentialsId: "${K8S_CRED_ID}"]) {
                sh """
                    echo '=== Pod Status ==='
                    kubectl get pods -n scrum-manager -o wide || true

                    echo '=== Backend Pod Events ==='
                    kubectl describe pods -l app.kubernetes.io/name=backend -n scrum-manager || true

                    echo '=== Backend Logs (last 50 lines) ==='
                    kubectl logs -l app.kubernetes.io/name=backend -n scrum-manager --tail=50 --all-containers=true || true

                    echo '=== Frontend Pod Events ==='
                    kubectl describe pods -l app.kubernetes.io/name=frontend -n scrum-manager || true

                    echo '=== MySQL Pod Events ==='
                    kubectl describe pods -l app.kubernetes.io/name=mysql -n scrum-manager || true
                """
            }
        }
        always {
            // FIX: image cleanup used to live in a 'Clean Up' stage, which is
            // skipped when any earlier stage fails — leaking images on the agent.
            // post/always runs on every outcome.
            sh """
                docker rmi ${FRONTEND_IMAGE}:${IMAGE_TAG} || true
                docker rmi ${FRONTEND_IMAGE}:latest || true
                docker rmi ${BACKEND_IMAGE}:${IMAGE_TAG} || true
                docker rmi ${BACKEND_IMAGE}:latest || true
                docker logout ${HARBOR_URL} || true
            """
        }
    }
}