scrum-manager/Jenkinsfile

fix: make MetalLB IP pool apply resilient to broken webhook state
Wait for the MetalLB controller deployment to be ready before applying
the IPAddressPool/L2Advertisement custom resources. If the webhook
service has no ready endpoints (a stale ClusterIP from a previously
removed controller), delete the ValidatingWebhookConfiguration so the
apply is not blocked. This prevents the 'connection refused' webhook
failure seen when a duplicate MetalLB install left behind a broken
webhook service endpoint.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
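
The recovery logic described above is implemented inline in the 'Setup MetalLB' stage of the Jenkinsfile below. As a rough sketch only, the same check could be factored into a Jenkins shared-library helper; the method name, its namespace parameter, and the factoring itself are hypothetical and not part of this repository, while the MetalLB object names are the upstream defaults.

    // Hypothetical shared-library helper, not code from this repo: the same
    // endpoint check that the 'Setup MetalLB' stage below runs inline.
    def ensureMetallbWebhookUsable(String ns = 'metallb-system') {
        // Ready endpoint IPs behind the webhook Service. An empty result means
        // the webhook ClusterIP is stale and any IPAddressPool apply would be
        // rejected with 'connection refused'.
        def addrs = sh(
            script: "kubectl get endpoints metallb-webhook-service -n ${ns} " +
                    "-o jsonpath='{.subsets[*].addresses[*].ip}' 2>/dev/null || true",
            returnStdout: true
        ).trim()
        if (!addrs) {
            echo "No ready webhook endpoints in ${ns}; deleting stale webhook config."
            sh 'kubectl delete validatingwebhookconfiguration metallb-webhook-configuration || true'
        }
    }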

pipeline {
    agent any

    environment {
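        // NOTE: the registry is addressed over plain HTTP on port 80, so the
        // Docker daemon on this agent presumably lists 192.168.108.200:80
        // under insecure-registries.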
        HARBOR_URL = '192.168.108.200:80'
        HARBOR_PROJECT = 'library'
        IMAGE_TAG = "${env.BUILD_NUMBER}"
        K8S_CRED_ID = 'k8s-config'
        FRONTEND_IMAGE = "${HARBOR_URL}/${HARBOR_PROJECT}/scrum-frontend"
        BACKEND_IMAGE = "${HARBOR_URL}/${HARBOR_PROJECT}/scrum-backend"
        // Workspace root IS the project root — no subdirectory needed
        K8S_OVERLAY = 'k8s/overlays/on-premise'
    }
    options {
        buildDiscarder(logRotator(numToKeepStr: '10'))
        timeout(time: 30, unit: 'MINUTES')
        disableConcurrentBuilds()
    }

    stages {
        stage('Checkout') {
            steps {
                checkout scm
                echo "Workspace: ${env.WORKSPACE}"
                sh 'ls -la' // quick sanity check — confirm Dockerfile is here
            }
        }
        stage('Test') {
            parallel {
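                // Both runs end in '|| true', so failing tests are reported in
                // the log but never fail the build (non-blocking by design).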
                stage('Backend Tests') {
                    steps {
                        dir('server') { // server/ relative to workspace root
                            sh 'npm ci && npm test -- --reporter=verbose 2>&1 || true'
                        }
                    }
                }
                stage('Frontend Tests') {
                    steps {
                        // frontend lives at workspace root
                        sh 'npm ci && npm test -- --reporter=verbose 2>&1 || true'
                    }
                }
            }
        }
        stage('Build Images') {
            parallel {
                stage('Build Frontend') {
                    steps {
                        // Dockerfile is at workspace root
                        sh """
                            docker build \
                                -f Dockerfile \
                                -t ${FRONTEND_IMAGE}:${IMAGE_TAG} \
                                -t ${FRONTEND_IMAGE}:latest \
                                .
                        """
                    }
                }
                stage('Build Backend') {
                    steps {
                        dir('server') { // server/Dockerfile
                            sh """
                                docker build \
                                    -f Dockerfile \
                                    -t ${BACKEND_IMAGE}:${IMAGE_TAG} \
                                    -t ${BACKEND_IMAGE}:latest \
                                    .
                            """
                        }
                    }
                }
            }
        }
        stage('Push to Harbor') {
            steps {
                withCredentials([usernamePassword(
                    credentialsId: 'harbor-creds',
                    usernameVariable: 'HARBOR_USER',
                    passwordVariable: 'HARBOR_PASS'
                )]) {
                    sh """
                        echo \$HARBOR_PASS | docker login ${HARBOR_URL} -u \$HARBOR_USER --password-stdin
                        docker push ${FRONTEND_IMAGE}:${IMAGE_TAG}
                        docker push ${FRONTEND_IMAGE}:latest
                        docker push ${BACKEND_IMAGE}:${IMAGE_TAG}
                        docker push ${BACKEND_IMAGE}:latest
                    """
                }
            }
        }
        stage('Patch Image Tags') {
            steps {
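                // 'kustomize edit set image' rewrites kustomization.yaml in the
                // workspace only; nothing is committed back to Git.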
dir("${K8S_OVERLAY}") {
sh """
kustomize edit set image \
scrum-frontend=${FRONTEND_IMAGE}:${IMAGE_TAG} \
scrum-backend=${BACKEND_IMAGE}:${IMAGE_TAG}
"""
}
}
}
        stage('Setup MetalLB') {
            steps {
                withKubeConfig([credentialsId: "${K8S_CRED_ID}"]) {
                    sh """
                        # MetalLB is already installed on this cluster.
                        # Wait for the controller to be ready — the webhook runs inside it.
                        kubectl rollout status deployment/controller -n metallb-system --timeout=120s

                        # If the webhook service has no ready endpoints (e.g. a stale ClusterIP
                        # from a previously applied metallb-native.yaml whose pods were removed),
                        # delete the broken ValidatingWebhookConfiguration so the apply can
                        # proceed without being blocked by an unreachable webhook.
                        READY_ADDRS=\$(kubectl get endpoints metallb-webhook-service \\
                            -n metallb-system \\
                            -o jsonpath='{.subsets[*].addresses[*].ip}' 2>/dev/null || echo "")
                        if [ -z "\$READY_ADDRS" ]; then
                            echo "WARNING: metallb-webhook-service has no ready endpoints — removing stale webhook config."
                            kubectl delete validatingwebhookconfiguration metallb-webhook-configuration 2>/dev/null || true
                        fi

                        kubectl apply -f ${K8S_OVERLAY}/metallb/
                        echo "MetalLB pod state:"
                        kubectl get pods -n metallb-system -o wide
                    """
                }
            }
        }
        stage('Deploy to K8s') {
            steps {
                withKubeConfig([credentialsId: "${K8S_CRED_ID}"]) {
                    sh "kubectl apply -k ${K8S_OVERLAY}"
                    // Show pod state immediately after apply so we can see pull/init status in the logs.
                    sh "kubectl get pods -n scrum-manager -o wide"
                    // MySQL uses the Recreate strategy: the old pod terminates, then the new one starts.
                    sh "kubectl rollout status deployment/mysql -n scrum-manager --timeout=300s"
                    // maxSurge=0: the old pod terminates first, the new pod starts after.
                    // CPU-constrained nodes may delay scheduling — 600s covers this.
                    sh "kubectl rollout status deployment/backend -n scrum-manager --timeout=600s"
                    sh "kubectl rollout status deployment/frontend -n scrum-manager --timeout=600s"
                    echo "All deployments rolled out."
                }
            }
        }
        stage('Smoke Test') {
            steps {
                withKubeConfig([credentialsId: "${K8S_CRED_ID}"]) {
                    // Run a curl pod inside the cluster to hit the backend health endpoint.
                    // Uses FQDN (backend.scrum-manager.svc.cluster.local) to be explicit.
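                    // With --rm and --attach, kubectl run waits for the pod and
                    // surfaces curl's exit code; the trailing '|| echo' then
                    // keeps a failed health check from failing the stage.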
sh """
kubectl run smoke-${BUILD_NUMBER} \
--image=curlimages/curl:8.5.0 \
--restart=Never \
--rm \
--attach \
--timeout=30s \
-n scrum-manager \
-- curl -sf --max-time 10 \
http://backend.scrum-manager.svc.cluster.local:3001/api/health \
&& echo "Health check PASSED" \
|| echo "Health check FAILED (non-blocking)"
"""
}
}
}
        stage('Clean Up') {
            steps {
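                // Remove only the local tags on this agent; the pushed images
                // remain in Harbor.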
sh """
docker rmi ${FRONTEND_IMAGE}:${IMAGE_TAG} || true
docker rmi ${FRONTEND_IMAGE}:latest || true
docker rmi ${BACKEND_IMAGE}:${IMAGE_TAG} || true
docker rmi ${BACKEND_IMAGE}:latest || true
"""
}
}
}
    post {
        success {
            echo "✅ Build #${env.BUILD_NUMBER} deployed → http://scrum.local"
        }
        failure {
            withKubeConfig([credentialsId: "${K8S_CRED_ID}"]) {
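                // Best-effort diagnostics: every command ends in '|| true' so
                // the failure handler itself cannot fail a second time.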
sh """
echo '=== Pod Status ==='
kubectl get pods -n scrum-manager -o wide || true
echo '=== Backend Pod Events ==='
kubectl describe pods -l app.kubernetes.io/name=backend -n scrum-manager || true
echo '=== Backend Logs (last 50 lines) ==='
kubectl logs -l app.kubernetes.io/name=backend -n scrum-manager --tail=50 --all-containers=true || true
echo '=== Frontend Pod Events ==='
kubectl describe pods -l app.kubernetes.io/name=frontend -n scrum-manager || true
echo '=== MySQL Pod Events ==='
kubectl describe pods -l app.kubernetes.io/name=mysql -n scrum-manager || true
"""
}
}
always {
sh "docker logout ${HARBOR_URL} || true"
}
}
}