MetalLB is already installed and configured on the cluster, so the pipeline no longer needs to apply IPAddressPool or L2Advertisement resources. Removed the 'Setup MetalLB' stage and deleted the MetalLB overlay files. The frontend Service already has `type: LoadBalancer` set, so MetalLB will automatically assign it an external IP on deployment. Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
198 lines
7.1 KiB
Groovy
198 lines
7.1 KiB
Groovy
// CI/CD pipeline: test → build images → push to Harbor → deploy to on-prem K8s via kustomize.
pipeline {
    agent any

    environment {
        // Registry endpoint and project. Image names are DERIVED from these so the
        // registry address/project only ever needs to change in one place.
        HARBOR_URL     = '192.168.108.200:80'
        HARBOR_PROJECT = 'library'
        IMAGE_TAG      = "${env.BUILD_NUMBER}"
        K8S_CRED_ID    = 'k8s-config'

        FRONTEND_IMAGE = "${HARBOR_URL}/${HARBOR_PROJECT}/scrum-frontend"
        BACKEND_IMAGE  = "${HARBOR_URL}/${HARBOR_PROJECT}/scrum-backend"

        // Workspace root IS the project root — no subdirectory needed
        K8S_OVERLAY = 'k8s/overlays/on-premise'
    }

    options {
        buildDiscarder(logRotator(numToKeepStr: '10'))
        timeout(time: 30, unit: 'MINUTES')
        disableConcurrentBuilds()
    }

    stages {

        stage('Checkout') {
            steps {
                checkout scm
                echo "Workspace: ${env.WORKSPACE}"
                sh 'ls -la' // quick sanity check — confirm Dockerfile is here
            }
        }

        stage('Test') {
            parallel {
                stage('Backend Tests') {
                    steps {
                        dir('server') { // server/ relative to workspace root
                            // '|| true': tests are advisory — failures are logged
                            // but deliberately do not block the deployment.
                            sh 'npm ci && npm test -- --reporter=verbose 2>&1 || true'
                        }
                    }
                }
                stage('Frontend Tests') {
                    steps {
                        // frontend lives at workspace root
                        sh 'npm ci && npm test -- --reporter=verbose 2>&1 || true'
                    }
                }
            }
        }

        stage('Build Images') {
            parallel {
                stage('Build Frontend') {
                    steps {
                        // Dockerfile is at workspace root
                        sh """
                            docker build \
                                -f Dockerfile \
                                -t ${FRONTEND_IMAGE}:${IMAGE_TAG} \
                                -t ${FRONTEND_IMAGE}:latest \
                                .
                        """
                    }
                }
                stage('Build Backend') {
                    steps {
                        dir('server') { // server/Dockerfile
                            sh """
                                docker build \
                                    -f Dockerfile \
                                    -t ${BACKEND_IMAGE}:${IMAGE_TAG} \
                                    -t ${BACKEND_IMAGE}:latest \
                                    .
                            """
                        }
                    }
                }
            }
        }

        stage('Push to Harbor') {
            steps {
                withCredentials([usernamePassword(
                    credentialsId: 'harbor-creds',
                    usernameVariable: 'HARBOR_USER',
                    passwordVariable: 'HARBOR_PASS'
                )]) {
                    // \$VAR is escaped from Groovy interpolation so the secret is
                    // expanded by the shell only (never embedded in the script text);
                    // --password-stdin keeps it out of the process list.
                    sh """
                        echo \$HARBOR_PASS | docker login ${HARBOR_URL} -u \$HARBOR_USER --password-stdin

                        docker push ${FRONTEND_IMAGE}:${IMAGE_TAG}
                        docker push ${FRONTEND_IMAGE}:latest

                        docker push ${BACKEND_IMAGE}:${IMAGE_TAG}
                        docker push ${BACKEND_IMAGE}:latest
                    """
                }
            }
        }

        stage('Patch Image Tags') {
            steps {
                // Rewrites the overlay's kustomization.yaml so the deploy step
                // picks up this build's immutable tag instead of :latest.
                dir("${K8S_OVERLAY}") {
                    sh """
                        kustomize edit set image \
                            scrum-frontend=${FRONTEND_IMAGE}:${IMAGE_TAG} \
                            scrum-backend=${BACKEND_IMAGE}:${IMAGE_TAG}
                    """
                }
            }
        }

        stage('Deploy to K8s') {
            steps {
                withKubeConfig([credentialsId: "${K8S_CRED_ID}"]) {
                    sh "kubectl apply -k ${K8S_OVERLAY}"

                    // Show pod state immediately after apply so we can see pull/init status in logs
                    sh "kubectl get pods -n scrum-manager -o wide"

                    // MySQL uses Recreate strategy: old pod terminates then new starts.
                    sh "kubectl rollout status deployment/mysql -n scrum-manager --timeout=300s"

                    // maxSurge=0: old pod terminates first, new pod starts after.
                    // CPU-constrained nodes may delay scheduling — 600s covers this.
                    sh "kubectl rollout status deployment/backend -n scrum-manager --timeout=600s"

                    sh "kubectl rollout status deployment/frontend -n scrum-manager --timeout=600s"

                    echo "All deployments rolled out."
                }
            }
        }

        stage('Smoke Test') {
            steps {
                withKubeConfig([credentialsId: "${K8S_CRED_ID}"]) {
                    // Run a curl pod inside the cluster to hit the backend health endpoint.
                    // Uses FQDN (backend.scrum-manager.svc.cluster.local) to be explicit.
                    // NOTE: `kubectl run` has no --timeout flag (that belongs to
                    // rollout/delete); --pod-running-timeout is the correct bound on
                    // how long to wait for the pod to start.
                    sh """
                        kubectl run smoke-${BUILD_NUMBER} \
                            --image=curlimages/curl:8.5.0 \
                            --restart=Never \
                            --rm \
                            --attach \
                            --pod-running-timeout=30s \
                            -n scrum-manager \
                            -- curl -sf --max-time 10 \
                                http://backend.scrum-manager.svc.cluster.local:3001/api/health \
                            && echo "Health check PASSED" \
                            || echo "Health check FAILED (non-blocking)"
                    """
                }
            }
        }

        stage('Clean Up') {
            steps {
                // Free agent disk: Harbor holds the authoritative copies now.
                sh """
                    docker rmi ${FRONTEND_IMAGE}:${IMAGE_TAG} || true
                    docker rmi ${FRONTEND_IMAGE}:latest || true
                    docker rmi ${BACKEND_IMAGE}:${IMAGE_TAG} || true
                    docker rmi ${BACKEND_IMAGE}:latest || true
                """
            }
        }
    }

    post {
        success {
            echo "✅ Build #${env.BUILD_NUMBER} deployed → http://scrum.local"
        }
        failure {
            // Dump cluster state so the failed build's log is self-diagnosing;
            // every command is '|| true' so diagnostics never mask the real failure.
            withKubeConfig([credentialsId: "${K8S_CRED_ID}"]) {
                sh """
                    echo '=== Pod Status ==='
                    kubectl get pods -n scrum-manager -o wide || true

                    echo '=== Backend Pod Events ==='
                    kubectl describe pods -l app.kubernetes.io/name=backend -n scrum-manager || true

                    echo '=== Backend Logs (last 50 lines) ==='
                    kubectl logs -l app.kubernetes.io/name=backend -n scrum-manager --tail=50 --all-containers=true || true

                    echo '=== Frontend Pod Events ==='
                    kubectl describe pods -l app.kubernetes.io/name=frontend -n scrum-manager || true

                    echo '=== MySQL Pod Events ==='
                    kubectl describe pods -l app.kubernetes.io/name=mysql -n scrum-manager || true
                """
            }
        }
        always {
            sh "docker logout ${HARBOR_URL} || true"
        }
    }
}