Some checks failed
scrum-manager/pipeline/head There was a failure building this commit
Backend was OOMKilled during rolling update startup (Node.js + Socket.io + MySQL pool exceeds 256Mi). Raised limit to 512Mi and request to 256Mi. Jenkinsfile: show kubectl get pods immediately after apply so pod state is visible in build logs. Added full diagnostics (describe + logs) in post.failure block so the root cause of any future rollout failure is visible without needing to SSH into the cluster. Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
200 lines
7.3 KiB
Groovy
200 lines
7.3 KiB
Groovy
pipeline {
    agent any

    environment {
        // Private Harbor registry (in-cluster, HTTP on port 80).
        HARBOR_URL     = '192.168.108.200:80'
        HARBOR_PROJECT = 'library'
        IMAGE_TAG      = "${env.BUILD_NUMBER}"
        K8S_CRED_ID    = 'k8s-config'

        // Derive image names from the registry settings above so the
        // registry address/project live in exactly one place — keeps the
        // docker-login target and the pushed image names in sync.
        FRONTEND_IMAGE = "${HARBOR_URL}/${HARBOR_PROJECT}/scrum-frontend"
        BACKEND_IMAGE  = "${HARBOR_URL}/${HARBOR_PROJECT}/scrum-backend"

        // Workspace root IS the project root — no subdirectory needed
        K8S_OVERLAY = 'k8s/overlays/on-premise'
    }

    options {
        buildDiscarder(logRotator(numToKeepStr: '10'))
        timeout(time: 30, unit: 'MINUTES')
        disableConcurrentBuilds()
    }

    stages {

        stage('Checkout') {
            steps {
                checkout scm
                echo "Workspace: ${env.WORKSPACE}"
                sh 'ls -la' // quick sanity check — confirm Dockerfile is here
            }
        }

        stage('Test') {
            // NOTE(review): both test commands end in `|| true`, so test
            // failures NEVER fail the build — they are informational only.
            // Drop the `|| true` once the suites are reliable enough to gate.
            parallel {
                stage('Backend Tests') {
                    steps {
                        dir('server') { // server/ relative to workspace root
                            sh 'npm ci && npm test -- --reporter=verbose 2>&1 || true'
                        }
                    }
                }
                stage('Frontend Tests') {
                    steps {
                        // frontend lives at workspace root
                        sh 'npm ci && npm test -- --reporter=verbose 2>&1 || true'
                    }
                }
            }
        }

        stage('Build Images') {
            parallel {
                stage('Build Frontend') {
                    steps {
                        // Dockerfile is at workspace root
                        sh """
                            docker build \
                                -f Dockerfile \
                                -t ${FRONTEND_IMAGE}:${IMAGE_TAG} \
                                -t ${FRONTEND_IMAGE}:latest \
                                .
                        """
                    }
                }
                stage('Build Backend') {
                    steps {
                        dir('server') { // server/Dockerfile
                            sh """
                                docker build \
                                    -f Dockerfile \
                                    -t ${BACKEND_IMAGE}:${IMAGE_TAG} \
                                    -t ${BACKEND_IMAGE}:latest \
                                    .
                            """
                        }
                    }
                }
            }
        }

        stage('Push to Harbor') {
            steps {
                withCredentials([usernamePassword(
                    credentialsId: 'harbor-creds',
                    usernameVariable: 'HARBOR_USER',
                    passwordVariable: 'HARBOR_PASS'
                )]) {
                    // \$-escaped so the SHELL expands the credentials — keeps
                    // the secret out of Groovy string interpolation.
                    sh """
                        echo \$HARBOR_PASS | docker login ${HARBOR_URL} -u \$HARBOR_USER --password-stdin

                        docker push ${FRONTEND_IMAGE}:${IMAGE_TAG}
                        docker push ${FRONTEND_IMAGE}:latest

                        docker push ${BACKEND_IMAGE}:${IMAGE_TAG}
                        docker push ${BACKEND_IMAGE}:latest
                    """
                }
            }
        }

        stage('Patch Image Tags') {
            steps {
                // Pin the overlay to this build's immutable tag so the
                // rollout below deploys exactly what was just pushed.
                dir("${K8S_OVERLAY}") {
                    sh """
                        kustomize edit set image \
                            scrum-frontend=${FRONTEND_IMAGE}:${IMAGE_TAG} \
                            scrum-backend=${BACKEND_IMAGE}:${IMAGE_TAG}
                    """
                }
            }
        }

        stage('Deploy to K8s') {
            steps {
                withKubeConfig([credentialsId: "${K8S_CRED_ID}"]) {
                    sh "kubectl apply -k ${K8S_OVERLAY}"

                    // Show pod state immediately after apply so we can see pull/init status in logs
                    sh "kubectl get pods -n scrum-manager -o wide"

                    // MySQL uses Recreate strategy: old pod terminates (~30s) before
                    // new pod starts. Readiness probe initialDelaySeconds=30 + up to
                    // 10 retries × 5s = 80s. Total worst-case: ~110s → 300s is safe.
                    sh "kubectl rollout status deployment/mysql -n scrum-manager --timeout=300s"

                    // Backend initContainer sleeps 15s after MySQL TCP is up before
                    // starting the Node process. 512Mi memory limit avoids OOMKill.
                    sh "kubectl rollout status deployment/backend -n scrum-manager --timeout=300s"

                    sh "kubectl rollout status deployment/frontend -n scrum-manager --timeout=180s"

                    echo "All deployments rolled out."
                }
            }
        }

        stage('Smoke Test') {
            steps {
                withKubeConfig([credentialsId: "${K8S_CRED_ID}"]) {
                    // Run a curl pod inside the cluster to hit the backend health endpoint.
                    // Uses FQDN (backend.scrum-manager.svc.cluster.local) to be explicit.
                    // NOTE(review): the trailing `|| echo` means this step always
                    // succeeds — the smoke test is deliberately non-blocking.
                    sh """
                        kubectl run smoke-${BUILD_NUMBER} \
                            --image=curlimages/curl:8.5.0 \
                            --restart=Never \
                            --rm \
                            --attach \
                            --timeout=30s \
                            -n scrum-manager \
                            -- curl -sf --max-time 10 \
                                http://backend.scrum-manager.svc.cluster.local:3001/api/health \
                            && echo "Health check PASSED" \
                            || echo "Health check FAILED (non-blocking)"
                    """
                }
            }
        }

        stage('Clean Up') {
            steps {
                // Remove local tags so build agents don't accumulate images;
                // best-effort, so a missing tag never fails the build.
                sh """
                    docker rmi ${FRONTEND_IMAGE}:${IMAGE_TAG} || true
                    docker rmi ${FRONTEND_IMAGE}:latest || true
                    docker rmi ${BACKEND_IMAGE}:${IMAGE_TAG} || true
                    docker rmi ${BACKEND_IMAGE}:latest || true
                """
            }
        }
    }

    post {
        success {
            echo "✅ Build #${env.BUILD_NUMBER} deployed → http://scrum.local"
        }
        failure {
            // Full diagnostics so the root cause of a rollout failure is
            // visible from the build log without SSHing into the cluster.
            // Every command is best-effort (`|| true`): the build may have
            // failed before deploy, in which case some resources won't exist.
            withKubeConfig([credentialsId: "${K8S_CRED_ID}"]) {
                sh """
                    echo '=== Pod Status ==='
                    kubectl get pods -n scrum-manager -o wide || true

                    echo '=== Backend Pod Events ==='
                    kubectl describe pods -l app.kubernetes.io/name=backend -n scrum-manager || true

                    echo '=== Backend Logs (last 50 lines) ==='
                    kubectl logs -l app.kubernetes.io/name=backend -n scrum-manager --tail=50 --all-containers=true || true

                    echo '=== Frontend Pod Events ==='
                    kubectl describe pods -l app.kubernetes.io/name=frontend -n scrum-manager || true

                    echo '=== MySQL Pod Events ==='
                    kubectl describe pods -l app.kubernetes.io/name=mysql -n scrum-manager || true
                """
            }
        }
        always {
            // Drop the registry session on the agent regardless of outcome.
            sh "docker logout ${HARBOR_URL} || true"
        }
    }
}