fix: k8s on-premise deployment and session persistence
Some checks failed
scrum-manager/pipeline/head There was a failure building this commit

Database fixes:
- Add hostPath.type=DirectoryOrCreate so kubelet auto-creates /mnt/data/mysql
- Add fsGroup=999 so MySQL process can write to the hostPath volume
- Add MYSQL_ROOT_HOST=% to allow backend pods to authenticate as root
- Fix liveness/readiness probes to include credentials (-p$MYSQL_ROOT_PASSWORD)
- Increase probe initialDelaySeconds (30/60s) for slow first-run init
- Add 15s grace sleep in backend initContainer after MySQL TCP is up
- Add persistentVolumeReclaimPolicy=Retain to prevent accidental data loss
- Explicit accessModes+resources in PVC patch to avoid list merge ambiguity
- Add nodeAffinity comment in PV for multi-node cluster guidance

Ingress/nginx fixes:
- Remove broken rewrite-target=/ that was rewriting all paths (incl /api) to /
- Route /socket.io directly to backend for WebSocket support
- Add /socket.io/ proxy location to both nginx.conf and K8s ConfigMap

Frontend fix:
- Persist currentUser to localStorage on login so page refresh no longer
  clears session and redirects users back to the login page

Tooling:
- Add k8s/overlays/on-premise/deploy.sh for one-command deployment

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
tusuii
2026-02-27 21:00:10 +05:30
parent fa8efe874e
commit 73bd35173c
9 changed files with 222 additions and 129 deletions

View File

@@ -24,14 +24,14 @@ spec:
- sh - sh
- -c - -c
- | - |
echo "Waiting for MySQL port to open..." echo "Waiting for MySQL TCP to be available..."
until nc -z mysql 3306; do until nc -z mysql 3306; do
echo "MySQL not ready yet, retrying in 5s..." echo "MySQL not reachable yet, retrying in 3s..."
sleep 5 sleep 3
done done
echo "Port open — waiting 15s for MySQL to finish initializing..." echo "MySQL TCP is up. Waiting 15s for full initialization..."
sleep 15 sleep 15
echo "MySQL is ready!" echo "Proceeding to start backend."
containers: containers:
- name: backend - name: backend
image: scrum-backend:latest image: scrum-backend:latest
@@ -48,12 +48,12 @@ spec:
valueFrom: valueFrom:
secretKeyRef: secretKeyRef:
name: mysql-secret name: mysql-secret
key: MYSQL_USER # matches new secret key key: DB_USER
- name: DB_PASSWORD - name: DB_PASSWORD
valueFrom: valueFrom:
secretKeyRef: secretKeyRef:
name: mysql-secret name: mysql-secret
key: MYSQL_PASSWORD # matches new secret key key: DB_PASSWORD
- name: DB_NAME - name: DB_NAME
valueFrom: valueFrom:
secretKeyRef: secretKeyRef:
@@ -72,7 +72,7 @@ spec:
httpGet: httpGet:
path: /api/health path: /api/health
port: http port: http
initialDelaySeconds: 30 initialDelaySeconds: 15
periodSeconds: 10 periodSeconds: 10
timeoutSeconds: 3 timeoutSeconds: 3
failureThreshold: 3 failureThreshold: 3
@@ -80,7 +80,7 @@ spec:
httpGet: httpGet:
path: /api/health path: /api/health
port: http port: http
initialDelaySeconds: 15 initialDelaySeconds: 5
periodSeconds: 5 periodSeconds: 5
timeoutSeconds: 3 timeoutSeconds: 3
failureThreshold: 5 failureThreshold: 5

View File

@@ -14,11 +14,6 @@ data:
root /usr/share/nginx/html; root /usr/share/nginx/html;
index index.html; index index.html;
# Serve static files
location / {
try_files $uri $uri/ /index.html;
}
# Proxy API requests to backend service # Proxy API requests to backend service
location /api/ { location /api/ {
proxy_pass http://backend:3001; proxy_pass http://backend:3001;
@@ -27,5 +22,23 @@ data:
proxy_set_header Connection 'upgrade'; proxy_set_header Connection 'upgrade';
proxy_set_header Host $host; proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade; proxy_cache_bypass $http_upgrade;
proxy_read_timeout 60s;
}
# Proxy Socket.io (real-time notifications)
location /socket.io/ {
proxy_pass http://backend:3001;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_cache_bypass $http_upgrade;
proxy_read_timeout 3600s;
}
# Serve static files — React SPA catch-all
location / {
try_files $uri $uri/ /index.html;
} }
} }

View File

@@ -8,7 +8,7 @@ metadata:
spec: spec:
replicas: 1 replicas: 1
strategy: strategy:
type: Recreate type: Recreate # MySQL requires Recreate since PVC is ReadWriteOnce
selector: selector:
matchLabels: matchLabels:
app.kubernetes.io/name: mysql app.kubernetes.io/name: mysql
@@ -19,6 +19,11 @@ spec:
app.kubernetes.io/name: mysql app.kubernetes.io/name: mysql
app.kubernetes.io/component: database app.kubernetes.io/component: database
spec: spec:
# fsGroup 999 = mysql group in the container image.
# Without this, the hostPath volume is owned by root and MySQL
# cannot write to /var/lib/mysql → pod CrashLoops immediately.
securityContext:
fsGroup: 999
containers: containers:
- name: mysql - name: mysql
image: mysql:8.0 image: mysql:8.0
@@ -36,16 +41,9 @@ spec:
secretKeyRef: secretKeyRef:
name: mysql-secret name: mysql-secret
key: DB_NAME key: DB_NAME
- name: MYSQL_USER # Allow root to connect from backend pods (any host), not just localhost.
valueFrom: - name: MYSQL_ROOT_HOST
secretKeyRef: value: "%"
name: mysql-secret
key: MYSQL_USER
- name: MYSQL_PASSWORD
valueFrom:
secretKeyRef:
name: mysql-secret
key: MYSQL_PASSWORD
volumeMounts: volumeMounts:
- name: mysql-data - name: mysql-data
mountPath: /var/lib/mysql mountPath: /var/lib/mysql
@@ -58,86 +56,26 @@ spec:
memory: 1Gi memory: 1Gi
livenessProbe: livenessProbe:
exec: exec:
command: ["mysqladmin", "ping", "-h", "localhost"] command:
initialDelaySeconds: 90 - sh
periodSeconds: 15 - -c
- mysqladmin ping -h 127.0.0.1 -u root -p"$MYSQL_ROOT_PASSWORD" --silent
initialDelaySeconds: 60
periodSeconds: 10
timeoutSeconds: 5 timeoutSeconds: 5
failureThreshold: 3 failureThreshold: 3
readinessProbe: readinessProbe:
exec: exec:
command: ["mysqladmin", "ping", "-h", "localhost"] command:
initialDelaySeconds: 60 - sh
periodSeconds: 10 - -c
timeoutSeconds: 5 - mysqladmin ping -h 127.0.0.1 -u root -p"$MYSQL_ROOT_PASSWORD" --silent
failureThreshold: 5 # MySQL 8.0 first-run initialization takes 30-60s on slow disks.
initialDelaySeconds: 30
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 10
volumes: volumes:
- name: mysql-data - name: mysql-data
persistentVolumeClaim: persistentVolumeClaim:
claimName: mysql-data-pvc claimName: mysql-data-pvc
# apiVersion: apps/v1
# kind: Deployment
# metadata:
# name: mysql
# labels:
# app.kubernetes.io/name: mysql
# app.kubernetes.io/component: database
# spec:
# replicas: 1
# strategy:
# type: Recreate
# selector:
# matchLabels:
# app.kubernetes.io/name: mysql
# app.kubernetes.io/component: database
# template:
# metadata:
# labels:
# app.kubernetes.io/name: mysql
# app.kubernetes.io/component: database
# spec:
# containers:
# - name: mysql
# image: mysql:8.0
# ports:
# - containerPort: 3306
# name: mysql
# env:
# - name: MYSQL_ROOT_PASSWORD
# valueFrom:
# secretKeyRef:
# name: mysql-secret
# key: MYSQL_ROOT_PASSWORD
# - name: MYSQL_DATABASE
# valueFrom:
# secretKeyRef:
# name: mysql-secret
# key: DB_NAME
# volumeMounts:
# - name: mysql-data
# mountPath: /var/lib/mysql
# resources:
# requests:
# cpu: 250m
# memory: 512Mi
# limits:
# cpu: "1"
# memory: 1Gi
# livenessProbe:
# exec:
# command: ["mysqladmin", "ping", "-h", "localhost"]
# initialDelaySeconds: 90 # was 30 — must survive full init
# periodSeconds: 15
# timeoutSeconds: 5
# failureThreshold: 3
# readinessProbe:
# exec:
# command: ["mysqladmin", "ping", "-h", "localhost"]
# initialDelaySeconds: 60 # was 10 — critical fix
# periodSeconds: 10
# timeoutSeconds: 5
# failureThreshold: 5
# volumes:
# - name: mysql-data
# persistentVolumeClaim:
# claimName: mysql-data-pvc

View File

@@ -0,0 +1,95 @@
#!/usr/bin/env bash
# ── Scrum Manager — On-Premise Kubernetes Deploy Script ─────────────────────
# Builds the backend and frontend images, pushes them to a registry (or tells
# you how to load them manually), applies the on-premise kustomize overlay,
# and waits for the MySQL/backend/frontend rollouts.
#
# Usage (from the project root):
#   bash k8s/overlays/on-premise/deploy.sh
# Optional:
#   REGISTRY=192.168.1.10:5000 bash k8s/overlays/on-premise/deploy.sh
# ────────────────────────────────────────────────────────────────────────────
set -euo pipefail

OVERLAY="k8s/overlays/on-premise"
NAMESPACE="scrum-manager"
REGISTRY="${REGISTRY:-}"  # Optional: set to your registry, e.g. "192.168.1.10:5000"

# Colored logging helpers; error() terminates the script.
RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; NC='\033[0m'
info()  { echo -e "${GREEN}[INFO]${NC} $*"; }
warn()  { echo -e "${YELLOW}[WARN]${NC} $*"; }
error() { echo -e "${RED}[ERROR]${NC} $*"; exit 1; }

# ── Pre-flight checks ────────────────────────────────────────────────────────
info "Checking prerequisites..."
command -v kubectl >/dev/null 2>&1 || error "kubectl not found"
command -v docker >/dev/null 2>&1 || error "docker not found"
kubectl cluster-info >/dev/null 2>&1 || error "Cannot reach Kubernetes cluster. Check kubeconfig."
info "Prerequisites OK."

# ── Multi-node: hostPath nodeAffinity reminder ───────────────────────────────
# MySQL uses a hostPath PV, so on multi-node clusters the pod must be pinned
# to the node that actually holds /mnt/data/mysql.
NODE_COUNT=$(kubectl get nodes --no-headers 2>/dev/null | wc -l)
if [ "$NODE_COUNT" -gt 1 ]; then
  warn "Multi-node cluster detected ($NODE_COUNT nodes)."
  warn "MySQL data is stored at /mnt/data/mysql on ONE node only."
  warn "Open k8s/overlays/on-premise/mysql-pv.yaml and uncomment"
  warn "the nodeAffinity block, setting it to the correct node hostname."
  warn "Run: kubectl get nodes to list hostnames."
  # '|| true' keeps 'set -e' from killing the script when stdin is not a
  # TTY (e.g. CI pipelines) — 'read' fails there, but we still want to run.
  read -rp "Press ENTER to continue anyway, or Ctrl+C to abort and fix first..." || true
fi

# ── Build Docker images ──────────────────────────────────────────────────────
info "Building Docker images..."
# ${REGISTRY:+${REGISTRY}/} expands to "<registry>/" only when REGISTRY is set,
# so unprefixed local tags are used otherwise.
BACKEND_TAG="${REGISTRY:+${REGISTRY}/}scrum-backend:latest"
FRONTEND_TAG="${REGISTRY:+${REGISTRY}/}scrum-frontend:latest"
docker build -t "$BACKEND_TAG" -f server/Dockerfile server/
docker build -t "$FRONTEND_TAG" -f Dockerfile .

# ── Push or load images into cluster ────────────────────────────────────────
if [ -n "$REGISTRY" ]; then
  info "Pushing images to registry $REGISTRY..."
  # NOTE(review): the kustomize overlay must also reference the
  # registry-prefixed image names, otherwise the cluster keeps pulling the
  # unprefixed local tags — verify the overlay's image names match $REGISTRY.
  docker push "$BACKEND_TAG"
  docker push "$FRONTEND_TAG"
else
  warn "No REGISTRY set. Attempting to load images via 'docker save | ssh'..."
  warn "If you have a single-node cluster and Docker runs on the same host,"
  warn "set imagePullPolicy: Never in the deployments (already set)."
  warn "For multi-node, set REGISTRY=<your-registry> before running this script."
  warn ""
  warn "  Alternatively, load images manually on each node with:"
  warn "    docker save scrum-backend:latest | ssh NODE docker load"
  warn "    docker save scrum-frontend:latest | ssh NODE docker load"
fi

# ── Apply Kubernetes manifests ────────────────────────────────────────────────
info "Applying manifests via kustomize..."
kubectl apply -k "$OVERLAY"

# ── Wait for rollout ──────────────────────────────────────────────────────────
# Rollout timeouts only warn (not abort) so the access info below still prints
# and the operator gets the right debug command for the stuck component.
info "Waiting for MySQL to become ready (this can take up to 90s on first run)..."
kubectl rollout status deployment/mysql -n "$NAMESPACE" --timeout=120s || \
  warn "MySQL rollout timed out — check: kubectl describe pod -l app.kubernetes.io/name=mysql -n $NAMESPACE"
info "Waiting for backend..."
kubectl rollout status deployment/backend -n "$NAMESPACE" --timeout=90s || \
  warn "Backend rollout timed out — check: kubectl logs -l app.kubernetes.io/name=backend -n $NAMESPACE"
info "Waiting for frontend..."
kubectl rollout status deployment/frontend -n "$NAMESPACE" --timeout=60s || \
  warn "Frontend rollout timed out."

# ── Show access info ──────────────────────────────────────────────────────────
echo ""
info "Deploy complete! Access the app:"
# NodePort may be absent (e.g. ClusterIP-only service) — tolerate lookup failure.
NODEPORT=$(kubectl get svc frontend -n "$NAMESPACE" -o jsonpath='{.spec.ports[0].nodePort}' 2>/dev/null || echo "")
NODE_IP=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}' 2>/dev/null || echo "<NODE-IP>")
if [ -n "$NODEPORT" ]; then
  echo ""
  echo -e "  NodePort:  ${GREEN}http://${NODE_IP}:${NODEPORT}${NC}"
fi
echo ""
echo -e "  Ingress:   ${GREEN}http://scrum.local${NC} (add '$NODE_IP scrum.local' to /etc/hosts)"
echo ""
echo "Useful commands:"
echo "  kubectl get pods -n $NAMESPACE"
echo "  kubectl logs -f deployment/backend -n $NAMESPACE"
echo "  kubectl logs -f deployment/mysql -n $NAMESPACE"

View File

@@ -4,12 +4,25 @@ metadata:
name: scrum-manager-ingress name: scrum-manager-ingress
annotations: annotations:
kubernetes.io/ingress.class: nginx kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/rewrite-target: / # No rewrite-target here — the old global rewrite-target: / was
# rewriting every path (including /api/tasks) to just /, breaking the API.
nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
spec: spec:
rules: rules:
- host: scrum.local - host: scrum.local
http: http:
paths: paths:
# Socket.io long-polling and WebSocket connections go directly to backend.
- path: /socket.io
pathType: Prefix
backend:
service:
name: backend
port:
number: 3001
# All other traffic (including /api/) goes to frontend nginx,
# which proxies /api/ to backend internally. This avoids double-routing.
- path: / - path: /
pathType: Prefix pathType: Prefix
backend: backend:
@@ -17,10 +30,3 @@ spec:
name: frontend name: frontend
port: port:
number: 80 number: 80
- path: /api
pathType: Prefix
backend:
service:
name: backend
port:
number: 3001

View File

@@ -0,0 +1,29 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: mysql-pv
labels:
type: local
spec:
storageClassName: manual
capacity:
storage: 5Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
hostPath:
path: "/mnt/data/mysql"
# DirectoryOrCreate: kubelet will create the dir if it doesn't exist,
# preventing MySQL CrashLoopBackOff due to missing mount path.
type: DirectoryOrCreate
# IMPORTANT for multi-node clusters: uncomment nodeAffinity and set your
# node's hostname so MySQL always schedules on the node that has the data.
# Run: kubectl get nodes to find the hostname.
# nodeAffinity:
# required:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/hostname
# operator: In
# values:
# - YOUR-NODE-HOSTNAME

View File

@@ -1,15 +1,12 @@
# apiVersion: v1
# kind: PersistentVolumeClaim
# metadata:
# name: mysql-data-pvc
# spec:
# storageClassName: manual
# volumeName: mysql-pv
apiVersion: v1 apiVersion: v1
kind: PersistentVolumeClaim kind: PersistentVolumeClaim
metadata: metadata:
name: mysql-data-pvc name: mysql-data-pvc
spec: spec:
storageClassName: local-path storageClassName: manual
volumeName: mysql-pv
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi

View File

@@ -1,4 +1,3 @@
server { server {
listen 80; listen 80;
server_name localhost; server_name localhost;
@@ -6,12 +5,7 @@ server {
root /usr/share/nginx/html; root /usr/share/nginx/html;
index index.html; index index.html;
# Serve static files # Proxy API requests to backend service
location / {
try_files $uri $uri/ /index.html;
}
# Proxy API requests to backend
location /api/ { location /api/ {
proxy_pass http://backend:3001; proxy_pass http://backend:3001;
proxy_http_version 1.1; proxy_http_version 1.1;
@@ -19,5 +13,23 @@ server {
proxy_set_header Connection 'upgrade'; proxy_set_header Connection 'upgrade';
proxy_set_header Host $host; proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade; proxy_cache_bypass $http_upgrade;
proxy_read_timeout 60s;
}
# Proxy Socket.io (real-time notifications)
location /socket.io/ {
proxy_pass http://backend:3001;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_cache_bypass $http_upgrade;
proxy_read_timeout 3600s;
}
# Serve static files — React SPA catch-all
location / {
try_files $uri $uri/ /index.html;
} }
} }

View File

@@ -25,7 +25,10 @@ const VIEW_PAGES = ['calendar', 'kanban', 'list'];
export default function App() { export default function App() {
const now = new Date(); const now = new Date();
const [currentUser, setCurrentUser] = useState<User | null>(null); const [currentUser, setCurrentUser] = useState<User | null>(() => {
try { const s = localStorage.getItem('currentUser'); return s ? JSON.parse(s) : null; }
catch { return null; }
});
const [users, setUsers] = useState<User[]>([]); const [users, setUsers] = useState<User[]>([]);
const [tasks, setTasks] = useState<Task[]>([]); const [tasks, setTasks] = useState<Task[]>([]);
const [activePage, setActivePage] = useState('calendar'); const [activePage, setActivePage] = useState('calendar');
@@ -58,7 +61,7 @@ export default function App() {
.finally(() => setLoading(false)); .finally(() => setLoading(false));
}, [currentUser]); }, [currentUser]);
if (!currentUser) return <LoginPage onLogin={u => { setCurrentUser(u); setActivePage('calendar'); setActiveView('calendar'); }} />; if (!currentUser) return <LoginPage onLogin={u => { localStorage.setItem('currentUser', JSON.stringify(u)); setCurrentUser(u); setActivePage('calendar'); setActiveView('calendar'); }} />;
const handleNavigate = (page: string) => { const handleNavigate = (page: string) => {
setActivePage(page); setActivePage(page);
@@ -250,7 +253,7 @@ export default function App() {
onOpenSidebar={() => setSidebarOpen(true)} users={users} /> onOpenSidebar={() => setSidebarOpen(true)} users={users} />
<div className="app-body"> <div className="app-body">
<Sidebar currentUser={currentUser} activePage={activePage} onNavigate={handleNavigate} <Sidebar currentUser={currentUser} activePage={activePage} onNavigate={handleNavigate}
onSignOut={() => { setCurrentUser(null); setActivePage('calendar'); setActiveView('calendar'); setSidebarOpen(false); }} onSignOut={() => { localStorage.removeItem('currentUser'); setCurrentUser(null); setActivePage('calendar'); setActiveView('calendar'); setSidebarOpen(false); }}
isOpen={sidebarOpen} onClose={() => setSidebarOpen(false)} users={users} /> isOpen={sidebarOpen} onClose={() => setSidebarOpen(false)} users={users} />
<div className="main-content"> <div className="main-content">
{displayPage === 'calendar' && ( {displayPage === 'calendar' && (