1 Commits

Author SHA1 Message Date
6d9778e51f jenkinsfile test
Some checks are pending
test reactjs website/pipeline/head Build queued...
2026-02-22 11:01:35 +00:00
15 changed files with 72 additions and 450 deletions

43
Jenkinsfile vendored
View File

@@ -115,19 +115,11 @@ pipeline {
withKubeConfig([credentialsId: "${K8S_CRED_ID}"]) { withKubeConfig([credentialsId: "${K8S_CRED_ID}"]) {
sh "kubectl apply -k ${K8S_OVERLAY}" sh "kubectl apply -k ${K8S_OVERLAY}"
// Show pod state immediately after apply so we can see pull/init status in logs sh "kubectl rollout status deployment/mysql -n scrum-manager --timeout=120s"
sh "kubectl get pods -n scrum-manager -o wide" sh "kubectl rollout status deployment/backend -n scrum-manager --timeout=120s"
sh "kubectl rollout status deployment/frontend -n scrum-manager --timeout=120s"
// MySQL uses Recreate strategy: old pod terminates then new starts. echo "✅ All deployments rolled out."
sh "kubectl rollout status deployment/mysql -n scrum-manager --timeout=300s"
// maxSurge=0: old pod terminates first, new pod starts after.
// CPU-constrained nodes may delay scheduling — 600s covers this.
sh "kubectl rollout status deployment/backend -n scrum-manager --timeout=600s"
sh "kubectl rollout status deployment/frontend -n scrum-manager --timeout=600s"
echo "All deployments rolled out."
} }
} }
} }
@@ -135,18 +127,14 @@ pipeline {
stage('Smoke Test') { stage('Smoke Test') {
steps { steps {
withKubeConfig([credentialsId: "${K8S_CRED_ID}"]) { withKubeConfig([credentialsId: "${K8S_CRED_ID}"]) {
// Run a curl pod inside the cluster to hit the backend health endpoint.
// Uses FQDN (backend.scrum-manager.svc.cluster.local) to be explicit.
sh """ sh """
kubectl run smoke-${BUILD_NUMBER} \ kubectl run smoke-${BUILD_NUMBER} \
--image=curlimages/curl:8.5.0 \ --image=curlimages/curl:latest \
--restart=Never \ --restart=Never \
--rm \ --rm \
--attach \ --attach \
--timeout=30s \
-n scrum-manager \ -n scrum-manager \
-- curl -sf --max-time 10 \ -- curl -sf http://backend:3001/api/health \
http://backend.scrum-manager.svc.cluster.local:3001/api/health \
&& echo "Health check PASSED" \ && echo "Health check PASSED" \
|| echo "Health check FAILED (non-blocking)" || echo "Health check FAILED (non-blocking)"
""" """
@@ -171,24 +159,7 @@ pipeline {
echo "✅ Build #${env.BUILD_NUMBER} deployed → http://scrum.local" echo "✅ Build #${env.BUILD_NUMBER} deployed → http://scrum.local"
} }
failure { failure {
withKubeConfig([credentialsId: "${K8S_CRED_ID}"]) { echo "❌ Pipeline failed. Check stage logs above."
sh """
echo '=== Pod Status ==='
kubectl get pods -n scrum-manager -o wide || true
echo '=== Backend Pod Events ==='
kubectl describe pods -l app.kubernetes.io/name=backend -n scrum-manager || true
echo '=== Backend Logs (last 50 lines) ==='
kubectl logs -l app.kubernetes.io/name=backend -n scrum-manager --tail=50 --all-containers=true || true
echo '=== Frontend Pod Events ==='
kubectl describe pods -l app.kubernetes.io/name=frontend -n scrum-manager || true
echo '=== MySQL Pod Events ==='
kubectl describe pods -l app.kubernetes.io/name=mysql -n scrum-manager || true
"""
}
} }
always { always {
sh "docker logout ${HARBOR_URL} || true" sh "docker logout ${HARBOR_URL} || true"

View File

@@ -1,168 +0,0 @@
// Jenkins declarative pipeline for the Scrum Manager app.
// Flow: checkout -> parallel tests -> parallel image builds -> push to Harbor
//       -> kustomize tag patch -> deploy to K8s -> in-cluster smoke test -> cleanup.
pipeline {
    agent any
    environment {
        // Private Harbor registry (insecure HTTP on port 80) and its project.
        HARBOR_URL     = '192.168.108.200:80'
        HARBOR_PROJECT = 'library'
        // Tag every image with the build number so deployments are traceable.
        IMAGE_TAG      = "${env.BUILD_NUMBER}"
        K8S_CRED_ID    = 'k8s-config'
        FRONTEND_IMAGE = '192.168.108.200:80/library/scrum-frontend'
        BACKEND_IMAGE  = '192.168.108.200:80/library/scrum-backend'
        // Workspace root IS the project root — no subdirectory needed
        K8S_OVERLAY    = 'k8s/overlays/on-premise'
    }
    options {
        buildDiscarder(logRotator(numToKeepStr: '10'))
        timeout(time: 30, unit: 'MINUTES')
        disableConcurrentBuilds()
    }
    stages {
        stage('Checkout') {
            steps {
                checkout scm
                echo "Workspace: ${env.WORKSPACE}"
                sh 'ls -la' // quick sanity check — confirm Dockerfile is here
            }
        }
        stage('Test') {
            parallel {
                stage('Backend Tests') {
                    steps {
                        dir('server') { // server/ relative to workspace root
                            // NOTE(review): '|| true' deliberately makes test failures
                            // non-blocking; remove it to gate the build on tests.
                            sh 'npm ci && npm test -- --reporter=verbose 2>&1 || true'
                        }
                    }
                }
                stage('Frontend Tests') {
                    steps {
                        // frontend lives at workspace root
                        sh 'npm ci && npm test -- --reporter=verbose 2>&1 || true'
                    }
                }
            }
        }
        stage('Build Images') {
            parallel {
                stage('Build Frontend') {
                    steps {
                        // Dockerfile is at workspace root
                        sh """
                            docker build \
                                -f Dockerfile \
                                -t ${FRONTEND_IMAGE}:${IMAGE_TAG} \
                                -t ${FRONTEND_IMAGE}:latest \
                                .
                        """
                    }
                }
                stage('Build Backend') {
                    steps {
                        dir('server') { // server/Dockerfile
                            sh """
                                docker build \
                                    -f Dockerfile \
                                    -t ${BACKEND_IMAGE}:${IMAGE_TAG} \
                                    -t ${BACKEND_IMAGE}:latest \
                                    .
                            """
                        }
                    }
                }
            }
        }
        stage('Push to Harbor') {
            steps {
                withCredentials([usernamePassword(
                    credentialsId: 'harbor-creds',
                    usernameVariable: 'HARBOR_USER',
                    passwordVariable: 'HARBOR_PASS'
                )]) {
                    // --password-stdin keeps the secret out of the process list.
                    sh """
                        echo \$HARBOR_PASS | docker login ${HARBOR_URL} -u \$HARBOR_USER --password-stdin
                        docker push ${FRONTEND_IMAGE}:${IMAGE_TAG}
                        docker push ${FRONTEND_IMAGE}:latest
                        docker push ${BACKEND_IMAGE}:${IMAGE_TAG}
                        docker push ${BACKEND_IMAGE}:latest
                    """
                }
            }
        }
        stage('Patch Image Tags') {
            steps {
                dir("${K8S_OVERLAY}") {
                    // Point the overlay at the freshly pushed build-number tags.
                    sh """
                        kustomize edit set image \
                            scrum-frontend=${FRONTEND_IMAGE}:${IMAGE_TAG} \
                            scrum-backend=${BACKEND_IMAGE}:${IMAGE_TAG}
                    """
                }
            }
        }
        stage('Deploy to K8s') {
            steps {
                withKubeConfig([credentialsId: "${K8S_CRED_ID}"]) {
                    sh "kubectl apply -k ${K8S_OVERLAY}"
                    // MySQL must be up first — backend probes depend on it.
                    sh "kubectl rollout status deployment/mysql -n scrum-manager --timeout=300s"
                    sh "kubectl rollout status deployment/backend -n scrum-manager --timeout=300s"
                    sh "kubectl rollout status deployment/frontend -n scrum-manager --timeout=180s"
                    echo "✅ All deployments rolled out."
                }
            }
        }
        stage('Smoke Test') {
            steps {
                withKubeConfig([credentialsId: "${K8S_CRED_ID}"]) {
                    // One-shot curl pod inside the cluster hits the backend health
                    // endpoint. FIXES vs previous revision: pin the curl image
                    // (was ':latest' — non-reproducible), add --max-time so a hung
                    // backend cannot stall this stage, and use the service FQDN so
                    // resolution is explicit and namespace-independent.
                    sh """
                        kubectl run smoke-${BUILD_NUMBER} \
                            --image=curlimages/curl:8.5.0 \
                            --restart=Never \
                            --rm \
                            --attach \
                            -n scrum-manager \
                            -- curl -sf --max-time 10 \
                            http://backend.scrum-manager.svc.cluster.local:3001/api/health \
                            && echo "Health check PASSED" \
                            || echo "Health check FAILED (non-blocking)"
                    """
                }
            }
        }
        stage('Clean Up') {
            steps {
                // Free agent disk; '|| true' because images may already be gone.
                sh """
                    docker rmi ${FRONTEND_IMAGE}:${IMAGE_TAG} || true
                    docker rmi ${FRONTEND_IMAGE}:latest || true
                    docker rmi ${BACKEND_IMAGE}:${IMAGE_TAG} || true
                    docker rmi ${BACKEND_IMAGE}:latest || true
                """
            }
        }
    }
    post {
        success {
            echo "✅ Build #${env.BUILD_NUMBER} deployed → http://scrum.local"
        }
        failure {
            echo "❌ Pipeline failed. Check stage logs above."
        }
        always {
            // Best-effort logout — never fail the build over it.
            sh "docker logout ${HARBOR_URL} || true"
        }
    }
}

View File

@@ -7,11 +7,6 @@ metadata:
app.kubernetes.io/component: api app.kubernetes.io/component: api
spec: spec:
replicas: 2 replicas: 2
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 0 # Don't create extra pods during update — avoids CPU pressure
maxUnavailable: 1 # Terminate one old pod first, then start new one
selector: selector:
matchLabels: matchLabels:
app.kubernetes.io/name: backend app.kubernetes.io/name: backend
@@ -22,7 +17,6 @@ spec:
app.kubernetes.io/name: backend app.kubernetes.io/name: backend
app.kubernetes.io/component: api app.kubernetes.io/component: api
spec: spec:
terminationGracePeriodSeconds: 15
initContainers: initContainers:
- name: wait-for-mysql - name: wait-for-mysql
image: busybox:1.36 image: busybox:1.36
@@ -30,14 +24,12 @@ spec:
- sh - sh
- -c - -c
- | - |
echo "Waiting for MySQL TCP to be available..." echo "Waiting for MySQL to be ready..."
until nc -z mysql 3306; do until nc -z mysql 3306; do
echo "MySQL not reachable yet, retrying in 3s..." echo "MySQL is not ready yet, retrying in 3s..."
sleep 3 sleep 3
done done
echo "MySQL TCP is up. Waiting 15s for full initialization..." echo "MySQL is ready!"
sleep 15
echo "Proceeding to start backend."
containers: containers:
- name: backend - name: backend
image: scrum-backend:latest image: scrum-backend:latest
@@ -54,12 +46,12 @@ spec:
valueFrom: valueFrom:
secretKeyRef: secretKeyRef:
name: mysql-secret name: mysql-secret
key: MYSQL_USER key: DB_USER
- name: DB_PASSWORD - name: DB_PASSWORD
valueFrom: valueFrom:
secretKeyRef: secretKeyRef:
name: mysql-secret name: mysql-secret
key: MYSQL_PASSWORD key: DB_PASSWORD
- name: DB_NAME - name: DB_NAME
valueFrom: valueFrom:
secretKeyRef: secretKeyRef:
@@ -70,10 +62,10 @@ spec:
resources: resources:
requests: requests:
cpu: 100m cpu: 100m
memory: 128Mi # Request drives scheduling — keep low so pods fit on nodes memory: 128Mi
limits: limits:
cpu: 500m cpu: 500m
memory: 512Mi # Limit prevents OOMKill during startup spikes memory: 256Mi
livenessProbe: livenessProbe:
httpGet: httpGet:
path: /api/health path: /api/health

View File

@@ -14,6 +14,11 @@ data:
root /usr/share/nginx/html; root /usr/share/nginx/html;
index index.html; index index.html;
# Serve static files
location / {
try_files $uri $uri/ /index.html;
}
# Proxy API requests to backend service # Proxy API requests to backend service
location /api/ { location /api/ {
proxy_pass http://backend:3001; proxy_pass http://backend:3001;
@@ -22,23 +27,5 @@ data:
proxy_set_header Connection 'upgrade'; proxy_set_header Connection 'upgrade';
proxy_set_header Host $host; proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade; proxy_cache_bypass $http_upgrade;
proxy_read_timeout 60s;
}
# Proxy Socket.io (real-time notifications)
location /socket.io/ {
proxy_pass http://backend:3001;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_cache_bypass $http_upgrade;
proxy_read_timeout 3600s;
}
# Serve static files — React SPA catch-all
location / {
try_files $uri $uri/ /index.html;
} }
} }

View File

@@ -7,11 +7,6 @@ metadata:
app.kubernetes.io/component: web app.kubernetes.io/component: web
spec: spec:
replicas: 2 replicas: 2
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 0 # Don't create extra pods during update — avoids CPU pressure
maxUnavailable: 1 # Terminate one old pod first, then start new one
selector: selector:
matchLabels: matchLabels:
app.kubernetes.io/name: frontend app.kubernetes.io/name: frontend

View File

@@ -6,7 +6,7 @@ metadata:
app.kubernetes.io/name: frontend app.kubernetes.io/name: frontend
app.kubernetes.io/component: web app.kubernetes.io/component: web
spec: spec:
type: LoadBalancer type: NodePort
ports: ports:
- port: 80 - port: 80
targetPort: 80 targetPort: 80

View File

@@ -19,11 +19,6 @@ spec:
app.kubernetes.io/name: mysql app.kubernetes.io/name: mysql
app.kubernetes.io/component: database app.kubernetes.io/component: database
spec: spec:
# fsGroup 999 = mysql group in the container image.
# Without this, the hostPath volume is owned by root and MySQL
# cannot write to /var/lib/mysql → pod CrashLoops immediately.
securityContext:
fsGroup: 999
containers: containers:
- name: mysql - name: mysql
image: mysql:8.0 image: mysql:8.0
@@ -41,21 +36,6 @@ spec:
secretKeyRef: secretKeyRef:
name: mysql-secret name: mysql-secret
key: DB_NAME key: DB_NAME
# Allow root to connect from backend pods (any host), not just localhost.
- name: MYSQL_ROOT_HOST
value: "%"
# Create the app user on first init. Required if PVC is ever wiped and
# MySQL reinitializes — otherwise scrumapp user won't exist and backend fails.
- name: MYSQL_USER
valueFrom:
secretKeyRef:
name: mysql-secret
key: MYSQL_USER
- name: MYSQL_PASSWORD
valueFrom:
secretKeyRef:
name: mysql-secret
key: MYSQL_PASSWORD
volumeMounts: volumeMounts:
- name: mysql-data - name: mysql-data
mountPath: /var/lib/mysql mountPath: /var/lib/mysql
@@ -69,24 +49,25 @@ spec:
livenessProbe: livenessProbe:
exec: exec:
command: command:
- sh - mysqladmin
- -c - ping
- mysqladmin ping -h 127.0.0.1 -u root -p"$MYSQL_ROOT_PASSWORD" --silent - -h
initialDelaySeconds: 60 - localhost
initialDelaySeconds: 30
periodSeconds: 10 periodSeconds: 10
timeoutSeconds: 5 timeoutSeconds: 5
failureThreshold: 3 failureThreshold: 3
readinessProbe: readinessProbe:
exec: exec:
command: command:
- sh - mysqladmin
- -c - ping
- mysqladmin ping -h 127.0.0.1 -u root -p"$MYSQL_ROOT_PASSWORD" --silent - -h
# MySQL 8.0 first-run initialization takes 30-60s on slow disks. - localhost
initialDelaySeconds: 30 initialDelaySeconds: 10
periodSeconds: 5 periodSeconds: 5
timeoutSeconds: 3 timeoutSeconds: 3
failureThreshold: 10 failureThreshold: 5
volumes: volumes:
- name: mysql-data - name: mysql-data
persistentVolumeClaim: persistentVolumeClaim:

View File

@@ -7,13 +7,11 @@ metadata:
app.kubernetes.io/component: database app.kubernetes.io/component: database
type: Opaque type: Opaque
data: data:
# Base64 encoded values — change these for production!
# echo -n 'scrumpass' | base64 => c2NydW1wYXNz
# echo -n 'root' | base64 => cm9vdA==
# echo -n 'scrum_manager' | base64 => c2NydW1fbWFuYWdlcg==
MYSQL_ROOT_PASSWORD: c2NydW1wYXNz MYSQL_ROOT_PASSWORD: c2NydW1wYXNz
MYSQL_USER: c2NydW1hcHA= DB_USER: cm9vdA==
MYSQL_PASSWORD: c2NydW1wYXNz DB_PASSWORD: c2NydW1wYXNz
DB_NAME: c2NydW1fbWFuYWdlcg== DB_NAME: c2NydW1fbWFuYWdlcg==
# Decode reference:
# MYSQL_ROOT_PASSWORD: scrumpass
# MYSQL_USER: scrumapp
# MYSQL_PASSWORD: scrumpass
# DB_NAME: scrum_manager

View File

@@ -1,95 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# ── Scrum Manager — On-Premise Kubernetes Deploy Script ─────────────────────
# Run from the project root: bash k8s/overlays/on-premise/deploy.sh
# Builds the frontend/backend images, pushes them when REGISTRY is set,
# applies the kustomize overlay, waits for rollouts, then prints access URLs.
# ────────────────────────────────────────────────────────────────────────────
OVERLAY="k8s/overlays/on-premise"
NAMESPACE="scrum-manager"
REGISTRY="${REGISTRY:-}" # Optional: set to your registry, e.g. "192.168.1.10:5000"
# ANSI colors for the log helpers below.
RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; NC='\033[0m'
info() { echo -e "${GREEN}[INFO]${NC} $*"; }
warn() { echo -e "${YELLOW}[WARN]${NC} $*"; }
error() { echo -e "${RED}[ERROR]${NC} $*"; exit 1; } # error() aborts the script
# ── Pre-flight checks ────────────────────────────────────────────────────────
info "Checking prerequisites..."
command -v kubectl >/dev/null 2>&1 || error "kubectl not found"
command -v docker >/dev/null 2>&1 || error "docker not found"
kubectl cluster-info >/dev/null 2>&1 || error "Cannot reach Kubernetes cluster. Check kubeconfig."
info "Prerequisites OK."
# ── Multi-node: hostPath nodeAffinity reminder ───────────────────────────────
# MySQL uses a hostPath volume, which is node-local — on multi-node clusters
# the PV must be pinned to one node or data appears to "vanish" on reschedule.
NODE_COUNT=$(kubectl get nodes --no-headers 2>/dev/null | wc -l)
if [ "$NODE_COUNT" -gt 1 ]; then
  warn "Multi-node cluster detected ($NODE_COUNT nodes)."
  warn "MySQL data is stored at /mnt/data/mysql on ONE node only."
  warn "Open k8s/overlays/on-premise/mysql-pv.yaml and uncomment"
  warn "the nodeAffinity block, setting it to the correct node hostname."
  warn "Run: kubectl get nodes to list hostnames."
  # NOTE(review): interactive prompt — blocks forever if run non-interactively (CI).
  read -rp "Press ENTER to continue anyway, or Ctrl+C to abort and fix first..."
fi
# ── Build Docker images ──────────────────────────────────────────────────────
info "Building Docker images..."
# ${REGISTRY:+${REGISTRY}/} prepends "registry/" only when REGISTRY is non-empty.
BACKEND_TAG="${REGISTRY:+${REGISTRY}/}scrum-backend:latest"
FRONTEND_TAG="${REGISTRY:+${REGISTRY}/}scrum-frontend:latest"
docker build -t "$BACKEND_TAG" -f server/Dockerfile server/
docker build -t "$FRONTEND_TAG" -f Dockerfile .
# ── Push or load images into cluster ────────────────────────────────────────
if [ -n "$REGISTRY" ]; then
  info "Pushing images to registry $REGISTRY..."
  docker push "$BACKEND_TAG"
  docker push "$FRONTEND_TAG"
else
  # Without a registry the images exist only on this host; just print guidance.
  warn "No REGISTRY set. Attempting to load images via 'docker save | ssh'..."
  warn "If you have a single-node cluster and Docker runs on the same host,"
  warn "set imagePullPolicy: Never in the deployments (already set)."
  warn "For multi-node, set REGISTRY=<your-registry> before running this script."
  warn ""
  warn " Alternatively, load images manually on each node with:"
  warn " docker save scrum-backend:latest | ssh NODE docker load"
  warn " docker save scrum-frontend:latest | ssh NODE docker load"
fi
# ── Apply Kubernetes manifests ────────────────────────────────────────────────
info "Applying manifests via kustomize..."
kubectl apply -k "$OVERLAY"
# ── Wait for rollout ──────────────────────────────────────────────────────────
# Rollout timeouts warn instead of aborting (despite set -e) so the access
# info below still prints and the operator gets the diagnostic commands.
info "Waiting for MySQL to become ready (this can take up to 90s on first run)..."
kubectl rollout status deployment/mysql -n "$NAMESPACE" --timeout=120s || \
  warn "MySQL rollout timed out — check: kubectl describe pod -l app.kubernetes.io/name=mysql -n $NAMESPACE"
info "Waiting for backend..."
kubectl rollout status deployment/backend -n "$NAMESPACE" --timeout=90s || \
  warn "Backend rollout timed out — check: kubectl logs -l app.kubernetes.io/name=backend -n $NAMESPACE"
info "Waiting for frontend..."
kubectl rollout status deployment/frontend -n "$NAMESPACE" --timeout=60s || \
  warn "Frontend rollout timed out."
# ── Show access info ──────────────────────────────────────────────────────────
echo ""
info "Deploy complete! Access the app:"
# NODEPORT is empty when the frontend service is not of type NodePort.
NODEPORT=$(kubectl get svc frontend -n "$NAMESPACE" -o jsonpath='{.spec.ports[0].nodePort}' 2>/dev/null || echo "")
NODE_IP=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}' 2>/dev/null || echo "<NODE-IP>")
if [ -n "$NODEPORT" ]; then
  echo ""
  echo -e " NodePort: ${GREEN}http://${NODE_IP}:${NODEPORT}${NC}"
fi
echo ""
echo -e " Ingress: ${GREEN}http://scrum.local${NC} (add '$NODE_IP scrum.local' to /etc/hosts)"
echo ""
echo "Useful commands:"
echo " kubectl get pods -n $NAMESPACE"
echo " kubectl logs -f deployment/backend -n $NAMESPACE"
echo " kubectl logs -f deployment/mysql -n $NAMESPACE"

View File

@@ -4,25 +4,12 @@ metadata:
name: scrum-manager-ingress name: scrum-manager-ingress
annotations: annotations:
kubernetes.io/ingress.class: nginx kubernetes.io/ingress.class: nginx
# No rewrite-target here — the old global rewrite-target: / was nginx.ingress.kubernetes.io/rewrite-target: /
# rewriting every path (including /api/tasks) to just /, breaking the API.
nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
spec: spec:
rules: rules:
- host: scrum.local - host: scrum.local
http: http:
paths: paths:
# Socket.io long-polling and WebSocket connections go directly to backend.
- path: /socket.io
pathType: Prefix
backend:
service:
name: backend
port:
number: 3001
# All other traffic (including /api/) goes to frontend nginx,
# which proxies /api/ to backend internally. This avoids double-routing.
- path: / - path: /
pathType: Prefix pathType: Prefix
backend: backend:
@@ -30,3 +17,10 @@ spec:
name: frontend name: frontend
port: port:
number: 80 number: 80
- path: /api
pathType: Prefix
backend:
service:
name: backend
port:
number: 3001

View File

@@ -1,38 +1,13 @@
# apiVersion: kustomize.config.k8s.io/v1beta1
# kind: Kustomization
# resources:
# - ../../base
# - mysql-pv.yaml
# - ingress.yaml
# patches:
# - path: mysql-pvc-patch.yaml
# target:
# kind: PersistentVolumeClaim
# name: mysql-data-pvc
apiVersion: kustomize.config.k8s.io/v1beta1 apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization kind: Kustomization
resources: resources:
- ../../base - ../../base
- mysql-pv.yaml
- ingress.yaml - ingress.yaml
patches: patches:
# This patch explicitly sets storageClassName: local-path to match the live
# PVC in the cluster. Without it, the base PVC (no storageClassName = nil)
# diffs against the existing "local-path" value and kubectl apply tries to
# mutate a bound PVC, which Kubernetes forbids.
- path: mysql-pvc-patch.yaml - path: mysql-pvc-patch.yaml
target: target:
kind: PersistentVolumeClaim kind: PersistentVolumeClaim
name: mysql-data-pvc name: mysql-data-pvc
images:
- name: scrum-frontend
newName: 192.168.108.200:80/library/scrum-frontend
newTag: latest
- name: scrum-backend
newName: 192.168.108.200:80/library/scrum-backend
newTag: latest

View File

@@ -0,0 +1,14 @@
# Statically provisioned PersistentVolume backing the MySQL data PVC.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-pv
  labels:
    type: local
spec:
  # "manual" opts out of dynamic provisioning; the claiming PVC must request
  # the same storageClassName for the bind to succeed.
  storageClassName: manual
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    # hostPath is node-local: the data exists only on the node where the
    # MySQL pod runs. On multi-node clusters pin the pod/PV to one node
    # (nodeAffinity) or the database will appear empty after a reschedule.
    path: "/mnt/data/mysql"

View File

@@ -3,12 +3,5 @@ kind: PersistentVolumeClaim
metadata: metadata:
name: mysql-data-pvc name: mysql-data-pvc
spec: spec:
# Must explicitly match the storageClassName already on the live PVC. storageClassName: manual
# Without this, kubectl apply diffs nil (base has no field) vs "local-path" volumeName: mysql-pv
# (cluster) and tries to mutate a bound PVC — which Kubernetes forbids.
storageClassName: local-path
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi

View File

@@ -1,3 +1,4 @@
server { server {
listen 80; listen 80;
server_name localhost; server_name localhost;
@@ -5,7 +6,12 @@ server {
root /usr/share/nginx/html; root /usr/share/nginx/html;
index index.html; index index.html;
# Proxy API requests to backend service # Serve static files
location / {
try_files $uri $uri/ /index.html;
}
# Proxy API requests to backend
location /api/ { location /api/ {
proxy_pass http://backend:3001; proxy_pass http://backend:3001;
proxy_http_version 1.1; proxy_http_version 1.1;
@@ -13,23 +19,5 @@ server {
proxy_set_header Connection 'upgrade'; proxy_set_header Connection 'upgrade';
proxy_set_header Host $host; proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade; proxy_cache_bypass $http_upgrade;
proxy_read_timeout 60s;
}
# Proxy Socket.io (real-time notifications)
location /socket.io/ {
proxy_pass http://backend:3001;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_cache_bypass $http_upgrade;
proxy_read_timeout 3600s;
}
# Serve static files — React SPA catch-all
location / {
try_files $uri $uri/ /index.html;
} }
} }

View File

@@ -25,10 +25,7 @@ const VIEW_PAGES = ['calendar', 'kanban', 'list'];
export default function App() { export default function App() {
const now = new Date(); const now = new Date();
const [currentUser, setCurrentUser] = useState<User | null>(() => { const [currentUser, setCurrentUser] = useState<User | null>(null);
try { const s = localStorage.getItem('currentUser'); return s ? JSON.parse(s) : null; }
catch { return null; }
});
const [users, setUsers] = useState<User[]>([]); const [users, setUsers] = useState<User[]>([]);
const [tasks, setTasks] = useState<Task[]>([]); const [tasks, setTasks] = useState<Task[]>([]);
const [activePage, setActivePage] = useState('calendar'); const [activePage, setActivePage] = useState('calendar');
@@ -61,7 +58,7 @@ export default function App() {
.finally(() => setLoading(false)); .finally(() => setLoading(false));
}, [currentUser]); }, [currentUser]);
if (!currentUser) return <LoginPage onLogin={u => { localStorage.setItem('currentUser', JSON.stringify(u)); setCurrentUser(u); setActivePage('calendar'); setActiveView('calendar'); }} />; if (!currentUser) return <LoginPage onLogin={u => { setCurrentUser(u); setActivePage('calendar'); setActiveView('calendar'); }} />;
const handleNavigate = (page: string) => { const handleNavigate = (page: string) => {
setActivePage(page); setActivePage(page);
@@ -253,7 +250,7 @@ export default function App() {
onOpenSidebar={() => setSidebarOpen(true)} users={users} /> onOpenSidebar={() => setSidebarOpen(true)} users={users} />
<div className="app-body"> <div className="app-body">
<Sidebar currentUser={currentUser} activePage={activePage} onNavigate={handleNavigate} <Sidebar currentUser={currentUser} activePage={activePage} onNavigate={handleNavigate}
onSignOut={() => { localStorage.removeItem('currentUser'); setCurrentUser(null); setActivePage('calendar'); setActiveView('calendar'); setSidebarOpen(false); }} onSignOut={() => { setCurrentUser(null); setActivePage('calendar'); setActiveView('calendar'); setSidebarOpen(false); }}
isOpen={sidebarOpen} onClose={() => setSidebarOpen(false)} users={users} /> isOpen={sidebarOpen} onClose={() => setSidebarOpen(false)} users={users} />
<div className="main-content"> <div className="main-content">
{displayPage === 'calendar' && ( {displayPage === 'calendar' && (