# veza/k8s/backups/redis-backup-cronjob.yaml
---
apiVersion: batch/v1
kind: CronJob
metadata:
  name: redis-backup
  namespace: veza-production
  labels:
    app: redis-backup
    component: backup
spec:
  # Run daily at 3:30 AM (30 minutes after postgres backup)
  schedule: "30 3 * * *"
  successfulJobsHistoryLimit: 3
  failedJobsHistoryLimit: 3
  # Never start a new backup while a previous one is still running.
  concurrencyPolicy: Forbid
  jobTemplate:
    spec:
      template:
        metadata:
          labels:
            app: redis-backup
        spec:
          restartPolicy: OnFailure
          containers:
            - name: redis-backup
              image: redis:7-alpine
              command:
                - /bin/sh
                - -c
                - |
                  set -e
                  BACKUP_DIR="/backups/redis"
                  BACKUP_FILE="${BACKUP_DIR}/redis_$(date +%Y%m%d_%H%M%S).rdb"
                  RETENTION_DAYS=${BACKUP_RETENTION_DAYS:-30}
                  echo "Starting Redis backup at $(date)"
                  # Create backup directory
                  mkdir -p "${BACKUP_DIR}"
                  # Authenticate via REDISCLI_AUTH rather than `-a` on the command
                  # line, so the password is not exposed in `ps` output inside the
                  # container (redis-cli reads this variable automatically).
                  if [ -n "${REDIS_PASSWORD}" ]; then
                    export REDISCLI_AUTH="${REDIS_PASSWORD}"
                  fi
                  # Trigger a synchronous snapshot on the Redis server.
                  redis-cli -h "${REDIS_HOST}" -p "${REDIS_PORT:-6379}" SAVE
                  # Copy RDB file from Redis data directory (if accessible).
                  # NOTE(review): /data is only populated when the Redis data
                  # volume is mounted into this pod; with the volumes declared
                  # below it is not, so the --rdb streaming fallback is the path
                  # normally taken — confirm before relying on the cp branch.
                  if [ -f "/data/dump.rdb" ]; then
                    cp /data/dump.rdb "${BACKUP_FILE}"
                  else
                    # Alternative: stream the RDB over the wire. Errors are
                    # deliberately tolerated here; the non-empty check below is
                    # the authoritative success test.
                    redis-cli -h "${REDIS_HOST}" -p "${REDIS_PORT:-6379}" --rdb "${BACKUP_FILE}" || {
                      echo "WARNING: Redis backup may have failed, but continuing..."
                    }
                  fi
                  # Verify the backup exists AND is non-empty (-s). A zero-byte
                  # file left behind by a failed --rdb stream would pass a plain
                  # -f existence check.
                  if [ ! -s "${BACKUP_FILE}" ]; then
                    echo "ERROR: Backup file was not created or is empty!"
                    exit 1
                  fi
                  BACKUP_SIZE=$(du -h "${BACKUP_FILE}" | cut -f1)
                  echo "Backup created successfully: ${BACKUP_FILE} (${BACKUP_SIZE})"
                  # Cleanup old backups (quote the expansion so an unexpected
                  # value cannot word-split into extra find arguments).
                  echo "Cleaning up backups older than ${RETENTION_DAYS} days..."
                  find "${BACKUP_DIR}" -name "redis_*.rdb" -type f -mtime +"${RETENTION_DAYS}" -delete
                  # List remaining backups
                  echo "Remaining backups:"
                  ls -lh "${BACKUP_DIR}" || true
                  # Optional: upload to S3 when bucket and credentials are set.
                  if [ -n "${S3_BUCKET}" ] && [ -n "${AWS_ACCESS_KEY_ID}" ] && [ -n "${AWS_SECRET_ACCESS_KEY}" ]; then
                    echo "Uploading backup to S3..."
                    apk add --no-cache aws-cli || true
                    aws s3 cp "${BACKUP_FILE}" "s3://${S3_BUCKET}/redis-backups/$(basename "${BACKUP_FILE}")" || echo "S3 upload failed, continuing..."
                  fi
                  echo "Backup completed at $(date)"
              env:
                - name: REDIS_HOST
                  valueFrom:
                    secretKeyRef:
                      name: veza-secrets
                      key: redis-host
                - name: REDIS_PORT
                  value: "6379"
                - name: REDIS_PASSWORD
                  valueFrom:
                    secretKeyRef:
                      name: veza-secrets
                      key: redis-password
                      optional: true
                - name: BACKUP_RETENTION_DAYS
                  value: "30"
                - name: S3_BUCKET
                  valueFrom:
                    secretKeyRef:
                      name: veza-secrets
                      key: s3-backup-bucket
                      optional: true
                - name: AWS_ACCESS_KEY_ID
                  valueFrom:
                    secretKeyRef:
                      name: veza-secrets
                      key: aws-access-key-id
                      optional: true
                - name: AWS_SECRET_ACCESS_KEY
                  valueFrom:
                    secretKeyRef:
                      name: veza-secrets
                      key: aws-secret-access-key
                      optional: true
              volumeMounts:
                - name: backup-storage
                  mountPath: /backups
              resources:
                requests:
                  cpu: "100m"
                  memory: "128Mi"
                limits:
                  cpu: "500m"
                  memory: "512Mi"
          volumes:
            - name: backup-storage
              persistentVolumeClaim:
                claimName: redis-backup-storage
---
# Persistent storage backing the redis-backup CronJob's /backups mount.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: redis-backup-storage
  namespace: veza-production
spec:
  # Single-node read-write is sufficient: only the backup Job pod mounts it.
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 20Gi