.drone.yml (353 lines changed)
@@ -1,310 +1,76 @@
----
+## Universal .drone.yml for all project types
+## Configure your project via service.yaml (see ci-templates/docs/requirements.md)
 
 kind: pipeline
 type: kubernetes
-name: main-pipeline
+name: ci
 
-# Trigger: run on changes in backend, frontend, or .drone.yml
-trigger:
-  branch:
-    - main
-    - master
-  event:
-    - push
-
 steps:
-  # ============================================================
-  # IMAGE BUILDS (run in parallel)
-  # ============================================================
-
-  # --- Build the backend image ---
-  - name: build-backend
-    image: plugins/kaniko
-    when:
-      changeset:
-        includes:
-          - backend/**
-          - .drone.yml
-        excludes:
-          - backend/README.md
-          - backend/**/*.md
-    settings:
-      registry: registry.vigdorov.ru
-      repo: registry.vigdorov.ru/library/team-planner-backend
-      dockerfile: backend/Dockerfile
-      context: backend
-      tags:
-        - ${DRONE_COMMIT_SHA:0:7}
-        - latest
-      cache: true
-      cache_repo: registry.vigdorov.ru/library/team-planner-backend-cache
-      username:
-        from_secret: HARBOR_USER
-      password:
-        from_secret: HARBOR_PASSWORD
-      no_push_metadata: true
-
-  # --- Build the frontend image (in parallel with the backend) ---
-  - name: build-frontend
-    image: plugins/kaniko
-    when:
-      changeset:
-        includes:
-          - frontend/**
-          - .drone.yml
-        excludes:
-          - frontend/README.md
-          - frontend/**/*.md
-    settings:
-      registry: registry.vigdorov.ru
-      repo: registry.vigdorov.ru/library/team-planner-frontend
-      dockerfile: frontend/Dockerfile
-      context: frontend
-      tags:
-        - ${DRONE_COMMIT_SHA:0:7}
-        - latest
-      cache: true
-      cache_repo: registry.vigdorov.ru/library/team-planner-frontend-cache
-      username:
-        from_secret: HARBOR_USER
-      password:
-        from_secret: HARBOR_PASSWORD
-      no_push_metadata: true
-
-  # ============================================================
-  # DEPLOY (only after BOTH builds have finished)
-  # ============================================================
-
-  # --- Deploy the backend to PROD ---
-  - name: deploy-backend
-    image: alpine/k8s:1.28.2
-    depends_on:
-      - build-backend
-      - build-frontend
-    when:
-      changeset:
-        includes:
-          - backend/**
-          - .drone.yml
-        excludes:
-          - backend/README.md
-          - backend/**/*.md
-    environment:
-      KUBE_CONFIG_CONTENT:
-        from_secret: KUBE_CONFIG
-    commands:
-      - mkdir -p ~/.kube
-      - echo "$KUBE_CONFIG_CONTENT" > ~/.kube/config
-      - chmod 600 ~/.kube/config
-      - sed -i "s|https://127.0.0.1:6443|https://10.10.10.100:6443|g" ~/.kube/config
-      - export APP_NAMESPACE="team-planner"
-      - export IMAGE_TAG="${DRONE_COMMIT_SHA:0:7}"
-      - export BACKEND_IMAGE="registry.vigdorov.ru/library/team-planner-backend"
-      - kubectl cluster-info
-      - sed -e "s|__BACKEND_IMAGE__|$BACKEND_IMAGE:$IMAGE_TAG|g" k8s/backend-deployment.yaml | kubectl apply -n $APP_NAMESPACE -f -
-      - kubectl apply -n $APP_NAMESPACE -f k8s/backend-service.yaml
-      - echo "📋 Waiting for rollout..."
-      - echo "=== CURRENT PODS STATE (before rollout) ==="
-      - kubectl get pods -n $APP_NAMESPACE -l app=team-planner-backend -o wide
-      - |
-        if ! kubectl rollout status deployment/team-planner-backend -n $APP_NAMESPACE --timeout=120s; then
-          echo "❌ Rollout failed! Collecting diagnostics..."
-          echo ""
-          echo "=== DEPLOYMENT STATUS ==="
-          kubectl get deployment team-planner-backend -n $APP_NAMESPACE -o wide
-          echo ""
-          echo "=== PODS STATUS ==="
-          kubectl get pods -n $APP_NAMESPACE -l app=team-planner-backend -o wide
-          echo ""
-          echo "=== DESCRIBE DEPLOYMENT ==="
-          kubectl describe deployment team-planner-backend -n $APP_NAMESPACE
-          echo ""
-          echo "=== RECENT EVENTS ==="
-          kubectl get events -n $APP_NAMESPACE --sort-by='.lastTimestamp' | tail -30
-          echo ""
-          echo "=== POD LOGS (last 100 lines) ==="
-          POD_NAME=$(kubectl get pods -n $APP_NAMESPACE -l app=team-planner-backend -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")
-          if [ -n "$POD_NAME" ]; then
-            kubectl logs $POD_NAME -n $APP_NAMESPACE --tail=100 2>/dev/null || echo "No logs available"
-            echo ""
-            echo "=== DESCRIBE POD ==="
-            kubectl describe pod $POD_NAME -n $APP_NAMESPACE
-          else
-            echo "No pods found"
-          fi
-          exit 1
-        fi
-      - echo "✅ Backend deployed to PROD (image:$IMAGE_TAG)"
-
-  # --- Deploy the frontend to PROD ---
-  - name: deploy-frontend
-    image: alpine/k8s:1.28.2
-    depends_on:
-      - build-backend
-      - build-frontend
-    when:
-      changeset:
-        includes:
-          - frontend/**
-          - .drone.yml
-        excludes:
-          - frontend/README.md
-          - frontend/**/*.md
-    environment:
-      KUBE_CONFIG_CONTENT:
-        from_secret: KUBE_CONFIG
-    commands:
-      - mkdir -p ~/.kube
-      - echo "$KUBE_CONFIG_CONTENT" > ~/.kube/config
-      - chmod 600 ~/.kube/config
-      - sed -i "s|https://127.0.0.1:6443|https://10.10.10.100:6443|g" ~/.kube/config
-      - export APP_NAMESPACE="team-planner"
-      - export IMAGE_TAG="${DRONE_COMMIT_SHA:0:7}"
-      - export FRONTEND_IMAGE="registry.vigdorov.ru/library/team-planner-frontend"
-      - kubectl cluster-info
-      - sed -e "s|__FRONTEND_IMAGE__|$FRONTEND_IMAGE:$IMAGE_TAG|g" k8s/frontend-deployment.yaml | kubectl apply -n $APP_NAMESPACE -f -
-      - kubectl apply -n $APP_NAMESPACE -f k8s/frontend-service.yaml
-      - echo "📋 Waiting for rollout..."
-      - |
-        if ! kubectl rollout status deployment/team-planner-frontend -n $APP_NAMESPACE --timeout=300s; then
-          echo "❌ Rollout failed! Collecting diagnostics..."
-          echo ""
-          echo "=== DEPLOYMENT STATUS ==="
-          kubectl get deployment team-planner-frontend -n $APP_NAMESPACE -o wide
-          echo ""
-          echo "=== PODS STATUS ==="
-          kubectl get pods -n $APP_NAMESPACE -l app=team-planner-frontend -o wide
-          echo ""
-          echo "=== DESCRIBE DEPLOYMENT ==="
-          kubectl describe deployment team-planner-frontend -n $APP_NAMESPACE
-          echo ""
-          echo "=== RECENT EVENTS ==="
-          kubectl get events -n $APP_NAMESPACE --sort-by='.lastTimestamp' | tail -30
-          echo ""
-          echo "=== POD LOGS (last 100 lines) ==="
-          POD_NAME=$(kubectl get pods -n $APP_NAMESPACE -l app=team-planner-frontend -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")
-          if [ -n "$POD_NAME" ]; then
-            kubectl logs $POD_NAME -n $APP_NAMESPACE --tail=100 2>/dev/null || echo "No logs available"
-            echo ""
-            echo "=== DESCRIBE POD ==="
-            kubectl describe pod $POD_NAME -n $APP_NAMESPACE
-          else
-            echo "No pods found"
-          fi
-          exit 1
-        fi
-      - echo "✅ Frontend deployed to PROD (image:$IMAGE_TAG)"
+  - name: prepare
+    image: alpine:3.19
+    environment:
+      GITEA_TOKEN:
+        from_secret: GITEA_TOKEN
+    commands:
+      - apk add --no-cache git bash yq
+      - git clone --depth 1 https://token:$GITEA_TOKEN@git.vigdorov.ru/vigdorov/ci-templates.git .ci
+      - chmod +x .ci/scripts/*.sh
+      - bash .ci/scripts/prepare.sh
+
+  - name: build
+    image: gcr.io/kaniko-project/executor:v1.23.2-debug
+    depends_on: [prepare]
+    environment:
+      HARBOR_USER:
+        from_secret: HARBOR_USER
+      HARBOR_PASSWORD:
+        from_secret: HARBOR_PASSWORD
+    commands:
+      - /busybox/sh .ci/scripts/build.sh
+
+  - name: deploy
+    image: alpine:3.19
+    depends_on: [build]
+    environment:
+      KUBE_CONFIG:
+        from_secret: KUBE_CONFIG
+    commands:
+      - apk add --no-cache bash yq kubectl helm
+      - bash .ci/scripts/deploy.sh
+
+trigger:
+  branch: [main, master]
+  event: [push]
 
 ---
 kind: pipeline
 type: kubernetes
-name: infra-pipeline
+name: keycloak-theme
 
-# Trigger: run only on changes to the k8s configs
-trigger:
-  branch:
-    - main
-    - master
-  event:
-    - push
-  paths:
-    include:
-      - k8s/**
-
-steps:
-  # --- Create secrets (REMOVE AFTER THE FIRST DEPLOY) ---
-  - name: create-secrets
-    image: alpine/k8s:1.28.2
-    environment:
-      KUBE_CONFIG_CONTENT:
-        from_secret: KUBE_CONFIG
-      DB_NAME:
-        from_secret: DB_NAME
-      DB_USER:
-        from_secret: DB_USER
-      DB_PASSWORD:
-        from_secret: DB_PASSWORD
-    commands:
-      - mkdir -p ~/.kube
-      - echo "$KUBE_CONFIG_CONTENT" > ~/.kube/config
-      - chmod 600 ~/.kube/config
-      - sed -i "s|https://127.0.0.1:6443|https://10.10.10.100:6443|g" ~/.kube/config
-      - export APP_NAMESPACE="team-planner"
-      - kubectl create namespace $APP_NAMESPACE --dry-run=client -o yaml | kubectl apply -f -
-      - |
-        kubectl create secret generic team-planner-secrets \
-          --from-literal=db-name="$DB_NAME" \
-          --from-literal=db-user="$DB_USER" \
-          --from-literal=db-password="$DB_PASSWORD" \
-          --namespace=$APP_NAMESPACE \
-          --dry-run=client -o yaml | kubectl apply -f -
-      - echo "✅ Secrets created/updated"
-
-  # --- Deploy infrastructure (PostgreSQL, Services, Ingress) ---
-  - name: deploy-infra
-    image: alpine/k8s:1.28.2
-    depends_on:
-      - create-secrets
-    environment:
-      KUBE_CONFIG_CONTENT:
-        from_secret: KUBE_CONFIG
-    commands:
-      - mkdir -p ~/.kube
-      - echo "$KUBE_CONFIG_CONTENT" > ~/.kube/config
-      - chmod 600 ~/.kube/config
-      - sed -i "s|https://127.0.0.1:6443|https://10.10.10.100:6443|g" ~/.kube/config
-      - export APP_NAMESPACE="team-planner"
-      - export HOSTNAME="team-planner.vigdorov.ru"
-      - export SECRET_NAME="wildcard-cert"
-      - kubectl cluster-info
-      - kubectl create namespace $APP_NAMESPACE --dry-run=client -o yaml | kubectl apply -f -
-      - kubectl apply -n $APP_NAMESPACE -f k8s/postgres-service.yaml
-      - kubectl apply -n $APP_NAMESPACE -f k8s/backend-service.yaml
-      - kubectl apply -n $APP_NAMESPACE -f k8s/frontend-service.yaml
-      - sed -e "s|__HOSTNAME__|$HOSTNAME|g" -e "s|__SECRET_NAME__|$SECRET_NAME|g" k8s/ingress.yaml | kubectl apply -n $APP_NAMESPACE -f -
-      - echo "✅ Infrastructure updated"
-
----
-kind: pipeline
-type: kubernetes
-name: keycloak-theme-pipeline
-
-trigger:
-  branch:
-    - main
-    - master
-  event:
-    - push
-
 volumes:
   - name: shared
     temp: {}
 
 steps:
-  # --- Step 1: check for changes ---
   - name: check-changes
     image: alpine/git
     volumes:
       - name: shared
         path: /shared
     commands:
       - |
-        echo "🔍 Checking for changes in keycloak-theme/..."
         CHANGED_FILES=$(git diff --name-only HEAD~1 HEAD -- keycloak-theme/ 2>/dev/null | grep -v '\.md$' || true)
         if [ -z "$CHANGED_FILES" ]; then
-          echo "✅ No changes in keycloak-theme/ - will skip build and deploy"
+          echo "No changes in keycloak-theme/ - skipping"
           touch /shared/.skip
         else
-          echo "📝 Changed files:"
+          echo "Changed files:"
           echo "$CHANGED_FILES"
-          echo "🔨 Will proceed with build and deploy"
         fi
 
-  # --- Step 2: build the image (only if there are changes) ---
   - name: build-keycloak-theme
     image: gcr.io/kaniko-project/executor:debug
-    depends_on:
-      - check-changes
+    depends_on: [check-changes]
    volumes:
       - name: shared
         path: /shared
@@ -316,34 +82,25 @@ steps:
     commands:
       - |
         if [ -f /shared/.skip ]; then
-          echo "⏭️ Skipping build - no changes in keycloak-theme/"
+          echo "Skipping build"
           exit 0
         fi
       - |
-        echo "🔨 Building Keycloak theme image..."
         export IMAGE_TAG=$(echo $DRONE_COMMIT_SHA | cut -c1-7)
         export REGISTRY="registry.vigdorov.ru"
         export REPO="$REGISTRY/library/keycloak-team-planner"
 
-        # Create the docker config for kaniko
         mkdir -p /kaniko/.docker
         echo "{\"auths\":{\"$REGISTRY\":{\"username\":\"$HARBOR_USER\",\"password\":\"$HARBOR_PASSWORD\"}}}" > /kaniko/.docker/config.json
 
         /kaniko/executor \
           --dockerfile=keycloak-theme/Dockerfile \
           --context=dir:///drone/src/keycloak-theme \
           --destination=$REPO:$IMAGE_TAG \
-          --destination=$REPO:26.5.0 \
           --destination=$REPO:latest \
           --cache=false
 
-        echo "✅ Image built: $REPO:$IMAGE_TAG"
-
-  # --- Step 3: deploy (only if there are changes) ---
   - name: deploy-keycloak-theme
     image: alpine/k8s:1.28.2
-    depends_on:
-      - build-keycloak-theme
+    depends_on: [build-keycloak-theme]
     volumes:
       - name: shared
         path: /shared
@@ -353,28 +110,18 @@ steps:
     commands:
       - |
         if [ -f /shared/.skip ]; then
-          echo "⏭️ Skipping deploy - no changes in keycloak-theme/"
+          echo "Skipping deploy"
           exit 0
         fi
       - |
-        echo "🚀 Deploying Keycloak theme..."
         mkdir -p ~/.kube
         echo "$KUBE_CONFIG_CONTENT" > ~/.kube/config
         chmod 600 ~/.kube/config
         sed -i "s|https://127.0.0.1:6443|https://10.10.10.100:6443|g" ~/.kube/config
 
-        export KEYCLOAK_NAMESPACE="auth"
         export IMAGE_TAG=$(echo $DRONE_COMMIT_SHA | cut -c1-7)
-        export KEYCLOAK_IMAGE="registry.vigdorov.ru/library/keycloak-team-planner:$IMAGE_TAG"
-
-        kubectl cluster-info
-        kubectl set image statefulset/keycloak-keycloakx keycloak=$KEYCLOAK_IMAGE -n $KEYCLOAK_NAMESPACE
-
-        echo "📋 Waiting for rollout..."
-        if ! kubectl rollout status statefulset/keycloak-keycloakx -n $KEYCLOAK_NAMESPACE --timeout=180s; then
-          echo "❌ Rollout failed! Collecting diagnostics..."
-          kubectl get pods -n $KEYCLOAK_NAMESPACE -l app.kubernetes.io/name=keycloakx -o wide
-          kubectl describe statefulset keycloak-keycloakx -n $KEYCLOAK_NAMESPACE
-          exit 1
-        fi
-        echo "✅ Keycloak theme deployed (image:$IMAGE_TAG)"
+        kubectl set image statefulset/keycloak-keycloakx keycloak=registry.vigdorov.ru/library/keycloak-team-planner:$IMAGE_TAG -n auth
+        kubectl rollout status statefulset/keycloak-keycloakx -n auth --timeout=180s
+
+trigger:
+  branch: [main, master]
+  event: [push]
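
Reviewer note: the new pipeline delegates all real work to .ci/scripts/prepare.sh, build.sh, and deploy.sh, which live in the ci-templates repository and are not part of this diff. As a rough sketch of the contract, assuming deploy.sh reads service.yaml with yq v4 (the deploy step installs bash, yq, kubectl, and helm first); every key, path, and variable below is illustrative, not confirmed by this commit:

    #!/usr/bin/env bash
    set -euo pipefail

    # Pull the service definition added in this commit (key names assumed).
    NAME=$(yq '.service.name' service.yaml)            # team-planner
    NAMESPACE=$(yq '.deploy.namespace' service.yaml)   # team-planner
    TAG=$(echo "$DRONE_COMMIT_SHA" | cut -c1-7)

    # Write the kubeconfig from the KUBE_CONFIG secret, as the old inline steps did.
    mkdir -p ~/.kube
    echo "$KUBE_CONFIG" > ~/.kube/config
    chmod 600 ~/.kube/config

    # Render shared manifest templates and apply them (directory name hypothetical).
    kubectl apply -n "$NAMESPACE" -f .ci/manifests/
    kubectl rollout status "deployment/$NAME-backend" -n "$NAMESPACE" --timeout=120s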
Project changelog:
@@ -17,6 +17,7 @@
 
 | Date | What was done |
 |------|---------------|
+| 2026-02-08 | **Infra:** migrated CI/CD to ci-templates (service.yaml + .drone.yml); removed Dockerfile/k8s/nginx/docker-compose |
 | 2025-12-29 | Created REQUIREMENTS.md, CLAUDE.md, CONTEXT.md |
 | 2025-12-29 | Created ARCHITECTURE.md (C4, sequences, API, UI prototypes, specification) |
 | 2025-12-29 | Created ROADMAP.md (phased development plan) |
backend/Dockerfile (deleted file)
@@ -1,48 +0,0 @@
-# Build stage
-FROM node:20-alpine AS builder
-
-WORKDIR /app
-
-# Copy package files
-COPY package*.json ./
-
-# Install ALL dependencies (including devDependencies for build)
-RUN npm install --include=dev
-
-# Copy source code
-COPY . .
-
-# Build the application
-RUN npm run build
-
-# Production stage
-FROM node:20-alpine
-
-WORKDIR /app
-
-# Install only production dependencies
-COPY package*.json ./
-RUN npm install --only=production && npm cache clean --force
-
-# Copy built application from builder
-COPY --from=builder /app/dist ./dist
-
-# Create non-root user
-RUN addgroup -g 1001 -S nodejs && \
-    adduser -S nestjs -u 1001
-
-# Change ownership
-RUN chown -R nestjs:nodejs /app
-
-# Switch to non-root user
-USER nestjs
-
-# Expose port
-EXPOSE 4001
-
-# Health check
-HEALTHCHECK --interval=30s --timeout=3s --start-period=40s \
-    CMD node -e "require('http').get('http://localhost:4001/api/health', (r) => {process.exit(r.statusCode === 200 ? 0 : 1)})"
-
-# Start the application
-CMD ["node", "dist/main"]
docker-compose.yml (deleted file)
@@ -1,8 +0,0 @@
-version: '3.8'
-
-# PostgreSQL is provided by the shared dev instance on the server (10.10.10.100:30432)
-# For the connection, use the variables from .env:
-# DB_HOST=10.10.10.100
-# DB_PORT=30432
-#
-# This file is kept so that other local services can be added later if needed.
frontend/Dockerfile (deleted file)
@@ -1,46 +0,0 @@
-# Build stage
-FROM node:20-alpine AS builder
-
-WORKDIR /app
-
-# Copy package files
-COPY package*.json ./
-
-# Install dependencies
-RUN npm install
-
-# Copy source code
-COPY . .
-
-# Build arguments
-ARG VITE_API_URL=""
-ARG VITE_KEYCLOAK_URL="https://auth.vigdorov.ru"
-ARG VITE_KEYCLOAK_REALM="team-planner"
-ARG VITE_KEYCLOAK_CLIENT_ID="team-planner-frontend"
-
-ENV VITE_API_URL=$VITE_API_URL
-ENV VITE_KEYCLOAK_URL=$VITE_KEYCLOAK_URL
-ENV VITE_KEYCLOAK_REALM=$VITE_KEYCLOAK_REALM
-ENV VITE_KEYCLOAK_CLIENT_ID=$VITE_KEYCLOAK_CLIENT_ID
-
-# Build the application
-RUN npm run build
-
-# Production stage
-FROM nginx:alpine
-
-# Copy built files from builder
-COPY --from=builder /app/dist /usr/share/nginx/html
-
-# Copy nginx configuration
-COPY nginx.conf /etc/nginx/nginx.conf
-
-# Expose port
-EXPOSE 80
-
-# Health check
-HEALTHCHECK --interval=30s --timeout=3s --start-period=5s \
-    CMD wget --quiet --tries=1 --spider http://localhost/health || exit 1
-
-# Start nginx
-CMD ["nginx", "-g", "daemon off;"]
frontend/nginx.conf (deleted file)
@@ -1,62 +0,0 @@
-events {}
-
-http {
-    include /etc/nginx/mime.types;
-    default_type application/octet-stream;
-
-    # Logging
-    access_log /var/log/nginx/access.log;
-    error_log /var/log/nginx/error.log;
-
-    # Performance
-    sendfile on;
-    tcp_nopush on;
-    tcp_nodelay on;
-    keepalive_timeout 65;
-    types_hash_max_size 2048;
-
-    # Gzip compression
-    gzip on;
-    gzip_vary on;
-    gzip_min_length 1024;
-    gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/xml+rss application/json application/javascript;
-
-    server {
-        listen 80;
-        server_name _;
-
-        root /usr/share/nginx/html;
-        index index.html;
-
-        # Health check endpoint for k8s
-        location /health {
-            access_log off;
-            return 200 "healthy\n";
-            add_header Content-Type text/plain;
-        }
-
-        # Proxy API requests to backend
-        location /api/ {
-            proxy_pass http://team-planner-backend-service:4001;
-            proxy_http_version 1.1;
-            proxy_set_header Upgrade $http_upgrade;
-            proxy_set_header Connection 'upgrade';
-            proxy_set_header Host $host;
-            proxy_cache_bypass $http_upgrade;
-            proxy_set_header X-Real-IP $remote_addr;
-            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-            proxy_set_header X-Forwarded-Proto $scheme;
-        }
-
-        # SPA fallback - all routes go to index.html
-        location / {
-            try_files $uri $uri/ /index.html;
-        }
-
-        # Static assets with caching
-        location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot)$ {
-            expires 1y;
-            add_header Cache-Control "public, immutable";
-        }
-    }
-}
k8s/backend-deployment.yaml (deleted file)
@@ -1,75 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: team-planner-backend
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: team-planner-backend
-  template:
-    metadata:
-      labels:
-        app: team-planner-backend
-    spec:
-      imagePullSecrets:
-        - name: harbor-creds
-      containers:
-        - name: team-planner-backend
-          image: __BACKEND_IMAGE__
-          ports:
-            - containerPort: 4001
-          env:
-            - name: NODE_ENV
-              value: "production"
-            - name: PORT
-              value: "4001"
-            - name: DB_HOST
-              value: "shared-postgres-service.shared-db.svc.cluster.local"
-            - name: DB_PORT
-              value: "5432"
-            - name: DB_DATABASE
-              valueFrom:
-                secretKeyRef:
-                  name: team-planner-secrets
-                  key: db-name
-            - name: DB_USERNAME
-              valueFrom:
-                secretKeyRef:
-                  name: team-planner-secrets
-                  key: db-user
-            - name: DB_PASSWORD
-              valueFrom:
-                secretKeyRef:
-                  name: team-planner-secrets
-                  key: db-password
-            - name: KEYCLOAK_REALM_URL
-              value: "https://auth.vigdorov.ru/realms/team-planner"
-            - name: AI_PROXY_BASE_URL
-              value: "http://ai-proxy-service.ai-proxy.svc.cluster.local:3000"
-            - name: AI_PROXY_API_KEY
-              valueFrom:
-                secretKeyRef:
-                  name: team-planner-secrets
-                  key: ai-proxy-api-key
-            - name: NATS_URL
-              value: "nats://nats.nats.svc:4222"
-          resources:
-            requests:
-              memory: "256Mi"
-              cpu: "250m"
-            limits:
-              memory: "512Mi"
-              cpu: "500m"
-          livenessProbe:
-            httpGet:
-              path: /api/health
-              port: 4001
-            initialDelaySeconds: 30
-            periodSeconds: 10
-          readinessProbe:
-            httpGet:
-              path: /api/health
-              port: 4001
-            initialDelaySeconds: 10
-            periodSeconds: 5
k8s/backend-service.yaml (deleted file)
@@ -1,12 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: team-planner-backend-service
-spec:
-  selector:
-    app: team-planner-backend
-  ports:
-    - protocol: TCP
-      port: 4001
-      targetPort: 4001
-  type: ClusterIP
k8s/frontend-deployment.yaml (deleted file)
@@ -1,40 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: team-planner-frontend
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: team-planner-frontend
-  template:
-    metadata:
-      labels:
-        app: team-planner-frontend
-    spec:
-      imagePullSecrets:
-        - name: harbor-creds
-      containers:
-        - name: team-planner-frontend
-          image: __FRONTEND_IMAGE__
-          ports:
-            - containerPort: 80
-          resources:
-            requests:
-              memory: "64Mi"
-              cpu: "100m"
-            limits:
-              memory: "128Mi"
-              cpu: "200m"
-          livenessProbe:
-            httpGet:
-              path: /health
-              port: 80
-            initialDelaySeconds: 10
-            periodSeconds: 10
-          readinessProbe:
-            httpGet:
-              path: /health
-              port: 80
-            initialDelaySeconds: 5
-            periodSeconds: 5
k8s/frontend-service.yaml (deleted file)
@@ -1,12 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: team-planner-frontend-service
-spec:
-  selector:
-    app: team-planner-frontend
-  ports:
-    - protocol: TCP
-      port: 80
-      targetPort: 80
-  type: ClusterIP
k8s/ingress.yaml (deleted file)
@@ -1,33 +0,0 @@
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: team-planner-ingress
-  annotations:
-    traefik.ingress.kubernetes.io/router.entrypoints: websecure
-    traefik.ingress.kubernetes.io/router.tls: "true"
-spec:
-  ingressClassName: traefik
-  tls:
-    - hosts:
-        - __HOSTNAME__
-      secretName: __SECRET_NAME__
-  rules:
-    - host: __HOSTNAME__
-      http:
-        paths:
-          # Backend API routes
-          - path: /api
-            pathType: Prefix
-            backend:
-              service:
-                name: team-planner-backend-service
-                port:
-                  number: 4001
-          # Frontend routes (all other paths)
-          - path: /
-            pathType: Prefix
-            backend:
-              service:
-                name: team-planner-frontend-service
-                port:
-                  number: 80
k8s/postgres-service.yaml (deleted file)
@@ -1,12 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: postgres-service
-spec:
-  selector:
-    app: postgres
-  ports:
-    - protocol: TCP
-      port: 5432
-      targetPort: 5432
-  type: ClusterIP
k8s secrets example manifest (deleted file)
@@ -1,16 +0,0 @@
-# This is an example file. Create the actual secrets.yaml with your real values
-# DO NOT commit secrets.yaml to git!
-#
-# To create the secrets in your cluster, run:
-#   kubectl create -f secrets.yaml -n prod-ns
-
-apiVersion: v1
-kind: Secret
-metadata:
-  name: team-planner-secrets
-type: Opaque
-stringData:
-  # PostgreSQL credentials
-  db-name: "teamplanner"
-  db-user: "teamplanner"
-  db-password: "CHANGE_ME_STRONG_PASSWORD"
service.yaml (new file, 45 lines)
@@ -0,0 +1,45 @@
+service:
+  name: team-planner
+  type: full-stack
+
+backend:
+  context: .
+  port: 4001
+  healthcheck: /api/health
+  resources:
+    cpu: 250m
+    memory: 256Mi
+
+frontend:
+  context: .
+  buildArgs:
+    VITE_KEYCLOAK_URL: "https://auth.vigdorov.ru"
+    VITE_KEYCLOAK_REALM: "team-planner"
+    VITE_KEYCLOAK_CLIENT_ID: "team-planner-frontend"
+
+deploy:
+  namespace: team-planner
+  domain: team-planner.vigdorov.ru
+
+infrastructure:
+  postgres: true
+
+env:
+  - name: NODE_ENV
+    value: "production"
+  - name: PORT
+    value: "4001"
+  - name: DB_HOST
+    value: "shared-postgres-service.shared-db.svc.cluster.local"
+  - name: DB_PORT
+    value: "5432"
+  - name: KEYCLOAK_REALM_URL
+    value: "https://auth.vigdorov.ru/realms/team-planner"
+  - name: AI_PROXY_BASE_URL
+    value: "http://ai-proxy-service.ai-proxy.svc.cluster.local:3000"
+  - name: NATS_URL
+    value: "nats://nats.nats:4222"
+
+envFrom:
+  - secretRef:
+      name: team-planner-secrets
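
Reviewer note: envFrom still points at team-planner-secrets, but the infra-pipeline step that used to create that secret was deleted in this same commit. Unless ci-templates recreates it, the secret must already exist in the cluster; a one-time command mirroring the removed create-secrets step (literal values are placeholders):

    kubectl create namespace team-planner --dry-run=client -o yaml | kubectl apply -f -
    kubectl create secret generic team-planner-secrets \
      --from-literal=db-name="teamplanner" \
      --from-literal=db-user="teamplanner" \
      --from-literal=db-password="CHANGE_ME_STRONG_PASSWORD" \
      --namespace=team-planner \
      --dry-run=client -o yaml | kubectl apply -f -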