migrate to ci-templates
Some checks failed
continuous-integration/drone/push Build is failing

This commit is contained in:
2026-02-08 14:11:40 +03:00
parent 75015c1c85
commit 593c573985
14 changed files with 153 additions and 724 deletions

View File

@@ -1,380 +1,127 @@
---
## Universal .drone.yml for all project types.
## Configure your project via service.yaml (see ci-templates/docs/requirements.md).
kind: pipeline
type: kubernetes
name: ci
steps:
  # Clone the shared ci-templates repo into .ci and run project preparation.
  # GITEA_TOKEN is injected as a Drone secret and used for authenticated clone.
  - name: prepare
    image: alpine:3.19
    environment:
      GITEA_TOKEN:
        from_secret: GITEA_TOKEN
    commands:
      - apk add --no-cache git bash yq
      - git clone --depth 1 https://token:$GITEA_TOKEN@git.vigdorov.ru/vigdorov/ci-templates.git .ci
      - chmod +x .ci/scripts/*.sh
      - bash .ci/scripts/prepare.sh
  # Build and push container images via the shared kaniko build script.
  # The kaniko debug image only ships busybox, hence /busybox/sh.
  - name: build
    image: gcr.io/kaniko-project/executor:v1.23.2-debug
    depends_on: [prepare]
    environment:
      HARBOR_USER:
        from_secret: HARBOR_USER
      HARBOR_PASSWORD:
        from_secret: HARBOR_PASSWORD
    commands:
      - /busybox/sh .ci/scripts/build.sh
  # Deploy to Kubernetes via the shared deploy script.
  # KUBE_CONFIG holds the kubeconfig content as a Drone secret.
  - name: deploy
    image: alpine:3.19
    depends_on: [build]
    environment:
      KUBE_CONFIG:
        from_secret: KUBE_CONFIG
    commands:
      - apk add --no-cache bash yq kubectl helm
      - bash .ci/scripts/deploy.sh
# Run on every push to the main branches.
trigger:
  branch: [main, master]
  event: [push]
---
kind: pipeline
type: kubernetes
name: infra-pipeline
# Trigger: run only when the Kubernetes manifests under k8s/ change.
trigger:
  branch: [main, master]
  event: [push]
  paths:
    include:
      - k8s/**
steps:
  # --- Create/update application secrets (remove this step after the first deploy) ---
  - name: create-secrets
    image: alpine/k8s:1.28.2
    environment:
      KUBE_CONFIG_CONTENT:
        from_secret: KUBE_CONFIG
      DB_NAME:
        from_secret: DB_NAME
      DB_USER:
        from_secret: DB_USER
      DB_PASSWORD:
        from_secret: DB_PASSWORD
    commands:
      # Materialize the kubeconfig from the secret and point it at the real API server.
      - mkdir -p ~/.kube
      - echo "$KUBE_CONFIG_CONTENT" > ~/.kube/config
      - chmod 600 ~/.kube/config
      - sed -i "s|https://127.0.0.1:6443|https://10.10.10.100:6443|g" ~/.kube/config
      - export APP_NAMESPACE="team-planner"
      # Idempotent namespace + secret creation via dry-run | apply.
      - kubectl create namespace $APP_NAMESPACE --dry-run=client -o yaml | kubectl apply -f -
      - |
        kubectl create secret generic team-planner-secrets \
          --from-literal=db-name="$DB_NAME" \
          --from-literal=db-user="$DB_USER" \
          --from-literal=db-password="$DB_PASSWORD" \
          --namespace=$APP_NAMESPACE \
          --dry-run=client -o yaml | kubectl apply -f -
      - echo "✅ Secrets created/updated"
  # --- Roll out shared infrastructure (PostgreSQL, Services, Ingress) ---
  - name: deploy-infra
    image: alpine/k8s:1.28.2
    depends_on: [create-secrets]
    environment:
      KUBE_CONFIG_CONTENT:
        from_secret: KUBE_CONFIG
    commands:
      # Same kubeconfig bootstrap as in create-secrets.
      - mkdir -p ~/.kube
      - echo "$KUBE_CONFIG_CONTENT" > ~/.kube/config
      - chmod 600 ~/.kube/config
      - sed -i "s|https://127.0.0.1:6443|https://10.10.10.100:6443|g" ~/.kube/config
      - export APP_NAMESPACE="team-planner"
      - export HOSTNAME="team-planner.vigdorov.ru"
      - export SECRET_NAME="wildcard-cert"
      - kubectl cluster-info
      - kubectl create namespace $APP_NAMESPACE --dry-run=client -o yaml | kubectl apply -f -
      - kubectl apply -n $APP_NAMESPACE -f k8s/postgres-service.yaml
      - kubectl apply -n $APP_NAMESPACE -f k8s/backend-service.yaml
      - kubectl apply -n $APP_NAMESPACE -f k8s/frontend-service.yaml
      # Substitute host/cert placeholders in the ingress template before applying.
      - sed -e "s|__HOSTNAME__|$HOSTNAME|g" -e "s|__SECRET_NAME__|$SECRET_NAME|g" k8s/ingress.yaml | kubectl apply -n $APP_NAMESPACE -f -
      - echo "✅ Infrastructure updated"
---
kind: pipeline
type: kubernetes
name: keycloak-theme
volumes:
  # Scratch volume shared between steps; carries the /shared/.skip marker
  # that check-changes uses to tell later steps to no-op.
  - name: shared
    temp: {}
steps:
  # --- Step 1: detect whether keycloak-theme/ changed in this push ---
  - name: check-changes
    image: alpine/git
    volumes:
      - name: shared
        path: /shared
    commands:
      - |
        CHANGED_FILES=$(git diff --name-only HEAD~1 HEAD -- keycloak-theme/ 2>/dev/null | grep -v '\.md$' || true)
        if [ -z "$CHANGED_FILES" ]; then
          echo "No changes in keycloak-theme/ - skipping"
          touch /shared/.skip
        else
          echo "Changed files:"
          echo "$CHANGED_FILES"
        fi
  # --- Step 2: build and push the theme image (no-op when .skip exists) ---
  - name: build-keycloak-theme
    image: gcr.io/kaniko-project/executor:debug
    depends_on: [check-changes]
    volumes:
      - name: shared
        path: /shared
    environment:
      HARBOR_USER:
        from_secret: HARBOR_USER
      HARBOR_PASSWORD:
        from_secret: HARBOR_PASSWORD
    commands:
      - |
        if [ -f /shared/.skip ]; then
          echo "Skipping build"
          exit 0
        fi
      - |
        export IMAGE_TAG=$(echo $DRONE_COMMIT_SHA | cut -c1-7)
        export REGISTRY="registry.vigdorov.ru"
        export REPO="$REGISTRY/library/keycloak-team-planner"
        # Registry credentials for kaniko's pusher.
        mkdir -p /kaniko/.docker
        echo "{\"auths\":{\"$REGISTRY\":{\"username\":\"$HARBOR_USER\",\"password\":\"$HARBOR_PASSWORD\"}}}" > /kaniko/.docker/config.json
        /kaniko/executor \
          --dockerfile=keycloak-theme/Dockerfile \
          --context=dir:///drone/src/keycloak-theme \
          --destination=$REPO:$IMAGE_TAG \
          --destination=$REPO:latest \
          --cache=false
  # --- Step 3: roll the new image out to the Keycloak statefulset ---
  - name: deploy-keycloak-theme
    image: alpine/k8s:1.28.2
    depends_on: [build-keycloak-theme]
    volumes:
      - name: shared
        path: /shared
    environment:
      KUBE_CONFIG_CONTENT:
        from_secret: KUBE_CONFIG
    commands:
      - |
        if [ -f /shared/.skip ]; then
          echo "Skipping deploy"
          exit 0
        fi
      - |
        # Bootstrap kubeconfig and point it at the real API server.
        mkdir -p ~/.kube
        echo "$KUBE_CONFIG_CONTENT" > ~/.kube/config
        chmod 600 ~/.kube/config
        sed -i "s|https://127.0.0.1:6443|https://10.10.10.100:6443|g" ~/.kube/config
        export IMAGE_TAG=$(echo $DRONE_COMMIT_SHA | cut -c1-7)
        kubectl set image statefulset/keycloak-keycloakx keycloak=registry.vigdorov.ru/library/keycloak-team-planner:$IMAGE_TAG -n auth
        kubectl rollout status statefulset/keycloak-keycloakx -n auth --timeout=180s
# Run on every push to the main branches; the check-changes step gates real work.
trigger:
  branch: [main, master]
  event: [push]