mirror of
https://gitea.ingwaz.work/Ingwaz/openbrain-mcp.git
synced 2026-03-31 14:49:06 +00:00
Initial public release
This commit is contained in:
213
.gitea/deploy.sh
Executable file
213
.gitea/deploy.sh
Executable file
@@ -0,0 +1,213 @@
|
||||
#!/bin/bash
#
# OpenBrain MCP Deployment Script
# Deploys the OpenBrain MCP server to the VPS
#
# Usage: ./deploy.sh [options]
# Options:
#   --build-local    Build on local machine (requires cross-compilation)
#   --build-remote   Build on VPS (default)
#   --skip-model     Skip model download
#   --restart-only   Only restart the service
#
# Required env: VPS_HOST. Optional: VPS_USER (default root), SSH_KEY.

set -euo pipefail

# Configuration (overridable via environment).
VPS_HOST="${VPS_HOST:-}"
VPS_USER="${VPS_USER:-root}"
# Fixed deployment constants — readonly so nothing later in the script
# can clobber them by accident.
readonly DEPLOY_DIR="/opt/openbrain-mcp"
readonly SERVICE_NAME="openbrain-mcp"
SSH_KEY="${SSH_KEY:-/tmp/id_ed25519}"

# ANSI colors for log output.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly NC='\033[0m' # No Color

# Logging helpers. Warnings and errors are routed to stderr so stdout
# stays clean for captured command output (previously they went to
# stdout, which mixed diagnostics into piped output).
log_info()  { echo -e "${GREEN}[INFO]${NC} $1"; }
log_warn()  { echo -e "${YELLOW}[WARN]${NC} $1" >&2; }
log_error() { echo -e "${RED}[ERROR]${NC} $1" >&2; }
|
||||
|
||||
# Parse arguments — each flag simply flips one of the defaults below.
BUILD_REMOTE=true    # build on the VPS unless --build-local is given
SKIP_MODEL=false     # download the embedding model by default
RESTART_ONLY=false   # full deploy by default

for opt in "$@"; do
  case "$opt" in
    --build-local)   BUILD_REMOTE=false ;;
    --build-remote)  BUILD_REMOTE=true ;;
    --skip-model)    SKIP_MODEL=true ;;
    --restart-only)  RESTART_ONLY=true ;;
    *)
      log_error "Unknown argument: $opt"
      exit 1
      ;;
  esac
done
|
||||
|
||||
# Resolve the directory this script lives in (the .gitea folder) and,
# from it, the project root one level up.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"

log_info "Project root: $PROJECT_ROOT"

# A deployment target is mandatory; bail out early with a clear message.
if [[ -z "$VPS_HOST" ]]; then
  log_error "VPS_HOST is required. Export VPS_HOST before running deploy.sh"
  exit 1
fi
log_info "Deploying to: $VPS_USER@$VPS_HOST:$DEPLOY_DIR"
|
||||
|
||||
# Thin wrappers around ssh/scp that inject the deploy key and disable
# host-key verification (the target host is assumed trusted).
ssh_cmd() {
  local -a opts=(-i "$SSH_KEY" -o StrictHostKeyChecking=no)
  ssh "${opts[@]}" "$VPS_USER@$VPS_HOST" "$@"
}

scp_cmd() {
  local -a opts=(-i "$SSH_KEY" -o StrictHostKeyChecking=no)
  scp "${opts[@]}" "$@"
}
|
||||
|
||||
# Restart-only mode: bounce the service, show its status, and stop here.
if [[ "$RESTART_ONLY" = true ]]; then
  log_info "Restarting service only..."
  ssh_cmd "systemctl restart $SERVICE_NAME"
  ssh_cmd "systemctl status $SERVICE_NAME --no-pager"
  exit 0
fi
|
||||
|
||||
# Step 1: Create deployment directory on VPS
|
||||
log_info "Creating deployment directory on VPS..."
|
||||
ssh_cmd "mkdir -p $DEPLOY_DIR/{src,models,logs,lib,.gitea}"
|
||||
|
||||
# Step 2: Sync source code to VPS
|
||||
log_info "Syncing source code to VPS..."
|
||||
rsync -avz --delete \
|
||||
-e "ssh -i $SSH_KEY -o StrictHostKeyChecking=no" \
|
||||
--exclude 'target/' \
|
||||
--exclude '.git/' \
|
||||
--exclude '.a0proj/' \
|
||||
--exclude 'models/' \
|
||||
--exclude '*.md' \
|
||||
"$PROJECT_ROOT/" \
|
||||
"$VPS_USER@$VPS_HOST:$DEPLOY_DIR/"
|
||||
|
||||
# Step 3: Copy .env if it doesn't exist on VPS
|
||||
if ! ssh_cmd "test -f $DEPLOY_DIR/.env"; then
|
||||
log_warn ".env not found on VPS. Copying .env.example..."
|
||||
ssh_cmd "cp $DEPLOY_DIR/.env.example $DEPLOY_DIR/.env"
|
||||
log_warn "Please edit $DEPLOY_DIR/.env on VPS with actual credentials!"
|
||||
fi
|
||||
|
||||
# Step 4: Download model if needed
|
||||
if [ "$SKIP_MODEL" = false ]; then
|
||||
log_info "Checking/downloading embedding model..."
|
||||
ssh_cmd "bash $DEPLOY_DIR/.gitea/download-model.sh"
|
||||
fi
|
||||
|
||||
# Step 5: Compile the release binary remotely on the VPS.
if [[ "$BUILD_REMOTE" = true ]]; then
  log_info "Building on VPS (this may take a while on first run)..."
  ssh_cmd "cd $DEPLOY_DIR && \
    source ~/.cargo/env 2>/dev/null || true && \
    cargo build --release 2>&1"
else
  log_error "Local cross-compilation not yet implemented"
  exit 1
fi

# Step 5b: Install the built binary where systemd expects it.
log_info "Installing built binary..."
ssh_cmd "cp $DEPLOY_DIR/target/release/openbrain-mcp $DEPLOY_DIR/openbrain-mcp && chmod +x $DEPLOY_DIR/openbrain-mcp"
|
||||
|
||||
# Step 5c: Bootstrap runtime prerequisites on the VPS.
# The heredoc below runs remotely ('EOS' is quoted, so nothing expands
# locally): it installs OS packages, creates the service user/group,
# fetches the ONNX Runtime shared library, seeds .env, and fixes
# ownership/permissions.
# NOTE(review): this remote script is duplicated almost verbatim in
# .gitea/workflows/ci-cd.yaml — keep the two copies in sync.
log_info "Bootstrapping runtime prerequisites..."
# Consistency fix: reuse ssh_cmd (same key and options) instead of
# duplicating the raw ssh invocation; stdin — the heredoc — is passed
# straight through to the remote `bash -s`.
ssh_cmd "DEPLOY_DIR=$DEPLOY_DIR SERVICE_USER=openbrain SERVICE_GROUP=openbrain ORT_VERSION=1.24.3 bash -s" <<'EOS'
set -euo pipefail

DEPLOY_DIR="${DEPLOY_DIR:-/opt/openbrain-mcp}"
SERVICE_USER="${SERVICE_USER:-openbrain}"
SERVICE_GROUP="${SERVICE_GROUP:-openbrain}"
ORT_VERSION="${ORT_VERSION:-1.24.3}"

if command -v apt-get >/dev/null 2>&1; then
  export DEBIAN_FRONTEND=noninteractive
  apt-get update
  apt-get install -y --no-install-recommends ca-certificates curl tar libssl3
fi

if ! getent group "$SERVICE_GROUP" >/dev/null 2>&1; then
  groupadd --system "$SERVICE_GROUP"
fi

if ! id -u "$SERVICE_USER" >/dev/null 2>&1; then
  useradd --system --gid "$SERVICE_GROUP" --home "$DEPLOY_DIR" --shell /usr/sbin/nologin "$SERVICE_USER"
fi

install -d -m 0755 "$DEPLOY_DIR" "$DEPLOY_DIR/models" "$DEPLOY_DIR/logs" "$DEPLOY_DIR/lib"

ARCH="$(uname -m)"
case "$ARCH" in
  x86_64) ORT_ARCH="x64" ;;
  aarch64|arm64) ORT_ARCH="aarch64" ;;
  *) echo "Unsupported arch: $ARCH"; exit 1 ;;
esac

if [[ ! -f "$DEPLOY_DIR/lib/libonnxruntime.so" ]]; then
  TMP_DIR="$(mktemp -d)"
  ORT_TGZ="onnxruntime-linux-${ORT_ARCH}-${ORT_VERSION}.tgz"
  ORT_URL="https://github.com/microsoft/onnxruntime/releases/download/v${ORT_VERSION}/${ORT_TGZ}"
  curl -fL "$ORT_URL" -o "$TMP_DIR/$ORT_TGZ"
  tar -xzf "$TMP_DIR/$ORT_TGZ" -C "$TMP_DIR"
  ORT_ROOT="$TMP_DIR/onnxruntime-linux-${ORT_ARCH}-${ORT_VERSION}"
  cp "$ORT_ROOT/lib/libonnxruntime.so" "$DEPLOY_DIR/lib/libonnxruntime.so"
  cp "$ORT_ROOT/lib/libonnxruntime.so.${ORT_VERSION}" "$DEPLOY_DIR/lib/libonnxruntime.so.${ORT_VERSION}" || true
  rm -rf "$TMP_DIR"
fi

ENV_FILE="$DEPLOY_DIR/.env"
if [[ ! -f "$ENV_FILE" ]]; then
  if [[ -f "$DEPLOY_DIR/.env.example" ]]; then
    cp "$DEPLOY_DIR/.env.example" "$ENV_FILE"
  else
    touch "$ENV_FILE"
  fi
fi

upsert_env() {
  local key="$1"
  local value="$2"
  if grep -qE "^${key}=" "$ENV_FILE"; then
    sed -i "s|^${key}=.*|${key}=${value}|" "$ENV_FILE"
  else
    printf '%s=%s\n' "$key" "$value" >> "$ENV_FILE"
  fi
}

upsert_env "OPENBRAIN__EMBEDDING__MODEL_PATH" "$DEPLOY_DIR/models/all-MiniLM-L6-v2"
upsert_env "ORT_DYLIB_PATH" "$DEPLOY_DIR/lib/libonnxruntime.so"
upsert_env "OPENBRAIN__SERVER__HOST" "0.0.0.0"

chmod +x "$DEPLOY_DIR/openbrain-mcp" "$DEPLOY_DIR/.gitea/download-model.sh"
chown -R "$SERVICE_USER:$SERVICE_GROUP" "$DEPLOY_DIR"
EOS
|
||||
|
||||
# Step 5d: Run database migrations with the newly deployed binary.
log_info "Running database migrations..."
ssh_cmd "cd $DEPLOY_DIR && ./openbrain-mcp migrate"

# Step 6: Install/refresh the systemd unit and enable it at boot.
log_info "Installing systemd service..."
scp_cmd "$SCRIPT_DIR/openbrain.service" "$VPS_USER@$VPS_HOST:/etc/systemd/system/$SERVICE_NAME.service"
ssh_cmd "systemctl daemon-reload"
ssh_cmd "systemctl enable $SERVICE_NAME"

# Step 7: Restart the service and give it a moment to come up.
log_info "Restarting service..."
ssh_cmd "systemctl restart $SERVICE_NAME"
sleep 2

# Step 8: Report status; non-fatal, the deployment already happened.
log_info "Checking service status..."
ssh_cmd "systemctl status $SERVICE_NAME --no-pager" || true

log_info "Deployment complete!"
# NOTE(review): the CI workflow health-checks /health and /ready on
# port 3100, while this message points at /mcp/health — confirm which
# endpoint the server actually serves.
log_info "Service URL: http://$VPS_HOST:3100/mcp/health"
|
||||
92
.gitea/download-model.sh
Executable file
92
.gitea/download-model.sh
Executable file
@@ -0,0 +1,92 @@
|
||||
#!/bin/bash
#
# Download ONNX embedding model for OpenBrain MCP
# Downloads all-MiniLM-L6-v2 from Hugging Face
#

set -euo pipefail

# Where the model lands on disk; DEPLOY_DIR may be overridden via env
# (the CI workflow and deploy.sh both export it).
DEPLOY_DIR="${DEPLOY_DIR:-/opt/openbrain-mcp}"
MODEL_DIR="$DEPLOY_DIR/models/all-MiniLM-L6-v2"
readonly MODEL_NAME="sentence-transformers/all-MiniLM-L6-v2"

# ANSI colors for log output.
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly NC='\033[0m'

# Logging helpers; warnings go to stderr (fix: previously on stdout)
# so piped output stays clean.
log_info() { echo -e "${GREEN}[INFO]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1" >&2; }
|
||||
|
||||
# Skip the download entirely when both key artifacts already exist.
if [ -f "$MODEL_DIR/model.onnx" ] && [ -f "$MODEL_DIR/tokenizer.json" ]; then
    log_info "Model already exists at $MODEL_DIR"
    exit 0
fi

log_info "Downloading embedding model to $MODEL_DIR..."
mkdir -p "$MODEL_DIR"

# Method 1: Try using huggingface-cli if available
if command -v huggingface-cli &> /dev/null; then
    log_info "Using huggingface-cli to download model..."
    huggingface-cli download "$MODEL_NAME" \
        --local-dir "$MODEL_DIR" \
        --include "*.onnx" "*.json" "*.txt" \
        --exclude "*.bin" "*.safetensors" "*.h5"

    # Fix: --local-dir mirrors the repo layout, so the ONNX weights land
    # at $MODEL_DIR/onnx/model.onnx — but the verification step below
    # (and the server's MODEL_PATH config) expect $MODEL_DIR/model.onnx.
    # Promote the file to the expected location when needed.
    if [ ! -f "$MODEL_DIR/model.onnx" ] && [ -f "$MODEL_DIR/onnx/model.onnx" ]; then
        cp "$MODEL_DIR/onnx/model.onnx" "$MODEL_DIR/model.onnx"
    fi
else
    # Method 2: Direct download from Hugging Face
    log_info "Downloading directly from Hugging Face..."

    BASE_URL="https://huggingface.co/$MODEL_NAME/resolve/main"

    # The ready-made ONNX export lives under the repo's onnx/ directory.
    ONNX_URL="https://huggingface.co/$MODEL_NAME/resolve/main/onnx/model.onnx"

    log_info "Downloading model.onnx..."
    if ! curl -fSL "$ONNX_URL" -o "$MODEL_DIR/model.onnx" 2>/dev/null; then
        # Fallback: convert from pytorch (requires python)
        log_warn "ONNX model not found, will need to convert from PyTorch..."
        log_warn "Installing optimum for ONNX export..."
        # Fix: quote the requirement so bash cannot glob-expand the
        # brackets (breaks when a matching file exists / nullglob is set).
        pip install --quiet 'optimum[exporters]' onnx onnxruntime

        python3 << PYEOF
from optimum.onnxruntime import ORTModelForFeatureExtraction
from transformers import AutoTokenizer

model = ORTModelForFeatureExtraction.from_pretrained("$MODEL_NAME", export=True)
tokenizer = AutoTokenizer.from_pretrained("$MODEL_NAME")

model.save_pretrained("$MODEL_DIR")
tokenizer.save_pretrained("$MODEL_DIR")
print("Model exported to ONNX successfully!")
PYEOF
    fi

    # Tokenizer/config sidecar files; best-effort (|| true) since not
    # every file exists for every model revision.
    log_info "Downloading tokenizer.json..."
    curl -fSL "$BASE_URL/tokenizer.json" -o "$MODEL_DIR/tokenizer.json" 2>/dev/null || true

    log_info "Downloading tokenizer_config.json..."
    curl -fSL "$BASE_URL/tokenizer_config.json" -o "$MODEL_DIR/tokenizer_config.json" 2>/dev/null || true

    log_info "Downloading config.json..."
    curl -fSL "$BASE_URL/config.json" -o "$MODEL_DIR/config.json" 2>/dev/null || true

    log_info "Downloading vocab.txt..."
    curl -fSL "$BASE_URL/vocab.txt" -o "$MODEL_DIR/vocab.txt" 2>/dev/null || true

    log_info "Downloading special_tokens_map.json..."
    curl -fSL "$BASE_URL/special_tokens_map.json" -o "$MODEL_DIR/special_tokens_map.json" 2>/dev/null || true
fi
|
||||
|
||||
# Verify the download actually produced the main artifact; fail loudly
# if it is missing so callers (deploy.sh, CI) see a non-zero exit.
if [ ! -f "$MODEL_DIR/model.onnx" ]; then
    log_warn "Warning: model.onnx not found after download"
    exit 1
fi

MODEL_SIZE=$(du -h "$MODEL_DIR/model.onnx" | cut -f1)
log_info "Model downloaded successfully! Size: $MODEL_SIZE"
ls -la "$MODEL_DIR/"
|
||||
31
.gitea/openbrain.service
Normal file
31
.gitea/openbrain.service
Normal file
@@ -0,0 +1,31 @@
|
||||
[Unit]
Description=OpenBrain MCP Server - Vector Memory for AI Agents
# Start once the network is actually online and local PostgreSQL is up.
After=network-online.target postgresql.service
Wants=network-online.target postgresql.service

[Service]
Type=simple
User=openbrain
Group=openbrain
WorkingDirectory=/opt/openbrain-mcp
# Runtime configuration (DB credentials, model path, ORT_DYLIB_PATH)
# is seeded by the deploy bootstrap script.
EnvironmentFile=/opt/openbrain-mcp/.env
ExecStart=/opt/openbrain-mcp/openbrain-mcp
Restart=on-failure
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=openbrain-mcp

# Security hardening
NoNewPrivileges=true
PrivateTmp=true
ProtectSystem=strict
ProtectHome=true
# NOTE(review): granting the whole deploy dir makes the binary itself
# writable by the service, weakening ProtectSystem=strict; consider
# narrowing to the logs/models/lib subdirectories only.
ReadWritePaths=/opt/openbrain-mcp /opt/openbrain-mcp/logs /opt/openbrain-mcp/models /opt/openbrain-mcp/lib

# Resource limits
LimitNOFILE=65535
MemoryMax=1G

[Install]
WantedBy=multi-user.target
|
||||
188
.gitea/workflows/ci-cd.yaml
Normal file
188
.gitea/workflows/ci-cd.yaml
Normal file
@@ -0,0 +1,188 @@
|
||||
# Gitea Actions workflow: build/test on every push; deploy steps later
# in the job are gated to main/master.
name: OpenBrain MCP Build and Deploy

on:
  push:
    branches:
      - '**'

jobs:
  build-and-deploy:
    runs-on: ubuntu-latest
    env:
      CARGO_TERM_COLOR: always
      # Deployment target; VPS_HOST/VPS_USER come from repository vars.
      VPS_HOST: ${{ vars.VPS_HOST }}
      VPS_USER: ${{ vars.VPS_USER }}
      DEPLOY_DIR: /opt/openbrain-mcp
      SERVICE_NAME: openbrain-mcp
    steps:
      - name: Install prerequisites
        run: |
          set -euxo pipefail
          # Handle both Debian- and Alpine-based runner images.
          if command -v apt-get >/dev/null 2>&1; then
            if command -v sudo >/dev/null 2>&1; then SUDO=sudo; else SUDO=; fi
            $SUDO apt-get update
            $SUDO apt-get install -y --no-install-recommends git ca-certificates curl
          elif command -v apk >/dev/null 2>&1; then
            apk add --no-cache git ca-certificates curl
          fi

      - name: Checkout repository
        run: |
          set -euxo pipefail
          # Manual shallow clone (no actions/checkout on this instance).
          # NOTE(review): the token is embedded in the remote URL and thus
          # stored in .git/config on the runner — confirm the runner is
          # ephemeral before relying on this.
          git clone --depth 1 "https://${{ github.token }}@gitea.ingwaz.work/${{ github.repository }}.git" .
          git fetch origin "${{ github.ref }}"
          git checkout FETCH_HEAD

      - name: Install build dependencies
        run: |
          set -euxo pipefail
          if command -v sudo >/dev/null 2>&1; then SUDO=sudo; else SUDO=; fi
          $SUDO apt-get install -y --no-install-recommends build-essential pkg-config libssl-dev openssh-client

      - name: Install Rust toolchain
        run: |
          set -euxo pipefail
          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable
          . "$HOME/.cargo/env"
          rustc --version
          cargo --version

      - name: CI checks
        run: |
          set -euxo pipefail
          . "$HOME/.cargo/env"
          cargo check
          cargo test --no-run

      - name: Build release
        run: |
          set -euxo pipefail
          . "$HOME/.cargo/env"
          cargo build --release
          test -x target/release/openbrain-mcp
|
||||
|
||||
- name: Setup SSH auth
|
||||
if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/master'
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
: "${VPS_HOST:?Set repository variable VPS_HOST}"
|
||||
: "${VPS_USER:=root}"
|
||||
install -d -m 700 "$HOME/.ssh"
|
||||
printf '%s\n' "${{ secrets.VPS_SSH_KEY }}" > "$HOME/.ssh/deploy_key"
|
||||
chmod 600 "$HOME/.ssh/deploy_key"
|
||||
ssh-keyscan -H "$VPS_HOST" >> "$HOME/.ssh/known_hosts"
|
||||
|
||||
- name: Deploy artifacts
|
||||
if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/master'
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
: "${VPS_HOST:?Set repository variable VPS_HOST}"
|
||||
: "${VPS_USER:=root}"
|
||||
SSH="ssh -i $HOME/.ssh/deploy_key -o IdentitiesOnly=yes"
|
||||
SCP="scp -i $HOME/.ssh/deploy_key -o IdentitiesOnly=yes"
|
||||
|
||||
# Stop service before deploying to avoid "Text file busy" error
|
||||
$SSH "$VPS_USER@$VPS_HOST" "systemctl stop $SERVICE_NAME 2>/dev/null || true"
|
||||
|
||||
$SSH "$VPS_USER@$VPS_HOST" "mkdir -p $DEPLOY_DIR/.gitea $DEPLOY_DIR/models $DEPLOY_DIR/logs $DEPLOY_DIR/lib"
|
||||
|
||||
$SCP target/release/openbrain-mcp "$VPS_USER@$VPS_HOST:$DEPLOY_DIR/openbrain-mcp"
|
||||
$SCP .gitea/openbrain.service "$VPS_USER@$VPS_HOST:/etc/systemd/system/$SERVICE_NAME.service"
|
||||
$SCP .gitea/download-model.sh "$VPS_USER@$VPS_HOST:$DEPLOY_DIR/.gitea/download-model.sh"
|
||||
|
||||
- name: Bootstrap VPS and restart service
|
||||
if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/master'
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
: "${VPS_HOST:?Set repository variable VPS_HOST}"
|
||||
: "${VPS_USER:=root}"
|
||||
SSH="ssh -i $HOME/.ssh/deploy_key -o IdentitiesOnly=yes"
|
||||
|
||||
$SSH "$VPS_USER@$VPS_HOST" "DEPLOY_DIR=$DEPLOY_DIR SERVICE_USER=openbrain SERVICE_GROUP=openbrain ORT_VERSION=1.24.3 bash -s" <<'EOS'
|
||||
set -euo pipefail
|
||||
DEPLOY_DIR="${DEPLOY_DIR:-/opt/openbrain-mcp}"
|
||||
SERVICE_USER="${SERVICE_USER:-openbrain}"
|
||||
SERVICE_GROUP="${SERVICE_GROUP:-openbrain}"
|
||||
ORT_VERSION="${ORT_VERSION:-1.24.3}"
|
||||
|
||||
if command -v apt-get >/dev/null 2>&1; then
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
apt-get update
|
||||
apt-get install -y --no-install-recommends ca-certificates curl tar libssl3
|
||||
fi
|
||||
|
||||
if ! getent group "$SERVICE_GROUP" >/dev/null 2>&1; then
|
||||
groupadd --system "$SERVICE_GROUP"
|
||||
fi
|
||||
if ! id -u "$SERVICE_USER" >/dev/null 2>&1; then
|
||||
useradd --system --gid "$SERVICE_GROUP" --home "$DEPLOY_DIR" --shell /usr/sbin/nologin "$SERVICE_USER"
|
||||
fi
|
||||
|
||||
install -d -m 0755 "$DEPLOY_DIR" "$DEPLOY_DIR/models" "$DEPLOY_DIR/logs" "$DEPLOY_DIR/lib"
|
||||
|
||||
ARCH="$(uname -m)"
|
||||
case "$ARCH" in
|
||||
x86_64) ORT_ARCH="x64" ;;
|
||||
aarch64|arm64) ORT_ARCH="aarch64" ;;
|
||||
*) echo "Unsupported arch: $ARCH"; exit 1 ;;
|
||||
esac
|
||||
|
||||
if [[ ! -f "$DEPLOY_DIR/lib/libonnxruntime.so" ]]; then
|
||||
TMP_DIR="$(mktemp -d)"
|
||||
ORT_TGZ="onnxruntime-linux-${ORT_ARCH}-${ORT_VERSION}.tgz"
|
||||
ORT_URL="https://github.com/microsoft/onnxruntime/releases/download/v${ORT_VERSION}/${ORT_TGZ}"
|
||||
curl -fL "$ORT_URL" -o "$TMP_DIR/$ORT_TGZ"
|
||||
tar -xzf "$TMP_DIR/$ORT_TGZ" -C "$TMP_DIR"
|
||||
ORT_ROOT="$TMP_DIR/onnxruntime-linux-${ORT_ARCH}-${ORT_VERSION}"
|
||||
cp "$ORT_ROOT/lib/libonnxruntime.so" "$DEPLOY_DIR/lib/libonnxruntime.so"
|
||||
cp "$ORT_ROOT/lib/libonnxruntime.so.${ORT_VERSION}" "$DEPLOY_DIR/lib/libonnxruntime.so.${ORT_VERSION}" || true
|
||||
rm -rf "$TMP_DIR"
|
||||
fi
|
||||
|
||||
ENV_FILE="$DEPLOY_DIR/.env"
|
||||
if [[ ! -f "$ENV_FILE" ]]; then
|
||||
if [[ -f "$DEPLOY_DIR/.env.example" ]]; then cp "$DEPLOY_DIR/.env.example" "$ENV_FILE"; else touch "$ENV_FILE"; fi
|
||||
fi
|
||||
|
||||
upsert_env() {
|
||||
local key="$1"
|
||||
local value="$2"
|
||||
if grep -qE "^${key}=" "$ENV_FILE"; then
|
||||
sed -i "s|^${key}=.*|${key}=${value}|" "$ENV_FILE"
|
||||
else
|
||||
printf '%s=%s\n' "$key" "$value" >> "$ENV_FILE"
|
||||
fi
|
||||
}
|
||||
|
||||
upsert_env "OPENBRAIN__EMBEDDING__MODEL_PATH" "$DEPLOY_DIR/models/all-MiniLM-L6-v2"
|
||||
upsert_env "ORT_DYLIB_PATH" "$DEPLOY_DIR/lib/libonnxruntime.so"
|
||||
upsert_env "OPENBRAIN__SERVER__HOST" "0.0.0.0"
|
||||
|
||||
chmod +x "$DEPLOY_DIR/openbrain-mcp" "$DEPLOY_DIR/.gitea/download-model.sh"
|
||||
chown -R "$SERVICE_USER:$SERVICE_GROUP" "$DEPLOY_DIR"
|
||||
EOS
|
||||
|
||||
$SSH "$VPS_USER@$VPS_HOST" "DEPLOY_DIR=$DEPLOY_DIR bash $DEPLOY_DIR/.gitea/download-model.sh"
|
||||
$SSH "$VPS_USER@$VPS_HOST" "cd $DEPLOY_DIR && ./openbrain-mcp migrate"
|
||||
|
||||
$SSH "$VPS_USER@$VPS_HOST" "systemctl daemon-reload"
|
||||
$SSH "$VPS_USER@$VPS_HOST" "systemctl enable $SERVICE_NAME"
|
||||
$SSH "$VPS_USER@$VPS_HOST" "systemctl restart $SERVICE_NAME"
|
||||
|
||||
- name: Verify deployment
|
||||
if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/master'
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
: "${VPS_HOST:?Set repository variable VPS_HOST}"
|
||||
: "${VPS_USER:=root}"
|
||||
SSH="ssh -i $HOME/.ssh/deploy_key -o IdentitiesOnly=yes"
|
||||
|
||||
sleep 5
|
||||
$SSH "$VPS_USER@$VPS_HOST" "systemctl status $SERVICE_NAME --no-pager || journalctl -u $SERVICE_NAME --no-pager -n 80"
|
||||
curl -fsS "http://$VPS_HOST:3100/health"
|
||||
curl -fsS "http://$VPS_HOST:3100/ready"
|
||||
|
||||
- name: Cleanup SSH key
|
||||
if: always()
|
||||
run: |
|
||||
rm -f "$HOME/.ssh/deploy_key"
|
||||
Reference in New Issue
Block a user