Add VPS deployment workflow
This commit is contained in:
parent
280247e1e5
commit
df7e4de7f9
5 changed files with 238 additions and 4 deletions
22
.gitea/workflows/deploy.yml
Normal file
22
.gitea/workflows/deploy.yml
Normal file
|
|
@@ -0,0 +1,22 @@
|
||||||
|
# Gitea/Forgejo Actions workflow: deploy the Docker Compose stack on the VPS.
name: Deploy to BrowserUse VPS

# Trigger on pushes to the deployment branch, or manually from the UI.
on:
  push:
    branches:
      - feature/api-for-subagent
  workflow_dispatch:

jobs:
  deploy:
    # Requires a runner registered on the VPS itself with the `deploy-vps` label.
    runs-on: deploy-vps
    env:
      # Server-local checkout that the deploy script operates on.
      DEPLOY_DIR: /home/BrowserUse-vps/apps/BrowserUse_and_ComputerUse_skills
      DEPLOY_BRANCH: feature/api-for-subagent
      HEALTH_URL: http://127.0.0.1:8088/health
    steps:
      - name: Deploy Docker Compose stack
        shell: bash
        run: |
          set -Eeuo pipefail
          cd "$DEPLOY_DIR"
          bash scripts/deploy_vps.sh
14
docker-compose.vps.yml
Normal file
14
docker-compose.vps.yml
Normal file
|
|
@@ -0,0 +1,14 @@
|
||||||
|
# VPS overlay for docker-compose.yml: attach the public-facing services to
# the pre-existing external `lambdalab_frontend` network so the host's Caddy
# can reach them via Docker DNS.
services:
  browser-api:
    networks:
      - browser-net
      - lambdalab_frontend

  browser-view-proxy:
    networks:
      - browser-net
      - lambdalab_frontend

networks:
  # Created and owned by the /opt/lambdalab stack; never managed here.
  lambdalab_frontend:
    external: true
|
|
@@ -12,8 +12,8 @@ services:
|
||||||
- BROWSER_USE_RPC_HOST=0.0.0.0
|
- BROWSER_USE_RPC_HOST=0.0.0.0
|
||||||
- BROWSER_USE_RPC_PORT=8787
|
- BROWSER_USE_RPC_PORT=8787
|
||||||
ports:
|
ports:
|
||||||
- "6080:6080"
|
- "${BROWSER_NOVNC_PUBLISH:-6080:6080}"
|
||||||
- "9222:9222"
|
- "${BROWSER_CDP_PUBLISH:-9222:9222}"
|
||||||
networks:
|
networks:
|
||||||
browser-net:
|
browser-net:
|
||||||
aliases:
|
aliases:
|
||||||
|
|
@@ -54,7 +54,7 @@ services:
|
||||||
browser:
|
browser:
|
||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
ports:
|
ports:
|
||||||
- "8088:8088"
|
- "${BROWSER_API_PUBLISH:-8088:8088}"
|
||||||
volumes:
|
volumes:
|
||||||
- /var/run/docker.sock:/var/run/docker.sock
|
- /var/run/docker.sock:/var/run/docker.sock
|
||||||
healthcheck:
|
healthcheck:
|
||||||
|
|
@@ -80,7 +80,7 @@ services:
|
||||||
browser:
|
browser:
|
||||||
condition: service_healthy
|
condition: service_healthy
|
||||||
ports:
|
ports:
|
||||||
- "6081:8080"
|
- "${BROWSER_VIEW_PROXY_PUBLISH:-6081:8080}"
|
||||||
restart: always
|
restart: always
|
||||||
networks:
|
networks:
|
||||||
- browser-net
|
- browser-net
|
||||||
|
|
|
||||||
133
docs/vps-deploy.md
Normal file
133
docs/vps-deploy.md
Normal file
|
|
@@ -0,0 +1,133 @@
|
||||||
|
# BrowserUse VPS Deployment
|
||||||
|
|
||||||
|
This project deploys to `BrowserUse-vps@lambda.coredump.ru` with a Gitea/Forgejo Actions runner installed on the VPS.
|
||||||
|
|
||||||
|
The server already has a root-owned `/opt/lambdalab` stack with Caddy on ports `80/443`. Keep this browser service as a separate app under the deploy user home directory, then attach the public-facing containers to the existing `lambdalab_frontend` Docker network through `docker-compose.vps.yml`.
|
||||||
|
|
||||||
|
## SSH Access
|
||||||
|
|
||||||
|
Add the public SSH key to the VPS user:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
ssh BrowserUse-vps@lambda.coredump.ru
|
||||||
|
mkdir -p ~/.ssh
|
||||||
|
chmod 700 ~/.ssh
|
||||||
|
printf '%s\n' '<ssh-ed25519 public key>' >> ~/.ssh/authorized_keys
|
||||||
|
chmod 600 ~/.ssh/authorized_keys
|
||||||
|
```
|
||||||
|
|
||||||
|
The fingerprint `SHA256:/XC5ifPX8j+uRyp0Yw2zAl5nteWc3YcHeVHfCG+rhP4` is not enough by itself. `authorized_keys` needs the full public key line that starts with `ssh-ed25519`.
|
||||||
|
|
||||||
|
## Initial Server Checkout
|
||||||
|
|
||||||
|
Run once on the VPS:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
mkdir -p ~/apps
|
||||||
|
cd ~/apps
|
||||||
|
git clone -b feature/api-for-subagent https://git.lambda.coredump.ru/APEX/BrowserUse_and_ComputerUse_skills.git
|
||||||
|
cd BrowserUse_and_ComputerUse_skills
|
||||||
|
```
|
||||||
|
|
||||||
|
Create a server-local `.env` file in the checkout. It is intentionally not committed:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
OPENAI_API_KEY=...
|
||||||
|
OPENAI_BASE_URL=...
|
||||||
|
MODEL_DEFAULT=qwen3.5-122b
|
||||||
|
BROWSER_VIEW_BASE_URL=https://browser-view.lambda.coredump.ru
|
||||||
|
BROWSER_API_PUBLISH=127.0.0.1:8088:8088
|
||||||
|
BROWSER_VIEW_PROXY_PUBLISH=127.0.0.1:6081:8080
|
||||||
|
BROWSER_NOVNC_PUBLISH=127.0.0.1:6080:6080
|
||||||
|
BROWSER_CDP_PUBLISH=127.0.0.1:9222:9222
|
||||||
|
```
|
||||||
|
|
||||||
|
Then run the first deploy manually:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
bash scripts/deploy_vps.sh
|
||||||
|
curl -fsS http://127.0.0.1:8088/health
|
||||||
|
```
|
||||||
|
|
||||||
|
The deploy script uses both Compose files by default:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
docker-compose.yml:docker-compose.vps.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
`docker-compose.vps.yml` connects `browser-api` and `browser-view-proxy` to the existing external `lambdalab_frontend` network so Caddy can reach them by Docker DNS.
|
||||||
|
|
||||||
|
## Domain Binding
|
||||||
|
|
||||||
|
The active Caddy config is root-owned at:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
/opt/lambdalab/caddy/Caddyfile
|
||||||
|
```
|
||||||
|
|
||||||
|
Add these vhosts to that file from an admin/root account:
|
||||||
|
|
||||||
|
```caddyfile
|
||||||
|
browser-api.lambda.coredump.ru {
|
||||||
|
reverse_proxy browser-use-api:8088
|
||||||
|
}
|
||||||
|
|
||||||
|
browser-view.lambda.coredump.ru {
|
||||||
|
reverse_proxy browser-use-view-proxy:8080
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Then reload the existing Caddy container from `/opt/lambdalab`:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
cd /opt/lambdalab
|
||||||
|
docker compose exec caddy caddy reload --config /etc/caddy/Caddyfile
|
||||||
|
```
|
||||||
|
|
||||||
|
DNS must point both subdomains to the VPS public IP `155.212.185.120`. At inspection time, `lambda.coredump.ru` resolved to that IP, while `browser-api.lambda.coredump.ru` and `browser-view.lambda.coredump.ru` did not resolve yet.
|
||||||
|
|
||||||
|
## Gitea/Forgejo Runner
|
||||||
|
|
||||||
|
Install `act_runner` as the `BrowserUse-vps` user and register it with the repository, organization, or instance runner token:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
mkdir -p ~/act_runner
|
||||||
|
cd ~/act_runner
|
||||||
|
./act_runner generate-config > config.yaml
|
||||||
|
./act_runner --config config.yaml register \
|
||||||
|
--no-interactive \
|
||||||
|
--instance https://git.lambda.coredump.ru \
|
||||||
|
--token '<runner-registration-token>' \
|
||||||
|
--name BrowserUse-vps \
|
||||||
|
--labels deploy-vps:host
|
||||||
|
```
|
||||||
|
|
||||||
|
Start it under the same user:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
cd ~/act_runner
|
||||||
|
nohup ./act_runner daemon --config config.yaml > act_runner.log 2>&1 &
|
||||||
|
```
|
||||||
|
|
||||||
|
Because this account has `sudo: no`, a system-wide service cannot be installed from this user. If an admin enables a user-level systemd service for this account, run the same daemon command from that service instead of `nohup`.
|
||||||
|
|
||||||
|
## CI/CD Behavior
|
||||||
|
|
||||||
|
The workflow lives at `.gitea/workflows/deploy.yml`.
|
||||||
|
|
||||||
|
It runs on:
|
||||||
|
|
||||||
|
- push to `feature/api-for-subagent`
|
||||||
|
- manual `workflow_dispatch`
|
||||||
|
|
||||||
|
The job expects a runner label named `deploy-vps`, registered as `deploy-vps:host`. It enters:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
/home/BrowserUse-vps/apps/BrowserUse_and_ComputerUse_skills
|
||||||
|
```
|
||||||
|
|
||||||
|
Then it fetches `origin/feature/api-for-subagent`, resets the tracked checkout to that commit, runs Docker Compose, and verifies:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl -fsS http://127.0.0.1:8088/health
|
||||||
|
```
|
||||||
65
scripts/deploy_vps.sh
Executable file
65
scripts/deploy_vps.sh
Executable file
|
|
@@ -0,0 +1,65 @@
|
||||||
|
#!/usr/bin/env bash
# Deploy the BrowserUse stack on the VPS:
# fetch and hard-reset the deploy branch, rebuild and restart the Docker
# Compose services, then poll the API health endpoint until it responds.
#
# Environment (all optional; defaults shown):
#   DEPLOY_BRANCH   branch to deploy            (feature/api-for-subagent)
#   HEALTH_URL      health endpoint to poll     (http://127.0.0.1:8088/health)
#   COMPOSE_FILES   colon-separated file list   (docker-compose.yml:docker-compose.vps.yml)
#   HEALTH_RETRIES  health poll attempts        (30)
#   HEALTH_DELAY    seconds between attempts    (2)
#
# Exits 0 once the health check passes; exits 1 (with browser-api logs
# dumped for diagnosis) if the API never becomes healthy.
set -Eeuo pipefail

DEPLOY_BRANCH="${DEPLOY_BRANCH:-feature/api-for-subagent}"
HEALTH_URL="${HEALTH_URL:-http://127.0.0.1:8088/health}"
COMPOSE_FILES="${COMPOSE_FILES:-docker-compose.yml:docker-compose.vps.yml}"
HEALTH_RETRIES="${HEALTH_RETRIES:-30}"
HEALTH_DELAY="${HEALTH_DELAY:-2}"

# Log a progress message to stdout.
log() {
  printf '[deploy] %s\n' "$*"
}

# Log a fatal error to stderr and abort the deploy.
fail() {
  printf '[deploy] fatal: %s\n' "$*" >&2
  exit 1
}

command -v git >/dev/null 2>&1 || fail "git is not installed"
command -v docker >/dev/null 2>&1 || fail "docker is not installed"
command -v curl >/dev/null 2>&1 || fail "curl is not installed"
docker compose version >/dev/null 2>&1 || fail "docker compose plugin is not available"

# Ask git itself instead of testing for a .git directory: in worktrees and
# submodules .git is a file, so `[ -d .git ]` would wrongly reject them.
git rev-parse --is-inside-work-tree >/dev/null 2>&1 || fail "current directory is not a git checkout"
[ -f docker-compose.yml ] || fail "docker-compose.yml not found in current directory"
[ -f .env ] || fail ".env is missing; create it on the VPS with OPENAI_API_KEY and related runtime settings"

# Expand the colon-separated COMPOSE_FILES list into `-f <file>` arguments,
# verifying each file exists before touching the running stack.
compose_args=()
IFS=':' read -r -a compose_files <<< "$COMPOSE_FILES"
for compose_file in "${compose_files[@]}"; do
  [ -f "$compose_file" ] || fail "compose file not found: ${compose_file}"
  compose_args+=("-f" "$compose_file")
done

log "fetching origin/${DEPLOY_BRANCH}"
git fetch --prune origin "+refs/heads/${DEPLOY_BRANCH}:refs/remotes/origin/${DEPLOY_BRANCH}"

log "checking out ${DEPLOY_BRANCH}"
git checkout -B "$DEPLOY_BRANCH" "origin/$DEPLOY_BRANCH"
# Belt and braces: force the working tree to exactly match the remote tip.
git reset --hard "origin/$DEPLOY_BRANCH"

log "building Docker Compose services"
docker compose "${compose_args[@]}" build

log "starting Docker Compose stack"
docker compose "${compose_args[@]}" up -d --remove-orphans

log "current service state"
docker compose "${compose_args[@]}" ps

log "waiting for API health at ${HEALTH_URL}"
for (( attempt = 1; attempt <= HEALTH_RETRIES; attempt++ )); do
  if curl -fsS "$HEALTH_URL" >/dev/null; then
    log "API is healthy"
    exit 0
  fi
  log "health check failed, retry ${attempt}/${HEALTH_RETRIES}"
  # No point sleeping after the final failed attempt.
  if (( attempt < HEALTH_RETRIES )); then
    sleep "$HEALTH_DELAY"
  fi
done

log "API did not become healthy; browser-api logs follow"
docker compose "${compose_args[@]}" logs --tail=120 browser-api || true
fail "health check failed: ${HEALTH_URL}"
Loading…
Add table
Add a link
Reference in a new issue