feat: ship blog platform admin and deploy stack
This commit is contained in:
83
deploy/caddy/Caddyfile.tohka.example
Normal file
83
deploy/caddy/Caddyfile.tohka.example
Normal file
@@ -0,0 +1,83 @@
|
||||
# termi-blog / tohka Caddyfile 模板
|
||||
#
|
||||
# 说明:
|
||||
# - 这是“参考模板”,不是已部署配置
|
||||
# - 适合 tohka 上“宿主机大 Caddyfile -> localhost 端口 -> Docker 容器”的模式
|
||||
# - 默认假设:
|
||||
# - frontend 绑定到 localhost:4321
|
||||
# - admin 绑定到 localhost:4322
|
||||
# - backend 绑定到 localhost:5150
|
||||
|
||||
|
||||
# -----------------------------
|
||||
# 方案 A:推荐,子域名分流
|
||||
# -----------------------------
|
||||
|
||||
blog.init.cool {
|
||||
import common
|
||||
reverse_proxy http://localhost:4321
|
||||
}
|
||||
|
||||
admin.blog.init.cool {
|
||||
import common
|
||||
# 推荐:admin 域名整体走 TinyAuth / Pocket ID 保护
|
||||
# tohka 现成片段会转发:
|
||||
# Remote-User / Remote-Name / Remote-Email / Remote-Groups
|
||||
import tinyauth
|
||||
|
||||
# admin 静态资源与后台 API 都走同一受保护域名
|
||||
# 如果 backend 开启了 TERMI_ADMIN_PROXY_SHARED_SECRET,
|
||||
# 记得在转发 /api 到 backend 时补一个共享密钥头,避免直接伪造 Remote-User。
|
||||
handle /api/* {
|
||||
reverse_proxy http://localhost:5150 {
|
||||
header_up X-Termi-Proxy-Secret {$TERMI_ADMIN_PROXY_SHARED_SECRET}
|
||||
}
|
||||
}
|
||||
|
||||
handle {
|
||||
reverse_proxy http://localhost:4322
|
||||
}
|
||||
}
|
||||
|
||||
# 前台公开 API 可单独暴露(评论 / 搜索 / AI 问答等)
|
||||
api.blog.init.cool {
|
||||
import common
|
||||
reverse_proxy http://localhost:5150
|
||||
}
|
||||
|
||||
|
||||
# -----------------------------
|
||||
# 方案 B:单域名 + 路径分流
|
||||
# -----------------------------
|
||||
#
|
||||
# 注意:
|
||||
# 1. /admin 方案要求 admin 构建时设置:
|
||||
# VITE_ADMIN_BASENAME=/admin
|
||||
# 2. /admin 使用 handle_path,进入 admin 容器前会去掉 /admin 前缀
|
||||
# 3. /api 不要用 handle_path;backend 当前路由本身就包含 /api 前缀
|
||||
# 4. 如果 /admin 也要调用受保护 API,需要同时把 /api 接到 backend 并加 tinyauth
|
||||
|
||||
init.cool {
|
||||
import common
|
||||
|
||||
handle_path /admin* {
|
||||
import tinyauth
|
||||
reverse_proxy http://localhost:4322
|
||||
}
|
||||
|
||||
handle /api* {
|
||||
import tinyauth
|
||||
reverse_proxy http://localhost:5150 {
|
||||
header_up X-Termi-Proxy-Secret {$TERMI_ADMIN_PROXY_SHARED_SECRET}
|
||||
}
|
||||
}
|
||||
|
||||
handle {
|
||||
reverse_proxy http://localhost:4321
|
||||
}
|
||||
}
|
||||
|
||||
# 部署时 backend 记得配套:
|
||||
# TERMI_ADMIN_TRUST_PROXY_AUTH=true
|
||||
# TERMI_ADMIN_LOCAL_LOGIN_ENABLED=false
|
||||
# TERMI_ADMIN_PROXY_SHARED_SECRET=<随机长字符串>
|
||||
43
deploy/caddy/Caddyfile.tohka.production.example
Normal file
43
deploy/caddy/Caddyfile.tohka.production.example
Normal file
@@ -0,0 +1,43 @@
|
||||
# 直接粘到 tohka 宿主机大 Caddyfile 里的推荐块
|
||||
# 前提:
|
||||
# 1. 已存在 import common / import tinyauth 片段
|
||||
# 2. docker compose 使用 compose.tohka.override.yml,把容器端口绑到 127.0.0.1
|
||||
# 3. 环境里已设置:
|
||||
# TERMI_ADMIN_PROXY_SHARED_SECRET=<随机长字符串>
|
||||
|
||||
blog.init.cool {
|
||||
import common
|
||||
reverse_proxy http://127.0.0.1:4321
|
||||
}
|
||||
|
||||
admin.blog.init.cool {
|
||||
import common
|
||||
import tinyauth
|
||||
|
||||
# 后台 API:受 TinyAuth 保护,并附带后端共享密钥
|
||||
handle /api/* {
|
||||
reverse_proxy http://127.0.0.1:5150 {
|
||||
header_up X-Termi-Proxy-Secret {$TERMI_ADMIN_PROXY_SHARED_SECRET}
|
||||
}
|
||||
}
|
||||
|
||||
# 后台静态资源 / SPA
|
||||
handle {
|
||||
reverse_proxy http://127.0.0.1:4322
|
||||
}
|
||||
}
|
||||
|
||||
# 前台公开 API(评论 / 搜索 / AI 问答 / 订阅管理)
|
||||
api.blog.init.cool {
|
||||
import common
|
||||
reverse_proxy http://127.0.0.1:5150
|
||||
}
|
||||
|
||||
# 对应 deploy/docker/.env 关键项:
|
||||
# APP_BASE_URL=https://admin.blog.init.cool
|
||||
# PUBLIC_API_BASE_URL=https://api.blog.init.cool
|
||||
# ADMIN_API_BASE_URL=https://admin.blog.init.cool
|
||||
# ADMIN_FRONTEND_BASE_URL=https://blog.init.cool
|
||||
# TERMI_ADMIN_TRUST_PROXY_AUTH=true
|
||||
# TERMI_ADMIN_LOCAL_LOGIN_ENABLED=false
|
||||
# TERMI_ADMIN_PROXY_SHARED_SECRET=<随机长字符串>
|
||||
62
deploy/docker/.env.example
Normal file
62
deploy/docker/.env.example
Normal file
@@ -0,0 +1,62 @@
|
||||
# Compose runtime variables (package image deployment)
|
||||
BACKEND_PORT=5150
|
||||
FRONTEND_PORT=4321
|
||||
ADMIN_PORT=4322
|
||||
|
||||
# frontend SSR 服务端访问 backend 用这个内部地址(compose 默认可直接使用)
|
||||
INTERNAL_API_BASE_URL=http://backend:5150/api
|
||||
|
||||
# 浏览器里评论 / AI 问答 / 搜索等请求优先读取这个公开 API 地址。
|
||||
# 如果留空,frontend 会在生产环境按“当前访问主机 + :5150/api”回退。
|
||||
# 走反向代理时建议显式设置,例如:
|
||||
# PUBLIC_API_BASE_URL=https://your-frontend.example.com/api
|
||||
PUBLIC_API_BASE_URL=
|
||||
|
||||
# 前台 /_img 图片优化端点默认只放行“当前站点同域”图片。
|
||||
# 如果你的文章封面或对象存储图片来自额外 CDN / R2 公网域名,
|
||||
# 可以在这里填逗号分隔的 host 列表,例如:
|
||||
# PUBLIC_IMAGE_ALLOWED_HOSTS=cdn.example.com,pub-xxxx.r2.dev
|
||||
PUBLIC_IMAGE_ALLOWED_HOSTS=
|
||||
|
||||
# admin 浏览器请求 backend API 优先读取这个公开地址。
|
||||
# 如果留空,admin 会在生产环境按“当前访问主机 + :5150”回退。
|
||||
# 如果你采用推荐方案(admin 域名同域转发 /api 到 backend),
|
||||
# 建议直接填后台域名 origin,例如:
|
||||
# ADMIN_API_BASE_URL=https://admin.example.com
|
||||
ADMIN_API_BASE_URL=
|
||||
|
||||
# admin 页面里的“打开前台 / AI 问答 / 文章预览”链接优先读取这个运行时变量。
|
||||
# 如果你不是直接把前台暴露在 http://<host>:4321,而是走独立域名 / HTTPS / 反向代理,
|
||||
# 建议设置为正式前台地址,例如:
|
||||
# ADMIN_FRONTEND_BASE_URL=https://your-frontend.example.com
|
||||
ADMIN_FRONTEND_BASE_URL=
|
||||
|
||||
APP_BASE_URL=http://localhost:5150
|
||||
DATABASE_URL=postgres://<user>:<password>@<external-db-host>:5432/termi_api
|
||||
REDIS_URL=redis://<external-redis-host>:6379
|
||||
JWT_SECRET=change-me-before-production
|
||||
JWT_EXPIRATION_SECONDS=604800
|
||||
RUST_LOG=info
|
||||
|
||||
# 邮件确认 / 通知投递需要 SMTP
|
||||
SMTP_ENABLE=false
|
||||
SMTP_HOST=localhost
|
||||
SMTP_PORT=1025
|
||||
SMTP_SECURE=false
|
||||
SMTP_USER=
|
||||
SMTP_PASSWORD=
|
||||
SMTP_HELLO_NAME=
|
||||
|
||||
# 启用 TinyAuth / Pocket ID / Caddy forward_auth 时建议:
|
||||
# - TERMI_ADMIN_TRUST_PROXY_AUTH=true
|
||||
# - TERMI_ADMIN_LOCAL_LOGIN_ENABLED=false
|
||||
# - 额外配置一个共享密钥,并在 Caddy 转发 /api 到 backend 时附带:
|
||||
# X-Termi-Proxy-Secret: {$TERMI_ADMIN_PROXY_SHARED_SECRET}
|
||||
TERMI_ADMIN_TRUST_PROXY_AUTH=false
|
||||
TERMI_ADMIN_LOCAL_LOGIN_ENABLED=true
|
||||
TERMI_ADMIN_PROXY_SHARED_SECRET=
|
||||
|
||||
# Optional: override package tags if needed
|
||||
BACKEND_IMAGE=git.init.cool/cool/termi-astro-backend:latest
|
||||
FRONTEND_IMAGE=git.init.cool/cool/termi-astro-frontend:latest
|
||||
ADMIN_IMAGE=git.init.cool/cool/termi-astro-admin:latest
|
||||
224
deploy/docker/ARCHITECTURE.md
Normal file
224
deploy/docker/ARCHITECTURE.md
Normal file
@@ -0,0 +1,224 @@
|
||||
# Docker / 反代架构说明
|
||||
|
||||
本文记录当前项目在 `tohka` 这类宿主机上的推荐部署结构,以及为什么会同时出现:
|
||||
|
||||
- 宿主机层的 **Caddy**
|
||||
- `admin` 容器内的 **Nginx**
|
||||
|
||||
## 1. 总体分层
|
||||
|
||||
推荐生产结构:
|
||||
|
||||
```text
|
||||
Internet
|
||||
-> Host Caddy (:80 / :443)
|
||||
-> frontend container (Astro SSR / Node :4321)
|
||||
-> admin container (Nginx :80, 静态 SPA)
|
||||
-> backend container (Loco.rs API :5150)
|
||||
-> backend-worker container (Loco.rs worker / Redis queue)
|
||||
```
|
||||
|
||||
职责划分:
|
||||
|
||||
- **Host Caddy**
|
||||
- 统一接收公网流量
|
||||
- 处理域名、HTTPS、证书续签
|
||||
- 反向代理到各个内部容器
|
||||
- **frontend**
|
||||
- Astro SSR(Node) 应用
|
||||
- 不是纯静态站,所以容器内直接运行 Node 服务即可
|
||||
- **admin**
|
||||
- React/Vite 打包后的纯静态 SPA
|
||||
- 容器内使用 Nginx 提供静态文件
|
||||
- 生产推荐前面接 TinyAuth / Pocket ID 做 SSO
|
||||
- **backend**
|
||||
- API、后台鉴权、审计、版本历史、订阅投递
|
||||
- **backend-worker**
|
||||
- 消费 Redis 队列
|
||||
- 负责通知异步投递、失败重试、digest 任务触发后的实际发送
|
||||
|
||||
## 2. 为什么上层已经有 Caddy,admin 容器里还要 Nginx?
|
||||
|
||||
这两层并不冲突,职责不同:
|
||||
|
||||
- **Caddy** 是入口网关
|
||||
- **Nginx** 是 admin 容器内部的静态文件服务器
|
||||
|
||||
也就是说:
|
||||
|
||||
```text
|
||||
Browser
|
||||
-> Caddy
|
||||
-> admin nginx
|
||||
-> /usr/share/nginx/html
|
||||
```
|
||||
|
||||
这样做的好处:
|
||||
|
||||
- admin 镜像本身自带可用的静态文件服务能力
|
||||
- 宿主机层仍然保留统一的域名 / HTTPS / 路由管理
|
||||
- admin 作为独立前端,可以单独构建、单独发布
|
||||
|
||||
## 2.2 为什么现在多了 `backend-worker`?
|
||||
|
||||
因为当前通知系统已经改成:
|
||||
|
||||
```text
|
||||
backend (web)
|
||||
-> 写入 notification_deliveries
|
||||
-> enqueue 到 Redis
|
||||
backend-worker
|
||||
-> 消费队列
|
||||
-> 发送 email / webhook / discord / telegram / ntfy
|
||||
```
|
||||
|
||||
如果只启动 `backend` 而没有 `backend-worker`,通知会入队但没人消费。
|
||||
|
||||
## 2.3 推荐的后台认证链路
|
||||
|
||||
当前最推荐:
|
||||
|
||||
```text
|
||||
Browser
|
||||
-> Caddy (import tinyauth)
|
||||
-> TinyAuth
|
||||
-> Pocket ID (OIDC)
|
||||
-> admin nginx / backend API
|
||||
```
|
||||
|
||||
关键点:
|
||||
|
||||
- admin 页面和它调用的 `/api/*` 建议走 **同一个受保护后台域名**
|
||||
- Caddy 使用 `forward_auth`
|
||||
- backend 开启 `TERMI_ADMIN_TRUST_PROXY_AUTH=true`
|
||||
- 生产推荐再配 `TERMI_ADMIN_PROXY_SHARED_SECRET=<随机长字符串>`
|
||||
- Caddy 在 `/api/*` 反代到 backend 时补 `X-Termi-Proxy-Secret`
|
||||
- backend 读取 TinyAuth 转发的 `Remote-User / Remote-Email / Remote-Groups`
|
||||
- 本地开发可以保留 `TERMI_ADMIN_LOCAL_LOGIN_ENABLED=true`
|
||||
|
||||
## 3. 为什么 frontend 不使用同样的 Nginx 模式?
|
||||
|
||||
因为当前 `frontend` 是 **Astro SSR**:
|
||||
|
||||
- `output: 'server'`
|
||||
- `@astrojs/node` standalone
|
||||
|
||||
它需要在请求期执行服务端逻辑,因此更适合:
|
||||
|
||||
```text
|
||||
Caddy -> frontend Node server
|
||||
```
|
||||
|
||||
而不是先打成纯静态文件再由 Nginx 托管。
|
||||
|
||||
## 4. admin 容器内 Nginx 当前负责什么?
|
||||
|
||||
当前 `admin/nginx.conf` 主要负责:
|
||||
|
||||
- SPA fallback:`try_files ... /index.html`
|
||||
- `assets/` 长缓存(hash 资源可 `immutable`)
|
||||
- `index.html` / `runtime-config.js` 禁缓存,避免配置或入口文件陈旧
|
||||
- `gzip` 压缩
|
||||
- 基础安全响应头
|
||||
- `/healthz` 健康检查入口
|
||||
|
||||
### 为什么 `runtime-config.js` 要禁缓存?
|
||||
|
||||
因为 admin 现在支持运行时环境变量注入,例如:
|
||||
|
||||
- `ADMIN_API_BASE_URL`
|
||||
- `ADMIN_FRONTEND_BASE_URL`
|
||||
|
||||
容器启动时会生成 `runtime-config.js`。
|
||||
如果它被强缓存,改完环境变量重启容器后,浏览器可能仍然读到旧地址。
|
||||
|
||||
## 5. 为什么没有在 admin 容器里启用 Brotli?
|
||||
|
||||
当前基础镜像是官方 `nginx:alpine`。
|
||||
这个镜像默认不一定带 Brotli 模块,所以这里先启用通用的 `gzip`。
|
||||
|
||||
如果后续确实需要 Brotli,有两个常见做法:
|
||||
|
||||
- 让宿主机层的 Caddy 统一负责压缩
|
||||
- 改用带 Brotli 模块的自定义 Nginx 镜像
|
||||
|
||||
对当前项目而言,优先让 **宿主机 Caddy 做统一公网入口**,admin 容器内部只负责稳妥地提供静态文件,是更简单的方案。
|
||||
|
||||
## 6. 推荐的 tohka 思路
|
||||
|
||||
如果 `tohka` 上已经有一个统一的大 Caddyfile,推荐继续保持:
|
||||
|
||||
- Caddy 统一暴露 `80/443`
|
||||
- `frontend/admin/backend` 只走内网端口
|
||||
- 不把数据库 / Redis 直接暴露到公网
|
||||
- backend 如果启用了代理 SSO,不要再把 `:5150` 直接开放给公网
|
||||
|
||||
仓库里已经额外提供:
|
||||
|
||||
- `deploy/docker/compose.tohka.override.yml`
|
||||
|
||||
用它叠加 `compose.package.yml` 后,会把:
|
||||
|
||||
- `frontend:4321`
|
||||
- `admin:4322`
|
||||
- `backend:5150`
|
||||
|
||||
都只绑定到 `127.0.0.1`
|
||||
|
||||
## 7. 当前和配置相关的关键文件
|
||||
|
||||
- 宿主机入口反代:`tohka` 上的大 Caddyfile
|
||||
- admin 静态服务:`admin/nginx.conf`
|
||||
- admin 镜像:`admin/Dockerfile`
|
||||
- Caddy 参考模板:`deploy/caddy/Caddyfile.tohka.example`
|
||||
- compose 示例:`deploy/docker/compose.package.yml`
|
||||
- tohka override:`deploy/docker/compose.tohka.override.yml`
|
||||
- OIDC / Pocket ID 落地:`deploy/docker/TOHKA_POCKET_ID.md`
|
||||
- 运行时环境示例:`deploy/docker/.env.example`
|
||||
|
||||
## 8. Caddy 路由推荐
|
||||
|
||||
默认更推荐:
|
||||
|
||||
- `blog.init.cool` -> frontend
|
||||
- `admin.blog.init.cool` -> admin + backend(`/api/*`)
|
||||
- `api.blog.init.cool` -> backend
|
||||
|
||||
这样最省心,也最不容易碰到路径前缀、资源基路径、Cookie Path 等问题。
|
||||
|
||||
如果一定要用:
|
||||
|
||||
- `init.cool/admin`
|
||||
- `init.cool/api`
|
||||
|
||||
也可以,但 `admin` 需要在构建时设置:
|
||||
|
||||
- `VITE_ADMIN_BASENAME=/admin`
|
||||
|
||||
对应模板见:
|
||||
|
||||
- `deploy/caddy/Caddyfile.tohka.example`
|
||||
|
||||
## 9. 备份 / 恢复入口
|
||||
|
||||
当前仓库内已经补了:
|
||||
|
||||
- `deploy/scripts/backup/backup-postgres.sh`
|
||||
- `deploy/scripts/backup/backup-markdown.sh`
|
||||
- `deploy/scripts/backup/backup-media.sh`
|
||||
- `deploy/scripts/backup/restore-postgres.sh`
|
||||
- `deploy/scripts/backup/restore-markdown.sh`
|
||||
- `deploy/scripts/backup/restore-media.sh`
|
||||
- `deploy/docker/BACKUP_AND_RECOVERY.md`
|
||||
|
||||
建议把这些接进生产上的 cron / systemd timer,而不是只停留在仓库里。
|
||||
|
||||
## 10. 健康检查与启动顺序
|
||||
|
||||
当前推荐闭环:
|
||||
|
||||
- backend 启动时先自动跑 migration
|
||||
- backend 提供 `/healthz`
|
||||
- frontend 提供 `/healthz`
|
||||
- admin 由 Nginx 提供 `/healthz`
|
||||
- compose 中 frontend / admin / backend-worker 都依赖 backend healthy
|
||||
148
deploy/docker/BACKUP_AND_RECOVERY.md
Normal file
148
deploy/docker/BACKUP_AND_RECOVERY.md
Normal file
@@ -0,0 +1,148 @@
|
||||
# 备份与恢复说明
|
||||
|
||||
这套博客现在已经有:
|
||||
|
||||
- PostgreSQL 数据库
|
||||
- Markdown 原文内容
|
||||
- 媒体文件 / 对象存储
|
||||
- 版本历史 / 审计日志 / 订阅数据
|
||||
|
||||
所以生产上最重要的不是再多一两个功能,而是**出事后能不能快速恢复**。
|
||||
|
||||
## 1. 建议的最小备份策略
|
||||
|
||||
### PostgreSQL
|
||||
- **频率**:每天至少 1 次;高频站点建议每 6~12 小时 1 次
|
||||
- **工具**:`pg_dump --format=custom`
|
||||
- **脚本**:`deploy/scripts/backup/backup-postgres.sh`
|
||||
|
||||
### Markdown 原文
|
||||
- **频率**:每次发布后 + 每天定时 1 次
|
||||
- **脚本**:`deploy/scripts/backup/backup-markdown.sh`
|
||||
- **原因**:Markdown 是内容源,恢复速度最快
|
||||
|
||||
### 媒体文件
|
||||
- 如果是本地目录:打包归档
|
||||
- 如果是 R2 / S3 / MinIO:定时 `aws s3 sync`
|
||||
- **脚本**:`deploy/scripts/backup/backup-media.sh`
|
||||
|
||||
## 2. 一键脚本
|
||||
|
||||
```bash
|
||||
# 全量备份
|
||||
./deploy/scripts/backup/backup-all.sh
|
||||
|
||||
# 单独备份数据库
|
||||
DATABASE_URL=postgres://... ./deploy/scripts/backup/backup-postgres.sh
|
||||
|
||||
# 单独备份 Markdown
|
||||
MARKDOWN_SOURCE_DIR=./backend/content/posts ./deploy/scripts/backup/backup-markdown.sh
|
||||
|
||||
# 单独备份媒体(本地目录)
|
||||
MEDIA_SOURCE_DIR=./uploads ./deploy/scripts/backup/backup-media.sh
|
||||
|
||||
# 单独备份媒体(R2 / S3)
|
||||
MEDIA_S3_SOURCE=s3://bucket-name ./deploy/scripts/backup/backup-media.sh
|
||||
```
|
||||
|
||||
## 3. 恢复步骤
|
||||
|
||||
### 恢复 PostgreSQL
|
||||
|
||||
```bash
|
||||
DATABASE_URL=postgres://... ./deploy/scripts/backup/restore-postgres.sh ./backups/postgres/latest.dump
|
||||
```
|
||||
|
||||
### 恢复 Markdown
|
||||
|
||||
```bash
|
||||
MARKDOWN_TARGET_DIR=./backend/content/posts ./deploy/scripts/backup/restore-markdown.sh ./backups/markdown/latest.tar.gz
|
||||
```
|
||||
|
||||
### 恢复媒体
|
||||
|
||||
```bash
|
||||
# 本地目录方式
|
||||
MEDIA_TARGET_DIR=./uploads ./deploy/scripts/backup/restore-media.sh ./backups/media/latest.tar.gz
|
||||
|
||||
# R2 / S3 方式
|
||||
MEDIA_S3_TARGET=s3://bucket-name ./deploy/scripts/backup/restore-media.sh ./backups/media/media-20260331T120000Z
|
||||
```
|
||||
|
||||
## 4. 推荐的生产 Cron 示例
|
||||
|
||||
```cron
|
||||
# 每天 03:10 备份 PostgreSQL
|
||||
10 3 * * * cd /opt/termi-astro && DATABASE_URL=postgres://... ./deploy/scripts/backup/backup-postgres.sh >> /var/log/termi-backup.log 2>&1
|
||||
|
||||
# 每天 03:25 备份 Markdown
|
||||
25 3 * * * cd /opt/termi-astro && MARKDOWN_SOURCE_DIR=./backend/content/posts ./deploy/scripts/backup/backup-markdown.sh >> /var/log/termi-backup.log 2>&1
|
||||
|
||||
# 每天 03:40 备份媒体
|
||||
40 3 * * * cd /opt/termi-astro && MEDIA_S3_SOURCE=s3://bucket-name ./deploy/scripts/backup/backup-media.sh >> /var/log/termi-backup.log 2>&1
|
||||
|
||||
# 每天 04:15 清理过期备份
|
||||
15 4 * * * cd /opt/termi-astro && ./deploy/scripts/backup/prune-backups.sh >> /var/log/termi-backup.log 2>&1
|
||||
|
||||
# 每天 04:40 异地同步
|
||||
40 4 * * * cd /opt/termi-astro && OFFSITE_TARGET=/mnt/offsite/termi-astro-backups ./deploy/scripts/backup/sync-backups-offsite.sh >> /var/log/termi-backup.log 2>&1
|
||||
```
|
||||
|
||||
## 5. 建议你们再加一层异地备份
|
||||
|
||||
仅仅把备份留在同一台服务器上不够。
|
||||
|
||||
至少保证:
|
||||
- 主机本地保留最近 7~14 天
|
||||
- 再同步一份到另一块存储 / 另一台主机 / 对象存储冷备桶
|
||||
|
||||
## 6. 恢复演练建议
|
||||
|
||||
建议每个月至少做 1 次演练:
|
||||
|
||||
1. 用最新数据库备份恢复到临时环境
|
||||
2. 用 Markdown 备份恢复内容目录
|
||||
3. 用媒体备份恢复对象
|
||||
4. 校验:
|
||||
- 首页可打开
|
||||
- 文章详情可打开
|
||||
- 图片可访问
|
||||
- 后台可登录
|
||||
- 审计 / 版本 / 订阅表存在数据
|
||||
|
||||
也可以直接用恢复演练脚本:
|
||||
|
||||
```bash
|
||||
DATABASE_URL=postgres://... \
|
||||
POSTGRES_BACKUP=./backups/postgres/latest.dump \
|
||||
MARKDOWN_BACKUP=./backups/markdown/latest.tar.gz \
|
||||
MEDIA_BACKUP=./backups/media/latest.tar.gz \
|
||||
./deploy/scripts/backup/verify-restore.sh
|
||||
```
|
||||
|
||||
## 7. 当前架构下的恢复优先级
|
||||
|
||||
发生事故时建议按这个顺序:
|
||||
|
||||
1. 恢复数据库
|
||||
2. 恢复 Markdown 原文
|
||||
3. 恢复媒体资源
|
||||
4. 启动 backend / frontend / admin
|
||||
5. 进入后台检查:
|
||||
- 审计日志
|
||||
- 文章版本历史
|
||||
- 订阅目标与最近投递
|
||||
|
||||
## 8. 说明
|
||||
|
||||
这些脚本是**仓库内参考实现**,没有在你们生产机上自动执行。
|
||||
正式上线前请按你们实际目录、R2/S3 桶、数据库连接串、cron 规范再过一遍。
|
||||
|
||||
另外仓库里已经提供:
|
||||
|
||||
- `deploy/systemd/README.md`
|
||||
- `deploy/systemd/termi-backup-all.timer`
|
||||
- `deploy/systemd/termi-backup-prune.timer`
|
||||
- `deploy/systemd/termi-backup-offsite-sync.timer`
|
||||
|
||||
如果宿主机使用 systemd,建议优先启用 timer,而不是只靠手工执行。
|
||||
173
deploy/docker/README.md
Normal file
173
deploy/docker/README.md
Normal file
@@ -0,0 +1,173 @@
|
||||
# Docker 部署(Package 镜像)
|
||||
|
||||
补充架构说明见:
|
||||
|
||||
- `deploy/docker/ARCHITECTURE.md`
|
||||
- `deploy/caddy/Caddyfile.tohka.example`
|
||||
- `deploy/caddy/Caddyfile.tohka.production.example`
|
||||
- `deploy/docker/TOHKA_POCKET_ID.md`
|
||||
- `deploy/docker/TOHKA_DEPLOY_RUNBOOK.md`
|
||||
- `deploy/docker/config.yaml.example`
|
||||
- `deploy/docker/BACKUP_AND_RECOVERY.md`
|
||||
|
||||
## 1) 准备主配置文件(config.yaml)
|
||||
|
||||
现在推荐把:
|
||||
|
||||
- `deploy/docker/config.yaml`
|
||||
|
||||
作为部署主配置源;`deploy/docker/.env` 改为脚本生成产物。
|
||||
|
||||
先复制模板:
|
||||
|
||||
```bash
|
||||
cp deploy/docker/config.yaml.example deploy/docker/config.yaml
|
||||
```
|
||||
|
||||
然后填写至少这些核心项:
|
||||
|
||||
- `compose_env.DATABASE_URL`
|
||||
- `compose_env.REDIS_URL`
|
||||
- `compose_env.JWT_SECRET`
|
||||
- 邮件确认 / 邮件通知上线前,请同时补齐 SMTP 配置
|
||||
|
||||
填完后执行:
|
||||
|
||||
```bash
|
||||
python deploy/scripts/render_compose_env.py \
|
||||
--input deploy/docker/config.yaml \
|
||||
--output deploy/docker/.env
|
||||
```
|
||||
|
||||
如果你们内部喜欢先审 YAML,再部署,这就是现在的推荐流程。
|
||||
|
||||
建议在 `config.yaml -> compose_env` 下同时检查这些运行时变量:
|
||||
|
||||
- `INTERNAL_API_BASE_URL`:frontend SSR 容器访问 backend 用,compose 默认推荐 `http://backend:5150/api`
|
||||
- `PUBLIC_API_BASE_URL`:浏览器访问 backend API 用;留空时前台会回退到“当前主机 + `:5150/api`”
|
||||
- `PUBLIC_IMAGE_ALLOWED_HOSTS`:前台 `/_img` 图片优化端点允许的额外图片 host(逗号分隔)
|
||||
- `ADMIN_API_BASE_URL`:admin 浏览器访问 backend API 用;留空时后台会回退到“当前主机 + `:5150`”
|
||||
- `ADMIN_FRONTEND_BASE_URL`:admin 里“打开前台 / 问答页 / 文章页预览”跳转用
|
||||
- `TERMI_ADMIN_TRUST_PROXY_AUTH`:是否信任前置代理(如 Caddy + TinyAuth)注入的后台认证头
|
||||
- `TERMI_ADMIN_LOCAL_LOGIN_ENABLED`:是否保留本地账号密码登录兜底
|
||||
- `TERMI_ADMIN_PROXY_SHARED_SECRET`:代理 SSO 共享密钥;建议和 Caddy 的 `X-Termi-Proxy-Secret` 配套使用
|
||||
- `SMTP_ENABLE / SMTP_HOST / SMTP_PORT / SMTP_SECURE / SMTP_USER / SMTP_PASSWORD / SMTP_HELLO_NAME`:订阅确认和邮件通知需要
|
||||
|
||||
例如:
|
||||
|
||||
```yaml
|
||||
compose_env:
|
||||
PUBLIC_API_BASE_URL: https://api.blog.init.cool
|
||||
ADMIN_API_BASE_URL: https://admin.blog.init.cool
|
||||
ADMIN_FRONTEND_BASE_URL: https://blog.init.cool
|
||||
TERMI_ADMIN_TRUST_PROXY_AUTH: true
|
||||
TERMI_ADMIN_LOCAL_LOGIN_ENABLED: false
|
||||
TERMI_ADMIN_PROXY_SHARED_SECRET: replace-with-a-long-random-secret
|
||||
```
|
||||
|
||||
> 这些值最终会被渲染成 `deploy/docker/.env`,再由 `compose.package.yml` 读取。
|
||||
> 如果镜像构建期也注入了 `FRONTEND_PUBLIC_API_BASE_URL` / `ADMIN_VITE_API_BASE` / `ADMIN_VITE_FRONTEND_BASE_URL`,则运行时变量优先级更高。
|
||||
|
||||
## 2) 启动
|
||||
|
||||
在仓库根目录执行:
|
||||
|
||||
```bash
|
||||
python deploy/scripts/render_compose_env.py \
|
||||
--input deploy/docker/config.yaml \
|
||||
--output deploy/docker/.env
|
||||
|
||||
docker compose -f deploy/docker/compose.package.yml --env-file deploy/docker/.env up -d
|
||||
```
|
||||
|
||||
如果是在 `tohka` 上并且前面接宿主机 Caddy,推荐改用:
|
||||
|
||||
```bash
|
||||
python deploy/scripts/render_compose_env.py \
|
||||
--input deploy/docker/config.yaml \
|
||||
--output deploy/docker/.env
|
||||
|
||||
docker compose \
|
||||
-f deploy/docker/compose.package.yml \
|
||||
-f deploy/docker/compose.tohka.override.yml \
|
||||
--env-file deploy/docker/.env up -d
|
||||
```
|
||||
|
||||
当前 compose 推荐一起启动 4 个容器:
|
||||
|
||||
- `backend`:API / migration / `/healthz`
|
||||
- `backend-worker`:Redis 队列消费者(通知异步投递、失败重试)
|
||||
- `frontend`:Astro SSR(Node)
|
||||
- `admin`:静态 SPA + Nginx
|
||||
|
||||
`compose.tohka.override.yml` 额外会把三个对外端口绑到 `127.0.0.1`,避免把 backend / admin 直接暴露到公网。
|
||||
|
||||
## 3) 更新镜像后重拉
|
||||
|
||||
```bash
|
||||
docker compose -f deploy/docker/compose.package.yml --env-file deploy/docker/.env pull
|
||||
docker compose -f deploy/docker/compose.package.yml --env-file deploy/docker/.env up -d
|
||||
```
|
||||
|
||||
## 4) 问答记录(2026-03-31)
|
||||
|
||||
### Q1: 为什么前台容器是 `4321` 端口?
|
||||
A: 前台是 **Astro SSR(Node)**(`output: 'server'`),容器内运行 Node 服务(`dist/server/entry.mjs`),所以需要监听内部端口 `4321`。
|
||||
|
||||
### Q2: 两个前端都能做成纯静态 + 反代吗?
|
||||
A:
|
||||
- `admin` 可以,当前就是静态资源由 Nginx 提供。
|
||||
- `frontend` 当前不行(未做纯静态改造):项目里使用了 `prerender = false`、`Astro.request`、`Astro.cookies` 等请求期逻辑。
|
||||
|
||||
### Q3: SSR 和 CSR 怎么选?
|
||||
A: 当前站点对外内容页优先 SEO 与首屏可见性,保留 SSR 更稳;后台管理台继续 CSR/静态站即可。
|
||||
|
||||
### Q4: 生产推荐端口设计是什么?
|
||||
A: 推荐前置 Caddy/Nginx 统一暴露 `80/443`,`frontend:4321` / `backend:5150` / `admin:80` 仅走内网。
|
||||
当前 `compose.package.yml` 属于直连端口版,便于快速部署与联调。
|
||||
另外因为通知已经走异步队列,生产务必同时启动 `backend-worker`。
|
||||
|
||||
### Q5: 为什么 compose 里没看到 `ADMIN_VITE_FRONTEND_BASE_URL`?
|
||||
A:
|
||||
- 现在 compose 运行时可以直接设置 `ADMIN_FRONTEND_BASE_URL`,用于覆盖 admin 里所有前台跳转链接。
|
||||
- `ADMIN_VITE_FRONTEND_BASE_URL` 仍然可以作为 **镜像构建期默认值** 保留在 CI / workflow 里。
|
||||
- 如果两者都存在,**运行时 `ADMIN_FRONTEND_BASE_URL` 优先**。
|
||||
|
||||
### Q6: 前台 / 后台为什么拆成 public/internal 两种 API 地址?
|
||||
A:
|
||||
- `frontend` 是 Astro SSR,服务端渲染请求 backend 时更适合走内网地址(如 `http://backend:5150/api`)。
|
||||
- 但浏览器里的评论、问答、搜索请求必须走用户可访问的公开地址。
|
||||
- `admin` 是纯静态 SPA,也只能使用浏览器可访问的公开 API 地址。
|
||||
- 所以现在区分为:
|
||||
- `INTERNAL_API_BASE_URL`:frontend SSR 内部访问
|
||||
- `PUBLIC_API_BASE_URL`:前台浏览器访问
|
||||
- `ADMIN_API_BASE_URL`:admin 浏览器访问
|
||||
|
||||
### Q7: 现在后台 OIDC / Pocket ID 怎么接?
|
||||
A:
|
||||
- 推荐直接复用 tohka 上现成的 **TinyAuth + Pocket ID**。
|
||||
- Caddy 在 `admin` 域名入口加 `import tinyauth`,并把 `/api/*` 同域转发到 backend。
|
||||
- backend 开启:
|
||||
- `TERMI_ADMIN_TRUST_PROXY_AUTH=true`
|
||||
- `TERMI_ADMIN_LOCAL_LOGIN_ENABLED=false`
|
||||
- `TERMI_ADMIN_PROXY_SHARED_SECRET=<随机长字符串>`
|
||||
- Caddy 在 `/api/*` -> backend 时补:
|
||||
- `X-Termi-Proxy-Secret: {$TERMI_ADMIN_PROXY_SHARED_SECRET}`
|
||||
- backend 会信任 TinyAuth 转发的:
|
||||
- `Remote-User`
|
||||
- `Remote-Email`
|
||||
- `Remote-Groups`
|
||||
- 本地开发仍可保留内置账号密码登录。
|
||||
|
||||
### Q8: 备份现在放在哪看?
|
||||
A:
|
||||
- 参考脚本:`deploy/scripts/backup/`
|
||||
- 恢复文档:`deploy/docker/BACKUP_AND_RECOVERY.md`
|
||||
|
||||
### Q9: 现在健康检查和 migration 怎么处理?
|
||||
A:
|
||||
- `backend` 镜像启动时会先执行 `db migrate`
|
||||
- `backend` 提供 `/healthz`
|
||||
- `frontend` 提供 `/healthz`
|
||||
- `admin` 继续由 Nginx 提供 `/healthz`
|
||||
- compose 现在使用 `depends_on.condition: service_healthy`
|
||||
124
deploy/docker/TOHKA_DEPLOY_RUNBOOK.md
Normal file
124
deploy/docker/TOHKA_DEPLOY_RUNBOOK.md
Normal file
@@ -0,0 +1,124 @@
|
||||
# tohka 最终部署操作手册(config.yaml 版)
|
||||
|
||||
这份手册按“准备 -> 渲染配置 -> 启动 -> 接 Caddy -> 启 timers -> 验证”执行。
|
||||
|
||||
## 1. 准备文件
|
||||
|
||||
先复制配置模板:
|
||||
|
||||
```bash
|
||||
cp deploy/docker/config.yaml.example deploy/docker/config.yaml
|
||||
```
|
||||
|
||||
再按生产实际填写:
|
||||
|
||||
- 域名
|
||||
- Postgres / Redis 地址
|
||||
- JWT secret
|
||||
- SMTP
|
||||
- TinyAuth / Pocket ID 共享密钥
|
||||
- 镜像 tag
|
||||
|
||||
主配置源是:
|
||||
|
||||
- `deploy/docker/config.yaml`
|
||||
|
||||
## 2. 渲染 `.env`
|
||||
|
||||
```bash
|
||||
python deploy/scripts/render_compose_env.py \
|
||||
--input deploy/docker/config.yaml \
|
||||
--output deploy/docker/.env
|
||||
```
|
||||
|
||||
如果只是想预览,不落盘:
|
||||
|
||||
```bash
|
||||
python deploy/scripts/render_compose_env.py \
|
||||
--input deploy/docker/config.yaml \
|
||||
--stdout
|
||||
```
|
||||
|
||||
## 3. 启动容器
|
||||
|
||||
```bash
|
||||
docker compose \
|
||||
-f deploy/docker/compose.package.yml \
|
||||
-f deploy/docker/compose.tohka.override.yml \
|
||||
--env-file deploy/docker/.env up -d
|
||||
```
|
||||
|
||||
查看状态:
|
||||
|
||||
```bash
|
||||
docker compose \
|
||||
-f deploy/docker/compose.package.yml \
|
||||
-f deploy/docker/compose.tohka.override.yml \
|
||||
--env-file deploy/docker/.env ps
|
||||
```
|
||||
|
||||
## 4. 接宿主机 Caddy
|
||||
|
||||
直接参考:
|
||||
|
||||
- `deploy/caddy/Caddyfile.tohka.production.example`
|
||||
|
||||
建议域名:
|
||||
|
||||
- `blog.init.cool`
|
||||
- `admin.blog.init.cool`
|
||||
- `api.blog.init.cool`
|
||||
|
||||
关键点:
|
||||
|
||||
- `admin.blog.init.cool` 整体挂 `import tinyauth`
|
||||
- `admin.blog.init.cool/api/*` 转 backend 时带:
|
||||
- `X-Termi-Proxy-Secret {$TERMI_ADMIN_PROXY_SHARED_SECRET}`
|
||||
|
||||
## 5. 启用 systemd timers
|
||||
|
||||
```bash
|
||||
sudo cp deploy/systemd/*.service /etc/systemd/system/
|
||||
sudo cp deploy/systemd/*.timer /etc/systemd/system/
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl enable --now termi-retry-deliveries.timer
|
||||
sudo systemctl enable --now termi-weekly-digest.timer
|
||||
sudo systemctl enable --now termi-monthly-digest.timer
|
||||
sudo systemctl enable --now termi-backup-all.timer
|
||||
sudo systemctl enable --now termi-backup-prune.timer
|
||||
sudo systemctl enable --now termi-backup-offsite-sync.timer
|
||||
```
|
||||
|
||||
## 6. 做首轮验证
|
||||
|
||||
至少检查:
|
||||
|
||||
- `http://127.0.0.1:5150/healthz`
|
||||
- `http://127.0.0.1:4321/healthz`
|
||||
- `http://127.0.0.1:4322/healthz`
|
||||
- `https://admin.blog.init.cool` 能正常走 Pocket ID / TinyAuth 登录
|
||||
- 订阅确认邮件能正常送达
|
||||
- 测试通知 / 周报 / 月报能正常入队并送达
|
||||
|
||||
## 7. 上线后维护动作
|
||||
|
||||
每次改 `deploy/docker/config.yaml` 后,记得重新:
|
||||
|
||||
```bash
|
||||
python deploy/scripts/render_compose_env.py \
|
||||
--input deploy/docker/config.yaml \
|
||||
--output deploy/docker/.env
|
||||
|
||||
docker compose \
|
||||
-f deploy/docker/compose.package.yml \
|
||||
-f deploy/docker/compose.tohka.override.yml \
|
||||
--env-file deploy/docker/.env up -d
|
||||
```
|
||||
|
||||
## 8. 配套文档
|
||||
|
||||
- `deploy/docker/README.md`
|
||||
- `deploy/docker/ARCHITECTURE.md`
|
||||
- `deploy/docker/TOHKA_POCKET_ID.md`
|
||||
- `deploy/systemd/GO_LIVE_CHECKLIST.md`
|
||||
- `deploy/docker/BACKUP_AND_RECOVERY.md`
|
||||
154
deploy/docker/TOHKA_POCKET_ID.md
Normal file
154
deploy/docker/TOHKA_POCKET_ID.md
Normal file
@@ -0,0 +1,154 @@
|
||||
# tohka 上接入 Pocket ID / TinyAuth / Caddy 的推荐做法
|
||||
|
||||
这份文档记录当前项目在 `tohka` 上最推荐的后台保护方式:
|
||||
|
||||
```text
|
||||
Browser
|
||||
-> Host Caddy
|
||||
-> TinyAuth
|
||||
-> Pocket ID (OIDC)
|
||||
-> admin nginx
|
||||
-> backend /api
|
||||
```
|
||||
|
||||
## 1. 目标
|
||||
|
||||
实现这些效果:
|
||||
|
||||
- `blog.init.cool` 对外公开,走 frontend SSR
|
||||
- `admin.blog.init.cool` 整体受保护
|
||||
- admin 页面和它访问的 `/api/*` 同域
|
||||
- backend 信任 TinyAuth 注入的登录身份
|
||||
- 即使 backend 端口误暴露,也额外要求一个共享密钥头,降低伪造 `Remote-User` 风险
|
||||
|
||||
## 2. 推荐使用的 compose 方式
|
||||
|
||||
基础 compose:
|
||||
|
||||
- `deploy/docker/compose.package.yml`
|
||||
|
||||
在 tohka 上再叠加这个 override:
|
||||
|
||||
- `deploy/docker/compose.tohka.override.yml`
|
||||
|
||||
部署主配置源建议使用:
|
||||
|
||||
- `deploy/docker/config.yaml`
|
||||
|
||||
启动方式:
|
||||
|
||||
```bash
|
||||
python deploy/scripts/render_compose_env.py \
|
||||
--input deploy/docker/config.yaml \
|
||||
--output deploy/docker/.env
|
||||
|
||||
docker compose \
|
||||
-f deploy/docker/compose.package.yml \
|
||||
-f deploy/docker/compose.tohka.override.yml \
|
||||
--env-file deploy/docker/.env up -d
|
||||
```
|
||||
|
||||
这个 override 会做三件事:
|
||||
|
||||
1. `frontend / admin / backend` 只绑定到 `127.0.0.1`
|
||||
2. 默认打开 `TERMI_ADMIN_TRUST_PROXY_AUTH=true`
|
||||
3. 默认关闭 `TERMI_ADMIN_LOCAL_LOGIN_ENABLED`
|
||||
|
||||
## 3. `config.yaml -> compose_env` 里最关键的变量
|
||||
|
||||
先准备配置文件:
|
||||
|
||||
现在推荐直接从下面这个模板开始:
|
||||
|
||||
- `deploy/docker/config.yaml.example`
|
||||
|
||||
然后复制成:
|
||||
|
||||
- `deploy/docker/config.yaml`
|
||||
|
||||
至少补这些:
|
||||
|
||||
```yaml
|
||||
compose_env:
|
||||
APP_BASE_URL: https://admin.blog.init.cool
|
||||
PUBLIC_API_BASE_URL: https://api.blog.init.cool
|
||||
ADMIN_API_BASE_URL: https://admin.blog.init.cool
|
||||
ADMIN_FRONTEND_BASE_URL: https://blog.init.cool
|
||||
|
||||
TERMI_ADMIN_TRUST_PROXY_AUTH: true
|
||||
TERMI_ADMIN_LOCAL_LOGIN_ENABLED: false
|
||||
TERMI_ADMIN_PROXY_SHARED_SECRET: replace-with-a-long-random-secret
|
||||
```
|
||||
|
||||
说明:
|
||||
|
||||
- `APP_BASE_URL` 建议填后台正式地址,便于后端生成后台相关链接
|
||||
- `TERMI_ADMIN_PROXY_SHARED_SECRET` 是 backend 和 Caddy 之间约定的共享密钥
|
||||
- backend 现在会在代理 SSO 模式下检查 `X-Termi-Proxy-Secret`
|
||||
|
||||
## 4. Caddy 侧应该怎么配
|
||||
|
||||
直接参考:
|
||||
|
||||
- `deploy/caddy/Caddyfile.tohka.example`
|
||||
- `deploy/caddy/Caddyfile.tohka.production.example`
|
||||
|
||||
关键点是:admin 域名下 `/api/*` 反代到 backend 时要带上:
|
||||
|
||||
```caddy
|
||||
header_up X-Termi-Proxy-Secret {$TERMI_ADMIN_PROXY_SHARED_SECRET}
|
||||
```
|
||||
|
||||
这样 backend 才会接受代理注入的:
|
||||
|
||||
- `Remote-User`
|
||||
- `Remote-Email`
|
||||
- `Remote-Groups`
|
||||
|
||||
## 5. 为什么要加共享密钥头
|
||||
|
||||
如果只看 `Remote-User` 这类头,而 backend 又被公网直接访问,
|
||||
理论上别人可以手工伪造请求头来冒充后台用户。
|
||||
|
||||
现在的建议闭环是:
|
||||
|
||||
- backend 端口只监听 `127.0.0.1`
|
||||
- admin 域名入口必须经过 TinyAuth / Pocket ID
|
||||
- backend 额外校验 `X-Termi-Proxy-Secret`
|
||||
|
||||
这样会稳很多。
|
||||
|
||||
## 6. 后台登录页现在的行为
|
||||
|
||||
当前逻辑:
|
||||
|
||||
- 如果 `TERMI_ADMIN_LOCAL_LOGIN_ENABLED=true`,仍可用本地账号密码兜底
|
||||
- 如果关闭本地登录,admin 登录页会提示“请从受保护入口进入,并重新检查会话”
|
||||
- backend 会优先读取代理身份头
|
||||
|
||||
## 7. 建议的 tohka 落地顺序
|
||||
|
||||
1. 在 tohka 的大 Caddyfile 里接入 `admin.blog.init.cool`
|
||||
2. 给 admin 域名启用 `import tinyauth`
|
||||
3. `/api/*` 同域转发到 `localhost:5150`
|
||||
4. 加上 `X-Termi-Proxy-Secret`
|
||||
5. 用 `compose.tohka.override.yml` 启动容器
|
||||
6. 打开 `https://admin.blog.init.cool`
|
||||
7. 在后台里确认当前 session 的 `auth_source=proxy`
|
||||
|
||||
完整部署步骤可再配合:
|
||||
|
||||
- `deploy/docker/TOHKA_DEPLOY_RUNBOOK.md`
|
||||
|
||||
## 8. digest / retry / 备份建议一起启用
|
||||
|
||||
上线时建议连同这些一起启用:
|
||||
|
||||
- `deploy/systemd/termi-retry-deliveries.timer`
|
||||
- `deploy/systemd/termi-weekly-digest.timer`
|
||||
- `deploy/systemd/termi-monthly-digest.timer`
|
||||
- `deploy/systemd/termi-backup-all.timer`
|
||||
- `deploy/systemd/termi-backup-prune.timer`
|
||||
- `deploy/systemd/termi-backup-offsite-sync.timer`
|
||||
|
||||
这样后台保护、通知投递、备份恢复三件事才算闭环。
|
||||
80
deploy/docker/compose.package.yml
Normal file
80
deploy/docker/compose.package.yml
Normal file
@@ -0,0 +1,80 @@
|
||||
services:
|
||||
backend:
|
||||
image: ${BACKEND_IMAGE:-git.init.cool/cool/termi-astro-backend:latest}
|
||||
pull_policy: always
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
PORT: 5150
|
||||
APP_BASE_URL: ${APP_BASE_URL:-http://localhost:5150}
|
||||
DATABASE_URL: ${DATABASE_URL:?DATABASE_URL is required}
|
||||
REDIS_URL: ${REDIS_URL:?REDIS_URL is required}
|
||||
JWT_SECRET: ${JWT_SECRET:?JWT_SECRET is required}
|
||||
# 当前推荐把 admin 放在受保护的后台域名下(同域转发 /api 到 backend),
|
||||
# 然后让 backend 信任 TinyAuth / Pocket ID 通过 Caddy 注入的认证头。
|
||||
# 如启用代理 SSO,建议同时配置 TERMI_ADMIN_PROXY_SHARED_SECRET,
|
||||
# 并让 Caddy 在转发 /api 到 backend 时附带 X-Termi-Proxy-Secret。
|
||||
TERMI_ADMIN_TRUST_PROXY_AUTH: ${TERMI_ADMIN_TRUST_PROXY_AUTH:-false}
|
||||
TERMI_ADMIN_LOCAL_LOGIN_ENABLED: ${TERMI_ADMIN_LOCAL_LOGIN_ENABLED:-true}
|
||||
TERMI_ADMIN_PROXY_SHARED_SECRET: ${TERMI_ADMIN_PROXY_SHARED_SECRET:-}
|
||||
RUST_LOG: ${RUST_LOG:-info}
|
||||
ports:
|
||||
# 这是“直连端口”示例;如果前面接 tohka 宿主机 Caddy,
|
||||
# 推荐叠加 compose.tohka.override.yml,把 backend 只绑定到 127.0.0.1。
|
||||
- '${BACKEND_PORT:-5150}:5150'
|
||||
|
||||
backend-worker:
|
||||
image: ${BACKEND_IMAGE:-git.init.cool/cool/termi-astro-backend:latest}
|
||||
pull_policy: always
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
backend:
|
||||
condition: service_healthy
|
||||
command: ['termi_api-cli', '-e', 'production', 'start', '--worker']
|
||||
environment:
|
||||
PORT: 5150
|
||||
APP_BASE_URL: ${APP_BASE_URL:-http://localhost:5150}
|
||||
DATABASE_URL: ${DATABASE_URL:?DATABASE_URL is required}
|
||||
REDIS_URL: ${REDIS_URL:?REDIS_URL is required}
|
||||
JWT_SECRET: ${JWT_SECRET:?JWT_SECRET is required}
|
||||
TERMI_ADMIN_TRUST_PROXY_AUTH: ${TERMI_ADMIN_TRUST_PROXY_AUTH:-false}
|
||||
TERMI_ADMIN_LOCAL_LOGIN_ENABLED: ${TERMI_ADMIN_LOCAL_LOGIN_ENABLED:-true}
|
||||
TERMI_ADMIN_PROXY_SHARED_SECRET: ${TERMI_ADMIN_PROXY_SHARED_SECRET:-}
|
||||
RUST_LOG: ${RUST_LOG:-info}
|
||||
TERMI_SKIP_MIGRATIONS: 'true'
|
||||
|
||||
frontend:
|
||||
image: ${FRONTEND_IMAGE:-git.init.cool/cool/termi-astro-frontend:latest}
|
||||
pull_policy: always
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
backend:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
# frontend 是 Astro SSR(Node):
|
||||
# - INTERNAL_API_BASE_URL 给服务端渲染访问 backend 用
|
||||
# - PUBLIC_API_BASE_URL 给浏览器里的评论 / AI 问答等请求用
|
||||
# - PUBLIC_IMAGE_ALLOWED_HOSTS 给前台图片优化端点 /_img 放行额外图片域名
|
||||
INTERNAL_API_BASE_URL: ${INTERNAL_API_BASE_URL:-http://backend:5150/api}
|
||||
PUBLIC_API_BASE_URL: ${PUBLIC_API_BASE_URL:-}
|
||||
PUBLIC_IMAGE_ALLOWED_HOSTS: ${PUBLIC_IMAGE_ALLOWED_HOSTS:-}
|
||||
# frontend 是 Astro SSR(Node) 服务,容器内部监听 4321
|
||||
# 生产建议由网关统一反代,仅对外开放 80/443
|
||||
ports:
|
||||
- '${FRONTEND_PORT:-4321}:4321'
|
||||
|
||||
admin:
|
||||
image: ${ADMIN_IMAGE:-git.init.cool/cool/termi-astro-admin:latest}
|
||||
pull_policy: always
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
backend:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
ADMIN_API_BASE_URL: ${ADMIN_API_BASE_URL:-}
|
||||
ADMIN_FRONTEND_BASE_URL: ${ADMIN_FRONTEND_BASE_URL:-}
|
||||
# admin 是静态 SPA,由 Nginx 在容器内监听 80
|
||||
# API 与“打开前台 / AI 问答 / 文章预览”这类地址都优先读取运行时环境变量
|
||||
# ADMIN_API_BASE_URL / ADMIN_FRONTEND_BASE_URL;未设置时再回退到构建期值 / 同主机默认端口
|
||||
ports:
|
||||
# 如果 admin 域名由宿主机 Caddy 统一反代,推荐改成 127.0.0.1 绑定。
|
||||
- '${ADMIN_PORT:-4322}:80'
|
||||
28
deploy/docker/compose.tohka.override.yml
Normal file
28
deploy/docker/compose.tohka.override.yml
Normal file
@@ -0,0 +1,28 @@
|
||||
services:
|
||||
# 这个 override 专门给 tohka 这种“宿主机 Caddy -> localhost 端口 -> Docker 容器”模式使用。
|
||||
# 使用方式:
|
||||
# docker compose \
|
||||
# -f deploy/docker/compose.package.yml \
|
||||
# -f deploy/docker/compose.tohka.override.yml \
|
||||
# --env-file deploy/docker/.env up -d
|
||||
backend:
|
||||
environment:
|
||||
TERMI_ADMIN_TRUST_PROXY_AUTH: ${TERMI_ADMIN_TRUST_PROXY_AUTH:-true}
|
||||
TERMI_ADMIN_LOCAL_LOGIN_ENABLED: ${TERMI_ADMIN_LOCAL_LOGIN_ENABLED:-false}
|
||||
TERMI_ADMIN_PROXY_SHARED_SECRET: ${TERMI_ADMIN_PROXY_SHARED_SECRET:?TERMI_ADMIN_PROXY_SHARED_SECRET is required for tohka proxy mode}
|
||||
ports:
|
||||
- '127.0.0.1:${BACKEND_PORT:-5150}:5150'
|
||||
|
||||
backend-worker:
|
||||
environment:
|
||||
TERMI_ADMIN_TRUST_PROXY_AUTH: ${TERMI_ADMIN_TRUST_PROXY_AUTH:-true}
|
||||
TERMI_ADMIN_LOCAL_LOGIN_ENABLED: ${TERMI_ADMIN_LOCAL_LOGIN_ENABLED:-false}
|
||||
TERMI_ADMIN_PROXY_SHARED_SECRET: ${TERMI_ADMIN_PROXY_SHARED_SECRET:?TERMI_ADMIN_PROXY_SHARED_SECRET is required for tohka proxy mode}
|
||||
|
||||
frontend:
|
||||
ports:
|
||||
- '127.0.0.1:${FRONTEND_PORT:-4321}:4321'
|
||||
|
||||
admin:
|
||||
ports:
|
||||
- '127.0.0.1:${ADMIN_PORT:-4322}:80'
|
||||
88
deploy/docker/config.yaml.example
Normal file
88
deploy/docker/config.yaml.example
Normal file
@@ -0,0 +1,88 @@
|
||||
# tohka 生产部署主配置源(config.yaml)
|
||||
# 使用方式:
|
||||
# 1. cp deploy/docker/config.yaml.example deploy/docker/config.yaml
|
||||
# 2. 按实际环境填写下面参数
|
||||
# 3. python deploy/scripts/render_compose_env.py --input deploy/docker/config.yaml --output deploy/docker/.env
|
||||
# 4. docker compose -f deploy/docker/compose.package.yml -f deploy/docker/compose.tohka.override.yml --env-file deploy/docker/.env up -d
|
||||
|
||||
project:
|
||||
name: termi-astro
|
||||
host: tohka
|
||||
compose_files:
|
||||
- deploy/docker/compose.package.yml
|
||||
- deploy/docker/compose.tohka.override.yml
|
||||
env_output: deploy/docker/.env
|
||||
|
||||
# 仅做文档/运维留档;docker compose 实际读取 compose_env
|
||||
meta:
|
||||
blog_origin: https://blog.init.cool
|
||||
admin_origin: https://admin.blog.init.cool
|
||||
api_origin: https://api.blog.init.cool
|
||||
pocket_id_issuer: https://id.example.com
|
||||
pocket_id_client: admin.blog.init.cool
|
||||
|
||||
compose_env:
|
||||
BACKEND_PORT: 5150
|
||||
FRONTEND_PORT: 4321
|
||||
ADMIN_PORT: 4322
|
||||
|
||||
APP_BASE_URL: https://admin.blog.init.cool
|
||||
INTERNAL_API_BASE_URL: http://backend:5150/api
|
||||
PUBLIC_API_BASE_URL: https://api.blog.init.cool
|
||||
ADMIN_API_BASE_URL: https://admin.blog.init.cool
|
||||
ADMIN_FRONTEND_BASE_URL: https://blog.init.cool
|
||||
PUBLIC_IMAGE_ALLOWED_HOSTS: cdn.example.com,pub-xxxx.r2.dev
|
||||
|
||||
DATABASE_URL: postgres://termi:replace-me@postgres.internal:5432/termi_api
|
||||
REDIS_URL: redis://redis.internal:6379
|
||||
JWT_SECRET: replace-with-a-long-random-secret
|
||||
JWT_EXPIRATION_SECONDS: 604800
|
||||
RUST_LOG: info
|
||||
|
||||
SMTP_ENABLE: true
|
||||
SMTP_HOST: smtp.resend.com
|
||||
SMTP_PORT: 587
|
||||
SMTP_SECURE: false
|
||||
SMTP_USER: resend
|
||||
SMTP_PASSWORD: replace-with-smtp-password
|
||||
SMTP_HELLO_NAME: admin.blog.init.cool
|
||||
|
||||
TERMI_ADMIN_TRUST_PROXY_AUTH: true
|
||||
TERMI_ADMIN_LOCAL_LOGIN_ENABLED: false
|
||||
TERMI_ADMIN_PROXY_SHARED_SECRET: replace-with-another-long-random-secret
|
||||
|
||||
BACKEND_IMAGE: git.init.cool/cool/termi-astro-backend:latest
|
||||
FRONTEND_IMAGE: git.init.cool/cool/termi-astro-frontend:latest
|
||||
ADMIN_IMAGE: git.init.cool/cool/termi-astro-admin:latest
|
||||
|
||||
notifications:
|
||||
ntfy:
|
||||
enabled: true
|
||||
base_url: https://ntfy.sh
|
||||
example_topic: your-team-topic
|
||||
timers:
|
||||
retry_deliveries: termi-retry-deliveries.timer
|
||||
weekly_digest: termi-weekly-digest.timer
|
||||
monthly_digest: termi-monthly-digest.timer
|
||||
|
||||
backups:
|
||||
offsite_target: /mnt/offsite/termi-astro-backups
|
||||
local_retention_days:
|
||||
postgres: 14
|
||||
markdown: 14
|
||||
media: 14
|
||||
timers:
|
||||
backup_all: termi-backup-all.timer
|
||||
backup_prune: termi-backup-prune.timer
|
||||
backup_offsite_sync: termi-backup-offsite-sync.timer
|
||||
|
||||
systemd:
|
||||
repo_path: /opt/termi-astro
|
||||
install_path: /etc/systemd/system
|
||||
enable_timers:
|
||||
- termi-retry-deliveries.timer
|
||||
- termi-weekly-digest.timer
|
||||
- termi-monthly-digest.timer
|
||||
- termi-backup-all.timer
|
||||
- termi-backup-prune.timer
|
||||
- termi-backup-offsite-sync.timer
|
||||
10
deploy/scripts/backup/backup-all.sh
Normal file
10
deploy/scripts/backup/backup-all.sh
Normal file
@@ -0,0 +1,10 @@
|
||||
#!/usr/bin/env bash
# Run every backup job (postgres, markdown, media) in sequence.
# `set -euo pipefail` makes any failing job abort the whole chain.
set -euo pipefail

# Resolve the directory containing this script so the sibling backup
# scripts can be invoked regardless of the caller's working directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

"${SCRIPT_DIR}/backup-postgres.sh"
"${SCRIPT_DIR}/backup-markdown.sh"
"${SCRIPT_DIR}/backup-media.sh"

echo "All backup jobs finished successfully."
|
||||
20
deploy/scripts/backup/backup-markdown.sh
Normal file
20
deploy/scripts/backup/backup-markdown.sh
Normal file
@@ -0,0 +1,20 @@
|
||||
#!/usr/bin/env bash
# Archive the markdown posts directory into a timestamped tarball,
# maintain a rolling "latest.tar.gz" symlink, and delete archives older
# than the retention window.
#
# Tunables (environment):
#   MARKDOWN_SOURCE_DIR  directory to archive   (default ./backend/content/posts)
#   BACKUP_DIR           archive destination    (default ./backups/markdown)
#   RETENTION_DAYS       days to keep archives  (default 30)
set -euo pipefail

SOURCE_DIR="${MARKDOWN_SOURCE_DIR:-./backend/content/posts}"
BACKUP_DIR="${BACKUP_DIR:-./backups/markdown}"
RETENTION_DAYS="${RETENTION_DAYS:-30}"
# UTC timestamp keeps archive names sortable and timezone-independent.
TIMESTAMP="$(date -u +%Y%m%dT%H%M%SZ)"
FILE_PATH="${BACKUP_DIR}/markdown-${TIMESTAMP}.tar.gz"

if [[ ! -d "${SOURCE_DIR}" ]]; then
  echo "Markdown source directory not found: ${SOURCE_DIR}" >&2
  exit 1
fi

mkdir -p "${BACKUP_DIR}"
# -C archives the directory contents (not the directory itself).
tar -czf "${FILE_PATH}" -C "${SOURCE_DIR}" .
# Relative symlink target so the backup directory stays relocatable.
ln -sfn "$(basename "${FILE_PATH}")" "${BACKUP_DIR}/latest.tar.gz"
find "${BACKUP_DIR}" -type f -name 'markdown-*.tar.gz' -mtime +"${RETENTION_DAYS}" -delete

echo "Markdown backup written to ${FILE_PATH}"
|
||||
31
deploy/scripts/backup/backup-media.sh
Normal file
31
deploy/scripts/backup/backup-media.sh
Normal file
@@ -0,0 +1,31 @@
|
||||
#!/usr/bin/env bash
# Back up media assets in one of two modes:
#   * MEDIA_S3_SOURCE set -> `aws s3 sync` the bucket into a timestamped
#     directory (keeps a "latest" symlink, prunes old directories);
#   * otherwise           -> tar MEDIA_SOURCE_DIR into a timestamped
#     .tar.gz (keeps a "latest.tar.gz" symlink, prunes old archives).
#
# Tunables (environment):
#   BACKUP_DIR        destination root       (default ./backups/media)
#   RETENTION_DAYS    days to keep backups   (default 14)
#   MEDIA_S3_SOURCE   s3://... source bucket (optional; enables S3 mode)
#   MEDIA_SOURCE_DIR  local media directory  (default ./uploads)
#   AWS_EXTRA_ARGS    extra aws-cli args     (optional)
set -euo pipefail

BACKUP_DIR="${BACKUP_DIR:-./backups/media}"
RETENTION_DAYS="${RETENTION_DAYS:-14}"
TIMESTAMP="$(date -u +%Y%m%dT%H%M%SZ)"
mkdir -p "${BACKUP_DIR}"

if [[ -n "${MEDIA_S3_SOURCE:-}" ]]; then
  TARGET_DIR="${BACKUP_DIR}/media-${TIMESTAMP}"
  mkdir -p "${TARGET_DIR}"
  # AWS_EXTRA_ARGS is intentionally left unquoted so it word-splits into
  # multiple CLI arguments.
  aws s3 sync "${MEDIA_S3_SOURCE}" "${TARGET_DIR}" ${AWS_EXTRA_ARGS:-}
  ln -sfn "$(basename "${TARGET_DIR}")" "${BACKUP_DIR}/latest"
  # Directory-per-backup layout: prune whole first-level directories.
  find "${BACKUP_DIR}" -maxdepth 1 -mindepth 1 -type d -name 'media-*' -mtime +"${RETENTION_DAYS}" -exec rm -rf {} +
  echo "Media backup synced from ${MEDIA_S3_SOURCE} to ${TARGET_DIR}"
  exit 0
fi

SOURCE_DIR="${MEDIA_SOURCE_DIR:-./uploads}"
FILE_PATH="${BACKUP_DIR}/media-${TIMESTAMP}.tar.gz"

if [[ ! -d "${SOURCE_DIR}" ]]; then
  echo "Set MEDIA_SOURCE_DIR or MEDIA_S3_SOURCE before running this script" >&2
  exit 1
fi

tar -czf "${FILE_PATH}" -C "${SOURCE_DIR}" .
ln -sfn "$(basename "${FILE_PATH}")" "${BACKUP_DIR}/latest.tar.gz"
find "${BACKUP_DIR}" -type f -name 'media-*.tar.gz' -mtime +"${RETENTION_DAYS}" -delete

echo "Media backup written to ${FILE_PATH}"
|
||||
19
deploy/scripts/backup/backup-postgres.sh
Normal file
19
deploy/scripts/backup/backup-postgres.sh
Normal file
@@ -0,0 +1,19 @@
|
||||
#!/usr/bin/env bash
# Dump the Postgres database (custom format) into a timestamped file,
# maintain a "latest.dump" symlink, and prune dumps past retention.
#
# Tunables (environment):
#   DATABASE_URL    connection string     (required)
#   BACKUP_DIR      destination directory (default ./backups/postgres)
#   RETENTION_DAYS  days to keep dumps    (default 14)
set -euo pipefail

BACKUP_DIR="${BACKUP_DIR:-./backups/postgres}"
RETENTION_DAYS="${RETENTION_DAYS:-14}"
TIMESTAMP="$(date -u +%Y%m%dT%H%M%SZ)"
FILE_PATH="${BACKUP_DIR}/postgres-${TIMESTAMP}.dump"

if [[ -z "${DATABASE_URL:-}" ]]; then
  echo "DATABASE_URL is required" >&2
  exit 1
fi

mkdir -p "${BACKUP_DIR}"
# Custom format so restore-postgres.sh can use pg_restore's selective flags.
pg_dump --format=custom --file="${FILE_PATH}" "${DATABASE_URL}"
ln -sfn "$(basename "${FILE_PATH}")" "${BACKUP_DIR}/latest.dump"
find "${BACKUP_DIR}" -type f -name 'postgres-*.dump' -mtime +"${RETENTION_DAYS}" -delete

echo "Postgres backup written to ${FILE_PATH}"
|
||||
49
deploy/scripts/backup/prune-backups.sh
Normal file
49
deploy/scripts/backup/prune-backups.sh
Normal file
@@ -0,0 +1,49 @@
|
||||
#!/usr/bin/env bash
# Delete local backups older than their per-type retention windows.
# Covers postgres dumps, markdown tarballs, and media backups in both the
# tarball and directory-per-backup layouts.
#
# Tunables (environment):
#   BACKUP_ROOT              backups root          (default ./backups)
#   POSTGRES_RETENTION_DAYS  days to keep pg dumps (default 14)
#   MARKDOWN_RETENTION_DAYS  days to keep markdown (default 30)
#   MEDIA_RETENTION_DAYS     days to keep media    (default 14)
#   DRY_RUN                  "true" = print candidates, delete nothing
set -euo pipefail

BACKUP_ROOT="${BACKUP_ROOT:-./backups}"
POSTGRES_RETENTION_DAYS="${POSTGRES_RETENTION_DAYS:-14}"
MARKDOWN_RETENTION_DAYS="${MARKDOWN_RETENTION_DAYS:-30}"
MEDIA_RETENTION_DAYS="${MEDIA_RETENTION_DAYS:-14}"
DRY_RUN="${DRY_RUN:-false}"

# prune <dir> <glob> <days>
# Delete files matching <glob> older than <days> under <dir>.
# Missing directories are a no-op; DRY_RUN=true only prints matches.
prune() {
  local target_dir="$1"
  local pattern="$2"
  local retention_days="$3"

  if [[ ! -d "${target_dir}" ]]; then
    return 0
  fi

  if [[ "${DRY_RUN}" == "true" ]]; then
    find "${target_dir}" -type f -name "${pattern}" -mtime +"${retention_days}" -print
    return 0
  fi

  find "${target_dir}" -type f -name "${pattern}" -mtime +"${retention_days}" -delete
}

# prune_dirs <dir> <glob> <days>
# Same as prune, but removes whole first-level directories (used for the
# S3-synced media backups, which are directories rather than tarballs).
prune_dirs() {
  local target_dir="$1"
  local pattern="$2"
  local retention_days="$3"

  if [[ ! -d "${target_dir}" ]]; then
    return 0
  fi

  if [[ "${DRY_RUN}" == "true" ]]; then
    find "${target_dir}" -maxdepth 1 -mindepth 1 -type d -name "${pattern}" -mtime +"${retention_days}" -print
    return 0
  fi

  find "${target_dir}" -maxdepth 1 -mindepth 1 -type d -name "${pattern}" -mtime +"${retention_days}" -exec rm -rf {} +
}

prune "${BACKUP_ROOT}/postgres" 'postgres-*.dump' "${POSTGRES_RETENTION_DAYS}"
prune "${BACKUP_ROOT}/markdown" 'markdown-*.tar.gz' "${MARKDOWN_RETENTION_DAYS}"
prune "${BACKUP_ROOT}/media" 'media-*.tar.gz' "${MEDIA_RETENTION_DAYS}"
prune_dirs "${BACKUP_ROOT}/media" 'media-*' "${MEDIA_RETENTION_DAYS}"

echo "Backup pruning completed under ${BACKUP_ROOT}"
|
||||
20
deploy/scripts/backup/restore-markdown.sh
Normal file
20
deploy/scripts/backup/restore-markdown.sh
Normal file
@@ -0,0 +1,20 @@
|
||||
#!/usr/bin/env bash
# Restore the markdown posts directory from a tarball produced by
# backup-markdown.sh: the target directory is emptied, then the archive
# is unpacked into it.
#
# Usage: restore-markdown.sh <backup-file.tar.gz>
# Tunables (environment):
#   MARKDOWN_TARGET_DIR  directory to restore into (default ./backend/content/posts)
set -euo pipefail

if [[ $# -lt 1 ]]; then
  echo "Usage: $0 <backup-file.tar.gz>" >&2
  exit 1
fi

TARGET_DIR="${MARKDOWN_TARGET_DIR:-./backend/content/posts}"
BACKUP_FILE="$1"

if [[ ! -f "${BACKUP_FILE}" ]]; then
  echo "Backup file not found: ${BACKUP_FILE}" >&2
  exit 1
fi

mkdir -p "${TARGET_DIR}"
# Use find instead of `rm -rf "${TARGET_DIR}"/*`: when the directory is
# empty the unmatched glob is passed literally to rm, which fails and
# aborts the script under `set -e`; the glob form also skips dotfiles.
find "${TARGET_DIR}" -mindepth 1 -delete
tar -xzf "${BACKUP_FILE}" -C "${TARGET_DIR}"
echo "Markdown restore completed into ${TARGET_DIR}"
|
||||
30
deploy/scripts/backup/restore-media.sh
Normal file
30
deploy/scripts/backup/restore-media.sh
Normal file
@@ -0,0 +1,30 @@
|
||||
#!/usr/bin/env bash
# Restore media assets from a backup produced by backup-media.sh:
#   * directory argument -> sync it back to MEDIA_S3_TARGET via aws s3 sync
#   * file argument      -> unpack the tarball into MEDIA_TARGET_DIR
#
# Usage: restore-media.sh <backup-file-or-directory>
# Tunables (environment):
#   MEDIA_S3_TARGET   s3://... destination (required for directory restores)
#   MEDIA_TARGET_DIR  local restore target (default ./uploads)
#   AWS_EXTRA_ARGS    extra aws-cli args   (optional, word-split)
set -euo pipefail

if [[ $# -lt 1 ]]; then
  echo "Usage: $0 <backup-file-or-directory>" >&2
  exit 1
fi

SOURCE="$1"

if [[ -d "${SOURCE}" ]]; then
  if [[ -z "${MEDIA_S3_TARGET:-}" ]]; then
    echo "MEDIA_S3_TARGET is required when restoring from a synced directory backup" >&2
    exit 1
  fi
  # AWS_EXTRA_ARGS is intentionally unquoted so it word-splits.
  aws s3 sync "${SOURCE}" "${MEDIA_S3_TARGET}" ${AWS_EXTRA_ARGS:-}
  echo "Media restore synced to ${MEDIA_S3_TARGET}"
  exit 0
fi

TARGET_DIR="${MEDIA_TARGET_DIR:-./uploads}"
if [[ ! -f "${SOURCE}" ]]; then
  echo "Backup source not found: ${SOURCE}" >&2
  exit 1
fi

mkdir -p "${TARGET_DIR}"
# Use find instead of `rm -rf "${TARGET_DIR}"/*`: when the directory is
# empty the unmatched glob is passed literally to rm, which fails and
# aborts the script under `set -e`; the glob form also skips dotfiles.
find "${TARGET_DIR}" -mindepth 1 -delete
tar -xzf "${SOURCE}" -C "${TARGET_DIR}"
echo "Media restore completed into ${TARGET_DIR}"
|
||||
21
deploy/scripts/backup/restore-postgres.sh
Normal file
21
deploy/scripts/backup/restore-postgres.sh
Normal file
@@ -0,0 +1,21 @@
|
||||
#!/usr/bin/env bash
# Restore the Postgres database from a custom-format dump produced by
# backup-postgres.sh.
#
# Usage: restore-postgres.sh <backup-file.dump>
# Requires DATABASE_URL in the environment.
set -euo pipefail

if [[ $# -lt 1 ]]; then
  echo "Usage: $0 <backup-file.dump>" >&2
  exit 1
fi

if [[ -z "${DATABASE_URL:-}" ]]; then
  echo "DATABASE_URL is required" >&2
  exit 1
fi

BACKUP_FILE="$1"
if [[ ! -f "${BACKUP_FILE}" ]]; then
  echo "Backup file not found: ${BACKUP_FILE}" >&2
  exit 1
fi

# --clean --if-exists drops existing objects before recreating them;
# --no-owner --no-privileges lets the restore run under a different role.
pg_restore --clean --if-exists --no-owner --no-privileges --dbname="${DATABASE_URL}" "${BACKUP_FILE}"
echo "Postgres restore completed from ${BACKUP_FILE}"
|
||||
26
deploy/scripts/backup/sync-backups-offsite.sh
Normal file
26
deploy/scripts/backup/sync-backups-offsite.sh
Normal file
@@ -0,0 +1,26 @@
|
||||
#!/usr/bin/env bash
# Mirror the local backup tree to an offsite target: either an S3 bucket
# (s3:// prefix -> aws s3 sync) or an rsync-reachable path.
#
# Tunables (environment):
#   BACKUP_ROOT       local backup tree         (default ./backups)
#   OFFSITE_TARGET    s3://bucket or rsync path (required)
#   AWS_EXTRA_ARGS    extra aws-cli args        (optional, word-split)
#   RSYNC_EXTRA_ARGS  extra rsync args          (optional, word-split)
set -euo pipefail

BACKUP_ROOT="${BACKUP_ROOT:-./backups}"
OFFSITE_TARGET="${OFFSITE_TARGET:-}"
AWS_EXTRA_ARGS="${AWS_EXTRA_ARGS:-}"
RSYNC_EXTRA_ARGS="${RSYNC_EXTRA_ARGS:-}"

if [[ -z "${OFFSITE_TARGET}" ]]; then
  echo "OFFSITE_TARGET is required (rsync path or s3:// bucket)" >&2
  exit 1
fi

if [[ ! -d "${BACKUP_ROOT}" ]]; then
  echo "Backup root not found: ${BACKUP_ROOT}" >&2
  exit 1
fi

if [[ "${OFFSITE_TARGET}" == s3://* ]]; then
  # Extra args intentionally unquoted so they word-split into CLI args.
  aws s3 sync "${BACKUP_ROOT}" "${OFFSITE_TARGET}" ${AWS_EXTRA_ARGS}
  echo "Backups synced to ${OFFSITE_TARGET}"
  exit 0
fi

# --delete keeps the offsite copy an exact mirror of the local tree.
rsync -av --delete ${RSYNC_EXTRA_ARGS} "${BACKUP_ROOT}/" "${OFFSITE_TARGET}/"
echo "Backups synced to ${OFFSITE_TARGET}"
|
||||
17
deploy/scripts/backup/verify-restore.sh
Normal file
17
deploy/scripts/backup/verify-restore.sh
Normal file
@@ -0,0 +1,17 @@
|
||||
#!/usr/bin/env bash
# Restore rehearsal: run all three restore scripts against a given set of
# backups, then remind the operator to verify the restored site manually.
#
# Required environment:
#   DATABASE_URL     target database for the postgres restore
#   POSTGRES_BACKUP  path to a postgres .dump file
#   MARKDOWN_BACKUP  path to a markdown .tar.gz file
#   MEDIA_BACKUP     path to a media backup (tarball or directory)
# Optional overrides:
#   POSTGRES_RESTORE_CMD / MARKDOWN_RESTORE_CMD / MEDIA_RESTORE_CMD
set -euo pipefail

# Fail fast with a clear message when a required variable is unset/empty.
: "${DATABASE_URL:?DATABASE_URL is required}"
: "${POSTGRES_BACKUP:?POSTGRES_BACKUP is required}"
: "${MARKDOWN_BACKUP:?MARKDOWN_BACKUP is required}"
: "${MEDIA_BACKUP:?MEDIA_BACKUP is required}"

POSTGRES_RESTORE_CMD="${POSTGRES_RESTORE_CMD:-./deploy/scripts/backup/restore-postgres.sh}"
MARKDOWN_RESTORE_CMD="${MARKDOWN_RESTORE_CMD:-./deploy/scripts/backup/restore-markdown.sh}"
MEDIA_RESTORE_CMD="${MEDIA_RESTORE_CMD:-./deploy/scripts/backup/restore-media.sh}"

"${POSTGRES_RESTORE_CMD}" "${POSTGRES_BACKUP}"
"${MARKDOWN_RESTORE_CMD}" "${MARKDOWN_BACKUP}"
"${MEDIA_RESTORE_CMD}" "${MEDIA_BACKUP}"

echo "Restore rehearsal completed. Please verify homepage, article detail, media assets, admin login, revisions, audit logs, and subscriptions manually."
|
||||
122
deploy/scripts/render_compose_env.py
Normal file
122
deploy/scripts/render_compose_env.py
Normal file
@@ -0,0 +1,122 @@
|
||||
#!/usr/bin/env python3
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
try:
|
||||
import yaml
|
||||
except ImportError as exc: # pragma: no cover
|
||||
raise SystemExit(
|
||||
"Missing dependency: PyYAML. Install it with `python -m pip install pyyaml`."
|
||||
) from exc
|
||||
|
||||
|
||||
def parse_args() -> argparse.Namespace:
    """Parse CLI options for the compose .env renderer.

    Returns:
        argparse.Namespace with ``input``, ``output``, ``section`` and
        ``stdout`` attributes.
    """
    parser = argparse.ArgumentParser(
        description="Render docker compose .env from deploy config.yaml"
    )
    # (flag, keyword-arguments) pairs keep the option table compact.
    flag_specs = (
        (
            "--input",
            dict(
                default="deploy/docker/config.yaml",
                help="Path to config.yaml (default: deploy/docker/config.yaml)",
            ),
        ),
        (
            "--output",
            dict(
                default="deploy/docker/.env",
                help="Output dotenv file path (default: deploy/docker/.env)",
            ),
        ),
        (
            "--section",
            dict(
                default="compose_env",
                help="Top-level mapping section to export (default: compose_env)",
            ),
        ),
        (
            "--stdout",
            dict(
                action="store_true",
                help="Print rendered dotenv to stdout instead of writing file",
            ),
        ),
    )
    for flag, kwargs in flag_specs:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
|
||||
|
||||
|
||||
def load_config(path: Path) -> dict[str, Any]:
    """Read and validate the YAML config file at *path*.

    Raises:
        SystemExit: when the file does not exist or its top level is not
            a mapping.
    """
    if not path.exists():
        raise SystemExit(f"Config file not found: {path}")

    parsed = yaml.safe_load(path.read_text(encoding="utf-8"))
    if not isinstance(parsed, dict):
        raise SystemExit("config.yaml root must be a mapping/object")
    return parsed
|
||||
|
||||
|
||||
def encode_env_value(value: Any) -> str:
    """Serialize one scalar config value into dotenv syntax.

    Rules:
      * ``None`` and empty strings become an explicit quoted empty string.
      * Booleans become lowercase ``true``/``false``.
      * Numbers are emitted verbatim via ``str``.
      * Strings are double-quoted (with backslash escapes) only when they
        contain whitespace/quote/comment characters or start with ``$``.

    Raises:
        SystemExit: for any non-scalar value.
    """
    if value is None:
        return '""'
    # bool must be checked before int/float: bool is an int subclass.
    if isinstance(value, bool):
        return "true" if value else "false"
    if isinstance(value, (int, float)):
        return str(value)
    if not isinstance(value, str):
        raise SystemExit(f"compose_env only supports scalar values, got: {type(value).__name__}")

    if value == "":
        return '""'

    special = ' #"\'\t\n\r'
    if value.startswith('$') or any(ch in special for ch in value):
        # Per-character translation; equivalent to chained replaces done
        # backslash-first.
        escapes = {'\\': '\\\\', '"': '\\"', '\n': '\\n', '\r': '\\r', '\t': '\\t'}
        escaped = ''.join(escapes.get(ch, ch) for ch in value)
        return f'"{escaped}"'
    return value
|
||||
|
||||
|
||||
def render_env(section_name: str, values: dict[str, Any], source_path: Path) -> str:
    """Render a dotenv document from *values*.

    Emits a two-line provenance header, a blank line, one ``KEY=VALUE``
    line per entry (values encoded via ``encode_env_value``), and a
    trailing newline.

    Raises:
        SystemExit: for any key that is not a non-empty string.
    """
    header = (
        f"# Generated from {source_path.as_posix()}::{section_name}",
        "# Do not edit this file directly; edit config.yaml and re-render.",
        "",
    )

    entries = []
    for key, value in values.items():
        if not isinstance(key, str) or not key:
            raise SystemExit(f"Invalid env key: {key!r}")
        entries.append(f"{key}={encode_env_value(value)}")

    return "\n".join([*header, *entries, ""])
|
||||
|
||||
|
||||
def main() -> int:
    """CLI entry point: load the config, render the selected section, and
    write the dotenv file (or print it with ``--stdout``).

    Returns:
        Process exit code (0 on success; errors raise SystemExit).
    """
    args = parse_args()
    config_path = Path(args.input)
    env_path = Path(args.output)

    config = load_config(config_path)
    values = config.get(args.section)
    if not isinstance(values, dict):
        raise SystemExit(
            f"Section `{args.section}` must exist in config.yaml and must be a mapping/object"
        )

    rendered = render_env(args.section, values, config_path)

    if args.stdout:
        sys.stdout.write(rendered)
        return 0

    # Ensure the destination directory exists, then write with a fixed
    # newline so the .env file is identical across platforms.
    env_path.parent.mkdir(parents=True, exist_ok=True)
    env_path.write_text(rendered, encoding="utf-8", newline="\n")
    print(f"Wrote {env_path}")
    return 0
|
||||
|
||||
|
||||
# Script entry point: propagate main()'s return code as the exit status.
if __name__ == "__main__":
    raise SystemExit(main())
|
||||
190
deploy/systemd/GO_LIVE_CHECKLIST.md
Normal file
190
deploy/systemd/GO_LIVE_CHECKLIST.md
Normal file
@@ -0,0 +1,190 @@
|
||||
# 通知 / digest / systemd 最终上线清单
|
||||
|
||||
这份清单按“上线前 -> 首次启动 -> 验证 -> 定时任务”来执行。
|
||||
|
||||
## A. 上线前参数确认
|
||||
|
||||
### 1. backend / compose
|
||||
确认 `deploy/docker/config.yaml` 至少已经填写:
|
||||
|
||||
- `compose_env.DATABASE_URL`
|
||||
- `compose_env.REDIS_URL`
|
||||
- `compose_env.JWT_SECRET`
|
||||
- `compose_env.APP_BASE_URL`
|
||||
- `compose_env.PUBLIC_API_BASE_URL`
|
||||
- `compose_env.ADMIN_API_BASE_URL`
|
||||
- `compose_env.ADMIN_FRONTEND_BASE_URL`
|
||||
- `compose_env.TERMI_ADMIN_TRUST_PROXY_AUTH=true`
|
||||
- `compose_env.TERMI_ADMIN_LOCAL_LOGIN_ENABLED=false`
|
||||
- `compose_env.TERMI_ADMIN_PROXY_SHARED_SECRET`
|
||||
|
||||
### 2. SMTP
|
||||
订阅 double opt-in 和邮件通知需要:
|
||||
|
||||
- `compose_env.SMTP_ENABLE=true`
|
||||
- `compose_env.SMTP_HOST`
|
||||
- `compose_env.SMTP_PORT`
|
||||
- `compose_env.SMTP_SECURE`
|
||||
- `compose_env.SMTP_USER`
|
||||
- `compose_env.SMTP_PASSWORD`
|
||||
- `compose_env.SMTP_HELLO_NAME`
|
||||
|
||||
### 3. Caddy / TinyAuth / Pocket ID
|
||||
确认宿主机 Caddy 已经:
|
||||
|
||||
- `blog.init.cool` -> `127.0.0.1:4321`
|
||||
- `admin.blog.init.cool` -> `127.0.0.1:4322`
|
||||
- `admin.blog.init.cool/api/*` -> `127.0.0.1:5150`
|
||||
- `admin.blog.init.cool` 整体挂了 `import tinyauth`
|
||||
- `/api/*` 转发时附带:
|
||||
- `X-Termi-Proxy-Secret: {$TERMI_ADMIN_PROXY_SHARED_SECRET}`
|
||||
|
||||
可直接参考:
|
||||
|
||||
- `deploy/caddy/Caddyfile.tohka.production.example`
|
||||
|
||||
## B. 首次启动
|
||||
|
||||
推荐命令:
|
||||
|
||||
```bash
|
||||
python deploy/scripts/render_compose_env.py \
|
||||
--input deploy/docker/config.yaml \
|
||||
--output deploy/docker/.env
|
||||
|
||||
docker compose \
|
||||
-f deploy/docker/compose.package.yml \
|
||||
-f deploy/docker/compose.tohka.override.yml \
|
||||
--env-file deploy/docker/.env up -d
|
||||
```
|
||||
|
||||
然后确认容器状态:
|
||||
|
||||
```bash
|
||||
docker compose \
|
||||
-f deploy/docker/compose.package.yml \
|
||||
-f deploy/docker/compose.tohka.override.yml \
|
||||
--env-file deploy/docker/.env ps
|
||||
```
|
||||
|
||||
应该至少看到:
|
||||
|
||||
- `backend`
|
||||
- `backend-worker`
|
||||
- `frontend`
|
||||
- `admin`
|
||||
|
||||
## C. 首次验证
|
||||
|
||||
### 1. 健康检查
|
||||
|
||||
```bash
|
||||
curl -I http://127.0.0.1:5150/healthz
|
||||
curl -I http://127.0.0.1:4321/healthz
|
||||
curl -I http://127.0.0.1:4322/healthz
|
||||
```
|
||||
|
||||
### 2. 后台 SSO
|
||||
|
||||
打开:
|
||||
|
||||
- `https://admin.blog.init.cool`
|
||||
|
||||
确认:
|
||||
|
||||
- 可以被 TinyAuth / Pocket ID 正常拦截与登录
|
||||
- 登录后后台可进入
|
||||
- 会话信息显示为代理登录(不是本地账号密码)
|
||||
|
||||
### 3. 订阅链路
|
||||
|
||||
至少做一次:
|
||||
|
||||
1. 前台提交邮箱订阅
|
||||
2. 收到确认邮件
|
||||
3. 点击确认链接
|
||||
4. 能打开偏好页
|
||||
5. 偏好页可暂停 / 恢复 / 退订
|
||||
|
||||
### 4. ntfy / webhook / digest
|
||||
|
||||
后台里至少验证一次:
|
||||
|
||||
- 手动发送测试通知
|
||||
- 手动发送周报
|
||||
- 手动发送月报
|
||||
- 查看 delivery 是否从 `queued` 变成 `sent`
|
||||
|
||||
如果 delivery 一直卡在 `queued`:
|
||||
|
||||
- 优先检查 `backend-worker` 是否在运行
|
||||
- 检查 `REDIS_URL`
|
||||
- 检查 SMTP / ntfy / webhook 目标
|
||||
|
||||
## D. 安装 systemd timers
|
||||
|
||||
```bash
|
||||
sudo cp deploy/systemd/*.service /etc/systemd/system/
|
||||
sudo cp deploy/systemd/*.timer /etc/systemd/system/
|
||||
sudo systemctl daemon-reload
|
||||
```
|
||||
|
||||
启用:
|
||||
|
||||
```bash
|
||||
sudo systemctl enable --now termi-retry-deliveries.timer
|
||||
sudo systemctl enable --now termi-weekly-digest.timer
|
||||
sudo systemctl enable --now termi-monthly-digest.timer
|
||||
sudo systemctl enable --now termi-backup-all.timer
|
||||
sudo systemctl enable --now termi-backup-prune.timer
|
||||
sudo systemctl enable --now termi-backup-offsite-sync.timer
|
||||
```
|
||||
|
||||
查看状态:
|
||||
|
||||
```bash
|
||||
systemctl list-timers --all | grep termi
|
||||
```
|
||||
|
||||
## E. 当前默认调度时间
|
||||
|
||||
### 通知 / digest
|
||||
|
||||
- `termi-retry-deliveries.timer`
|
||||
- 每 5 分钟执行一次
|
||||
- `termi-weekly-digest.timer`
|
||||
- 每周一 09:00
|
||||
- `termi-monthly-digest.timer`
|
||||
- 每月 1 日 09:30
|
||||
|
||||
### 备份
|
||||
|
||||
- `termi-backup-all.timer`
|
||||
- 每天 03:10
|
||||
- `termi-backup-prune.timer`
|
||||
- 每天 04:15
|
||||
- `termi-backup-offsite-sync.timer`
|
||||
- 每天 04:40
|
||||
|
||||
如果时区不是你们想要的,改 `.timer` 里的 `OnCalendar=` 后重新:
|
||||
|
||||
```bash
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl restart termi-retry-deliveries.timer
|
||||
sudo systemctl restart termi-weekly-digest.timer
|
||||
sudo systemctl restart termi-monthly-digest.timer
|
||||
sudo systemctl restart termi-backup-all.timer
|
||||
sudo systemctl restart termi-backup-prune.timer
|
||||
sudo systemctl restart termi-backup-offsite-sync.timer
|
||||
```
|
||||
|
||||
## F. 上线后一周内建议额外确认
|
||||
|
||||
- [ ] 有真实订阅确认邮件成功送达
|
||||
- [ ] 有真实 ntfy / webhook 通知成功送达
|
||||
- [ ] 至少有一条 digest 成功发出
|
||||
- [ ] retry timer 能把失败投递重新入队
|
||||
- [ ] 备份目录持续产生新文件
|
||||
- [ ] prune 正常清理旧备份
|
||||
- [ ] offsite sync 确实有异地副本
|
||||
- [ ] 至少做过一次恢复演练
|
||||
27
deploy/systemd/README.md
Normal file
27
deploy/systemd/README.md
Normal file
@@ -0,0 +1,27 @@
|
||||
# systemd timer 模板
|
||||
|
||||
这些模板默认假设:
|
||||
|
||||
- 仓库部署路径:`/opt/termi-astro`
|
||||
- 使用 `docker compose -f deploy/docker/compose.package.yml --env-file deploy/docker/.env`
|
||||
- backend / backend-worker 容器已长期运行
|
||||
|
||||
启用方式示例:
|
||||
|
||||
```bash
|
||||
sudo cp deploy/systemd/*.service /etc/systemd/system/
|
||||
sudo cp deploy/systemd/*.timer /etc/systemd/system/
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl enable --now termi-backup-all.timer
|
||||
sudo systemctl enable --now termi-backup-prune.timer
|
||||
sudo systemctl enable --now termi-backup-offsite-sync.timer
|
||||
sudo systemctl enable --now termi-retry-deliveries.timer
|
||||
sudo systemctl enable --now termi-weekly-digest.timer
|
||||
sudo systemctl enable --now termi-monthly-digest.timer
|
||||
```
|
||||
|
||||
如果你们不使用 systemd,也可以直接参考同目录命令改成 cron。
|
||||
|
||||
最终上线前建议再对照:
|
||||
|
||||
- `deploy/systemd/GO_LIVE_CHECKLIST.md`
|
||||
9
deploy/systemd/termi-backup-all.service
Normal file
9
deploy/systemd/termi-backup-all.service
Normal file
@@ -0,0 +1,9 @@
|
||||
[Unit]
|
||||
Description=Termi backup all data
|
||||
After=network-online.target docker.service
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
WorkingDirectory=/opt/termi-astro
|
||||
ExecStart=/usr/bin/env bash ./deploy/scripts/backup/backup-all.sh
|
||||
10
deploy/systemd/termi-backup-all.timer
Normal file
10
deploy/systemd/termi-backup-all.timer
Normal file
@@ -0,0 +1,10 @@
|
||||
[Unit]
|
||||
Description=Run Termi full backup nightly
|
||||
|
||||
[Timer]
|
||||
OnCalendar=*-*-* 03:10:00
|
||||
Persistent=true
|
||||
Unit=termi-backup-all.service
|
||||
|
||||
[Install]
|
||||
WantedBy=timers.target
|
||||
10
deploy/systemd/termi-backup-offsite-sync.service
Normal file
10
deploy/systemd/termi-backup-offsite-sync.service
Normal file
@@ -0,0 +1,10 @@
|
||||
[Unit]
|
||||
Description=Termi sync backups offsite
|
||||
After=network-online.target docker.service
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
WorkingDirectory=/opt/termi-astro
|
||||
Environment=OFFSITE_TARGET=/mnt/offsite/termi-astro-backups
|
||||
ExecStart=/usr/bin/env bash ./deploy/scripts/backup/sync-backups-offsite.sh
|
||||
10
deploy/systemd/termi-backup-offsite-sync.timer
Normal file
10
deploy/systemd/termi-backup-offsite-sync.timer
Normal file
@@ -0,0 +1,10 @@
|
||||
[Unit]
|
||||
Description=Sync Termi backups offsite every morning
|
||||
|
||||
[Timer]
|
||||
OnCalendar=*-*-* 04:40:00
|
||||
Persistent=true
|
||||
Unit=termi-backup-offsite-sync.service
|
||||
|
||||
[Install]
|
||||
WantedBy=timers.target
|
||||
8
deploy/systemd/termi-backup-prune.service
Normal file
8
deploy/systemd/termi-backup-prune.service
Normal file
@@ -0,0 +1,8 @@
|
||||
[Unit]
|
||||
Description=Termi prune local backups
|
||||
After=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
WorkingDirectory=/opt/termi-astro
|
||||
ExecStart=/usr/bin/env bash ./deploy/scripts/backup/prune-backups.sh
|
||||
10
deploy/systemd/termi-backup-prune.timer
Normal file
10
deploy/systemd/termi-backup-prune.timer
Normal file
@@ -0,0 +1,10 @@
|
||||
[Unit]
|
||||
Description=Prune old Termi backups daily
|
||||
|
||||
[Timer]
|
||||
OnCalendar=*-*-* 04:15:00
|
||||
Persistent=true
|
||||
Unit=termi-backup-prune.service
|
||||
|
||||
[Install]
|
||||
WantedBy=timers.target
|
||||
9
deploy/systemd/termi-monthly-digest.service
Normal file
9
deploy/systemd/termi-monthly-digest.service
Normal file
@@ -0,0 +1,9 @@
|
||||
[Unit]
|
||||
Description=Termi monthly digest dispatcher
|
||||
After=docker.service network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
WorkingDirectory=/opt/termi-astro
|
||||
ExecStart=/usr/bin/docker compose -f deploy/docker/compose.package.yml --env-file deploy/docker/.env exec -T backend termi_api-cli -e production task send_monthly_digest
|
||||
10
deploy/systemd/termi-monthly-digest.timer
Normal file
10
deploy/systemd/termi-monthly-digest.timer
Normal file
@@ -0,0 +1,10 @@
|
||||
[Unit]
|
||||
Description=Run monthly digest on day 1
|
||||
|
||||
[Timer]
|
||||
OnCalendar=*-*-01 09:30:00
|
||||
Persistent=true
|
||||
Unit=termi-monthly-digest.service
|
||||
|
||||
[Install]
|
||||
WantedBy=timers.target
|
||||
9
deploy/systemd/termi-retry-deliveries.service
Normal file
9
deploy/systemd/termi-retry-deliveries.service
Normal file
@@ -0,0 +1,9 @@
|
||||
[Unit]
|
||||
Description=Termi retry queued notification deliveries
|
||||
After=docker.service network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
WorkingDirectory=/opt/termi-astro
|
||||
ExecStart=/usr/bin/docker compose -f deploy/docker/compose.package.yml --env-file deploy/docker/.env exec -T backend termi_api-cli -e production task retry_deliveries limit:200
|
||||
10
deploy/systemd/termi-retry-deliveries.timer
Normal file
10
deploy/systemd/termi-retry-deliveries.timer
Normal file
@@ -0,0 +1,10 @@
|
||||
[Unit]
|
||||
Description=Retry notification deliveries every 5 minutes
|
||||
|
||||
[Timer]
|
||||
OnCalendar=*:0/5
|
||||
Persistent=true
|
||||
Unit=termi-retry-deliveries.service
|
||||
|
||||
[Install]
|
||||
WantedBy=timers.target
|
||||
9
deploy/systemd/termi-weekly-digest.service
Normal file
9
deploy/systemd/termi-weekly-digest.service
Normal file
@@ -0,0 +1,9 @@
|
||||
[Unit]
|
||||
Description=Termi weekly digest dispatcher
|
||||
After=docker.service network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
WorkingDirectory=/opt/termi-astro
|
||||
ExecStart=/usr/bin/docker compose -f deploy/docker/compose.package.yml --env-file deploy/docker/.env exec -T backend termi_api-cli -e production task send_weekly_digest
|
||||
10
deploy/systemd/termi-weekly-digest.timer
Normal file
10
deploy/systemd/termi-weekly-digest.timer
Normal file
@@ -0,0 +1,10 @@
|
||||
[Unit]
|
||||
Description=Run weekly digest every Monday morning
|
||||
|
||||
[Timer]
|
||||
OnCalendar=Mon *-*-* 09:00:00
|
||||
Persistent=true
|
||||
Unit=termi-weekly-digest.service
|
||||
|
||||
[Install]
|
||||
WantedBy=timers.target
|
||||
Reference in New Issue
Block a user