import { createServer } from 'node:http'
import { randomUUID } from 'node:crypto'
// Port and origins are env-overridable so CI can relocate the three servers.
const PORT = Number(process.env.PLAYWRIGHT_MOCK_PORT || 5159)
const FRONTEND_ORIGIN =
  process.env.PLAYWRIGHT_FRONTEND_ORIGIN || 'http://127.0.0.1:4321'
const ADMIN_ORIGIN =
  process.env.PLAYWRIGHT_ADMIN_ORIGIN || 'http://127.0.0.1:4322'
const MOCK_ORIGIN = `http://127.0.0.1:${PORT}`
// Fixed admin session cookie name/value accepted by the mock auth checks.
const SESSION_COOKIE = 'termi_admin_session'
const SESSION_VALUE = 'mock-admin-session'
// Response content-type shorthands used by the json/text helpers.
const CONTENT_TYPES = {
  json: 'application/json; charset=utf-8',
  text: 'text/plain; charset=utf-8',
  sse: 'text/event-stream; charset=utf-8',
  svg: 'image/svg+xml; charset=utf-8',
}
// Credentials accepted by the mock login endpoint.
const VALID_LOGIN = {
  username: 'admin',
  password: 'admin123',
}
// Fixed "now" instant so all seeded timestamps are deterministic across runs.
const BASE_TS = Date.parse('2026-04-01T09:00:00.000Z')
// Seed categories for the public taxonomy pages and admin category CRUD.
const categoryCatalog = [
  {
    name: '前端工程',
    slug: 'frontend-engineering',
    description: '围绕 Astro、Svelte、设计系统与终端风格交互的实现记录。',
    cover_image: `${MOCK_ORIGIN}/media-files/category-frontend.svg`,
    accent_color: '#2563eb',
    seo_title: '前端工程专题',
    seo_description: 'Astro / Svelte / 终端风格 UI 相关的实现与复盘。',
  },
  {
    name: '测试体系',
    slug: 'testing-systems',
    description: '记录 Playwright、CI、回归测试与稳定性治理的落地经验。',
    cover_image: `${MOCK_ORIGIN}/media-files/category-testing.svg`,
    accent_color: '#14b8a6',
    seo_title: '测试体系专题',
    seo_description: '端到端回归、CI 编排和测试工具链整理。',
  },
  {
    name: '运维值班',
    slug: 'operations-oncall',
    description: '备份、恢复、对象存储与上线清单等值班经验。',
    cover_image: `${MOCK_ORIGIN}/media-files/category-ops.svg`,
    accent_color: '#f97316',
    seo_title: '运维值班专题',
    seo_description: '备份、恢复、发布和值班操作的实践手册。',
  },
]
// Seed tags; posts below reference these by `name`.
const tagCatalog = [
  {
    name: 'Astro',
    slug: 'astro',
    description: 'Astro 内容站与 SSR 落地笔记。',
    cover_image: `${MOCK_ORIGIN}/media-files/tag-astro.svg`,
    accent_color: '#8b5cf6',
    seo_title: 'Astro 标签页',
    seo_description: 'Astro、组件、内容站工程实践。',
  },
  {
    name: 'Playwright',
    slug: 'playwright',
    description: 'E2E 回归与浏览器自动化。',
    cover_image: `${MOCK_ORIGIN}/media-files/tag-playwright.svg`,
    accent_color: '#10b981',
    seo_title: 'Playwright 标签页',
    seo_description: 'Playwright、回归测试与自动化流程。',
  },
  {
    name: 'Svelte',
    slug: 'svelte',
    description: 'Svelte 状态管理、组件组织与交互。',
    cover_image: `${MOCK_ORIGIN}/media-files/tag-svelte.svg`,
    accent_color: '#f97316',
    seo_title: 'Svelte 标签页',
    seo_description: 'Svelte 页面状态和组件模式记录。',
  },
  {
    name: 'AI',
    slug: 'ai',
    description: '站内问答、提示词和索引配置实验。',
    cover_image: `${MOCK_ORIGIN}/media-files/tag-ai.svg`,
    accent_color: '#ec4899',
    seo_title: 'AI 标签页',
    seo_description: 'AI 问答、提示词和检索增强内容。',
  },
  {
    name: 'Docker',
    slug: 'docker',
    description: 'Docker 编排、发布和值班脚本。',
    cover_image: `${MOCK_ORIGIN}/media-files/tag-docker.svg`,
    accent_color: '#0ea5e9',
    seo_title: 'Docker 标签页',
    seo_description: 'Docker 发布、运行时与值班记录。',
  },
  {
    name: 'CI',
    slug: 'ci',
    description: 'CI 与工作流编排。',
    cover_image: `${MOCK_ORIGIN}/media-files/tag-ci.svg`,
    accent_color: '#22c55e',
    seo_title: 'CI 标签页',
    seo_description: 'CI、工作流和自动化执行策略。',
  },
]
// Seed posts. Mix covers the states the tests exercise: a pinned article,
// a tweet-type post, a private draft, and a scheduled (not-yet-published)
// post. `paragraphs` are joined into markdown by makePostRecord.
const postCatalog = [
  {
    title: 'Astro 终端博客信息架构实战',
    slug: 'astro-terminal-blog',
    description: 'Termi 前台首页、过滤器、文章卡片和终端风格布局的整体拆解。',
    category: '前端工程',
    tags: ['Astro', 'Svelte', 'AI'],
    post_type: 'article',
    image: '/review-covers/the-long-season.svg',
    images: ['/review-covers/black-myth-wukong.svg', '/review-covers/thirteen-invites.svg'],
    pinned: true,
    status: 'published',
    visibility: 'public',
    created_at: '2026-03-28T09:00:00.000Z',
    updated_at: '2026-03-29T06:00:00.000Z',
    paragraphs: [
      'Termi 的首页不是纯展示页,而是把文章、评测、友链和订阅动作压进同一个终端式入口,确保用户第一次进入时就能感知站点的信息密度。',
      '为了让首页筛选足够顺滑,我们把文章类型、分类和标签都收敛到统一的状态模型里,任何切换都只改 URL 与最小可见集,而不是整页重新渲染。',
      '这一版改造也顺手补了阅读指标入口,让热门内容、完读率和阅读时长能在前台自然暴露,方便后续继续做推荐与 AI 问答。',
    ],
  },
  {
    title: 'Playwright 回归工作流设计',
    slug: 'playwright-regression-workflow',
    description: 'Termi 前后台共用一套 Playwright smoke 回归,覆盖 CI、mock server 和关键路径。',
    category: '测试体系',
    tags: ['Playwright', 'CI', 'Astro'],
    post_type: 'article',
    image: '/review-covers/placed-within.svg',
    images: [],
    pinned: false,
    status: 'published',
    visibility: 'public',
    created_at: '2026-03-20T10:30:00.000Z',
    updated_at: '2026-03-20T10:30:00.000Z',
    paragraphs: [
      'Playwright 套件会同时拉起前台、后台和 mock server,用独立的内存状态来模拟文章、评论、友链、评测和订阅流程。',
      '这样做的好处是 CI 不再依赖真实 Rust 后端或数据库,也能把前后台的大部分关键交互跑一遍。',
      '只要 mock 数据结构和接口契约稳定,这套 smoke 就能快速捕获组件改版、表单失效和路由错误。',
    ],
  },
  {
    title: 'Svelte 状态仓库拆分模式',
    slug: 'svelte-state-patterns',
    description: 'Termi 在前台筛选、搜索和订阅弹窗里如何拆分 Svelte 状态与副作用。',
    category: '前端工程',
    tags: ['Svelte', 'Astro', 'Playwright'],
    post_type: 'article',
    image: '/review-covers/journey-to-the-west-editorial.svg',
    images: [],
    pinned: false,
    status: 'published',
    visibility: 'public',
    created_at: '2026-02-18T08:10:00.000Z',
    updated_at: '2026-02-18T08:10:00.000Z',
    paragraphs: [
      '搜索模式切换、首页过滤器和订阅弹窗本质上都属于“短生命周期但高交互密度”的状态,适合被拆成局部仓库而不是抬到全局。',
      '这样每个功能块都能保留独立的 URL 同步和本地缓存逻辑,避免一个页面动作把整个前台状态带乱。',
    ],
  },
  {
    title: 'AI 搜索提示词调优记录',
    slug: 'ai-search-prompt-design',
    description: 'Termi 站内 AI 问答如何平衡检索结果、回答密度和提示词约束。',
    category: '测试体系',
    tags: ['AI', 'Playwright', 'CI'],
    post_type: 'article',
    image: '/review-covers/hero-dreams-in-tired-life.svg',
    images: [],
    pinned: false,
    status: 'published',
    visibility: 'public',
    created_at: '2025-12-11T08:00:00.000Z',
    updated_at: '2025-12-11T08:00:00.000Z',
    paragraphs: [
      'AI 问答页的目标不是单纯返回一段答案,而是把引用来源、索引时间和相关度一起暴露给用户。',
      '我们更关心“这个回答是否足够可追溯”,因此来源卡片、SSE 状态提示和缓存命中都被纳入同一条链路。',
    ],
  },
  {
    title: 'Docker 发布值班清单',
    slug: 'docker-rollout-checklist',
    description: 'Termi 的 CI/CD 与 Docker 发布手册:构建、上传、回滚和对象存储连通性检查。',
    category: '运维值班',
    tags: ['Docker', 'CI', 'AI'],
    post_type: 'article',
    image: '/review-covers/black-myth-wukong.svg',
    images: [],
    pinned: false,
    status: 'published',
    visibility: 'public',
    created_at: '2025-10-02T07:20:00.000Z',
    updated_at: '2025-10-02T07:20:00.000Z',
    paragraphs: [
      '值班手册的核心是把部署前检查、发布后验证和回滚策略写清楚,而不是只留下一个 docker compose up。',
      '在 Termi 里,R2 连通性、备份导出和前后台 smoke 都属于上线前必须执行的低成本检查。',
    ],
  },
  {
    title: '终端风格内容站封面系统',
    slug: 'terminal-cover-system',
    description: 'Termi 如何统一文章封面、评测海报和后台媒体库,减少重复素材治理成本。',
    category: '前端工程',
    tags: ['Astro', 'AI', 'Docker'],
    post_type: 'article',
    image: '/review-covers/the-long-season.svg',
    images: [],
    pinned: false,
    status: 'published',
    visibility: 'public',
    created_at: '2025-07-15T11:00:00.000Z',
    updated_at: '2025-07-16T04:00:00.000Z',
    paragraphs: [
      '无论是文章页 hero 图、评测封面还是后台素材库,最终都需要同一套 key、alt、caption 和标签模型。',
      '这让媒体库不仅能服务前台渲染,也能成为后台创建文章和评测时的统一素材来源。',
    ],
  },
  {
    title: '内容观测与阅读完成度',
    slug: 'content-observability-lab',
    description: 'Termi 的阅读进度、页面访问与完读事件如何反哺首页热门内容和后台分析页。',
    category: '测试体系',
    tags: ['AI', 'Playwright', 'Svelte'],
    post_type: 'article',
    image: '/review-covers/placed-within.svg',
    images: [],
    pinned: false,
    status: 'published',
    visibility: 'public',
    created_at: '2025-04-04T06:40:00.000Z',
    updated_at: '2025-04-05T09:20:00.000Z',
    paragraphs: [
      '阅读进度上报看似只是埋点,但如果没有统一会话标识和节流策略,后台分析很快就会被噪音淹没。',
      'Termi 的做法是把 page view、read progress 和 read complete 统一成同一个内容分析模型。',
    ],
  },
  {
    title: 'Markdown 导入与版本回滚',
    slug: 'markdown-import-and-revisions',
    description: 'Termi 后台的 Markdown 导入、版本快照和回滚流程,方便内容大批量迁移。',
    category: '运维值班',
    tags: ['CI', 'Docker', 'Playwright'],
    post_type: 'article',
    image: '/review-covers/thirteen-invites.svg',
    images: [],
    pinned: false,
    status: 'published',
    visibility: 'public',
    created_at: '2024-11-21T10:00:00.000Z',
    updated_at: '2024-11-21T10:00:00.000Z',
    paragraphs: [
      '批量导入 Markdown 时最容易漏掉的是 slug 冲突、元数据缺失和回滚策略,因此版本快照必须和写入动作绑定。',
      '只要恢复接口能按 full、markdown、metadata 三种模式工作,回归测试就能覆盖大部分内容变更风险。',
    ],
  },
  {
    title: '前台订阅弹窗节奏实验',
    slug: 'subscription-popup-experiments',
    description: 'Termi 订阅弹窗如何控制触发时机、收口文案和确认链路,避免一上来就打断阅读。',
    category: '测试体系',
    tags: ['AI', 'Svelte', 'Astro'],
    post_type: 'article',
    image: '/review-covers/journey-to-the-west-editorial.svg',
    images: [],
    pinned: false,
    status: 'published',
    visibility: 'public',
    created_at: '2024-08-08T12:00:00.000Z',
    updated_at: '2024-08-08T12:00:00.000Z',
    paragraphs: [
      '订阅弹窗如果没有上下文,就会被用户当成纯打断;因此我们只在滚动达到阈值或明确点击订阅按钮后再打开。',
      '邮箱确认页、偏好管理页和退订页也被纳入同一条测试链,确保状态切换能闭环。',
    ],
  },
  {
    title: '友链申请审核设计稿',
    slug: 'friend-link-review-workflow',
    description: 'Termi 友链从前台申请到后台审核的最小闭环,以及分类、状态与前台展示规则。',
    category: '前端工程',
    tags: ['Astro', 'Playwright', 'Svelte'],
    post_type: 'article',
    image: '/review-covers/hero-dreams-in-tired-life.svg',
    images: [],
    pinned: false,
    status: 'published',
    visibility: 'public',
    created_at: '2024-06-13T13:20:00.000Z',
    updated_at: '2024-06-13T13:20:00.000Z',
    paragraphs: [
      '友链系统看起来简单,但实际上同时涉及前台表单、后台审核、状态变更和分组展示。',
      '把这条链路纳入 Playwright 回归后,能很快发现表单字段映射和审核按钮失效的问题。',
    ],
  },
  {
    // tweet-type entry used to exercise the post_type filter.
    title: '值班手册:备份与恢复演练',
    slug: 'ops-backup-runbook',
    description: 'Termi 备份导出、导入、媒体清单和恢复演练的操作要点。',
    category: '运维值班',
    tags: ['Docker', 'AI', 'CI'],
    post_type: 'tweet',
    image: '/review-covers/black-myth-wukong.svg',
    images: [],
    pinned: false,
    status: 'published',
    visibility: 'public',
    created_at: '2024-03-03T09:10:00.000Z',
    updated_at: '2024-03-03T09:10:00.000Z',
    paragraphs: [
      '备份不是把 JSON 导出来就结束,恢复演练必须能证明分类、标签、评测、媒体与 Markdown 都能被重新装回。',
      '我们把这篇值班手册作为 tweet 类型内容保留,用来验证前台类型过滤与后台导出逻辑。',
    ],
  },
  {
    // private draft: must never appear in public listings.
    title: '私有草稿:季度回顾',
    slug: 'private-quarterly-retrospective',
    description: '仅后台可见的私有草稿,用于验证预览、保存和独立工作台。',
    category: '测试体系',
    tags: ['AI', 'Playwright'],
    post_type: 'article',
    image: '/review-covers/the-long-season.svg',
    images: [],
    pinned: false,
    status: 'draft',
    visibility: 'private',
    created_at: '2026-03-30T11:00:00.000Z',
    updated_at: '2026-03-30T11:00:00.000Z',
    paragraphs: [
      '这是一个私有草稿,只应出现在后台编辑器与独立预览窗口中。',
      '用它可以验证 include_private、preview 和 Markdown 更新链路。',
    ],
  },
  {
    // scheduled post: publish_at is after BASE_TS, so it stays hidden publicly.
    title: '计划中的站点改版路线图',
    slug: 'scheduled-site-roadmap',
    description: '一篇已经排期但尚未到发布时间的路线图文章,用于校验后台状态统计。',
    category: '测试体系',
    tags: ['CI', 'Astro'],
    post_type: 'article',
    image: '/review-covers/placed-within.svg',
    images: [],
    pinned: false,
    status: 'scheduled',
    visibility: 'public',
    publish_at: '2026-04-15T03:00:00.000Z',
    unpublish_at: null,
    created_at: '2026-04-01T02:00:00.000Z',
    updated_at: '2026-04-01T02:00:00.000Z',
    paragraphs: [
      '这篇文章用于测试定时发布和后台概览统计,不应该出现在前台公开列表里。',
      '当发布时间未到时,文章仍可在后台独立预览与对比。',
    ],
  },
]
// ISO timestamp at the fixed base instant plus a minute offset, keeping all
// seeded timestamps deterministic across runs.
function iso(offsetMinutes = 0) {
  const instant = BASE_TS + offsetMinutes * 60_000
  return new Date(instant).toISOString()
}
// Write a JSON response; caller-supplied headers override the defaults.
function json(res, status, payload, headers = {}) {
  const responseHeaders = {
    'content-type': CONTENT_TYPES.json,
    'cache-control': 'no-store',
    ...headers,
  }
  res.writeHead(status, responseHeaders)
  res.end(JSON.stringify(payload))
}
// Write a plain-text response; caller-supplied headers override the defaults.
function text(res, status, body, headers = {}) {
  const responseHeaders = {
    'content-type': CONTENT_TYPES.text,
    'cache-control': 'no-store',
    ...headers,
  }
  res.writeHead(status, responseHeaders)
  res.end(body)
}
// Reflect CORS headers back, but only for the two known dev origins; any
// other (or missing) Origin gets no CORS headers at all.
function reflectCors(req, res) {
  const origin = req.headers.origin
  const allowed = origin === FRONTEND_ORIGIN || origin === ADMIN_ORIGIN
  if (!origin || !allowed) {
    return
  }
  res.setHeader('access-control-allow-origin', origin)
  res.setHeader('vary', 'Origin')
  res.setHeader('access-control-allow-credentials', 'true')
  res.setHeader(
    'access-control-allow-headers',
    'Content-Type, Authorization, X-Requested-With',
  )
  res.setHeader(
    'access-control-allow-methods',
    'GET, POST, PUT, PATCH, DELETE, OPTIONS',
  )
}
// Buffer the whole request body; resolves with one concatenated Buffer,
// rejects on stream error.
function readRequestBody(req) {
  return new Promise((resolve, reject) => {
    const collected = []
    req.on('error', reject)
    req.on('data', (piece) => {
      collected.push(piece)
    })
    req.on('end', () => {
      resolve(Buffer.concat(collected))
    })
  })
}
// Parse JSON, returning the fallback instead of throwing on malformed input.
function safeJsonParse(raw, fallback = {}) {
  let parsed = fallback
  try {
    parsed = JSON.parse(raw)
  } catch {
    // keep fallback for malformed input
  }
  return parsed
}
// Minimal multipart/form-data parser for the mock server. Returns
// { fields, files }: repeated field names collapse into arrays, file parts
// carry filename/contentType/size plus a utf8 text view of the body.
function parseMultipartBody(buffer, contentType) {
  const boundaryMatch = contentType.match(/boundary=(?:"([^"]+)"|([^;]+))/i)
  const boundary = boundaryMatch?.[1] || boundaryMatch?.[2]
  if (!boundary) {
    return { fields: {}, files: [] }
  }
  const fields = {}
  const files = []
  // latin1 keeps raw bytes intact while still allowing string splitting.
  for (const part of buffer.toString('latin1').split(`--${boundary}`)) {
    const trimmed = part.trim()
    if (!trimmed || trimmed === '--') {
      continue
    }
    const segments = part.split('\r\n\r\n')
    const rawHeaders = segments.shift()
    if (!rawHeaders || segments.length === 0) {
      continue
    }
    // Re-join in case the body itself contained a blank line; drop the
    // trailing CRLF that precedes the next boundary.
    const body = segments.join('\r\n\r\n').replace(/\r\n$/, '')
    const headerLines = rawHeaders
      .split('\r\n')
      .map((line) => line.trim())
      .filter(Boolean)
    const disposition = headerLines.find((line) =>
      line.toLowerCase().startsWith('content-disposition:'),
    )
    if (!disposition) {
      continue
    }
    const fieldName = disposition.match(/name="([^"]+)"/i)?.[1]
    if (!fieldName) {
      continue
    }
    const fileName = disposition.match(/filename="([^"]*)"/i)?.[1]
    if (fileName !== undefined) {
      const typeLine = headerLines.find((line) =>
        line.toLowerCase().startsWith('content-type:'),
      )
      const rawBytes = Buffer.from(body, 'latin1')
      files.push({
        fieldName,
        filename: fileName,
        contentType: typeLine?.split(':')[1]?.trim() || 'application/octet-stream',
        size: rawBytes.length,
        text: rawBytes.toString('utf8'),
      })
    } else if (fieldName in fields) {
      const current = fields[fieldName]
      if (Array.isArray(current)) {
        current.push(body)
      } else {
        fields[fieldName] = [current, body]
      }
    } else {
      fields[fieldName] = body
    }
  }
  return { fields, files }
}
// Read and decode a request body according to its content type; always
// returns { body, json, fields, files } with safe empty defaults.
async function parseRequest(req) {
  const body = await readRequestBody(req)
  const result = { body, json: {}, fields: {}, files: [] }
  if (!body.length) {
    return result
  }
  const contentType = String(req.headers['content-type'] || '')
  if (contentType.includes('application/json')) {
    result.json = safeJsonParse(body.toString('utf8'), {})
  } else if (contentType.includes('multipart/form-data')) {
    Object.assign(result, parseMultipartBody(body, contentType))
  }
  return result
}
// Parse the Cookie header into a name → value map; entries without '=' are
// ignored, values may themselves contain '='.
function getCookies(req) {
  const jar = {}
  for (const pair of String(req.headers.cookie || '').split(';')) {
    const entry = pair.trim()
    if (!entry) {
      continue
    }
    const eq = entry.indexOf('=')
    if (eq === -1) {
      continue
    }
    jar[entry.slice(0, eq)] = entry.slice(eq + 1)
  }
  return jar
}
// The mock admin session is a single fixed cookie value.
function isAuthenticated(req) {
  const cookies = getCookies(req)
  return cookies[SESSION_COOKIE] === SESSION_VALUE
}
// Guard helper: respond 401 and return false when the admin session cookie
// is missing or wrong; return true when authenticated.
function ensureAdmin(req, res) {
  if (!isAuthenticated(req)) {
    json(res, 401, {
      error: 'unauthorized',
      description: '当前未登录后台。',
    })
    return false
  }
  return true
}
// Lowercase the input, collapse runs of characters outside [a-z0-9 + CJK]
// into single hyphens, and strip hyphens from both ends.
function slugify(value) {
  const lowered = String(value || '').trim().toLowerCase()
  const hyphenated = lowered.replace(/[^a-z0-9\u4e00-\u9fa5]+/g, '-')
  return hyphenated.replace(/^-+/, '').replace(/-+$/, '')
}
// Coerce to a trimmed string; any falsy input (null, undefined, 0, '') maps
// to the empty string.
function normalizeText(value) {
  return value ? String(value).trim() : ''
}
// Liberal boolean coercion: booleans pass through, ''/null/undefined yield
// the fallback, and the usual truthy spellings ('1', 'true', 'yes', 'on')
// are accepted case-insensitively.
function toBoolean(value, fallback = false) {
  if (typeof value === 'boolean') {
    return value
  }
  if (value === undefined || value === null || value === '') {
    return fallback
  }
  const token = String(value).trim().toLowerCase()
  return token === '1' || token === 'true' || token === 'yes' || token === 'on'
}
// Accept arrays as-is; parse JSON strings, falling back to a comma-split
// list when the string is not valid JSON; anything else yields [].
function parseJsonArray(value) {
  if (Array.isArray(value)) {
    return value
  }
  if (!value || typeof value !== 'string') {
    return []
  }
  let parsed
  try {
    parsed = JSON.parse(value)
  } catch {
    return value
      .split(',')
      .map((item) => item.trim())
      .filter(Boolean)
  }
  return Array.isArray(parsed) ? parsed : []
}
// Render a post body: "# title", a blank line, then paragraphs separated by
// blank lines, with a trailing newline.
function buildMarkdown(title, paragraphs) {
  const body = paragraphs.join('\n\n')
  return ['# ', title, '\n\n', body, '\n'].join('')
}
// Collapse whitespace runs to single spaces, trim, and lowercase so
// paragraph hashes stay stable across formatting-only edits.
function normalizeParagraphText(textValue) {
  const collapsed = textValue.replace(/\s+/g, ' ')
  return collapsed.trim().toLowerCase()
}
// 32-bit FNV-1a over UTF-16 code units, rendered as 8 lowercase hex digits.
function fnv1aHash(value) {
  let hash = 0x811c9dc5 // FNV offset basis
  for (let i = 0; i < value.length; i += 1) {
    // xor in the code unit, then multiply by the FNV prime (mod 2^32).
    hash = Math.imul(hash ^ value.charCodeAt(i), 0x01000193)
  }
  return (hash >>> 0).toString(16).padStart(8, '0')
}
// Derive stable per-paragraph descriptors (hash + occurrence counter keys)
// used by the paragraph-comment feature. The leading "# title" heading,
// other headings, and image blocks are skipped.
function extractParagraphDescriptors(markdown) {
  const blocks = String(markdown || '')
    .replace(/^#\s+.+\n+/, '')
    .split(/\n{2,}/)
    .map((block) => block.trim())
    .filter((block) => block && !block.startsWith('#') && !block.startsWith('!['))
  const seen = new Map()
  const descriptors = []
  for (const paragraph of blocks) {
    const hash = fnv1aHash(normalizeParagraphText(paragraph))
    // Duplicate paragraphs get distinct keys via an occurrence counter.
    const occurrence = (seen.get(hash) || 0) + 1
    seen.set(hash, occurrence)
    const excerpt =
      paragraph.length <= 120 ? paragraph : `${paragraph.slice(0, 120).trimEnd()}...`
    descriptors.push({
      key: `p-${hash}-${occurrence}`,
      excerpt,
      text: paragraph,
    })
  }
  return descriptors
}
// Deep copy via a JSON round-trip; adequate for the JSON-safe mock state.
function clone(value) {
  const serialized = JSON.stringify(value)
  return JSON.parse(serialized)
}
// Shape mirrors the real backend's session endpoint; unauthenticated
// sessions null out all identity fields.
function createSessionResponse(authenticated) {
  const identity = authenticated
    ? {
        username: VALID_LOGIN.username,
        email: 'admin@termi.test',
        auth_source: 'mock-local',
        auth_provider: 'mock-session',
        groups: ['admin'],
      }
    : {
        username: null,
        email: null,
        auth_source: null,
        auth_provider: null,
        groups: [],
      }
  return {
    authenticated,
    ...identity,
    proxy_auth_enabled: false,
    local_login_enabled: true,
    can_logout: authenticated,
  }
}
// Timestamp for "now" in seeded records (the fixed mock clock, offset 0).
const NOW_ISO = iso(0)
// Seed reviews shown on the public reviews page and in the admin list.
// `status: 'in-progress'` entries are filtered out by isPublicReviewVisible.
const reviewCatalog = [
  {
    title: '《漫长的季节》',
    review_type: 'anime',
    rating: 5,
    review_date: '2026-03-21',
    status: 'completed',
    description: '把东北工业景观、家庭裂痕和悬疑节奏揉进一起,后劲非常强。',
    tags: ['悬疑', '现实主义', '年度最佳'],
    cover: '/review-covers/the-long-season.svg',
    link_url: 'https://example.invalid/reviews/the-long-season',
  },
  {
    title: '《黑神话:悟空》',
    review_type: 'game',
    rating: 4,
    review_date: '2026-03-18',
    status: 'completed',
    description: '美术和动作系统都足够能打,流程细节还有继续打磨空间。',
    tags: ['动作', '国产', '年度观察'],
    cover: '/review-covers/black-myth-wukong.svg',
    link_url: 'https://example.invalid/reviews/black-myth-wukong',
  },
  {
    title: '《置身事内》',
    review_type: 'book',
    rating: 5,
    review_date: '2026-02-02',
    status: 'completed',
    description: '适合把财政、土地与地方治理如何互相牵引一次性理顺。',
    tags: ['财政', '中国', '社会观察'],
    cover: '/review-covers/placed-within.svg',
    link_url: '/articles/playwright-regression-workflow',
  },
  {
    title: '《宇宙探索编辑部》',
    review_type: 'movie',
    rating: 4,
    review_date: '2026-01-12',
    status: 'in-progress',
    description: '荒诞感和理想主义都很讨喜,适合放进终端风格站点做海报实验。',
    tags: ['电影', '公路片', '荒诞'],
    cover: '/review-covers/journey-to-the-west-editorial.svg',
    link_url: '',
  },
]
// Seed friend links: two approved entries plus one pending for review tests.
const friendLinkCatalog = [
  {
    site_name: 'InitCool Docs',
    site_url: 'https://docs.init.cool',
    description: '收纳部署、值班与研发工具链的工程手册站。',
    category: 'tech',
    status: 'approved',
  },
  {
    site_name: 'Svelte Terminal Lab',
    site_url: 'https://svelte-terminal.example',
    description: '记录 Svelte、Astro 与终端式交互实验。',
    category: 'design',
    status: 'approved',
  },
  {
    site_name: 'Pending Link Review',
    site_url: 'https://pending-link.example',
    description: '待后台审核的新友链申请。',
    category: 'other',
    status: 'pending',
  },
]
// Seed media-library items; expanded into full records by createMediaRecord.
const mediaCatalog = [
  {
    key: 'covers/the-long-season.svg',
    title: '漫长的季节封面',
    alt_text: '终端风格的蓝色封面海报',
    caption: '首页推荐位封面',
    tags: ['cover', 'anime'],
    notes: '默认 mock 素材库',
    size_bytes: 1824,
  },
  {
    key: 'posts/playwright-workflow.svg',
    title: 'Playwright 工作流配图',
    alt_text: '回归测试流程图',
    caption: 'CI 与 smoke server 关系图',
    tags: ['diagram', 'playwright'],
    notes: '用于文章配图',
    size_bytes: 2048,
  },
]
// Default site settings mirroring the real backend's settings row. The admin
// settings endpoints read and mutate a single instance of this object; all
// external hosts point at .invalid/mock addresses so nothing leaves the box.
function createSiteSettings() {
  return {
    id: 1,
    site_name: 'InitCool',
    site_short_name: 'Termi',
    site_url: FRONTEND_ORIGIN,
    site_title: 'InitCool - 终端风格的内容平台',
    site_description: '一个基于终端美学的个人内容站,记录代码、设计和生活。',
    hero_title: '欢迎来到我的极客终端博客',
    hero_subtitle: '这里记录技术、代码和内容运营的持续迭代。',
    owner_name: 'InitCool',
    owner_title: 'Rust / Go / Frontend Builder',
    owner_bio: '偏好用最小系统把内容、测试、值班和自动化串起来。',
    owner_avatar_url: null,
    social_github: 'https://github.com/initcool',
    social_twitter: '',
    social_email: 'mailto:initcool@example.com',
    location: 'Hong Kong',
    tech_stack: ['Astro', 'Svelte', 'React', 'Rust', 'Playwright', 'Docker'],
    music_playlist: [
      {
        title: '山中来信',
        artist: 'InitCool Radio',
        album: '站点默认歌单',
        url: 'https://www.soundhelix.com/examples/mp3/SoundHelix-Song-1.mp3',
        cover_image_url: `${MOCK_ORIGIN}/media-files/playlist-1.svg`,
        accent_color: '#2f6b5f',
        description: '适合文章阅读时循环播放的轻氛围曲。',
      },
    ],
    // Feature toggles and anti-spam verification modes.
    ai_enabled: true,
    paragraph_comments_enabled: true,
    comment_verification_mode: 'captcha',
    comment_turnstile_enabled: false,
    subscription_verification_mode: 'off',
    subscription_turnstile_enabled: false,
    web_push_enabled: false,
    turnstile_site_key: null,
    turnstile_secret_key: null,
    web_push_vapid_public_key: null,
    web_push_vapid_private_key: null,
    web_push_vapid_subject: null,
    // AI provider configuration (mock values only).
    ai_provider: 'mock-openai',
    ai_api_base: 'https://api.mock.invalid/v1',
    ai_api_key: 'mock-key',
    ai_chat_model: 'gpt-mock-4.1',
    ai_image_provider: 'mock-images',
    ai_image_api_base: 'https://images.mock.invalid/v1',
    ai_image_api_key: 'mock-image-key',
    ai_image_model: 'mock-image-1',
    ai_providers: [
      {
        id: 'default',
        name: 'Mock Provider',
        provider: 'openai',
        api_base: 'https://api.mock.invalid/v1',
        api_key: 'mock-key',
        chat_model: 'gpt-mock-4.1',
        image_model: 'mock-image-1',
      },
    ],
    ai_active_provider_id: 'default',
    ai_embedding_model: 'text-embedding-mock-3',
    ai_system_prompt: '你是 Termi 的 mock AI 助手。',
    ai_top_k: 6,
    ai_chunk_size: 800,
    ai_last_indexed_at: iso(-90),
    ai_chunks_count: 128,
    ai_local_embedding: 'disabled',
    // Media storage (mock R2) settings.
    media_storage_provider: 'mock-r2',
    media_r2_account_id: 'mock-account',
    media_r2_bucket: 'termi-playwright',
    media_r2_public_base_url: `${MOCK_ORIGIN}/media`,
    media_r2_access_key_id: 'mock-access-key',
    media_r2_secret_access_key: 'mock-secret',
    seo_default_og_image: `${MOCK_ORIGIN}/media-files/default-og.svg`,
    seo_default_twitter_handle: '@initcool',
    // Notification + subscription-popup settings.
    notification_webhook_url: 'https://notify.mock.invalid/termi',
    notification_channel_type: 'webhook',
    notification_comment_enabled: true,
    notification_friend_link_enabled: true,
    subscription_popup_enabled: true,
    subscription_popup_title: '订阅更新',
    subscription_popup_description: '有新文章、周报和友链通知时,通过邮件第一时间收到提醒。',
    subscription_popup_delay_seconds: 2,
    search_synonyms: ['playwright,e2e,regression', 'ai,ask,assistant'],
  }
}
// Pull the "# Title" heading and first body paragraph out of a markdown
// document; falls back to `fallbackTitle` when no heading is present.
function parseHeadingAndDescription(markdown, fallbackTitle = 'Untitled') {
  const normalized = String(markdown || '').replace(/\r\n/g, '\n').trim()
  const headingMatch = normalized.match(/^#\s+(.+)$/m)
  const firstParagraph = normalized
    .replace(/^#\s+.+\n+/, '')
    .split(/\n{2,}/)
    .map((item) => item.trim())
    .find(Boolean)
  const title = normalizeText(headingMatch?.[1] || fallbackTitle) || fallbackTitle
  return {
    title,
    description: normalizeText(firstParagraph || ''),
  }
}
// Build a full post record from a catalog entry. The markdown body is
// generated from the entry's paragraphs and then re-parsed, so title and
// description always stay consistent with the stored content.
function makePostRecord(entry, id) {
  const markdown = buildMarkdown(entry.title, entry.paragraphs)
  const { title, description } = parseHeadingAndDescription(markdown, entry.title)
  return {
    id,
    title,
    slug: entry.slug,
    description,
    content: markdown,
    category: entry.category,
    tags: [...entry.tags],
    post_type: entry.post_type,
    image: entry.image || null,
    images: entry.images?.length ? [...entry.images] : [],
    pinned: Boolean(entry.pinned),
    status: entry.status || 'published',
    visibility: entry.visibility || 'public',
    publish_at: entry.publish_at || null,
    unpublish_at: entry.unpublish_at || null,
    // SEO / redirect fields start empty; admin endpoints may edit them later.
    canonical_url: null,
    noindex: false,
    og_image: null,
    redirect_from: [],
    redirect_to: null,
    created_at: entry.created_at || NOW_ISO,
    updated_at: entry.updated_at || entry.created_at || NOW_ISO,
  }
}
// Materialize a review row. Tags are stored as a JSON string to mirror the
// real database column; timestamps are staggered per id for stable ordering.
function buildReviewRecord(entry, id) {
  const record = {
    id,
    title: entry.title,
    review_type: entry.review_type,
    rating: entry.rating,
    review_date: entry.review_date,
    status: entry.status,
    description: entry.description,
    tags: JSON.stringify(entry.tags || []),
    cover: entry.cover || '',
    link_url: entry.link_url || null,
    created_at: iso(-id * 15),
    updated_at: iso(-id * 10),
  }
  return record
}
// Only reviews in a finished status appear on the public reviews page.
function isPublicReviewVisible(review) {
  const status = normalizeText(review?.status).toLowerCase()
  return status === 'published' || status === 'completed' || status === 'done'
}
// Build a seeded email subscription; `overrides` can replace any field
// (it is spread last, so it wins over every default below).
function createSubscriptionRecord(id, overrides = {}) {
  return {
    created_at: iso(-id * 6),
    updated_at: iso(-id * 5),
    id,
    channel_type: 'email',
    target: `user${id}@example.com`,
    display_name: `订阅用户 ${id}`,
    status: 'active',
    filters: {
      event_types: ['post.published', 'digest.weekly'],
      categories: ['前端工程'],
      tags: ['Playwright'],
    },
    metadata: {
      source: 'seed',
    },
    secret: null,
    notes: 'seed subscription',
    // Deterministic tokens used by the public confirm/manage pages.
    confirm_token: `confirm-token-${id}`,
    manage_token: `manage-token-${id}`,
    verified_at: iso(-id * 4),
    last_notified_at: iso(-id * 2),
    failure_count: 0,
    last_delivery_status: 'delivered',
    ...overrides,
  }
}
// Expand a media catalog item into the full record served by the media
// endpoints; `index` staggers last_modified so ordering is stable.
function createMediaRecord(item, index) {
  const url = `${MOCK_ORIGIN}/media/${encodeURIComponent(item.key)}`
  return {
    key: item.key,
    url,
    size_bytes: item.size_bytes,
    last_modified: iso(-index * 7),
    title: item.title,
    alt_text: item.alt_text,
    caption: item.caption,
    tags: [...item.tags],
    notes: item.notes,
    // Placeholder payload returned when the file itself is fetched.
    body: `mock media body for ${item.key}`,
    content_type: item.key.endsWith('.svg') ? CONTENT_TYPES.svg : CONTENT_TYPES.text,
  }
}
// Build the complete in-memory dataset the mock server serves: settings,
// taxonomy, posts, reviews, friend links, subscriptions, media, comments,
// plus empty log/event buckets and per-entity id counters.
function createInitialState() {
  const site_settings = createSiteSettings()
  const categories = categoryCatalog.map((item, index) => ({
    id: index + 1,
    name: item.name,
    slug: item.slug,
    count: 0,
    description: item.description,
    cover_image: item.cover_image,
    accent_color: item.accent_color,
    seo_title: item.seo_title,
    seo_description: item.seo_description,
    created_at: iso(-(index + 1) * 20),
    updated_at: iso(-(index + 1) * 10),
  }))
  const tags = tagCatalog.map((item, index) => ({
    id: index + 1,
    name: item.name,
    slug: item.slug,
    count: 0,
    description: item.description,
    cover_image: item.cover_image,
    accent_color: item.accent_color,
    seo_title: item.seo_title,
    seo_description: item.seo_description,
    created_at: iso(-(index + 1) * 18),
    updated_at: iso(-(index + 1) * 8),
  }))
  const posts = postCatalog.map((item, index) => makePostRecord(item, index + 1))
  const reviews = reviewCatalog.map((item, index) => buildReviewRecord(item, index + 1))
  const friend_links = friendLinkCatalog.map((item, index) => ({
    id: index + 1,
    site_name: item.site_name,
    site_url: item.site_url,
    avatar_url: null,
    description: item.description,
    category: item.category,
    status: item.status,
    created_at: iso(-(index + 1) * 12),
    updated_at: iso(-(index + 1) * 6),
  }))
  const subscriptions = [
    createSubscriptionRecord(1, {
      target: 'watcher@example.com',
      display_name: '产品订阅',
      filters: {
        event_types: ['post.published', 'digest.weekly'],
        categories: ['测试体系'],
        tags: ['Playwright', 'CI'],
      },
    }),
    createSubscriptionRecord(2, {
      target: 'ops@example.com',
      display_name: '值班通知',
      filters: {
        event_types: ['digest.monthly', 'friend_link.created'],
        categories: ['运维值班'],
        tags: ['Docker'],
      },
    }),
  ]
  const media = mediaCatalog.map((item, index) => createMediaRecord(item, index + 1))
  // Seed comments all target the pinned article; #2 is paragraph-scoped
  // (bound to the article's first paragraph key) and #3 starts unapproved
  // so the admin moderation flow can be exercised.
  const articlePost = posts.find((item) => item.slug === 'astro-terminal-blog') || posts[0]
  const paragraph = extractParagraphDescriptors(articlePost.content)[0]
  const comments = [
    {
      id: 1,
      post_id: String(articlePost.id),
      post_slug: articlePost.slug,
      author: 'Alice',
      email: 'alice@example.com',
      avatar: null,
      ip_address: '10.0.0.8',
      user_agent: 'MockBrowser/1.0',
      referer: `${FRONTEND_ORIGIN}/articles/${articlePost.slug}`,
      content: '首页筛选和终端风格结合得很好,想看更多实现细节。',
      reply_to: null,
      reply_to_comment_id: null,
      scope: 'article',
      paragraph_key: null,
      paragraph_excerpt: null,
      approved: true,
      created_at: iso(-40),
      updated_at: iso(-40),
    },
    {
      id: 2,
      post_id: String(articlePost.id),
      post_slug: articlePost.slug,
      author: 'Bob',
      email: 'bob@example.com',
      avatar: null,
      ip_address: '10.0.0.9',
      user_agent: 'MockBrowser/1.0',
      referer: `${FRONTEND_ORIGIN}/articles/${articlePost.slug}`,
      content: '这里的 URL 同步策略很实用。',
      reply_to: null,
      reply_to_comment_id: null,
      scope: 'paragraph',
      paragraph_key: paragraph?.key || null,
      paragraph_excerpt: paragraph?.excerpt || null,
      approved: true,
      created_at: iso(-35),
      updated_at: iso(-35),
    },
    {
      id: 3,
      post_id: String(articlePost.id),
      post_slug: articlePost.slug,
      author: 'Carol',
      email: 'carol@example.com',
      avatar: null,
      ip_address: '10.0.0.10',
      user_agent: 'MockBrowser/1.0',
      referer: `${FRONTEND_ORIGIN}/articles/${articlePost.slug}`,
      content: '这条评论默认待审核,用来验证后台审核流程。',
      reply_to: null,
      reply_to_comment_id: null,
      scope: 'article',
      paragraph_key: null,
      paragraph_excerpt: null,
      approved: false,
      created_at: iso(-25),
      updated_at: iso(-25),
    },
  ]
  return {
    site_settings,
    categories,
    tags,
    posts,
    reviews,
    friend_links,
    subscriptions,
    media,
    comments,
    audit_logs: [],
    post_revisions: [],
    comment_blacklist: [
      {
        id: 1,
        matcher_type: 'email',
        matcher_value: 'blocked@example.com',
        reason: 'seed rule',
        active: true,
        expires_at: null,
        created_at: iso(-12),
        updated_at: iso(-12),
        effective: true,
      },
    ],
    comment_persona_logs: [],
    deliveries: [],
    worker_jobs: [],
    ai_events: [],
    content_events: [],
    captcha_tokens: new Map(),
    // Next auto-increment id per entity type (blacklist starts at 2 because
    // one seed rule exists above).
    next_ids: {
      post: posts.length + 1,
      category: categories.length + 1,
      tag: tags.length + 1,
      comment: comments.length + 1,
      friend_link: friend_links.length + 1,
      review: reviews.length + 1,
      subscription: subscriptions.length + 1,
      audit: 1,
      revision: 1,
      delivery: 1,
      worker_job: 1,
      blacklist: 2,
      persona_log: 1,
      ai_event: 1,
    },
  }
}
// Mutable in-memory state for the whole mock server; can be rebuilt from
// scratch via createInitialState() to reset between scenarios.
let state = createInitialState()
// Allocate the next id for the given counter bucket ('post', 'comment', ...).
function nextId(bucket) {
  const value = state.next_ids[bucket]
  state.next_ids[bucket] += 1
  return value
}
// Decide whether a post should appear for the given query options.
// - preview: admin preview bypasses status/visibility/scheduling checks.
// - include_private: admin listings may include private posts.
// - include_redirects: keep posts that only exist as redirect stubs.
function isPostVisible(post, options = {}) {
  const includePrivate = toBoolean(options.include_private, false)
  const preview = toBoolean(options.preview, false)
  const includeRedirects = toBoolean(options.include_redirects, false)
  // Use the fixed mock clock so publish/unpublish checks are deterministic.
  const now = BASE_TS
  if (!post) return false
  if (!includeRedirects && post.redirect_to) return false
  if (preview) return true
  if (post.status !== 'published') return false
  // Fix: a second unconditional `visibility === 'private'` check used to
  // follow this line, hiding private posts even when include_private=true
  // and making the option dead code.
  if (post.visibility === 'private' && !includePrivate) return false
  if (post.publish_at && Date.parse(post.publish_at) > now) return false
  if (post.unpublish_at && Date.parse(post.unpublish_at) <= now) return false
  return true
}
// Recompute category and tag usage counts from the currently visible posts.
function recalculateTaxonomyCounts() {
  const perCategory = new Map()
  const perTag = new Map()
  for (const post of state.posts) {
    if (!isPostVisible(post)) {
      continue
    }
    if (post.category) {
      perCategory.set(post.category, (perCategory.get(post.category) || 0) + 1)
    }
    for (const tagName of post.tags || []) {
      perTag.set(tagName, (perTag.get(tagName) || 0) + 1)
    }
  }
  for (const category of state.categories) {
    category.count = perCategory.get(category.name) || 0
  }
  for (const tag of state.tags) {
    tag.count = perTag.get(tag.name) || 0
  }
}
// Record an admin action in the audit trail (newest entries first).
function addAuditLog(action, target_type, target_label, target_id = null, metadata = null) {
  const entry = {
    id: nextId('audit'),
    created_at: iso(-1),
    updated_at: iso(-1),
    actor_username: VALID_LOGIN.username,
    actor_email: 'admin@termi.test',
    actor_source: 'mock-admin',
    action,
    target_type,
    target_id: target_id === null ? null : String(target_id),
    target_label: target_label || null,
    metadata,
  }
  state.audit_logs.unshift(entry)
}
// Snapshot a post (markdown + full record clone) so the revisions UI can
// diff and restore it later; newest revisions first.
function addPostRevision(post, operation = 'update', reason = null) {
  const revision = {
    id: nextId('revision'),
    post_slug: post.slug,
    post_title: post.title,
    operation,
    revision_reason: reason,
    actor_username: VALID_LOGIN.username,
    actor_email: 'admin@termi.test',
    actor_source: 'mock-admin',
    created_at: iso(-1),
    has_markdown: true,
    metadata: { category: post.category, tags: post.tags, status: post.status },
    markdown: post.content,
    snapshot: clone(post),
  }
  state.post_revisions.unshift(revision)
}
// Build a notification delivery record for a subscription/event pair.
// Attempt and timestamp fields are derived from `status`: queued deliveries
// have no attempts yet, retry_pending schedules a next retry, and sent
// deliveries get a delivered_at timestamp.
function createNotificationDelivery({
  subscription,
  eventType,
  status = 'queued',
  responseText = 'queued by mock server',
  payload = null,
}) {
  return {
    created_at: iso(-1),
    updated_at: iso(-1),
    id: nextId('delivery'),
    subscription_id: subscription?.id ?? null,
    channel_type: subscription?.channel_type ?? 'email',
    target: subscription?.target ?? 'unknown@example.com',
    event_type: eventType,
    status,
    provider: 'mock-delivery',
    response_text: responseText,
    payload,
    attempts_count: status === 'queued' ? 0 : 1,
    next_retry_at: status === 'retry_pending' ? iso(10) : null,
    last_attempt_at: status === 'queued' ? null : iso(-1),
    delivered_at: status === 'sent' ? iso(-1) : null,
  }
}
// Insert a worker/task job record at the head of the jobs list and return
// it. Defaults model a manually-triggered job; attempt/timestamp defaults
// are derived from `status` (queued jobs have not started or finished yet).
function createWorkerJob({
  job_kind = 'worker',
  worker_name,
  display_name = null,
  status = 'queued',
  queue_name = null,
  requested_by = VALID_LOGIN.username,
  requested_source = 'mock-admin',
  trigger_mode = 'manual',
  payload = null,
  result = null,
  error_text = null,
  tags = [],
  related_entity_type = null,
  related_entity_id = null,
  parent_job_id = null,
  // Derived defaults: queued → no attempts/start; running → no finish.
  attempts_count = status === 'queued' ? 0 : 1,
  max_attempts = 1,
  cancel_requested = false,
  queued_at = iso(-1),
  started_at = status === 'queued' ? null : iso(-1),
  finished_at = status === 'queued' || status === 'running' ? null : iso(-1),
} = {}) {
  const record = {
    created_at: iso(-1),
    updated_at: iso(-1),
    id: nextId('worker_job'),
    parent_job_id,
    job_kind,
    worker_name,
    display_name,
    status,
    queue_name,
    requested_by,
    requested_source,
    trigger_mode,
    payload,
    result,
    error_text,
    tags: [...tags],
    related_entity_type,
    related_entity_id: related_entity_id === null ? null : String(related_entity_id),
    attempts_count,
    max_attempts,
    cancel_requested,
    queued_at,
    started_at,
    finished_at,
  }
  state.worker_jobs.unshift(record)
  return record
}
// Only still-pending/active jobs without a cancel flag can be cancelled.
function canCancelWorkerJob(job) {
  if (job.cancel_requested) {
    return false
  }
  return job.status === 'queued' || job.status === 'running'
}
// Jobs in any terminal status may be re-queued.
function canRetryWorkerJob(job) {
  const status = job.status
  return status === 'failed' || status === 'cancelled' || status === 'succeeded'
}
/** Clone a job record and attach the computed can_cancel / can_retry flags. */
function normalizeWorkerJob(job) {
  const snapshot = clone(job)
  snapshot.can_cancel = canCancelWorkerJob(job)
  snapshot.can_retry = canRetryWorkerJob(job)
  return snapshot
}
/**
 * Aggregate state.worker_jobs into the admin worker-overview payload:
 * global status counters, per-worker stat rows, and a static catalog of
 * the workers/tasks this mock knows about.
 */
function buildWorkerOverview() {
const jobs = state.worker_jobs
const counters = {
total_jobs: jobs.length,
queued: 0,
running: 0,
succeeded: 0,
failed: 0,
cancelled: 0,
active_jobs: 0,
}
const grouped = new Map()
// Tuples: [worker_name, job_kind, label, description, queue_name].
const catalog = [
['worker.download_media', 'worker', '远程媒体下载', '抓取远程图片 / PDF 到媒体库,并回写媒体元数据。', 'media'],
['worker.notification_delivery', 'worker', '通知投递', '执行订阅通知、测试通知与 digest 投递。', 'notifications'],
['task.retry_deliveries', 'task', '重试待投递通知', '扫描 retry_pending 的通知记录并重新入队。', 'maintenance'],
['task.send_weekly_digest', 'task', '发送周报', '根据近期内容生成周报,并为活跃订阅目标入队。', 'digests'],
['task.send_monthly_digest', 'task', '发送月报', '根据近期内容生成月报,并为活跃订阅目标入队。', 'digests'],
].map(([worker_name, job_kind, label, description, queue_name]) => ({
worker_name,
job_kind,
label,
description,
queue_name,
supports_cancel: true,
supports_retry: true,
}))
for (const job of jobs) {
// Only known statuses bump a counter; unexpected values are ignored.
if (Object.hasOwn(counters, job.status)) {
counters[job.status] += 1
}
const existing =
grouped.get(job.worker_name) ||
{
worker_name: job.worker_name,
job_kind: job.job_kind,
label: catalog.find((item) => item.worker_name === job.worker_name)?.label || job.worker_name,
queued: 0,
running: 0,
succeeded: 0,
failed: 0,
cancelled: 0,
last_job_at: null,
}
if (Object.hasOwn(existing, job.status)) {
existing[job.status] += 1
}
// ||= keeps the first value seen; createWorkerJob unshifts (newest-first),
// so this records the most recent job's created_at per worker.
existing.last_job_at ||= job.created_at
grouped.set(job.worker_name, existing)
}
counters.active_jobs = counters.queued + counters.running
return {
...counters,
worker_stats: Array.from(grouped.values()),
catalog,
}
}
/**
 * Record a worker job that mirrors one notification delivery.
 * Status defaults to 'succeeded'; a 'failed' status clears the result
 * and fills error_text instead.
 */
function enqueueNotificationDeliveryJob(delivery, options = {}) {
  const failed = options.status === 'failed'
  return createWorkerJob({
    job_kind: 'worker',
    worker_name: 'worker.notification_delivery',
    display_name: `${delivery.event_type} → ${delivery.target}`,
    status: options.status || 'succeeded',
    queue_name: 'notifications',
    payload: {
      delivery_id: delivery.id,
      job_id: null,
    },
    result: failed ? null : { delivery_id: delivery.id },
    error_text: failed ? options.error_text || 'mock delivery failed' : null,
    tags: ['notifications', 'delivery'],
    related_entity_type: 'notification_delivery',
    related_entity_id: delivery.id,
    parent_job_id: options.parent_job_id ?? null,
  })
}
/**
 * Reduce an arbitrary path-like value to a safe bare filename.
 * Strips directory segments (both slash styles), collapses unsafe
 * characters into dashes, trims edge dashes, and falls back when
 * nothing usable remains.
 */
function sanitizeFilename(value, fallback = 'upload.bin') {
  const segments = String(value || '').split(/[\\/]/)
  const base = segments[segments.length - 1] || fallback
  const cleaned = base.replace(/[^a-zA-Z0-9._-]+/g, '-').replace(/^-+|-+$/g, '')
  return cleaned || fallback
}
/**
 * Shape an uploaded file into the mock media record served by /media/:key.
 * Content-type resolution: anything declaring "svg" is normalized to the
 * mock SVG type, other image/* types pass through unchanged, everything
 * else is served as plain text.
 */
function makeMediaRecordFromUpload(key, file) {
  const declaredType = String(file?.contentType || '')
  let content_type = CONTENT_TYPES.text
  if (declaredType.includes('svg')) {
    content_type = CONTENT_TYPES.svg
  } else if (declaredType.includes('image/')) {
    content_type = String(file.contentType)
  }
  return {
    key,
    url: `${MOCK_ORIGIN}/media/${encodeURIComponent(key)}`,
    size_bytes: file?.size ?? Buffer.byteLength(file?.body || ''),
    last_modified: iso(-1),
    title: sanitizeFilename(file?.filename || key),
    alt_text: null,
    caption: null,
    tags: [],
    notes: 'uploaded by playwright mock',
    body: file?.body?.toString('utf8') || `mock media body for ${key}`,
    content_type,
  }
}
/**
 * Record a succeeded media-download worker job for the given request
 * payload. `mediaRecord` may be null, in which case the job result stays
 * null as well.
 */
function queueDownloadWorkerJob(payload, mediaRecord) {
  const sourceUrl = normalizeText(payload.source_url)
  const jobPayload = {
    source_url: payload.source_url,
    prefix: payload.prefix || null,
    title: payload.title || null,
    alt_text: payload.alt_text || null,
    caption: payload.caption || null,
    tags: Array.isArray(payload.tags) ? payload.tags : [],
    notes: payload.notes || null,
    job_id: null,
  }
  let result = null
  if (mediaRecord) {
    result = {
      key: mediaRecord.key,
      url: mediaRecord.url,
      size_bytes: mediaRecord.size_bytes,
      source_url: sourceUrl,
      content_type: mediaRecord.content_type,
    }
  }
  return createWorkerJob({
    job_kind: 'worker',
    worker_name: 'worker.download_media',
    display_name: normalizeText(payload.title) || `download ${sourceUrl}`,
    status: 'succeeded',
    queue_name: 'media',
    payload: jobPayload,
    result,
    tags: ['media', 'download'],
    related_entity_type: 'media_download',
    related_entity_id: sourceUrl,
  })
}
/**
 * Merge a create/update payload onto an existing post (or a fresh object).
 * Accepts both snake_case and camelCase field names from the client.
 *
 * NOTE(review): several fields (category, image, publish_at, unpublish_at,
 * canonical_url, og_image, redirect_to) do NOT fall back to `current` — a
 * PATCH that omits them clears them to null. Confirm this clobber behavior
 * is intended by the admin UI before reusing this helper elsewhere.
 */
function upsertPostFromPayload(current, payload) {
const next = current ? { ...current } : {}
const title = normalizeText(payload.title) || normalizeText(current?.title) || '未命名文章'
const slug = normalizeText(payload.slug) || normalizeText(current?.slug) || slugify(title)
const content = payload.content ?? current?.content ?? buildMarkdown(title, ['待补充内容。'])
const parsed = parseHeadingAndDescription(content, title)
// `title` is always non-empty here (fallback above), so parsed.title is
// effectively unreachable on this line.
next.title = title || parsed.title
next.slug = slug
next.description = normalizeText(payload.description) || parsed.description || current?.description || ''
next.content = content
next.category = normalizeText(payload.category) || null
next.tags = Array.isArray(payload.tags) ? payload.tags.filter(Boolean) : current?.tags || []
next.post_type = normalizeText(payload.post_type || payload.postType) || current?.post_type || 'article'
next.image = normalizeText(payload.image) || null
next.images = Array.isArray(payload.images) ? payload.images.filter(Boolean) : current?.images || []
// Booleans use hasOwn so an explicit false in the payload is respected.
next.pinned = Object.hasOwn(payload, 'pinned') ? Boolean(payload.pinned) : Boolean(current?.pinned)
// Priority: explicit status string > legacy `published` boolean > current > draft.
next.status =
normalizeText(payload.status) ||
(Object.hasOwn(payload, 'published') ? (payload.published ? 'published' : 'draft') : '') ||
current?.status ||
'draft'
next.visibility = normalizeText(payload.visibility) || current?.visibility || 'public'
next.publish_at = normalizeText(payload.publish_at || payload.publishAt) || null
next.unpublish_at = normalizeText(payload.unpublish_at || payload.unpublishAt) || null
next.canonical_url = normalizeText(payload.canonical_url || payload.canonicalUrl) || null
next.noindex = Object.hasOwn(payload, 'noindex') ? Boolean(payload.noindex) : Boolean(current?.noindex)
next.og_image = normalizeText(payload.og_image || payload.ogImage) || null
next.redirect_from = Array.isArray(payload.redirect_from || payload.redirectFrom)
? (payload.redirect_from || payload.redirectFrom).filter(Boolean)
: current?.redirect_from || []
next.redirect_to = normalizeText(payload.redirect_to || payload.redirectTo) || null
next.created_at = current?.created_at || iso(-1)
next.updated_at = iso(-1)
return next
}
/**
 * Build a category/tag record from a create-or-update payload.
 * Accepts snake_case and camelCase field names. id / count / created_at
 * carry over from `current` when updating; the slug is derived from the
 * name when not provided explicitly.
 */
function makeTaxonomyRecord(bucket, payload, current = null) {
  const name = normalizeText(payload.name) || current?.name || ''
  const slug = normalizeText(payload.slug) || slugify(payload.name || current?.name || '')
  return {
    id: current?.id ?? nextId(bucket),
    name,
    slug,
    count: current?.count ?? 0,
    description: normalizeText(payload.description) || null,
    cover_image: normalizeText(payload.cover_image || payload.coverImage) || null,
    accent_color: normalizeText(payload.accent_color || payload.accentColor) || null,
    seo_title: normalizeText(payload.seo_title || payload.seoTitle) || null,
    seo_description: normalizeText(payload.seo_description || payload.seoDescription) || null,
    created_at: current?.created_at || iso(-1),
    updated_at: iso(-1),
  }
}
/** Look up a stored post revision by id; null when it does not exist. */
function findRevisionById(id) {
  const match = state.post_revisions.find((revision) => revision.id === id)
  return match ?? null
}
/**
 * Rebuild the entire in-memory mock state from the seed data.
 *
 * Order matters: taxonomy counts are recomputed from the fresh posts, a
 * bootstrap audit entry is recorded, then one 'seed' revision is added per
 * post. The reversed copy iterates posts back-to-front — presumably so
 * revision ordering follows post age; confirm against createInitialState.
 */
function resetState() {
state = createInitialState()
recalculateTaxonomyCounts()
addAuditLog('seed.bootstrap', 'workspace', 'playwright-smoke', 'seed', {
posts: state.posts.length,
reviews: state.reviews.length,
})
for (const post of state.posts.slice().reverse()) {
addPostRevision(post, 'seed', '初始 mock 数据')
}
}
// Seed the mock state once at module load so the server starts populated.
resetState()
/**
 * Project a stored post onto the public API response shape.
 * Array-valued fields are defensively copied; any extra internal fields
 * on the stored record are dropped.
 */
function normalizePostResponse(post) {
  const copyOf = (list) => [...(list || [])]
  return {
    created_at: post.created_at,
    updated_at: post.updated_at,
    id: post.id,
    title: post.title,
    slug: post.slug,
    description: post.description,
    content: post.content,
    category: post.category,
    tags: copyOf(post.tags),
    post_type: post.post_type,
    image: post.image,
    images: copyOf(post.images),
    pinned: post.pinned,
    status: post.status,
    visibility: post.visibility,
    publish_at: post.publish_at,
    unpublish_at: post.unpublish_at,
    canonical_url: post.canonical_url,
    noindex: post.noindex,
    og_image: post.og_image,
    redirect_from: copyOf(post.redirect_from),
    redirect_to: post.redirect_to,
  }
}
/**
 * Return a sorted copy of `items` by `sortBy` in the given order.
 * Missing values compare as ''. Ties always break by descending id,
 * regardless of the requested direction.
 */
function sortItems(items, sortBy = 'created_at', sortOrder = 'desc') {
  const ascending = String(sortOrder || 'desc').toLowerCase() === 'asc'
  const direction = ascending ? 1 : -1
  const key = sortBy || 'created_at'
  const copy = [...items]
  copy.sort((a, b) => {
    const aValue = a[key] ?? ''
    const bValue = b[key] ?? ''
    if (aValue < bValue) return -direction
    if (aValue > bValue) return direction
    return (b.id || 0) - (a.id || 0)
  })
  return copy
}
/**
 * Apply /api/posts list query-string filters to a post collection.
 * Exact-match filters are case-insensitive except `slug` (exact) and
 * `pinned` (literal query value vs String(post.pinned)). `search` does a
 * case-insensitive substring match over the main text fields.
 */
function filterPostsForQuery(items, searchParams) {
  const lower = (value) => normalizeText(value).toLowerCase()
  const slug = normalizeText(searchParams.get('slug'))
  const category = lower(searchParams.get('category'))
  const tag = lower(searchParams.get('tag'))
  const search = lower(searchParams.get('search'))
  const postType = lower(searchParams.get('type'))
  const status = lower(searchParams.get('status'))
  const visibility = lower(searchParams.get('visibility'))
  const pinned = searchParams.get('pinned')
  return items.filter((post) => {
    if (slug && post.slug !== slug) return false
    if (category && lower(post.category) !== category) return false
    if (tag && !(post.tags || []).some((entry) => lower(entry) === tag)) return false
    if (postType && lower(post.post_type) !== postType) return false
    if (status && lower(post.status) !== status) return false
    if (visibility && lower(post.visibility) !== visibility) return false
    if (pinned !== null && String(post.pinned) !== pinned) return false
    if (!search) return true
    const haystack = [
      post.title,
      post.slug,
      post.description,
      post.content,
      post.category,
      ...(post.tags || []),
    ]
      .filter(Boolean)
      .join('\n')
      .toLowerCase()
    return haystack.includes(search)
  })
}
/**
 * Sort, paginate and map a collection per the standard list query params
 * (page, page_size, sort_by, sort_order). The page is clamped into
 * [1, total_pages]; an empty collection still reports one (empty) page.
 */
function buildPagedResponse(items, searchParams, mapper = (value) => value) {
  const readPositiveInt = (name, fallback) => {
    const raw = searchParams.get(name) || String(fallback)
    return Math.max(1, Number.parseInt(raw, 10) || fallback)
  }
  const page = readPositiveInt('page', 1)
  const pageSize = readPositiveInt('page_size', 10)
  const sortBy = searchParams.get('sort_by') || 'created_at'
  const sortOrder = searchParams.get('sort_order') || 'desc'
  const sorted = sortItems(items, sortBy, sortOrder)
  const total = sorted.length
  const totalPages = Math.max(1, Math.ceil(total / pageSize))
  const safePage = Math.min(page, totalPages)
  const offset = (safePage - 1) * pageSize
  return {
    items: sorted.slice(offset, offset + pageSize).map(mapper),
    page: safePage,
    page_size: pageSize,
    total,
    total_pages: totalPages,
    sort_by: sortBy,
    sort_order: sortOrder,
  }
}
/** Shape a post plus its relevance score into a search API result row. */
function makeSearchResult(post, rank) {
  const { id, title, slug, description, content, category, post_type, image, pinned, created_at, updated_at } = post
  return {
    id,
    title,
    slug,
    description,
    content,
    category,
    tags: [...(post.tags || [])],
    post_type,
    image,
    pinned,
    created_at,
    updated_at,
    rank,
  }
}
/**
 * Score visible posts against a free-text query with optional
 * category/tag/type filters, returning ranked search results.
 *
 * Scoring: +20 any-field substring hit, +30 title hit, +10 tag hit,
 * +5 pinned bonus; posts scoring 0 are dropped, ties break by newer id.
 *
 * NOTE(review): only the +20 branch guards against an empty query — for
 * an empty `normalizedQuery`, includes('') is always true, so every post
 * earns +30 (and +10 with any tag) and the full visible set is returned.
 * Confirm whether "empty query returns everything" is intended.
 */
function searchPosts(query, filters = {}) {
const normalizedQuery = normalizeText(query).toLowerCase()
let items = state.posts.filter((post) => isPostVisible(post))
if (filters.category) {
items = items.filter((post) => normalizeText(post.category).toLowerCase() === normalizeText(filters.category).toLowerCase())
}
if (filters.tag) {
items = items.filter((post) =>
(post.tags || []).some((item) => normalizeText(item).toLowerCase() === normalizeText(filters.tag).toLowerCase()),
)
}
if (filters.type) {
items = items.filter((post) => normalizeText(post.post_type).toLowerCase() === normalizeText(filters.type).toLowerCase())
}
return items
.map((post) => {
const haystack = [post.title, post.description, post.content, post.category, ...(post.tags || [])]
.filter(Boolean)
.join('\n')
.toLowerCase()
let score = 0
if (normalizedQuery && haystack.includes(normalizedQuery)) score += 20
if (post.title.toLowerCase().includes(normalizedQuery)) score += 30
if ((post.tags || []).some((tag) => tag.toLowerCase().includes(normalizedQuery))) score += 10
if (post.pinned) score += 5
return { post, score }
})
.filter((item) => item.score > 0)
.sort((left, right) => right.score - left.score || right.post.id - left.post.id)
.map((item) => makeSearchResult(item.post, item.score))
}
/**
 * Issue a fixed captcha challenge (always "3 + 4 = ?" with answer "7")
 * and register its token so verifyCaptcha can validate the reply later.
 */
function issueCaptcha() {
  const question = '3 + 4 = ?'
  const token = randomUUID()
  state.captcha_tokens.set(token, { answer: '7', question })
  return { token, question, expires_in_seconds: 300 }
}
/** True when the token maps to a known challenge and the trimmed answer matches. */
function verifyCaptcha(token, answer) {
  const challenge = state.captcha_tokens.get(String(token || ''))
  if (!challenge) return false
  return normalizeText(answer) === String(challenge.answer)
}
/**
 * Extract the lowercase comment field a blacklist rule matches against.
 * Unknown matcher types fall back to the ip_address field.
 */
function getCommentMatcherValue(comment, matcherType) {
  switch (matcherType) {
    case 'email':
      return normalizeText(comment.email).toLowerCase()
    case 'user_agent':
      return normalizeText(comment.user_agent).toLowerCase()
    default:
      return normalizeText(comment.ip_address).toLowerCase()
  }
}
/** True when any active blacklist rule matches the comment's corresponding field. */
function isBlacklistedComment(comment) {
  const matchesRule = (rule) =>
    rule.active &&
    getCommentMatcherValue(comment, rule.matcher_type) === normalizeText(rule.matcher_value).toLowerCase()
  return state.comment_blacklist.some(matchesRule)
}
/**
 * Render a small placeholder SVG used for mock media / cover assets.
 *
 * Bug fix: the previous implementation returned an empty template literal
 * (leaving `safeLabel` and `accent` unused), which is not a valid SVG
 * document even though sendSvg serves it as image/svg+xml. Now produces a
 * complete standalone SVG with the label drawn over an accent background.
 *
 * @param {string} label - Text drawn on the placeholder (XML-escaped here).
 * @param {string} [accent='#14b8a6'] - Background fill color.
 * @returns {string} A complete SVG document string.
 */
function createMockImageSvg(label, accent = '#14b8a6') {
  const safeLabel = String(label || 'mock asset').replace(/[&<>"']/g, (ch) => ({
    '&': '&amp;',
    '<': '&lt;',
    '>': '&gt;',
    '"': '&quot;',
    "'": '&#39;',
  })[ch])
  return [
    '<svg xmlns="http://www.w3.org/2000/svg" width="640" height="360" viewBox="0 0 640 360" role="img">',
    `<rect width="640" height="360" fill="${accent}" />`,
    `<text x="320" y="188" fill="#ffffff" font-family="monospace" font-size="28" text-anchor="middle">${safeLabel}</text>`,
    '</svg>',
  ].join('')
}
/** Serve a generated placeholder SVG with a short public cache lifetime. */
function sendSvg(res, label, accent) {
  const headers = {
    'content-type': CONTENT_TYPES.svg,
    'cache-control': 'public, max-age=60',
  }
  text(res, 200, createMockImageSvg(label, accent), headers)
}
/** Finish the response with the given status/headers and an empty body. */
function writeEmpty(res, status = 204, headers = {}) {
  res.writeHead(status, headers).end()
}
/** Respond with the mock API's standard 404 JSON error envelope. */
function notFound(res, message = 'not found') {
  const body = { error: 'not_found', description: message }
  json(res, 404, body)
}
/** Respond with the mock API's standard 400 JSON error envelope. */
function badRequest(res, message) {
  const body = { error: 'bad_request', description: message }
  json(res, 400, body)
}
/**
 * Snapshot of the mock state for the /__playwright/state debug endpoint.
 * Small collections are deep-cloned whole; larger ones (posts, media,
 * friend_links, comments, post_revisions, subscriptions) are projected to
 * just the fields tests assert on, to keep the payload compact.
 */
function latestDebugState() {
return {
site_settings: clone(state.site_settings),
categories: state.categories.map((item) => clone(item)),
tags: state.tags.map((item) => clone(item)),
posts: state.posts.map((item) => ({
id: item.id,
slug: item.slug,
title: item.title,
category: item.category,
tags: [...(item.tags || [])],
status: item.status,
visibility: item.visibility,
content: item.content,
updated_at: item.updated_at,
})),
reviews: state.reviews.map((item) => clone(item)),
subscriptions: state.subscriptions.map((item) => ({
id: item.id,
target: item.target,
display_name: item.display_name,
channel_type: item.channel_type,
confirm_token: item.confirm_token,
manage_token: item.manage_token,
status: item.status,
filters: clone(item.filters),
})),
deliveries: state.deliveries.map((item) => clone(item)),
worker_jobs: state.worker_jobs.map((item) => clone(item)),
media: state.media.map((item) => ({
key: item.key,
title: item.title,
alt_text: item.alt_text,
tags: [...item.tags],
url: item.url,
size_bytes: item.size_bytes,
})),
friend_links: state.friend_links.map((item) => ({
id: item.id,
site_name: item.site_name,
status: item.status,
})),
comments: state.comments.map((item) => ({
id: item.id,
post_slug: item.post_slug,
scope: item.scope,
approved: item.approved,
author: item.author,
})),
comment_blacklist: state.comment_blacklist.map((item) => clone(item)),
post_revisions: state.post_revisions.map((item) => ({
id: item.id,
post_slug: item.post_slug,
operation: item.operation,
revision_reason: item.revision_reason,
created_at: item.created_at,
})),
audit_logs: state.audit_logs.map((item) => clone(item)),
}
}
/**
 * Build the /api/site_settings/home payload: settings, publicly visible
 * posts, taxonomies, friend links, plus canned (hard-coded) analytics for
 * the 24h / 7d / 30d ranges. `popular_posts` at the top level mirrors the
 * 7d range entry.
 */
function getHomePayload() {
const visiblePosts = state.posts.filter((post) => isPostVisible(post))
// Fixed analytics fixtures — the mock does not derive these from events.
const content_ranges = [
{
key: '24h',
label: '24h',
days: 1,
overview: { page_views: 18, read_completes: 4, avg_read_progress: 61, avg_read_duration_ms: 42000 },
popular_posts: [
{
slug: 'astro-terminal-blog',
title: 'Astro 终端博客信息架构实战',
page_views: 12,
read_completes: 3,
avg_progress_percent: 76,
avg_duration_ms: 51000,
},
],
},
{
key: '7d',
label: '7d',
days: 7,
overview: { page_views: 142, read_completes: 57, avg_read_progress: 74, avg_read_duration_ms: 58400 },
popular_posts: [
{
slug: 'playwright-regression-workflow',
title: 'Playwright 回归工作流设计',
page_views: 48,
read_completes: 25,
avg_progress_percent: 83,
avg_duration_ms: 61000,
},
{
slug: 'astro-terminal-blog',
title: 'Astro 终端博客信息架构实战',
page_views: 38,
read_completes: 18,
avg_progress_percent: 71,
avg_duration_ms: 54000,
},
],
},
{
key: '30d',
label: '30d',
days: 30,
overview: { page_views: 580, read_completes: 223, avg_read_progress: 69, avg_read_duration_ms: 50000 },
popular_posts: [
{
slug: 'playwright-regression-workflow',
title: 'Playwright 回归工作流设计',
page_views: 152,
read_completes: 79,
avg_progress_percent: 81,
avg_duration_ms: 63000,
},
{
slug: 'docker-rollout-checklist',
title: 'Docker 发布值班清单',
page_views: 118,
read_completes: 42,
avg_progress_percent: 64,
avg_duration_ms: 47000,
},
],
},
]
return {
site_settings: clone(state.site_settings),
posts: visiblePosts.map(normalizePostResponse),
tags: state.tags.map((item) => clone(item)),
friend_links: state.friend_links.map((item) => clone(item)),
categories: state.categories.map((item) => clone(item)),
content_overview: {
total_page_views: 1642,
page_views_last_24h: 18,
page_views_last_7d: 142,
total_read_completes: 628,
read_completes_last_7d: 57,
avg_read_progress_last_7d: 74,
avg_read_duration_ms_last_7d: 58400,
},
// Index 1 is the '7d' range defined above.
popular_posts: content_ranges[1].popular_posts,
content_ranges,
}
}
const server = createServer(async (req, res) => {
reflectCors(req, res)
if (req.method === 'OPTIONS') {
writeEmpty(res, 204)
return
}
const url = new URL(req.url || '/', MOCK_ORIGIN)
const { pathname, searchParams } = url
if (pathname === '/__playwright/health') {
json(res, 200, { ok: true })
return
}
if (pathname === '/__playwright/reset' && req.method === 'POST') {
resetState()
json(res, 200, { ok: true })
return
}
if (pathname === '/__playwright/state') {
json(res, 200, latestDebugState())
return
}
if (pathname.startsWith('/media-files/') || pathname.startsWith('/review-covers/') || pathname.startsWith('/generated/')) {
sendSvg(res, pathname.split('/').pop()?.replace(/\.(svg|png|jpg|jpeg)$/i, '') || 'asset')
return
}
if (pathname.startsWith('/media/')) {
const key = decodeURIComponent(pathname.replace('/media/', ''))
const media = state.media.find((item) => item.key === key)
if (!media) {
notFound(res, '媒体不存在。')
return
}
if (media.content_type === CONTENT_TYPES.svg) {
sendSvg(res, media.title || media.key, '#8b5cf6')
return
}
text(res, 200, media.body, { 'content-type': media.content_type })
return
}
if (pathname === '/api/admin/session' && req.method === 'GET') {
json(res, 200, createSessionResponse(isAuthenticated(req)))
return
}
if (pathname === '/api/admin/session/login' && req.method === 'POST') {
const { json: payload } = await parseRequest(req)
if (
normalizeText(payload.username) !== VALID_LOGIN.username ||
normalizeText(payload.password) !== VALID_LOGIN.password
) {
badRequest(res, '用户名或密码错误。')
return
}
json(res, 200, createSessionResponse(true), {
'set-cookie': `${SESSION_COOKIE}=${SESSION_VALUE}; Path=/; HttpOnly; SameSite=Lax`,
})
return
}
if (pathname === '/api/admin/session' && req.method === 'DELETE') {
json(res, 200, createSessionResponse(false), {
'set-cookie': `${SESSION_COOKIE}=; Path=/; Max-Age=0; HttpOnly; SameSite=Lax`,
})
return
}
if (pathname === '/api/site_settings' && req.method === 'GET') {
json(res, 200, clone(state.site_settings))
return
}
if (pathname === '/api/site_settings/home' && req.method === 'GET') {
json(res, 200, getHomePayload())
return
}
if (pathname === '/api/categories' && req.method === 'GET') {
json(res, 200, state.categories.map((item) => clone(item)))
return
}
if (pathname === '/api/tags' && req.method === 'GET') {
json(res, 200, state.tags.map((item) => clone(item)))
return
}
if (pathname === '/api/reviews' && req.method === 'GET') {
const items = isAuthenticated(req)
? state.reviews
: state.reviews.filter((item) => isPublicReviewVisible(item))
json(res, 200, items.map((item) => clone(item)))
return
}
if (pathname === '/api/reviews' && req.method === 'POST') {
const { json: payload } = await parseRequest(req)
const title = normalizeText(payload.title)
if (!title) {
badRequest(res, '评测标题不能为空。')
return
}
const record = {
id: nextId('review'),
title,
review_type: normalizeText(payload.review_type) || 'book',
rating: Number(payload.rating) || 4,
review_date: normalizeText(payload.review_date) || '2026-04-01',
status: normalizeText(payload.status) || 'draft',
description: normalizeText(payload.description) || '',
tags: JSON.stringify(Array.isArray(payload.tags) ? payload.tags.filter(Boolean) : []),
cover: normalizeText(payload.cover) || '',
link_url: normalizeText(payload.link_url) || null,
created_at: iso(-1),
updated_at: iso(-1),
}
state.reviews.unshift(record)
addAuditLog('review.create', 'review', record.title, record.id)
json(res, 201, clone(record))
return
}
if (pathname.match(/^\/api\/reviews\/\d+$/) && req.method === 'GET') {
const id = Number(pathname.split('/').pop())
const review = state.reviews.find((item) => item.id === id)
if (!review || (!isAuthenticated(req) && !isPublicReviewVisible(review))) {
notFound(res, '评测不存在。')
return
}
json(res, 200, clone(review))
return
}
if (pathname.match(/^\/api\/reviews\/\d+$/) && req.method === 'PUT') {
const id = Number(pathname.split('/').pop())
const record = state.reviews.find((item) => item.id === id)
if (!record) {
notFound(res, '评测不存在。')
return
}
const { json: payload } = await parseRequest(req)
record.title = normalizeText(payload.title) || record.title
record.review_type = normalizeText(payload.review_type) || record.review_type
record.rating = Number(payload.rating) || record.rating
record.review_date = normalizeText(payload.review_date) || record.review_date
record.status = normalizeText(payload.status) || record.status
record.description = normalizeText(payload.description) || ''
record.tags = JSON.stringify(Array.isArray(payload.tags) ? payload.tags.filter(Boolean) : [])
record.cover = normalizeText(payload.cover) || ''
record.link_url = normalizeText(payload.link_url) || null
record.updated_at = iso(-1)
addAuditLog('review.update', 'review', record.title, record.id)
json(res, 200, clone(record))
return
}
if (pathname.match(/^\/api\/reviews\/\d+$/) && req.method === 'DELETE') {
const id = Number(pathname.split('/').pop())
const index = state.reviews.findIndex((item) => item.id === id)
if (index === -1) {
notFound(res, '评测不存在。')
return
}
const [removed] = state.reviews.splice(index, 1)
addAuditLog('review.delete', 'review', removed.title, removed.id)
writeEmpty(res, 204)
return
}
if (pathname === '/api/friend_links' && req.method === 'GET') {
let items = [...state.friend_links]
const status = normalizeText(searchParams.get('status'))
const category = normalizeText(searchParams.get('category'))
if (status) items = items.filter((item) => normalizeText(item.status).toLowerCase() === status.toLowerCase())
if (category) items = items.filter((item) => normalizeText(item.category).toLowerCase() === category.toLowerCase())
json(res, 200, items.map((item) => clone(item)))
return
}
if (pathname === '/api/friend_links' && req.method === 'POST') {
const { json: payload } = await parseRequest(req)
const record = {
id: nextId('friend_link'),
site_name: normalizeText(payload.siteName || payload.site_name),
site_url: normalizeText(payload.siteUrl || payload.site_url),
avatar_url: null,
description: normalizeText(payload.description) || null,
category: normalizeText(payload.category) || 'other',
status: normalizeText(payload.status) || 'pending',
created_at: iso(-1),
updated_at: iso(-1),
}
if (!record.site_name || !record.site_url) {
badRequest(res, '站点名称和 URL 不能为空。')
return
}
state.friend_links.unshift(record)
addAuditLog('friend_link.create', 'friend_link', record.site_name, record.id, { status: record.status })
json(res, 201, clone(record))
return
}
if (pathname === '/api/comments/captcha' && req.method === 'GET') {
json(res, 200, issueCaptcha())
return
}
if (pathname === '/api/comments/paragraphs/summary' && req.method === 'GET') {
const postSlug = normalizeText(searchParams.get('post_slug'))
const counts = new Map()
for (const comment of state.comments) {
if (comment.post_slug !== postSlug || comment.scope !== 'paragraph' || !comment.approved) continue
if (!comment.paragraph_key) continue
counts.set(comment.paragraph_key, (counts.get(comment.paragraph_key) || 0) + 1)
}
json(res, 200, Array.from(counts.entries()).map(([paragraph_key, count]) => ({ paragraph_key, count })))
return
}
if (pathname === '/api/comments' && req.method === 'GET') {
let items = [...state.comments]
const postSlug = normalizeText(searchParams.get('post_slug'))
const scope = normalizeText(searchParams.get('scope'))
const paragraphKey = normalizeText(searchParams.get('paragraph_key'))
const approved = searchParams.get('approved')
if (postSlug) items = items.filter((item) => item.post_slug === postSlug)
if (scope) items = items.filter((item) => item.scope === scope)
if (paragraphKey) items = items.filter((item) => item.paragraph_key === paragraphKey)
if (approved !== null) items = items.filter((item) => String(Boolean(item.approved)) === approved)
json(res, 200, items.map((item) => clone(item)))
return
}
if (pathname === '/api/comments' && req.method === 'POST') {
const { json: payload } = await parseRequest(req)
const post = state.posts.find((item) => item.slug === normalizeText(payload.postSlug))
if (!post) {
notFound(res, '文章不存在。')
return
}
if (state.site_settings.comment_verification_mode === 'captcha' && !verifyCaptcha(payload.captchaToken, payload.captchaAnswer)) {
badRequest(res, '验证码错误。')
return
}
const record = {
id: nextId('comment'),
post_id: String(post.id),
post_slug: post.slug,
author: normalizeText(payload.nickname) || '匿名访客',
email: normalizeText(payload.email) || null,
avatar: null,
ip_address: '10.0.0.88',
user_agent: String(req.headers['user-agent'] || 'Playwright'),
referer: String(req.headers.referer || ''),
content: normalizeText(payload.content),
reply_to: null,
reply_to_comment_id: payload.replyToCommentId === null || payload.replyToCommentId === undefined ? null : Number(payload.replyToCommentId),
scope: normalizeText(payload.scope) || 'article',
paragraph_key: normalizeText(payload.paragraphKey) || null,
paragraph_excerpt: normalizeText(payload.paragraphExcerpt) || null,
approved: false,
created_at: iso(-1),
updated_at: iso(-1),
}
if (!record.content) {
badRequest(res, '评论内容不能为空。')
return
}
if (isBlacklistedComment(record)) {
json(res, 403, { error: 'comment_blocked', description: '该评论命中了黑名单规则,已被拒绝。' })
return
}
state.comments.unshift(record)
addAuditLog('comment.create', 'comment', record.author, record.id, { scope: record.scope, approved: record.approved })
json(res, 201, clone(record))
return
}
if (pathname === '/api/posts' && req.method === 'GET') {
const candidates = state.posts.filter((post) =>
isPostVisible(post, {
include_private: searchParams.get('include_private'),
preview: searchParams.get('preview'),
include_redirects: searchParams.get('include_redirects'),
}),
)
const filtered = filterPostsForQuery(candidates, searchParams)
json(res, 200, filtered.map(normalizePostResponse))
return
}
if (pathname === '/api/posts/page' && req.method === 'GET') {
const candidates = state.posts.filter((post) =>
isPostVisible(post, {
include_private: searchParams.get('include_private'),
preview: searchParams.get('preview'),
include_redirects: searchParams.get('include_redirects'),
}),
)
const filtered = filterPostsForQuery(candidates, searchParams)
json(res, 200, buildPagedResponse(filtered, searchParams, normalizePostResponse))
return
}
if (pathname.match(/^\/api\/posts\/\d+$/) && req.method === 'GET') {
const id = Number(pathname.split('/').pop())
const post = state.posts.find((item) => item.id === id)
if (!post || !isPostVisible(post, { preview: true, include_private: true, include_redirects: true })) {
notFound(res, '文章不存在。')
return
}
json(res, 200, normalizePostResponse(post))
return
}
if (pathname.match(/^\/api\/posts\/slug\/[^/]+$/) && req.method === 'GET') {
const slug = decodeURIComponent(pathname.split('/')[4])
const post = state.posts.find((item) => item.slug === slug)
if (!post || !isPostVisible(post, {
include_private: searchParams.get('include_private'),
preview: searchParams.get('preview'),
include_redirects: searchParams.get('include_redirects'),
})) {
notFound(res, '文章不存在。')
return
}
json(res, 200, normalizePostResponse(post))
return
}
if (pathname === '/api/posts/markdown' && req.method === 'POST') {
const { json: payload } = await parseRequest(req)
const record = upsertPostFromPayload(
{
id: nextId('post'),
created_at: iso(-1),
updated_at: iso(-1),
},
payload,
)
if (!record.slug || !record.title) {
badRequest(res, '文章标题不能为空。')
return
}
state.posts.unshift(record)
recalculateTaxonomyCounts()
addAuditLog('post.create', 'post', record.title, record.id, { slug: record.slug })
addPostRevision(record, 'create', '创建草稿')
json(res, 201, {
slug: record.slug,
path: `content/posts/${record.slug}.md`,
markdown: record.content,
})
return
}
if (pathname === '/api/posts/markdown/import' && req.method === 'POST') {
const { files } = await parseRequest(req)
const slugs = []
for (const file of files) {
const source = file.body?.toString('utf8') || '# Imported Post\n\nImported by playwright.'
const { title, description } = parseHeadingAndDescription(source, sanitizeFilename(file.filename, 'imported'))
const slug = slugify(title || sanitizeFilename(file.filename, 'imported'))
const record = {
id: nextId('post'),
title,
slug,
description,
content: source,
category: '测试体系',
tags: ['Playwright'],
post_type: 'article',
image: null,
images: [],
pinned: false,
status: 'draft',
visibility: 'public',
publish_at: null,
unpublish_at: null,
canonical_url: null,
noindex: false,
og_image: null,
redirect_from: [],
redirect_to: null,
created_at: iso(-1),
updated_at: iso(-1),
}
state.posts.unshift(record)
addPostRevision(record, 'import', `导入 ${file.filename}`)
slugs.push(slug)
}
recalculateTaxonomyCounts()
addAuditLog('post.import', 'workspace', 'markdown import', String(slugs.length), { slugs })
json(res, 200, { count: slugs.length, slugs })
return
}
if (pathname.match(/^\/api\/posts\/\d+$/) && req.method === 'PATCH') {
const id = Number(pathname.split('/').pop())
const record = state.posts.find((item) => item.id === id)
if (!record) {
notFound(res, '文章不存在。')
return
}
const { json: payload } = await parseRequest(req)
const updated = upsertPostFromPayload(record, payload)
Object.assign(record, updated)
recalculateTaxonomyCounts()
addAuditLog('post.update', 'post', record.title, record.id, { slug: record.slug })
addPostRevision(record, 'update', '保存文章属性')
json(res, 200, normalizePostResponse(record))
return
}
if (pathname === '/api/search' && req.method === 'GET') {
const q = searchParams.get('q') || ''
const limit = Math.max(1, Number.parseInt(searchParams.get('limit') || '20', 10) || 20)
json(res, 200, searchPosts(q).slice(0, limit))
return
}
if (pathname === '/api/search/page' && req.method === 'GET') {
const q = searchParams.get('q') || ''
const results = searchPosts(q, {
category: searchParams.get('category'),
tag: searchParams.get('tag'),
type: searchParams.get('type'),
})
json(res, 200, { query: q, ...buildPagedResponse(results, searchParams) })
return
}
if (pathname === '/api/subscriptions' && req.method === 'POST') {
const { json: payload } = await parseRequest(req)
const id = nextId('subscription')
const record = createSubscriptionRecord(id, {
target: normalizeText(payload.email) || `subscriber-${id}@example.com`,
display_name: normalizeText(payload.displayName) || null,
status: 'pending_confirmation',
filters: { event_types: ['post.published', 'digest.weekly'], categories: [], tags: [] },
metadata: { source: normalizeText(payload.source) || 'popup' },
verified_at: null,
last_notified_at: null,
last_delivery_status: null,
created_at: iso(-1),
updated_at: iso(-1),
})
state.subscriptions.unshift(record)
addAuditLog('subscription.create', 'subscription', record.target, record.id, { status: record.status })
json(res, 200, {
ok: true,
subscription_id: record.id,
status: record.status,
requires_confirmation: true,
message: '订阅已登记,请前往确认页完成激活。',
})
return
}
// POST /api/subscriptions/confirm — activate a pending subscription via its
// one-time confirm token. Invalid tokens get a 400.
if (pathname === '/api/subscriptions/confirm' && req.method === 'POST') {
const { json: payload } = await parseRequest(req)
const record = state.subscriptions.find((item) => item.confirm_token === normalizeText(payload.token))
if (!record) {
badRequest(res, '确认令牌无效。')
return
}
record.status = 'active'
record.verified_at = iso(-1)
record.updated_at = iso(-1)
json(res, 200, { ok: true, subscription: clone(record) })
return
}
// GET /api/subscriptions/manage — look up a subscription by its manage token.
if (pathname === '/api/subscriptions/manage' && req.method === 'GET') {
const token = normalizeText(searchParams.get('token'))
const record = state.subscriptions.find((item) => item.manage_token === token)
if (!record) {
badRequest(res, '管理令牌无效。')
return
}
json(res, 200, { ok: true, subscription: clone(record) })
return
}
// PATCH /api/subscriptions/manage — self-service update by manage token.
if (pathname === '/api/subscriptions/manage' && req.method === 'PATCH') {
const { json: payload } = await parseRequest(req)
const record = state.subscriptions.find((item) => item.manage_token === normalizeText(payload.token))
if (!record) {
badRequest(res, '管理令牌无效。')
return
}
// NOTE(review): `??` keeps the existing value when the field is omitted OR
// explicitly null, so this public endpoint cannot clear display_name/filters
// (unlike the admin PATCH, which sets null when the key is present) — confirm
// this asymmetry is intended.
record.display_name = payload.displayName ?? record.display_name
record.status = normalizeText(payload.status) || record.status
record.filters = payload.filters ?? record.filters
record.updated_at = iso(-1)
json(res, 200, { ok: true, subscription: clone(record) })
return
}
// POST /api/subscriptions/unsubscribe — mark the subscription unsubscribed
// (record is kept; only the status changes).
if (pathname === '/api/subscriptions/unsubscribe' && req.method === 'POST') {
const { json: payload } = await parseRequest(req)
const record = state.subscriptions.find((item) => item.manage_token === normalizeText(payload.token))
if (!record) {
badRequest(res, '管理令牌无效。')
return
}
record.status = 'unsubscribed'
record.updated_at = iso(-1)
json(res, 200, { ok: true, subscription: clone(record) })
return
}
// POST /api/analytics/content — fire-and-forget content event ingestion;
// the payload is stored as-is (newest first) and 204 is returned.
if (pathname === '/api/analytics/content' && req.method === 'POST') {
const { json: payload } = await parseRequest(req)
state.content_events.unshift({ ...payload, created_at: iso(-1) })
writeEmpty(res, 204)
return
}
// POST /api/ai/ask/stream — canned Server-Sent-Events answer stream.
// Emits, in order: one `status` event, three `delta` events whose chunks
// concatenate to exactly the `answer` string, then one `complete` event
// carrying the full payload (answer + sources + index stats).
if (pathname === '/api/ai/ask/stream' && req.method === 'POST') {
const { json: payload } = await parseRequest(req)
const question = normalizeText(payload.question) || '未提供问题'
const completePayload = {
question,
answer:
'Termi 的内容主要集中在前端工程、回归测试、值班运维和 AI 工作流。\n\n你可以优先看 Playwright 回归工作流、Astro 终端博客信息架构,以及 Docker 发布值班清单这三篇。',
// Sources: first three publicly visible posts, with decreasing mock scores.
sources: state.posts.filter((item) => isPostVisible(item)).slice(0, 3).map((item, index) => ({
slug: item.slug,
href: `${FRONTEND_ORIGIN}/articles/${item.slug}`,
title: item.title,
excerpt: item.description,
score: 0.94 - index * 0.07,
chunk_index: index,
})),
indexed_chunks: state.site_settings.ai_chunks_count,
last_indexed_at: state.site_settings.ai_last_indexed_at,
}
res.writeHead(200, {
'content-type': CONTENT_TYPES.sse,
'cache-control': 'no-store',
connection: 'keep-alive',
})
res.write(`event: status\ndata: ${JSON.stringify({ stage: 'searching' })}\n\n`)
for (const chunk of [
'Termi 的内容主要集中在前端工程、回归测试、值班运维和 AI 工作流。',
'\n\n你可以优先看 Playwright 回归工作流、Astro 终端博客信息架构,',
'以及 Docker 发布值班清单这三篇。',
]) {
res.write(`event: delta\ndata: ${JSON.stringify({ delta: chunk })}\n\n`)
}
res.write(`event: complete\ndata: ${JSON.stringify(completePayload)}\n\n`)
res.end()
return
}
// All /api/admin/* routes require an authenticated admin session.
// ensureAdmin writes the error response itself when the check fails.
if (pathname.startsWith('/api/admin/')) {
if (!ensureAdmin(req, res)) return
// GET /api/admin/dashboard — aggregate counters plus short "recent/pending"
// previews (capped at 5 items each) for the admin home screen.
if (pathname === '/api/admin/dashboard' && req.method === 'GET') {
const pendingComments = state.comments.filter((item) => !item.approved)
const pendingFriendLinks = state.friend_links.filter((item) => item.status === 'pending')
json(res, 200, {
stats: {
total_posts: state.posts.length,
total_comments: state.comments.length,
pending_comments: pendingComments.length,
draft_posts: state.posts.filter((item) => item.status === 'draft').length,
scheduled_posts: state.posts.filter((item) => item.status === 'scheduled').length,
// Fixed at 0 in the mock: no offline/expired lifecycle is simulated.
offline_posts: 0,
expired_posts: 0,
private_posts: state.posts.filter((item) => item.visibility === 'private').length,
unlisted_posts: state.posts.filter((item) => item.visibility === 'unlisted').length,
total_categories: state.categories.length,
total_tags: state.tags.length,
total_reviews: state.reviews.length,
total_links: state.friend_links.length,
pending_links: pendingFriendLinks.length,
ai_chunks: state.site_settings.ai_chunks_count,
ai_enabled: state.site_settings.ai_enabled,
},
site: {
site_name: state.site_settings.site_name,
site_url: state.site_settings.site_url,
ai_enabled: state.site_settings.ai_enabled,
ai_chunks: state.site_settings.ai_chunks_count,
ai_last_indexed_at: state.site_settings.ai_last_indexed_at,
},
recent_posts: state.posts.slice(0, 5).map((item) => ({
id: item.id,
title: item.title,
slug: item.slug,
category: item.category,
post_type: item.post_type,
pinned: item.pinned,
status: item.status,
visibility: item.visibility,
created_at: item.created_at,
})),
pending_comments: pendingComments.slice(0, 5).map((item) => ({
id: item.id,
author: item.author,
post_slug: item.post_slug,
scope: item.scope,
// Truncated preview; full text lives on the comment record.
excerpt: item.content?.slice(0, 80) || '',
approved: Boolean(item.approved),
created_at: item.created_at,
})),
pending_friend_links: pendingFriendLinks.slice(0, 5).map((item) => ({
id: item.id,
site_name: item.site_name,
site_url: item.site_url,
category: item.category,
status: item.status,
created_at: item.created_at,
})),
recent_reviews: state.reviews.slice(0, 5).map((item) => ({
id: item.id,
title: item.title,
review_type: item.review_type,
rating: item.rating,
status: item.status,
review_date: item.review_date,
})),
})
return
}
// GET /api/admin/analytics — canned analytics fixture. All counters are
// hard-coded mock values; only popular_posts is derived (from getHomePayload()).
if (pathname === '/api/admin/analytics' && req.method === 'GET') {
json(res, 200, {
overview: {
total_searches: 218,
total_ai_questions: 64,
searches_last_24h: 12,
ai_questions_last_24h: 5,
searches_last_7d: 66,
ai_questions_last_7d: 18,
unique_search_terms_last_7d: 21,
unique_ai_questions_last_7d: 9,
avg_search_results_last_7d: 5.4,
avg_ai_latency_ms_last_7d: 286,
},
content_overview: {
total_page_views: 1642,
page_views_last_24h: 18,
page_views_last_7d: 142,
total_read_completes: 628,
read_completes_last_7d: 57,
avg_read_progress_last_7d: 74,
avg_read_duration_ms_last_7d: 58400,
},
top_search_terms: [{ query: 'playwright', count: 22, last_seen_at: iso(-8) }],
top_ai_questions: [{ query: '这个博客主要写什么内容?', count: 8, last_seen_at: iso(-6) }],
recent_events: [],
providers_last_7d: [{ provider: 'mock-openai', count: 18 }],
top_referrers: [{ referrer: 'homepage', count: 44 }],
popular_posts: getHomePayload().popular_posts,
daily_activity: [{ date: '2026-04-01', searches: 9, ai_questions: 5 }],
})
return
}
if (pathname === '/api/admin/categories' && req.method === 'GET') {
json(res, 200, state.categories.map((item) => clone(item)))
return
}
if (pathname === '/api/admin/categories' && req.method === 'POST') {
const { json: payload } = await parseRequest(req)
const record = makeTaxonomyRecord('category', payload)
state.categories.unshift(record)
recalculateTaxonomyCounts()
addAuditLog('category.create', 'category', record.name, record.id)
json(res, 201, clone(record))
return
}
if (pathname.match(/^\/api\/admin\/categories\/\d+$/) && req.method === 'PATCH') {
const id = Number(pathname.split('/').pop())
const index = state.categories.findIndex((item) => item.id === id)
if (index === -1) {
notFound(res, '分类不存在。')
return
}
const current = state.categories[index]
const { json: payload } = await parseRequest(req)
const previousName = current.name
const updated = makeTaxonomyRecord('category', payload, current)
state.categories[index] = updated
if (previousName !== updated.name) {
state.posts.forEach((post) => {
if (post.category === previousName) {
post.category = updated.name
}
})
}
recalculateTaxonomyCounts()
addAuditLog('category.update', 'category', updated.name, updated.id)
json(res, 200, clone(updated))
return
}
if (pathname.match(/^\/api\/admin\/categories\/\d+$/) && req.method === 'DELETE') {
const id = Number(pathname.split('/').pop())
const index = state.categories.findIndex((item) => item.id === id)
if (index === -1) {
notFound(res, '分类不存在。')
return
}
const [removed] = state.categories.splice(index, 1)
state.posts.forEach((post) => {
if (post.category === removed.name) {
post.category = null
}
})
recalculateTaxonomyCounts()
addAuditLog('category.delete', 'category', removed.name, removed.id)
writeEmpty(res, 204)
return
}
// GET /api/admin/tags — list every tag as a defensive copy.
if (pathname === '/api/admin/tags' && req.method === 'GET') {
json(res, 200, state.tags.map((item) => clone(item)))
return
}
// POST /api/admin/tags — create a tag from the request payload.
if (pathname === '/api/admin/tags' && req.method === 'POST') {
const { json: payload } = await parseRequest(req)
const record = makeTaxonomyRecord('tag', payload)
state.tags.unshift(record)
recalculateTaxonomyCounts()
addAuditLog('tag.create', 'tag', record.name, record.id)
json(res, 201, clone(record))
return
}
// PATCH /api/admin/tags/:id — update a tag; a rename is rewritten into
// every post's tag list.
if (pathname.match(/^\/api\/admin\/tags\/\d+$/) && req.method === 'PATCH') {
const id = Number(pathname.split('/').pop())
const index = state.tags.findIndex((item) => item.id === id)
if (index === -1) {
notFound(res, '标签不存在。')
return
}
const current = state.tags[index]
const { json: payload } = await parseRequest(req)
const previousName = current.name
const updated = makeTaxonomyRecord('tag', payload, current)
state.tags[index] = updated
if (previousName !== updated.name) {
state.posts.forEach((post) => {
post.tags = (post.tags || []).map((tag) => (tag === previousName ? updated.name : tag))
})
}
recalculateTaxonomyCounts()
addAuditLog('tag.update', 'tag', updated.name, updated.id)
json(res, 200, clone(updated))
return
}
// DELETE /api/admin/tags/:id — remove a tag and strip it from all posts.
if (pathname.match(/^\/api\/admin\/tags\/\d+$/) && req.method === 'DELETE') {
const id = Number(pathname.split('/').pop())
const index = state.tags.findIndex((item) => item.id === id)
if (index === -1) {
notFound(res, '标签不存在。')
return
}
const [removed] = state.tags.splice(index, 1)
state.posts.forEach((post) => {
post.tags = (post.tags || []).filter((tag) => tag !== removed.name)
})
recalculateTaxonomyCounts()
addAuditLog('tag.delete', 'tag', removed.name, removed.id)
writeEmpty(res, 204)
return
}
// GET /api/admin/site-settings — the full settings record, cloned.
if (pathname === '/api/admin/site-settings' && req.method === 'GET') {
json(res, 200, clone(state.site_settings))
return
}
// PATCH /api/admin/site-settings — partial update. fieldMap translates the
// camelCase API field names to the snake_case keys used in state; only keys
// actually present on the payload are applied, and values are stored
// verbatim (no validation — this is a test mock).
if (pathname === '/api/admin/site-settings' && req.method === 'PATCH') {
const { json: payload } = await parseRequest(req)
const fieldMap = {
siteName: 'site_name',
siteShortName: 'site_short_name',
siteUrl: 'site_url',
siteTitle: 'site_title',
siteDescription: 'site_description',
heroTitle: 'hero_title',
heroSubtitle: 'hero_subtitle',
ownerName: 'owner_name',
ownerTitle: 'owner_title',
ownerBio: 'owner_bio',
ownerAvatarUrl: 'owner_avatar_url',
socialGithub: 'social_github',
socialTwitter: 'social_twitter',
socialEmail: 'social_email',
location: 'location',
techStack: 'tech_stack',
musicPlaylist: 'music_playlist',
aiEnabled: 'ai_enabled',
paragraphCommentsEnabled: 'paragraph_comments_enabled',
commentVerificationMode: 'comment_verification_mode',
commentTurnstileEnabled: 'comment_turnstile_enabled',
subscriptionVerificationMode: 'subscription_verification_mode',
subscriptionTurnstileEnabled: 'subscription_turnstile_enabled',
webPushEnabled: 'web_push_enabled',
turnstileSiteKey: 'turnstile_site_key',
turnstileSecretKey: 'turnstile_secret_key',
webPushVapidPublicKey: 'web_push_vapid_public_key',
webPushVapidPrivateKey: 'web_push_vapid_private_key',
webPushVapidSubject: 'web_push_vapid_subject',
aiProvider: 'ai_provider',
aiApiBase: 'ai_api_base',
aiApiKey: 'ai_api_key',
aiChatModel: 'ai_chat_model',
aiImageProvider: 'ai_image_provider',
aiImageApiBase: 'ai_image_api_base',
aiImageApiKey: 'ai_image_api_key',
aiImageModel: 'ai_image_model',
aiProviders: 'ai_providers',
aiActiveProviderId: 'ai_active_provider_id',
aiEmbeddingModel: 'ai_embedding_model',
aiSystemPrompt: 'ai_system_prompt',
aiTopK: 'ai_top_k',
aiChunkSize: 'ai_chunk_size',
mediaStorageProvider: 'media_storage_provider',
mediaR2AccountId: 'media_r2_account_id',
mediaR2Bucket: 'media_r2_bucket',
mediaR2PublicBaseUrl: 'media_r2_public_base_url',
mediaR2AccessKeyId: 'media_r2_access_key_id',
mediaR2SecretAccessKey: 'media_r2_secret_access_key',
seoDefaultOgImage: 'seo_default_og_image',
seoDefaultTwitterHandle: 'seo_default_twitter_handle',
notificationWebhookUrl: 'notification_webhook_url',
notificationChannelType: 'notification_channel_type',
notificationCommentEnabled: 'notification_comment_enabled',
notificationFriendLinkEnabled: 'notification_friend_link_enabled',
subscriptionPopupEnabled: 'subscription_popup_enabled',
subscriptionPopupTitle: 'subscription_popup_title',
subscriptionPopupDescription: 'subscription_popup_description',
subscriptionPopupDelaySeconds: 'subscription_popup_delay_seconds',
searchSynonyms: 'search_synonyms',
}
// Object.hasOwn distinguishes "omitted" from "explicitly null/false".
for (const [sourceKey, targetKey] of Object.entries(fieldMap)) {
if (Object.hasOwn(payload, sourceKey)) {
state.site_settings[targetKey] = payload[sourceKey]
}
}
addAuditLog('site_settings.update', 'site_settings', state.site_settings.site_name, '1')
json(res, 200, clone(state.site_settings))
return
}
// GET /api/admin/audit-logs — every audit entry, cloned.
if (pathname === '/api/admin/audit-logs' && req.method === 'GET') {
json(res, 200, state.audit_logs.map((item) => clone(item)))
return
}
// GET /api/admin/post-revisions — revision list, optionally filtered by
// post slug and capped by `limit`. Markdown bodies are not included here;
// fetch a single revision for the full content.
if (pathname === '/api/admin/post-revisions' && req.method === 'GET') {
let items = [...state.post_revisions]
const slug = normalizeText(searchParams.get('slug'))
const limit = Number.parseInt(searchParams.get('limit') || '0', 10) || 0
if (slug) {
items = items.filter((item) => item.post_slug === slug)
}
if (limit > 0) {
items = items.slice(0, limit)
}
json(res, 200, items.map((item) => ({
id: item.id,
post_slug: item.post_slug,
post_title: item.post_title,
operation: item.operation,
revision_reason: item.revision_reason,
actor_username: item.actor_username,
actor_email: item.actor_email,
actor_source: item.actor_source,
created_at: item.created_at,
has_markdown: item.has_markdown,
metadata: clone(item.metadata),
})))
return
}
// GET /api/admin/post-revisions/:id — one revision plus its markdown body.
if (pathname.match(/^\/api\/admin\/post-revisions\/\d+$/) && req.method === 'GET') {
const id = Number(pathname.split('/').pop())
const revision = findRevisionById(id)
if (!revision) {
notFound(res, '版本不存在。')
return
}
json(res, 200, {
item: {
id: revision.id,
post_slug: revision.post_slug,
post_title: revision.post_title,
operation: revision.operation,
revision_reason: revision.revision_reason,
actor_username: revision.actor_username,
actor_email: revision.actor_email,
actor_source: revision.actor_source,
created_at: revision.created_at,
has_markdown: revision.has_markdown,
metadata: clone(revision.metadata),
},
markdown: revision.markdown,
})
return
}
// POST /api/admin/post-revisions/:id/restore — restore a post from a
// revision. `mode` selects what is restored:
//   'full'     — overwrite the post with the whole snapshot (default)
//   'metadata' — copy only the metadata fields listed below
//   'markdown' — replace only the post body
if (pathname.match(/^\/api\/admin\/post-revisions\/\d+\/restore$/) && req.method === 'POST') {
// path: /api/admin/post-revisions/<id>/restore → segment index 4 is the id.
const id = Number(pathname.split('/')[4])
const revision = findRevisionById(id)
if (!revision) {
notFound(res, '版本不存在。')
return
}
const { json: payload } = await parseRequest(req)
const mode = normalizeText(payload.mode) || 'full'
const post = state.posts.find((item) => item.slug === revision.post_slug)
if (!post) {
notFound(res, '原文章不存在。')
return
}
if (mode === 'full') {
Object.assign(post, clone(revision.snapshot))
}
if (mode === 'metadata') {
const snapshot = revision.snapshot || {}
Object.assign(post, {
title: snapshot.title,
description: snapshot.description,
category: snapshot.category,
tags: clone(snapshot.tags || []),
post_type: snapshot.post_type,
image: snapshot.image,
images: clone(snapshot.images || []),
pinned: snapshot.pinned,
status: snapshot.status,
visibility: snapshot.visibility,
publish_at: snapshot.publish_at,
unpublish_at: snapshot.unpublish_at,
canonical_url: snapshot.canonical_url,
noindex: snapshot.noindex,
og_image: snapshot.og_image,
redirect_from: clone(snapshot.redirect_from || []),
redirect_to: snapshot.redirect_to,
})
}
if (mode === 'markdown') {
// NOTE(review): applied even when revision.has_markdown is false, in which
// case post.content may become undefined — confirm callers never do this.
post.content = revision.markdown
}
// NOTE(review): an unrecognized mode matches no branch above, yet still
// bumps updated_at and records an audit entry / revision — confirm intended.
post.updated_at = iso(-1)
recalculateTaxonomyCounts()
addAuditLog('post.restore', 'post', post.title, post.id, { revision_id: id, mode })
addPostRevision(post, 'restore', `从版本 #${id} 恢复(${mode})`)
json(res, 200, { restored: true, revision_id: id, post_slug: post.slug, mode })
return
}
if (pathname === '/api/admin/comments/blacklist' && req.method === 'GET') {
json(res, 200, state.comment_blacklist.map((item) => clone(item)))
return
}
if (pathname === '/api/admin/comments/blacklist' && req.method === 'POST') {
const { json: payload } = await parseRequest(req)
const record = {
id: nextId('blacklist'),
matcher_type: normalizeText(payload.matcher_type) || 'ip',
matcher_value: normalizeText(payload.matcher_value),
reason: normalizeText(payload.reason) || null,
active: Object.hasOwn(payload, 'active') ? Boolean(payload.active) : true,
expires_at: normalizeText(payload.expires_at) || null,
created_at: iso(-1),
updated_at: iso(-1),
effective: Object.hasOwn(payload, 'active') ? Boolean(payload.active) : true,
}
state.comment_blacklist.unshift(record)
addAuditLog('comment_blacklist.create', 'comment_blacklist', record.matcher_value, record.id)
json(res, 201, clone(record))
return
}
// PATCH /api/admin/comments/blacklist/:id — partial update of a rule.
// Fields are applied only when present on the payload; `clear_expires_at`
// forcibly nulls the expiry regardless of the expires_at field.
if (pathname.match(/^\/api\/admin\/comments\/blacklist\/\d+$/) && req.method === 'PATCH') {
const id = Number(pathname.split('/').pop())
const record = state.comment_blacklist.find((item) => item.id === id)
if (!record) {
notFound(res, '黑名单规则不存在。')
return
}
const { json: payload } = await parseRequest(req)
if (Object.hasOwn(payload, 'reason')) record.reason = normalizeText(payload.reason) || null
if (Object.hasOwn(payload, 'active')) record.active = Boolean(payload.active)
if (Object.hasOwn(payload, 'expires_at')) record.expires_at = normalizeText(payload.expires_at) || null
if (payload.clear_expires_at) record.expires_at = null
record.updated_at = iso(-1)
// A rule is effective only while active and carrying no expiry.
record.effective = Boolean(record.active) && !record.expires_at
addAuditLog('comment_blacklist.update', 'comment_blacklist', record.matcher_value, record.id)
json(res, 200, clone(record))
return
}
// DELETE /api/admin/comments/blacklist/:id — remove a rule.
if (pathname.match(/^\/api\/admin\/comments\/blacklist\/\d+$/) && req.method === 'DELETE') {
const id = Number(pathname.split('/').pop())
const index = state.comment_blacklist.findIndex((item) => item.id === id)
if (index === -1) {
notFound(res, '黑名单规则不存在。')
return
}
const [removed] = state.comment_blacklist.splice(index, 1)
addAuditLog('comment_blacklist.delete', 'comment_blacklist', removed.matcher_value, removed.id)
json(res, 200, { deleted: true, id })
return
}
// POST /api/admin/comments/analyze — "persona" analysis of comments from a
// single source (ip/email/…). Builds a summary, stores it as a persona log,
// and returns the aggregate stats plus up to 3 sample comments.
if (pathname === '/api/admin/comments/analyze' && req.method === 'POST') {
const { json: payload } = await parseRequest(req)
const matcherType = normalizeText(payload.matcher_type) || 'ip'
const matcherValue = normalizeText(payload.matcher_value)
// Case-insensitive match against the comment's value for this matcher type.
const matches = state.comments.filter((comment) =>
getCommentMatcherValue(comment, matcherType) === matcherValue.toLowerCase(),
)
const analysis = `该来源共出现 ${matches.length} 条评论,其中待审核 ${
matches.filter((item) => !item.approved).length
} 条;建议保持观察并视情况加入黑名单。`
const log = {
id: nextId('persona_log'),
matcher_type: matcherType,
matcher_value: matcherValue,
// assumes state.comments is ordered newest-first, so the tail is the
// earliest sighting and the head the latest — TODO confirm against seeding.
from_at: matches.at(-1)?.created_at ?? null,
to_at: matches[0]?.created_at ?? null,
total_comments: matches.length,
pending_comments: matches.filter((item) => !item.approved).length,
distinct_posts: new Set(matches.map((item) => item.post_slug)).size,
analysis,
samples: matches.slice(0, 3).map((item) => ({
id: item.id,
created_at: item.created_at,
post_slug: item.post_slug,
author: item.author,
email: item.email,
approved: Boolean(item.approved),
content_preview: String(item.content || '').slice(0, 80),
})),
created_at: iso(-1),
}
state.comment_persona_logs.unshift(log)
addAuditLog('comment_persona.analyze', 'comment_persona', matcherValue, log.id)
json(res, 200, {
matcher_type: log.matcher_type,
matcher_value: log.matcher_value,
total_comments: log.total_comments,
pending_comments: log.pending_comments,
first_seen_at: log.from_at,
latest_seen_at: log.to_at,
distinct_posts: log.distinct_posts,
analysis: log.analysis,
samples: clone(log.samples),
})
return
}
if (pathname === '/api/admin/comments/analyze/logs' && req.method === 'GET') {
const matcherType = normalizeText(searchParams.get('matcher_type'))
const matcherValue = normalizeText(searchParams.get('matcher_value'))
const limit = Number.parseInt(searchParams.get('limit') || '0', 10) || 0
let items = [...state.comment_persona_logs]
if (matcherType) {
items = items.filter((item) => item.matcher_type === matcherType)
}
if (matcherValue) {
items = items.filter((item) => item.matcher_value === matcherValue)
}
if (limit > 0) {
items = items.slice(0, limit)
}
json(res, 200, items.map((item) => clone(item)))
return
}
if (pathname === '/api/admin/subscriptions' && req.method === 'GET') {
json(res, 200, { subscriptions: state.subscriptions.map((item) => clone(item)) })
return
}
// POST /api/admin/subscriptions — admin-created subscription; unlike the
// public endpoint it accepts channel/status/secret/notes and defaults the
// status to 'active' (no confirmation step).
if (pathname === '/api/admin/subscriptions' && req.method === 'POST') {
const { json: payload } = await parseRequest(req)
const id = nextId('subscription')
const record = createSubscriptionRecord(id, {
channel_type: normalizeText(payload.channelType) || 'email',
target: normalizeText(payload.target) || `user${id}@example.com`,
display_name: normalizeText(payload.displayName) || null,
status: normalizeText(payload.status) || 'active',
filters: payload.filters ?? null,
metadata: payload.metadata ?? null,
secret: normalizeText(payload.secret) || null,
notes: normalizeText(payload.notes) || null,
created_at: iso(-1),
updated_at: iso(-1),
})
state.subscriptions.unshift(record)
addAuditLog('subscription.admin_create', 'subscription', record.target, record.id)
json(res, 201, clone(record))
return
}
// PATCH /api/admin/subscriptions/:id — partial update; each field is only
// touched when the key is present on the payload (Object.hasOwn), so
// present-but-empty values clear nullable fields.
if (pathname.match(/^\/api\/admin\/subscriptions\/\d+$/) && req.method === 'PATCH') {
const id = Number(pathname.split('/').pop())
const record = state.subscriptions.find((item) => item.id === id)
if (!record) {
notFound(res, '订阅目标不存在。')
return
}
const { json: payload } = await parseRequest(req)
if (Object.hasOwn(payload, 'channelType')) record.channel_type = normalizeText(payload.channelType) || record.channel_type
if (Object.hasOwn(payload, 'target')) record.target = normalizeText(payload.target) || record.target
if (Object.hasOwn(payload, 'displayName')) record.display_name = normalizeText(payload.displayName) || null
if (Object.hasOwn(payload, 'status')) record.status = normalizeText(payload.status) || record.status
if (Object.hasOwn(payload, 'filters')) record.filters = payload.filters ?? null
if (Object.hasOwn(payload, 'metadata')) record.metadata = payload.metadata ?? null
if (Object.hasOwn(payload, 'secret')) record.secret = normalizeText(payload.secret) || null
if (Object.hasOwn(payload, 'notes')) record.notes = normalizeText(payload.notes) || null
record.updated_at = iso(-1)
addAuditLog('subscription.update', 'subscription', record.target, record.id)
json(res, 200, clone(record))
return
}
// DELETE /api/admin/subscriptions/:id — remove the subscription entirely.
if (pathname.match(/^\/api\/admin\/subscriptions\/\d+$/) && req.method === 'DELETE') {
const id = Number(pathname.split('/').pop())
const index = state.subscriptions.findIndex((item) => item.id === id)
if (index === -1) {
notFound(res, '订阅目标不存在。')
return
}
const [removed] = state.subscriptions.splice(index, 1)
addAuditLog('subscription.delete', 'subscription', removed.target, removed.id)
writeEmpty(res, 204)
return
}
// POST /api/admin/subscriptions/:id/test — queue a single test delivery for
// one subscription and enqueue the matching worker job.
if (pathname.match(/^\/api\/admin\/subscriptions\/\d+\/test$/) && req.method === 'POST') {
// path: /api/admin/subscriptions/<id>/test → segment index 4 is the id.
const id = Number(pathname.split('/')[4])
const record = state.subscriptions.find((item) => item.id === id)
if (!record) {
notFound(res, '订阅目标不存在。')
return
}
const delivery = createNotificationDelivery({
subscription: record,
eventType: 'subscription.test',
status: 'queued',
})
state.deliveries.unshift(delivery)
const job = enqueueNotificationDeliveryJob(delivery)
record.last_delivery_status = delivery.status
record.last_notified_at = iso(-1)
addAuditLog('subscription.test', 'subscription', record.target, record.id)
json(res, 200, { queued: true, id: record.id, delivery_id: delivery.id, job_id: job.id })
return
}
// GET /api/admin/subscriptions/deliveries — delivery log, optionally capped.
if (pathname === '/api/admin/subscriptions/deliveries' && req.method === 'GET') {
const limit = Number.parseInt(searchParams.get('limit') || '0', 10) || 0
const items = limit > 0 ? state.deliveries.slice(0, limit) : state.deliveries
json(res, 200, { deliveries: items.map((item) => clone(item)) })
return
}
// POST /api/admin/subscriptions/digest — queue a digest delivery for every
// active subscription; non-active subscriptions are counted as skipped.
if (pathname === '/api/admin/subscriptions/digest' && req.method === 'POST') {
const { json: payload } = await parseRequest(req)
const period = normalizeText(payload.period) || 'weekly'
const activeSubscriptions = state.subscriptions.filter((item) => item.status === 'active')
activeSubscriptions.forEach((subscription) => {
const delivery = createNotificationDelivery({
subscription,
eventType: `digest.${period}`,
status: 'queued',
payload: { period },
})
state.deliveries.unshift(delivery)
enqueueNotificationDeliveryJob(delivery)
subscription.last_delivery_status = 'queued'
subscription.last_notified_at = iso(-1)
})
addAuditLog('subscription.digest', 'subscription', period, String(activeSubscriptions.length))
json(res, 200, {
period,
post_count: state.posts.filter((item) => item.status === 'published').length,
queued: activeSubscriptions.length,
skipped: state.subscriptions.length - activeSubscriptions.length,
})
return
}
if (pathname === '/api/admin/workers/overview' && req.method === 'GET') {
json(res, 200, buildWorkerOverview())
return
}
if (pathname === '/api/admin/workers/jobs' && req.method === 'GET') {
const status = normalizeText(searchParams.get('status'))
const jobKind = normalizeText(searchParams.get('job_kind'))
const workerName = normalizeText(searchParams.get('worker_name'))
const keyword = normalizeText(searchParams.get('search'))
const limit = Number.parseInt(searchParams.get('limit') || '0', 10) || 0
let items = [...state.worker_jobs]
if (status) {
items = items.filter((item) => item.status === status)
}
if (jobKind) {
items = items.filter((item) => item.job_kind === jobKind)
}
if (workerName) {
items = items.filter((item) => item.worker_name === workerName)
}
if (keyword) {
items = items.filter((item) =>
[item.worker_name, item.display_name, item.related_entity_type, item.related_entity_id]
.filter(Boolean)
.some((value) => String(value).toLowerCase().includes(keyword.toLowerCase())),
)
}
const total = items.length
if (limit > 0) {
items = items.slice(0, limit)
}
json(res, 200, { total, jobs: items.map((item) => normalizeWorkerJob(item)) })
return
}
// GET /api/admin/workers/jobs/:id — single job detail.
if (pathname.match(/^\/api\/admin\/workers\/jobs\/\d+$/) && req.method === 'GET') {
const id = Number(pathname.split('/').pop())
const job = state.worker_jobs.find((item) => item.id === id)
if (!job) {
notFound(res, 'worker job 不存在。')
return
}
json(res, 200, normalizeWorkerJob(job))
return
}
// POST /api/admin/workers/jobs/:id/cancel — request cancellation. Only jobs
// still 'queued' flip to 'cancelled' immediately; anything already running
// merely gets cancel_requested set.
if (pathname.match(/^\/api\/admin\/workers\/jobs\/\d+\/cancel$/) && req.method === 'POST') {
// path: /api/admin/workers/jobs/<id>/cancel → segment index 5 is the id.
const id = Number(pathname.split('/')[5])
const job = state.worker_jobs.find((item) => item.id === id)
if (!job) {
notFound(res, 'worker job 不存在。')
return
}
job.cancel_requested = true
if (job.status === 'queued') {
job.status = 'cancelled'
job.finished_at = iso(-1)
job.error_text = 'job cancelled before start'
}
job.updated_at = iso(-1)
addAuditLog('worker.cancel', 'worker_job', job.worker_name, job.id)
json(res, 200, normalizeWorkerJob(job))
return
}
// POST /api/admin/workers/jobs/:id/retry — re-run a job. The mock does not
// execute anything: it creates a fresh job record already in 'succeeded'
// state, linked back to the source via parent_job_id and trigger_mode
// 'retry'. Digest jobs recompute their counts from current state; media
// download jobs reuse the source payload/result; everything else is cloned.
if (pathname.match(/^\/api\/admin\/workers\/jobs\/\d+\/retry$/) && req.method === 'POST') {
// path: /api/admin/workers/jobs/<id>/retry → segment index 5 is the id.
const id = Number(pathname.split('/')[5])
const job = state.worker_jobs.find((item) => item.id === id)
if (!job) {
notFound(res, 'worker job 不存在。')
return
}
let nextJob = null
if (job.worker_name === 'task.send_weekly_digest' || job.worker_name === 'task.send_monthly_digest') {
const period = job.worker_name === 'task.send_monthly_digest' ? 'monthly' : 'weekly'
nextJob = createWorkerJob({
job_kind: 'task',
worker_name: job.worker_name,
display_name: period === 'monthly' ? '发送月报' : '发送周报',
status: 'succeeded',
queue_name: 'digests',
payload: { period },
// Counts are recomputed from the current state, not copied from the
// original job's result.
result: {
period,
post_count: state.posts.filter((item) => item.status === 'published').length,
queued: state.subscriptions.filter((item) => item.status === 'active').length,
skipped: state.subscriptions.filter((item) => item.status !== 'active').length,
},
tags: ['digest', period],
related_entity_type: 'subscription_digest',
related_entity_id: period,
parent_job_id: job.id,
trigger_mode: 'retry',
})
} else if (job.worker_name === 'worker.download_media') {
const payload = job.payload || {}
nextJob = createWorkerJob({
job_kind: 'worker',
worker_name: 'worker.download_media',
display_name: job.display_name,
status: 'succeeded',
queue_name: 'media',
payload,
result: job.result,
tags: ['media', 'download'],
related_entity_type: job.related_entity_type,
related_entity_id: job.related_entity_id,
parent_job_id: job.id,
trigger_mode: 'retry',
})
} else {
// Generic fallback: deep-copy the source job's payload/result.
nextJob = createWorkerJob({
job_kind: job.job_kind,
worker_name: job.worker_name,
display_name: job.display_name,
status: 'succeeded',
queue_name: job.queue_name,
payload: clone(job.payload),
result: clone(job.result),
tags: Array.isArray(job.tags) ? job.tags : [],
related_entity_type: job.related_entity_type,
related_entity_id: job.related_entity_id,
parent_job_id: job.id,
trigger_mode: 'retry',
})
}
addAuditLog('worker.retry', 'worker_job', nextJob.worker_name, nextJob.id, { source_job_id: job.id })
json(res, 200, { queued: true, job: normalizeWorkerJob(nextJob) })
return
}
// POST /api/admin/workers/tasks/retry-deliveries — requeue up to `limit`
// deliveries currently in 'retry_pending' and record a maintenance job.
if (pathname === '/api/admin/workers/tasks/retry-deliveries' && req.method === 'POST') {
const { json: payload } = await parseRequest(req)
const limit = Number.parseInt(String(payload.limit || '80'), 10) || 80
const retryable = state.deliveries
.filter((item) => item.status === 'retry_pending')
.slice(0, limit)
retryable.forEach((delivery) => {
delivery.status = 'queued'
delivery.updated_at = iso(-1)
delivery.next_retry_at = null
enqueueNotificationDeliveryJob(delivery)
})
// The mock completes instantly, so the job is recorded as 'succeeded'.
const job = createWorkerJob({
job_kind: 'task',
worker_name: 'task.retry_deliveries',
display_name: '重试待投递通知',
status: 'succeeded',
queue_name: 'maintenance',
payload: { limit },
result: { limit, queued: retryable.length },
tags: ['maintenance', 'retry'],
related_entity_type: 'notification_delivery',
related_entity_id: null,
})
addAuditLog('worker.task.retry_deliveries', 'worker_job', job.worker_name, job.id, { limit })
json(res, 200, { queued: true, job: normalizeWorkerJob(job) })
return
}
// POST /api/admin/workers/tasks/digest — queue a digest for every active
// subscription and record the digest task job. Any period other than
// 'monthly' is coerced to 'weekly'.
if (pathname === '/api/admin/workers/tasks/digest' && req.method === 'POST') {
const { json: payload } = await parseRequest(req)
const period = normalizeText(payload.period) === 'monthly' ? 'monthly' : 'weekly'
const activeSubscriptions = state.subscriptions.filter((item) => item.status === 'active')
activeSubscriptions.forEach((subscription) => {
const delivery = createNotificationDelivery({
subscription,
eventType: `digest.${period}`,
status: 'queued',
payload: { period },
})
state.deliveries.unshift(delivery)
enqueueNotificationDeliveryJob(delivery)
})
const job = createWorkerJob({
job_kind: 'task',
worker_name: period === 'monthly' ? 'task.send_monthly_digest' : 'task.send_weekly_digest',
display_name: period === 'monthly' ? '发送月报' : '发送周报',
status: 'succeeded',
queue_name: 'digests',
payload: { period },
result: {
period,
post_count: state.posts.filter((item) => item.status === 'published').length,
queued: activeSubscriptions.length,
skipped: state.subscriptions.length - activeSubscriptions.length,
},
tags: ['digest', period],
related_entity_type: 'subscription_digest',
related_entity_id: period,
})
addAuditLog('worker.task.digest', 'worker_job', job.worker_name, job.id, { period })
json(res, 200, { queued: true, job: normalizeWorkerJob(job) })
return
}
if (pathname === '/api/admin/storage/media' && req.method === 'GET') {
const prefix = normalizeText(searchParams.get('prefix'))
const limit = Number.parseInt(searchParams.get('limit') || '0', 10) || 0
const items = state.media
.filter((item) => !prefix || item.key.startsWith(prefix))
.slice(0, limit > 0 ? limit : undefined)
json(res, 200, {
provider: state.site_settings.media_storage_provider,
bucket: state.site_settings.media_r2_bucket,
public_base_url: state.site_settings.media_r2_public_base_url,
items: items.map((item) => ({
key: item.key,
url: item.url,
size_bytes: item.size_bytes,
last_modified: item.last_modified,
title: item.title,
alt_text: item.alt_text,
caption: item.caption,
tags: [...item.tags],
notes: item.notes,
})),
})
return
}
// POST /api/admin/storage/media — multipart upload. Keys are built as
// <prefix><timestamp>-<index>-<sanitized filename> to stay unique.
if (pathname === '/api/admin/storage/media' && req.method === 'POST') {
const { fields, files } = await parseRequest(req)
const prefix = normalizeText(fields.prefix) || 'uploads/'
const uploaded = files.map((file, index) => {
const key = `${prefix}${Date.now()}-${index}-${sanitizeFilename(file.filename)}`
const record = makeMediaRecordFromUpload(key, file)
state.media.unshift(record)
return {
key: record.key,
url: record.url,
size_bytes: record.size_bytes,
}
})
addAuditLog('media.upload', 'media', prefix, String(uploaded.length))
json(res, 200, { uploaded })
return
}
// DELETE /api/admin/storage/media?key=… — remove a single object by key.
if (pathname === '/api/admin/storage/media' && req.method === 'DELETE') {
// NOTE(review): URLSearchParams.get() already returns a percent-decoded
// value, so decodeURIComponent here decodes a second time — keys containing
// a literal '%' would be mangled or throw URIError. Confirm the admin UI
// double-encodes `key`; otherwise drop this decode.
const key = decodeURIComponent(searchParams.get('key') || '')
const index = state.media.findIndex((item) => item.key === key)
if (index === -1) {
notFound(res, '媒体对象不存在。')
return
}
state.media.splice(index, 1)
addAuditLog('media.delete', 'media', key, key)
json(res, 200, { deleted: true, key })
return
}
// POST /api/admin/storage/media/batch-delete — delete many keys at once;
// unknown keys are reported in `failed` rather than aborting the batch.
if (pathname === '/api/admin/storage/media/batch-delete' && req.method === 'POST') {
const { json: payload } = await parseRequest(req)
const deleted = []
const failed = []
for (const key of Array.isArray(payload.keys) ? payload.keys : []) {
const index = state.media.findIndex((item) => item.key === key)
if (index === -1) {
failed.push(key)
continue
}
state.media.splice(index, 1)
deleted.push(key)
}
addAuditLog('media.batch_delete', 'media', `${deleted.length} deleted`, String(deleted.length), { deleted, failed })
json(res, 200, { deleted, failed })
return
}
// Replace the binary content of an existing media object in place. The key
// (and therefore the canonical URL) is preserved; a cache-busting `?v=` query
// parameter is appended to the URL in the response.
if (pathname === '/api/admin/storage/media/replace' && req.method === 'POST') {
const { fields, files } = await parseRequest(req)
const key = normalizeText(fields.key)
const record = state.media.find((item) => item.key === key)
if (!record) {
notFound(res, '媒体对象不存在。')
return
}
// Only the first uploaded file is used; any field the upload does not supply
// keeps its previous value.
const file = files[0]
if (file) {
record.body = file.body?.toString('utf8') || record.body
record.size_bytes = file.size ?? record.size_bytes
record.content_type = String(file.contentType || record.content_type)
}
record.last_modified = iso(-1)
addAuditLog('media.replace', 'media', key, key)
json(res, 200, { key: record.key, url: `${record.url}?v=${Date.now()}` })
return
}
// Queue a "download remote asset into media storage" operation. The media
// record is created immediately with placeholder body/size; a worker job is
// queued to simulate the asynchronous fetch.
if (pathname === '/api/admin/storage/media/download' && req.method === 'POST') {
const { json: payload } = await parseRequest(req)
const sourceUrl = normalizeText(payload.source_url)
if (!sourceUrl) {
badRequest(res, '缺少远程素材地址。')
return
}
const prefix = normalizeText(payload.prefix) || 'uploads/'
// Filename is taken from the last `/` segment of the URL.
// NOTE(review): a query string on the source URL would end up in the key —
// presumably acceptable for mock data; confirm if keys must stay clean.
const fileName = sanitizeFilename(sourceUrl.split('/').pop(), 'remote-asset.svg')
const key = `${prefix}${Date.now()}-${fileName}`
const title = normalizeText(payload.title) || sanitizeFilename(fileName, 'remote-asset')
const record = {
key,
url: `${MOCK_ORIGIN}/media/${encodeURIComponent(key)}`,
// Placeholder size/body until the simulated download "completes".
size_bytes: 2048,
last_modified: iso(-1),
title,
alt_text: normalizeText(payload.alt_text) || null,
caption: normalizeText(payload.caption) || null,
tags: Array.isArray(payload.tags) ? payload.tags.filter(Boolean) : [],
notes: normalizeText(payload.notes) || `downloaded from ${sourceUrl}`,
body: ``,
content_type: CONTENT_TYPES.svg,
}
state.media.unshift(record)
const job = queueDownloadWorkerJob(payload, record)
addAuditLog('media.download', 'media', sourceUrl, job.id, { key })
json(res, 200, {
queued: true,
job_id: job.id,
status: job.status,
})
return
}
if (pathname === '/api/admin/storage/media/metadata' && req.method === 'PATCH') {
const { json: payload } = await parseRequest(req)
const key = normalizeText(payload.key)
const record = state.media.find((item) => item.key === key)
if (!record) {
notFound(res, '媒体对象不存在。')
return
}
record.title = normalizeText(payload.title) || null
record.alt_text = normalizeText(payload.alt_text) || null
record.caption = normalizeText(payload.caption) || null
record.tags = Array.isArray(payload.tags) ? payload.tags.filter(Boolean) : []
record.notes = normalizeText(payload.notes) || null
record.last_modified = iso(-1)
addAuditLog('media.metadata.update', 'media', key, key)
json(res, 200, {
saved: true,
key: record.key,
title: record.title,
alt_text: record.alt_text,
caption: record.caption,
tags: [...record.tags],
notes: record.notes,
})
return
}
if (pathname === '/api/admin/storage/review-cover' && req.method === 'POST') {
const { files } = await parseRequest(req)
const file = files[0]
if (!file) {
badRequest(res, '缺少封面文件。')
return
}
const key = `review-covers/${Date.now()}-${sanitizeFilename(file.filename, 'review-cover.svg')}`
const record = makeMediaRecordFromUpload(key, file)
state.media.unshift(record)
addAuditLog('review.cover.upload', 'media', key, key)
json(res, 200, { key: record.key, url: record.url })
return
}
if (pathname === '/api/admin/ai/reindex' && req.method === 'POST') {
state.site_settings.ai_last_indexed_at = iso(-1)
state.site_settings.ai_chunks_count += 3
json(res, 200, {
indexed_chunks: state.site_settings.ai_chunks_count,
last_indexed_at: state.site_settings.ai_last_indexed_at,
})
return
}
// Generate mock AI metadata for a markdown draft: title and description are
// parsed from the markdown itself, while category and tags are canned values.
if (pathname === '/api/admin/ai/post-metadata' && req.method === 'POST') {
const { json: payload } = await parseRequest(req)
const markdown = String(payload.markdown || '')
// Falls back to the default title when the markdown has no usable heading.
const { title, description } = parseHeadingAndDescription(markdown, 'Playwright Mock Draft')
json(res, 200, {
title,
description,
category: '测试体系',
tags: ['Playwright', 'CI'],
slug: slugify(title),
})
return
}
if (pathname === '/api/admin/ai/polish-post' && req.method === 'POST') {
const { json: payload } = await parseRequest(req)
const markdown = String(payload.markdown || '').trim()
const polishedMarkdown = markdown.includes('【AI 润色】')
? markdown
: `${markdown}\n\n【AI 润色】这是一段由 mock server 追加的润色说明。`
json(res, 200, { polished_markdown: polishedMarkdown })
return
}
if (pathname === '/api/admin/ai/polish-review' && req.method === 'POST') {
const { json: payload } = await parseRequest(req)
const description = normalizeText(payload.description) || '暂无点评。'
json(res, 200, {
polished_description: `${description}\n\n(AI 润色)整体表达更凝练,也更适合作为首页卡片摘要。`,
})
return
}
// Generate a mock AI cover image URL for a post, keyed by slug (derived from
// the title when no slug is supplied).
if (pathname === '/api/admin/ai/post-cover' && req.method === 'POST') {
const { json: payload } = await parseRequest(req)
const slug = normalizeText(payload.slug) || slugify(payload.title || 'mock-cover')
json(res, 200, {
image_url: `${MOCK_ORIGIN}/generated/${slug}-cover.svg`,
prompt: `Generate a mock cover for ${normalizeText(payload.title) || slug}`,
})
return
}
if (pathname === '/api/admin/ai/test-provider' && req.method === 'POST') {
json(res, 200, {
provider: 'openai',
endpoint: 'https://api.mock.invalid/v1',
chat_model: 'gpt-mock-4.1',
reply_preview: 'Mock provider connection looks good.',
})
return
}
// Always report a successful AI image-provider connection check, including a
// preview URL served by this mock server.
if (pathname === '/api/admin/ai/test-image-provider' && req.method === 'POST') {
json(res, 200, {
provider: 'mock-images',
endpoint: 'https://images.mock.invalid/v1',
image_model: 'mock-image-1',
result_preview: `${MOCK_ORIGIN}/generated/image-provider-preview.svg`,
})
return
}
// Always report a successful R2 storage connection check, echoing the
// currently-configured bucket settings.
if (pathname === '/api/admin/storage/r2/test' && req.method === 'POST') {
json(res, 200, {
bucket: state.site_settings.media_r2_bucket,
public_base_url: state.site_settings.media_r2_public_base_url,
})
return
}
// Closes the admin-only route section opened above this chunk.
}
if (pathname.match(/^\/api\/posts\/slug\/[^/]+\/markdown$/) && req.method === 'GET') {
const slug = decodeURIComponent(pathname.split('/')[4])
const post = state.posts.find((item) => item.slug === slug)
if (!post) {
notFound(res, '文章不存在。')
return
}
json(res, 200, { slug: post.slug, path: `content/posts/${post.slug}.md`, markdown: post.content })
return
}
if (pathname.match(/^\/api\/posts\/slug\/[^/]+\/markdown$/) && req.method === 'PATCH') {
const slug = decodeURIComponent(pathname.split('/')[4])
const post = state.posts.find((item) => item.slug === slug)
if (!post) {
notFound(res, '文章不存在。')
return
}
const { json: payload } = await parseRequest(req)
post.content = String(payload.markdown || post.content || '')
const parsed = parseHeadingAndDescription(post.content, post.title || post.slug)
post.title = post.title || parsed.title
if (!normalizeText(post.description)) {
post.description = parsed.description
}
post.updated_at = iso(-1)
addAuditLog('post.markdown.update', 'post', post.title, post.id, { slug: post.slug })
addPostRevision(post, 'markdown', '保存 Markdown')
json(res, 200, { slug: post.slug, path: `content/posts/${post.slug}.md`, markdown: post.content })
return
}
if (pathname.match(/^\/api\/posts\/slug\/[^/]+\/markdown$/) && req.method === 'DELETE') {
const slug = decodeURIComponent(pathname.split('/')[4])
const index = state.posts.findIndex((item) => item.slug === slug)
if (index === -1) {
notFound(res, '文章不存在。')
return
}
const [removed] = state.posts.splice(index, 1)
recalculateTaxonomyCounts()
addAuditLog('post.delete', 'post', removed.title, removed.id, { slug: removed.slug })
addPostRevision(removed, 'delete', '删除文章')
json(res, 200, { slug: removed.slug, deleted: true })
return
}
// Update a comment's moderation state (admin only). Only the `approved` flag
// is honored, and only when the key is actually present in the payload.
if (pathname.match(/^\/api\/comments\/\d+$/) && req.method === 'PATCH') {
if (!ensureAdmin(req, res)) return
const id = Number(pathname.split('/').pop())
const record = state.comments.find((item) => item.id === id)
if (!record) {
notFound(res, '评论不存在。')
return
}
const { json: payload } = await parseRequest(req)
// Object.hasOwn distinguishes "absent" from an explicit false value.
if (Object.hasOwn(payload, 'approved')) record.approved = Boolean(payload.approved)
record.updated_at = iso(-1)
addAuditLog('comment.update', 'comment', record.author, record.id, { approved: record.approved })
json(res, 200, clone(record))
return
}
if (pathname.match(/^\/api\/comments\/\d+$/) && req.method === 'DELETE') {
if (!ensureAdmin(req, res)) return
const id = Number(pathname.split('/').pop())
const index = state.comments.findIndex((item) => item.id === id)
if (index === -1) {
notFound(res, '评论不存在。')
return
}
const [record] = state.comments.splice(index, 1)
addAuditLog('comment.delete', 'comment', record.author, record.id)
writeEmpty(res, 204)
return
}
// Update a friend link by numeric id (admin only). Both snake_case and
// camelCase payload keys are accepted; empty/missing values keep the
// existing field (this is a partial update).
if (pathname.match(/^\/api\/friend_links\/\d+$/) && req.method === 'PATCH') {
if (!ensureAdmin(req, res)) return
const id = Number(pathname.split('/').pop())
const record = state.friend_links.find((item) => item.id === id)
if (!record) {
notFound(res, '友链不存在。')
return
}
const { json: payload } = await parseRequest(req)
record.site_name = normalizeText(payload.site_name || payload.siteName) || record.site_name
record.site_url = normalizeText(payload.site_url || payload.siteUrl) || record.site_url
record.description = normalizeText(payload.description) || record.description
record.category = normalizeText(payload.category) || record.category
record.status = normalizeText(payload.status) || record.status
record.updated_at = iso(-1)
addAuditLog('friend_link.update', 'friend_link', record.site_name, record.id, { status: record.status })
json(res, 200, clone(record))
return
}
if (pathname.match(/^\/api\/friend_links\/\d+$/) && req.method === 'DELETE') {
if (!ensureAdmin(req, res)) return
const id = Number(pathname.split('/').pop())
const index = state.friend_links.findIndex((item) => item.id === id)
if (index === -1) {
notFound(res, '友链不存在。')
return
}
const [record] = state.friend_links.splice(index, 1)
addAuditLog('friend_link.delete', 'friend_link', record.site_name, record.id)
writeEmpty(res, 204)
return
}
// Fallback: no mock route matched this method/path combination.
notFound(res, `未匹配到 mock 接口:${req.method} ${pathname}`)
})
// Bind to loopback only; the port is configurable via PLAYWRIGHT_MOCK_PORT.
server.listen(PORT, '127.0.0.1')