diff --git a/.env.example b/.env.example index 0068b2e4a..921213684 100644 --- a/.env.example +++ b/.env.example @@ -32,6 +32,10 @@ EMBEDDING_USE_GPU=true # ── Vector Store Configuration ── VECTOR_STORE_TYPE=chromadb +# ── NovelPro Obsidian Long-Term Memory ── +# 留空时默认写入 data/obsidian-vault;如需接入自己的 Obsidian Vault,填绝对路径并重启后端。 +# PLOTPILOT_OBSIDIAN_VAULT=/Users/you/Documents/Obsidian/PlotPilot + # 日志配置 LOG_LEVEL=INFO # DEBUG, INFO, WARNING, ERROR, CRITICAL LOG_FILE=logs/aitext.log diff --git a/.github/workflows/backend-ci.yml b/.github/workflows/backend-ci.yml index b4b7c6186..182d8a0ee 100644 --- a/.github/workflows/backend-ci.yml +++ b/.github/workflows/backend-ci.yml @@ -1,20 +1,29 @@ name: Backend CI on: + push: + paths: + - "**.py" + - "requirements*.txt" + - "pyproject.toml" + - "pytest.ini" + - ".github/workflows/backend-ci.yml" pull_request: paths: - "**.py" - "requirements*.txt" - "pyproject.toml" - "pytest.ini" + - ".github/workflows/backend-ci.yml" + workflow_dispatch: jobs: test: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 with: python-version: "3.11" cache: "pip" @@ -22,8 +31,20 @@ jobs: - name: Install dependencies run: pip install -r requirements.txt - - name: Run unit tests - run: pytest tests/unit -q --tb=short + - name: Run verified backend workflow tests + run: > + pytest + tests/unit/application/services/test_continuity_overview_service.py + tests/unit/application/services/test_power_system_service.py + tests/integration/interfaces/api/v1/test_continuity_api.py + tests/integration/interfaces/api/v1/test_power_system_api.py + tests/unit/infrastructure/persistence/database/test_sqlite_chapter_candidate_draft_repository.py + tests/unit/application/services/test_chapter_candidate_draft_service.py + tests/unit/application/services/test_chapter_service.py + tests/unit/application/services/test_chronicles_service.py + 
tests/integration/interfaces/api/v1/test_chapter_candidate_drafts_api.py + -q + --tb=short env: # 单测不需要真实 key,用占位值防止启动时报错 ANTHROPIC_API_KEY: "test-placeholder" diff --git a/.github/workflows/frontend-ci.yml b/.github/workflows/frontend-ci.yml index 2642a787a..ddc2efd31 100644 --- a/.github/workflows/frontend-ci.yml +++ b/.github/workflows/frontend-ci.yml @@ -1,9 +1,15 @@ name: Frontend CI on: + push: + paths: + - "frontend/**" + - ".github/workflows/frontend-ci.yml" pull_request: paths: - "frontend/**" + - ".github/workflows/frontend-ci.yml" + workflow_dispatch: jobs: build: @@ -13,9 +19,9 @@ jobs: working-directory: frontend steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 - - uses: actions/setup-node@v4 + - uses: actions/setup-node@v6 with: node-version: "20" cache: "npm" diff --git a/.gitignore b/.gitignore index 99fe935e1..9ec023c21 100644 --- a/.gitignore +++ b/.gitignore @@ -17,6 +17,7 @@ __pycache__/ .hypothesis/ .venv/ venv/ +/uv.lock *.egg-info/ .eggs/ dist/ @@ -39,6 +40,9 @@ frontend/dist/ .playwright-mcp/ .claude/ .trae/ +.worktrees/ +agent_memory/ +LOCAL_DEVELOPMENT.md # ── 覆盖率 / 测试产物 ── .coverage @@ -88,6 +92,11 @@ scripts/evaluation/results/ data/chromadb/ data/logs/ data/llm_configs.json +data/obsidian-vault/ +data/obsidian-vault-backups/ +.obsidian-sync.lock/ +scripts/sync_obsidian_from_server.sh +scripts/launchd/com.plotpilot.novelpro.obsidian-sync.plist # ── 日志 ── logs/ @@ -114,6 +123,7 @@ base_library.zip # ── 临时 / 无关文件 ── =++Contribution Value Roster++= +# Contributors: xibian-YQ, JamesGoslings # ── PlotPilot 残留 ── PlotPilot-master/ @@ -123,8 +133,12 @@ llm_profiles.json aitext.lock scripts/aitext.lock -# ── Tauri 桌面壳(仅本机打包用,勿提交;协作者 clone 后无此目录,不影响 npm run dev) ── -frontend/src-tauri/ +# ── Tauri 桌面壳 ── +# 提交源码,忽略构建产物 +frontend/src-tauri/target/ +frontend/src-tauri/Cargo.lock +frontend/src-tauri/scripts/ +frontend/src-tauri/tools/ # ── 仅本机 Windows 打包链路(协作者只需 npm run dev + 后端) ── scripts/build_installer.py @@ -150,3 +164,9 @@ 
mcps/ *.rs.bk *.orig *.rej + +# ── 本地执行计划 / Agent 过程文档 ── +docs/superpowers/plans/2026-04-27-p1-candidate-drafts.md +docs/superpowers/plans/2026-04-29-topic-idea-incubation.md +docs/superpowers/plans/2026-04-29-topic-idea-report.md +docs/superpowers/specs/2026-04-29-topic-idea-incubation-design.md diff --git a/=++Contribution Value Roster++= b/=++Contribution Value Roster++= index 5bf8af22a..534885de5 100644 --- a/=++Contribution Value Roster++= +++ b/=++Contribution Value Roster++= @@ -17,3 +17,8 @@ 16 wfnysse https://github.com/wfnysse 17 https://github.com/Kobe9312 1Kobe9312 18 https://github.com/zeroranyi zeroranyi +19 bugmaker2 https://github.com/bugmaker2 +20 haoziyouxia https://github.com/haoziyouxia +21 LuoFengXiaoXiao https://github.com/LuoFengXiaoXiao +22 droid-Q https://github.com/droid-Q +23 semir0037-source https://github.com/semir0037-source diff --git a/README.md b/README.md index a0074b77a..5aa68a92f 100644 --- a/README.md +++ b/README.md @@ -18,6 +18,7 @@ - **知识图谱**:自动提取故事三元组,语义检索历史内容。 - **伏笔台账**:追踪并自动闭合叙事钩子。 - **风格分析**:作者声音漂移检测与文体指纹。 +- **NovelPro 作者工作台**:选题立项、候选稿、连续性巡检、战力系统、Obsidian 长期记忆与监控中心。详见 [docs/NOVELPRO_README.md](docs/NOVELPRO_README.md)。 - **节拍表与故事结构**:三幕式、章节节拍规划。 - **DDD 四层架构**:`domain` / `application` / `infrastructure` / `interfaces` diff --git a/application/ai/embedding_config_service.py b/application/ai/embedding_config_service.py index 623044776..9cd44885c 100644 --- a/application/ai/embedding_config_service.py +++ b/application/ai/embedding_config_service.py @@ -157,8 +157,9 @@ def update_config(self, **kwargs) -> EmbeddingConfigModel: params.append("default") # WHERE id = ? sql = f"UPDATE embedding_config SET {', '.join(set_clauses)} WHERE id = ?" 
- db.execute(sql, params) - db.get_connection().commit() + conn = db.get_connection() + conn.execute(sql, tuple(params)) + conn.commit() logger.info("EmbeddingConfigService: 配置已更新,字段: %s", list(kwargs.keys())) return self.get_config() diff --git a/application/ai/llm_json_extract.py b/application/ai/llm_json_extract.py index 2084dff3b..672f37c87 100644 --- a/application/ai/llm_json_extract.py +++ b/application/ai/llm_json_extract.py @@ -89,7 +89,7 @@ def _do_repair(s: str) -> str: if in_string: res += '"' - res = res.strip() + res = ''.join(res).strip() while res.endswith(','): res = res[:-1].strip() while stack: diff --git a/application/analyst/services/coc_canon_service.py b/application/analyst/services/coc_canon_service.py new file mode 100644 index 000000000..5cff536f3 --- /dev/null +++ b/application/analyst/services/coc_canon_service.py @@ -0,0 +1,195 @@ +"""CoC 正典注册表服务。""" +from __future__ import annotations + +from typing import Any, Mapping, Optional + + +class CocCanonService: + """管理 CoC 正典条目与章节证据。""" + + def __init__(self, repository): + self.repository = repository + + def get_overview(self, novel_id: str) -> dict[str, Any]: + entries = self.repository.list_entries(novel_id) + events = self.repository.list_events(novel_id, limit=100) + return { + "novel_id": novel_id, + "entries": entries, + "recent_events": events, + "cognition_layers": self.get_cognition_layers(novel_id), + } + + def get_cognition_layers(self, novel_id: str) -> dict[str, list[str]]: + entries = [ + item + for item in self.repository.list_entries(novel_id) + if str(item.get("status") or "").strip() != "archived" + ] + author_truth: list[str] = [] + reader_known: list[str] = [] + author_truth_snippets: list[str] = [] + for item in entries: + title = str(item.get("title") or "").strip() or "未命名条目" + public_facts = str(item.get("public_facts") or "").strip() + hidden_truth = str(item.get("hidden_truth") or "").strip() + if public_facts: + reader_known.append(f"{title}:{public_facts}") + if 
hidden_truth: + author_truth.append(f"{title}:{hidden_truth}") + if len(hidden_truth) >= 8 and hidden_truth not in author_truth_snippets: + author_truth_snippets.append(hidden_truth[:80]) + return { + "author_truth": author_truth[:24], + "reader_known": reader_known[:24], + "author_truth_snippets": author_truth_snippets[:40], + } + + def upsert_entry( + self, + *, + novel_id: str, + canon_type: str, + title: str, + public_facts: str = "", + hidden_truth: str = "", + lock_level: str = "soft", + mutable_notes: str = "", + status: str = "active", + entry_id: Optional[str] = None, + ) -> dict[str, Any]: + clean_canon_type = canon_type.strip() + clean_title = title.strip() + if not clean_canon_type: + raise ValueError("canon_type is required") + if not clean_title: + raise ValueError("title is required") + + existing = self.repository.get_entry_by_id(entry_id) if entry_id else self.repository.get_entry_by_key( + novel_id, + clean_canon_type, + clean_title, + ) + if existing is not None and existing.get("novel_id") != novel_id: + raise ValueError("entry does not belong to novel") + + incoming = { + "canon_type": clean_canon_type, + "title": clean_title, + "public_facts": public_facts.strip(), + "hidden_truth": hidden_truth.strip(), + "lock_level": self._normalize_lock_level(lock_level), + "mutable_notes": mutable_notes.strip(), + "status": self._normalize_status(status), + } + self.lock_guard_validate_patch(existing, incoming) + return self.repository.upsert_entry( + entry_id=existing["id"] if existing else None, + novel_id=novel_id, + canon_type=incoming["canon_type"], + title=incoming["title"], + public_facts=incoming["public_facts"], + hidden_truth=incoming["hidden_truth"], + lock_level=incoming["lock_level"], + mutable_notes=incoming["mutable_notes"], + status=incoming["status"], + ) + + def create_event( + self, + *, + novel_id: str, + entry_id: str = "", + title: str = "", + chapter_number: int, + event_type: str = "mention", + evidence: str = "", + notes: str = 
"", + ) -> dict[str, Any]: + clean_entry_id = str(entry_id or "").strip() + clean_title = str(title or "").strip() + chapter = int(chapter_number) + if chapter < 1: + raise ValueError("chapter_number must be greater than 0") + if not clean_entry_id and not clean_title: + raise ValueError("entry_id or title is required") + + entry = None + if clean_entry_id: + entry = self.repository.get_entry_by_id(clean_entry_id) + elif clean_title: + entry = self.repository.get_entry_by_title(novel_id, clean_title) + if entry is None: + entry = self.upsert_entry( + novel_id=novel_id, + canon_type="other", + title=clean_title, + public_facts="", + hidden_truth="", + lock_level="soft", + mutable_notes="自动创建:由事件记录补建条目。", + status="draft", + ) + if entry is None: + raise ValueError("entry not found") + if entry.get("novel_id") != novel_id: + raise ValueError("entry does not belong to novel") + return self.repository.create_event( + entry_id=str(entry.get("id") or clean_entry_id), + chapter_number=chapter, + event_type=(event_type or "mention").strip() or "mention", + evidence=evidence.strip(), + notes=notes.strip(), + ) + + @staticmethod + def lock_guard_validate_patch( + existing: Optional[Mapping[str, Any]], + incoming: Mapping[str, Any], + ) -> None: + if not existing: + return + if str(existing.get("lock_level") or "").strip() != "absolute": + return + + protected_fields = ("public_facts", "hidden_truth", "title", "canon_type") + for field in protected_fields: + old_value = str(existing.get(field) or "") + new_value = str(incoming.get(field) or "") + if old_value != new_value: + raise ValueError(f"absolute lock forbids changing `{field}`") + + def build_overlay(self, novel_id: str) -> str: + entries = [ + item + for item in self.repository.list_entries(novel_id) + if str(item.get("status") or "active").strip() != "archived" + ] + if not entries: + return "【CoC正典(必须保持一致)】\n- 暂无已登记正典。" + + lines = ["【CoC正典(必须保持一致)】"] + for item in entries: + lines.append( + f"- 
[{item.get('canon_type', '')}] {item.get('title', '')}(锁定:{item.get('lock_level', 'soft')})" + ) + public_facts = str(item.get("public_facts") or "").strip() + hidden_truth = str(item.get("hidden_truth") or "").strip() + mutable_notes = str(item.get("mutable_notes") or "").strip() + if public_facts: + lines.append(f" 公共事实:{public_facts}") + if hidden_truth: + lines.append(f" 隐藏真相:{hidden_truth}") + if mutable_notes: + lines.append(f" 可变备注:{mutable_notes}") + return "\n".join(lines) + + @staticmethod + def _normalize_lock_level(value: str) -> str: + normalized = (value or "").strip().lower() + return normalized if normalized in {"soft", "strict", "absolute"} else "soft" + + @staticmethod + def _normalize_status(value: str) -> str: + normalized = (value or "").strip().lower() + return normalized if normalized in {"active", "draft", "archived"} else "active" diff --git a/application/analyst/services/coc_clue_service.py b/application/analyst/services/coc_clue_service.py new file mode 100644 index 000000000..9c34a5a6f --- /dev/null +++ b/application/analyst/services/coc_clue_service.py @@ -0,0 +1,228 @@ +"""CoC 线索账本服务。""" +from __future__ import annotations + +from typing import Any, Mapping, Optional + + +class CocClueService: + """管理 CoC 线索条目与章节证据。""" + + def __init__(self, repository): + self.repository = repository + + def get_overview(self, novel_id: str) -> dict[str, Any]: + return { + "novel_id": novel_id, + "items": self.repository.list_items(novel_id), + "recent_events": self.repository.list_events(novel_id, limit=100), + "cognition_layers": self.get_cognition_layers(novel_id), + } + + def get_cognition_layers(self, novel_id: str) -> dict[str, list[str]]: + items = self.repository.list_items(novel_id) + author_truth: list[str] = [] + character_known: list[str] = [] + reader_known: list[str] = [] + for item in items: + clue_key = str(item.get("clue_key") or "").strip() or "未命名线索" + clue_text = str(item.get("clue_text") or "").strip() or "(待补充)" + visibility = 
str(item.get("visibility") or "").strip().lower() + known_by = self._normalize_known_by(item.get("known_by") or "") + suffix = f"(已知角色:{known_by or '未记录'})" + line = f"{clue_key}:{clue_text}{suffix}" + if visibility == "author_only": + author_truth.append(line) + elif visibility == "protagonist_known": + character_known.append(line) + else: + reader_known.append(line) + return { + "author_truth": author_truth[:24], + "character_known": character_known[:24], + "reader_known": reader_known[:24], + } + + def upsert_item( + self, + *, + novel_id: str, + clue_key: str, + clue_text: str = "", + visibility: str = "reader_known", + reveal_chapter: Optional[int] = None, + known_by: Any = "", + confidence: float = 0.5, + lock_level: str = "soft", + status: str = "active", + notes: str = "", + entry_id: Optional[str] = None, + ) -> dict[str, Any]: + clean_key = clue_key.strip() + if not clean_key: + raise ValueError("clue_key is required") + + existing = self.repository.get_item_by_id(entry_id) if entry_id else self.repository.get_item_by_key( + novel_id, + clean_key, + ) + if existing is not None and existing.get("novel_id") != novel_id: + raise ValueError("entry does not belong to novel") + + incoming = { + "clue_key": clean_key, + "clue_text": (clue_text or "").strip(), + "visibility": self._normalize_visibility(visibility), + "reveal_chapter": self._normalize_reveal_chapter(reveal_chapter), + "known_by": self._normalize_known_by(known_by), + "confidence": self._normalize_confidence(confidence), + "lock_level": self._normalize_lock_level(lock_level), + "status": self._normalize_status(status), + "notes": (notes or "").strip(), + } + self.lock_guard_validate_patch(existing, incoming) + return self.repository.upsert_item( + item_id=existing["id"] if existing else None, + novel_id=novel_id, + clue_key=incoming["clue_key"], + clue_text=incoming["clue_text"], + visibility=incoming["visibility"], + reveal_chapter=incoming["reveal_chapter"], + known_by=incoming["known_by"], + 
confidence=incoming["confidence"], + lock_level=incoming["lock_level"], + status=incoming["status"], + notes=incoming["notes"], + ) + + def create_event( + self, + *, + novel_id: str, + entry_id: str = "", + clue_key: str = "", + chapter_number: int, + event_type: str = "mention", + evidence: str = "", + notes: str = "", + ) -> dict[str, Any]: + clean_entry_id = str(entry_id or "").strip() + clean_clue_key = str(clue_key or "").strip() + chapter = int(chapter_number) + if chapter < 1: + raise ValueError("chapter_number must be greater than 0") + if bool(clean_entry_id) == bool(clean_clue_key): + raise ValueError("entry_id or clue_key is required (choose one)") + + item = None + if clean_entry_id: + item = self.repository.get_item_by_id(clean_entry_id) + elif clean_clue_key: + item = self.repository.get_item_by_key(novel_id, clean_clue_key) + if item is None: + item = self.upsert_item( + novel_id=novel_id, + clue_key=clean_clue_key, + clue_text="", + visibility="reader_known", + reveal_chapter=chapter, + known_by="", + confidence=0.4, + lock_level="soft", + status="active", + notes="自动创建:由线索事件补建 draft 线索。", + ) + + if item is None: + raise ValueError("clue item not found") + if item.get("novel_id") != novel_id: + raise ValueError("entry does not belong to novel") + + return self.repository.create_event( + clue_id=str(item.get("id") or clean_entry_id), + chapter_number=chapter, + event_type=(event_type or "mention").strip() or "mention", + evidence=(evidence or "").strip(), + notes=(notes or "").strip(), + ) + + @staticmethod + def lock_guard_validate_patch( + existing: Optional[Mapping[str, Any]], + incoming: Mapping[str, Any], + ) -> None: + if not existing: + return + if str(existing.get("lock_level") or "").strip() != "absolute": + return + for field in ("clue_key", "clue_text", "reveal_chapter"): + old_value = str(existing.get(field) or "") + new_value = str(incoming.get(field) or "") + if old_value != new_value: + raise ValueError(f"absolute lock forbids 
changing `{field}`") + + def build_overlay(self, novel_id: str) -> str: + items = self.repository.list_items(novel_id) + if not items: + return "【CoC线索账本(已知信息边界)】\n- 暂无已登记线索。" + + prioritized = [ + item + for item in items + if str(item.get("status") or "").strip() == "active" + and str(item.get("visibility") or "").strip() != "author_only" + ] + others = [ + item + for item in items + if item not in prioritized + and str(item.get("visibility") or "").strip() != "author_only" + ] + + if not prioritized and not others: + return "【CoC线索账本(已知信息边界)】\n- 当前仅有作者私有线索(author_only)。" + + lines = ["【CoC线索账本(已知信息边界)】"] + for item in prioritized + others: + reveal_chapter = item.get("reveal_chapter") + reveal_text = f"第{reveal_chapter}章" if reveal_chapter else "待揭示" + lines.append( + f"- {item.get('clue_key', '')}: {item.get('clue_text', '') or '(待补充)'}" + f"(可见={item.get('visibility', 'reader_known')},状态={item.get('status', 'active')},揭示={reveal_text})" + ) + return "\n".join(lines) + + @staticmethod + def _normalize_visibility(value: str) -> str: + normalized = (value or "").strip().lower() + return normalized if normalized in {"reader_known", "protagonist_known", "author_only"} else "reader_known" + + @staticmethod + def _normalize_lock_level(value: str) -> str: + normalized = (value or "").strip().lower() + return normalized if normalized in {"soft", "strict", "absolute"} else "soft" + + @staticmethod + def _normalize_status(value: str) -> str: + normalized = (value or "").strip().lower() + return normalized if normalized in {"active", "resolved", "refuted"} else "active" + + @staticmethod + def _normalize_reveal_chapter(value: Optional[int]) -> Optional[int]: + if value is None: + return None + chapter = int(value) + return chapter if chapter > 0 else None + + @staticmethod + def _normalize_confidence(value: float) -> float: + try: + confidence = float(value) + except (TypeError, ValueError): + return 0.5 + return max(0.0, min(1.0, confidence)) + + @staticmethod + def 
_normalize_known_by(value: Any) -> str: + if isinstance(value, list): + return ", ".join(str(item).strip() for item in value if str(item).strip()) + return str(value or "").strip() diff --git a/application/analyst/services/coc_preset_service.py b/application/analyst/services/coc_preset_service.py new file mode 100644 index 000000000..865baf44c --- /dev/null +++ b/application/analyst/services/coc_preset_service.py @@ -0,0 +1,646 @@ +"""CoC 初始模板服务。""" +from __future__ import annotations + +from typing import Any + + +class CocPresetService: + """将预设模板批量写入 CoC 正典/线索/道具账本。""" + + def __init__(self, canon_service, clue_service, prop_ledger_service=None): + self.canon_service = canon_service + self.clue_service = clue_service + self.prop_ledger_service = prop_ledger_service + + def list_presets(self) -> list[dict[str, Any]]: + presets = [] + for preset in self._preset_definitions(): + presets.append({ + "key": preset["key"], + "name": preset["name"], + "description": preset["description"], + "source_novel_id": preset["source_novel_id"], + "canon_count": len(preset["canon_entries"]), + "clue_count": len(preset["clue_items"]), + "prop_count": len(preset.get("prop_items") or []), + }) + return presets + + def apply_preset( + self, + *, + novel_id: str, + preset_key: str = "analysis-loop-721", + overwrite_existing: bool = False, + ) -> dict[str, Any]: + preset = self._get_preset((preset_key or "").strip().lower()) + if preset is None: + raise ValueError(f"unknown preset_key: {preset_key}") + + created_canon = 0 + created_clues = 0 + created_props = 0 + skipped = 0 + + for item in preset["canon_entries"]: + existing = self.canon_service.repository.get_entry_by_key( + novel_id, + item["canon_type"], + item["title"], + ) + if existing and not overwrite_existing: + skipped += 1 + continue + self.canon_service.upsert_entry(novel_id=novel_id, **item) + created_canon += 1 + + for item in preset["clue_items"]: + existing = self.clue_service.repository.get_item_by_key(novel_id, 
item["clue_key"]) + if existing and not overwrite_existing: + skipped += 1 + continue + self.clue_service.upsert_item(novel_id=novel_id, **item) + created_clues += 1 + + if self.prop_ledger_service is not None: + for item in preset.get("prop_items") or []: + existing = self.prop_ledger_service.repository.get_item_by_name(novel_id, item["name"]) + if existing and not overwrite_existing: + skipped += 1 + continue + self.prop_ledger_service.upsert_item(novel_id=novel_id, **item) + created_props += 1 + + return { + "preset_key": preset["key"], + "novel_id": novel_id, + "created_canon": created_canon, + "created_clues": created_clues, + "created_props": created_props, + "skipped": skipped, + "overwrite_existing": bool(overwrite_existing), + } + + @classmethod + def _get_preset(cls, key: str) -> dict[str, Any] | None: + return next((item for item in cls._preset_definitions() if item["key"] == key), None) + + @classmethod + def _preset_definitions(cls) -> list[dict[str, Any]]: + return [ + cls._analysis_loop_721_preset(), + cls._fog_harbor_gray_card_preset(), + ] + + @staticmethod + def _analysis_loop_721_preset() -> dict[str, Any]: + return { + "key": "analysis-loop-721", + "name": "循环隧道·721(分析模板)", + "description": "基于已分析的循环悬疑样本抽取:固定叙事边界、核心规则、延迟揭示线索。", + "source_novel_id": "novel-1777374690524", + "canon_entries": [ + { + "canon_type": "world_rule", + "title": "叙事视角边界", + "public_facts": "全书坚持第三人称限制视角,信息边界以主角当前感知为准。", + "hidden_truth": "", + "lock_level": "absolute", + "mutable_notes": "可加强体感细节,但不能切换全知叙述。", + "status": "active", + }, + { + "canon_type": "timeline", + "title": "第七次熄灯与十七分钟窗口", + "public_facts": "第七次熄灯后,关键窗口为十七分钟,剧情推进围绕窗口前后触发。", + "hidden_truth": "窗口触发与更高层循环校准有关。", + "lock_level": "strict", + "mutable_notes": "窗口长度不可随意改写;若变更需先登记正典事件。", + "status": "active", + }, + { + "canon_type": "artifact", + "title": "721 座椅数字机制", + "public_facts": "721 不是装饰编号,具备实际触发/位移意义。", + "hidden_truth": "721 与主控逻辑的倒序频率耦合,能牵引时间层切换。", + "lock_level": "absolute", + "mutable_notes": 
"不得写成普通噪音或随机巧合。", + "status": "active", + }, + { + "canon_type": "world_rule", + "title": "B-721-M 维护口定位", + "public_facts": "B-721-M 是系统内部维护入口,关联关键人物失踪与回传信息。", + "hidden_truth": "入口同时承担观测与重写权限争夺。", + "lock_level": "strict", + "mutable_notes": "可补充细节,但入口性质不改。", + "status": "active", + }, + { + "canon_type": "character_truth", + "title": "周牧野立场双重性", + "public_facts": "周牧野既提供线索也制造阻力,行为呈校准者特征。", + "hidden_truth": "其行动受高层循环指令约束,存在镜像身份来源。", + "lock_level": "strict", + "mutable_notes": "阶段内可保持暧昧,不可突兀洗白或突兀脸谱化。", + "status": "active", + }, + { + "canon_type": "character_truth", + "title": "苏晓跨时信息链", + "public_facts": "苏晓通过跨时信号持续留下碎片信息,指向更深层规则。", + "hidden_truth": "其状态与主角记忆结构绑定,非普通失踪。", + "lock_level": "strict", + "mutable_notes": "公开层可渐进推进,真相层仅按节奏揭露。", + "status": "active", + }, + ], + "clue_items": [ + { + "clue_key": "clue-721-morse", + "clue_text": "721 数字以摩斯式节律反复出现,不是普通故障。", + "visibility": "reader_known", + "reveal_chapter": 1, + "known_by": "主角", + "confidence": 0.88, + "lock_level": "strict", + "status": "active", + "notes": "用于多章回收的基础线索。", + }, + { + "clue_key": "clue-b721m-entry", + "clue_text": "B-721-M 维护口与关键失踪事件相关,入口可被特定条件触发。", + "visibility": "protagonist_known", + "reveal_chapter": 2, + "known_by": "主角,陈维", + "confidence": 0.82, + "lock_level": "strict", + "status": "active", + "notes": "保持入口性质稳定。", + }, + { + "clue_key": "clue-17min-window", + "clue_text": "十七分钟是关键反应窗口,超时后规则会发生偏移。", + "visibility": "reader_known", + "reveal_chapter": 3, + "known_by": "主角,周牧野", + "confidence": 0.74, + "lock_level": "soft", + "status": "active", + "notes": "常用于段落倒计时。", + }, + { + "clue_key": "clue-double-manual", + "clue_text": "1970 与 2023 的双手册存在互证与冲突信息。", + "visibility": "protagonist_known", + "reveal_chapter": 3, + "known_by": "主角,周牧野", + "confidence": 0.77, + "lock_level": "soft", + "status": "active", + "notes": "适合做误导与校正。", + }, + { + "clue_key": "clue-zhou-origin", + "clue_text": "周牧野可能并非单一时间线个体,来源与循环层有关。", + "visibility": "author_only", + "reveal_chapter": None, + 
"known_by": "作者", + "confidence": 0.71, + "lock_level": "absolute", + "status": "active", + "notes": "作者层线索,严禁正文直出。", + }, + { + "clue_key": "clue-suxiao-binding", + "clue_text": "苏晓状态与主角记忆结构耦合,非普通生死状态。", + "visibility": "author_only", + "reveal_chapter": None, + "known_by": "作者", + "confidence": 0.7, + "lock_level": "absolute", + "status": "active", + "notes": "作者层线索,需拆分为多次渐进揭露。", + }, + ], + "prop_items": [], + } + + @staticmethod + def _fog_harbor_gray_card_preset() -> dict[str, Any]: + return { + "key": "fog-harbor-gray-card", + "name": "雾港灰卡调查团", + "description": "原创 CoC 风格任务流模板:白雨翔、第七档案局、灰卡副本、认知污染与第一季主线。", + "source_novel_id": "原创模板", + "canon_entries": _FOG_HARBOR_CANON_ENTRIES, + "clue_items": _FOG_HARBOR_CLUE_ITEMS, + "prop_items": _FOG_HARBOR_PROP_ITEMS, + } + + +_FOG_HARBOR_CANON_ENTRIES: list[dict[str, Any]] = [ + { + "canon_type": "world_rule", + "title": "系列模式:雾港灰卡调查团", + "public_facts": "本书采用任务流、多异常世界调查、固定主角团结构。白雨翔和第七档案局小组会被灰卡派往不同异常现场,每个副本独立成案,同时回收同一条主线。", + "hidden_truth": "所有任务都在筛选和训练见证人。主角团以为自己在救援和封缄,实际正在补齐一个旧仪式需要的观察、记录、证明和代价。", + "lock_level": "absolute", + "mutable_notes": "可以更换副本外壳,但不能改成普通冒险、纯打怪或无限升级流。", + "status": "active", + }, + { + "canon_type": "character_truth", + "title": "主角:白雨翔", + "public_facts": "白雨翔是雾港第七档案局记录员,曾做调查记者,擅长从证词、旧报纸、照片边角和档案删改处发现矛盾。他的职责是记录任务经过,并决定哪些内容进入正式档案。", + "hidden_truth": "白雨翔并不是第一次接触灰卡。他曾经参与过一次失败见证,相关记忆被档案局封存,而灰卡持续选择他,是因为他能把异常转写成可被现实承认的记录。", + "lock_level": "absolute", + "mutable_notes": "可调整过往细节,但不能改掉记录员/前记者/被灰卡反复选中的核心定位。", + "status": "active", + }, + { + "canon_type": "artifact", + "title": "灰卡任务机制", + "public_facts": "灰卡会在无人注视时出现任务文字,通常只给地点、时限和一个看似可救的人。任务文字会随调查进展发生小幅变化,但不会主动解释真相。", + "hidden_truth": "灰卡不是通讯工具,而是旧仪式的分发器。它通过任务把调查员送入不同异常叙事,让他们完成见证、命名、封缄或献祭中的某一环。", + "lock_level": "absolute", + "mutable_notes": "灰卡可以误导,但不能像系统面板一样直接发布奖励、等级或完整解释。", + "status": "active", + }, + { + "canon_type": "world_rule", + "title": "雾港第七档案局", + "public_facts": 
"第七档案局是雾港地下异常档案机构,对外不存在正式编制。它处理无法归入刑事、民事和自然灾害档案的事件,成员以档案员、顾问、外勤和证物管理员为主。", + "hidden_truth": "档案局并非完全站在人类一侧。局内高层知道灰卡会筛选见证人,却长期把任务包装成应急救援,以换取雾港多年表面平静。", + "lock_level": "strict", + "mutable_notes": "可增设部门和人物,但档案局的暧昧立场要保留。", + "status": "active", + }, + { + "canon_type": "world_rule", + "title": "异常世界进入与退出", + "public_facts": "每个副本都有入口、时限和退出条件。入口形态由当次大纲决定;退出条件通常不是杀死怪物,而是确认真相、完成封缄或带走指定证据。", + "hidden_truth": "副本不是完整世界,而是被旧仪式切出的异常叙事片段。调查员在片段中做出的记录会反向改变现实档案。", + "lock_level": "absolute", + "mutable_notes": "副本规则可变化,但必须有清晰入口、时限、代价和退出条件。", + "status": "active", + }, + { + "canon_type": "world_rule", + "title": "认知污染与理智代价", + "public_facts": "越接近异常真相,调查员越容易出现记忆断片、感官错位、熟人陌生化、时间感错误和身份边界松动。代价首先表现为认知偏移,而不是数值扣减。", + "hidden_truth": "所谓理智损耗不是精神脆弱,而是人类认知被迫容纳非人结构。每次成功封缄都会留下一个小缺口,最终让白雨翔能看见完整仪式。", + "lock_level": "absolute", + "mutable_notes": "不得写成简单发疯或游戏数值;代价要具体落在记忆、关系、判断和身体感上。", + "status": "active", + }, + { + "canon_type": "world_rule", + "title": "异常源不可真正杀死", + "public_facts": "主角团能阻止扩散、救出部分人、封住入口或拿走关键证据,但无法真正杀死异常源。每个胜利都必须带着未解决的残留。", + "hidden_truth": "异常源只是旧仪式的投影。不同副本的具体表现需要随连载大纲逐步登记,不能在初始模板里全部写死。", + "lock_level": "strict", + "mutable_notes": "结局可以阶段性胜利,但不能把副本写成彻底通关。", + "status": "active", + }, + { + "canon_type": "character_truth", + "title": "固定主角团功能位", + "public_facts": "核心小组由白雨翔、许照、周闻笙、陈泊舟组成。白雨翔负责记录和叙事漏洞;许照负责尸检、现场和证据链;周闻笙负责民俗、仪式和禁忌;陈泊舟负责外勤、撤离和保护。", + "hidden_truth": "四人的功能位对应旧仪式的四个动作:记录、验证、命名、执行。灰卡不是随机选人,而是在把他们放回各自的位置。", + "lock_level": "strict", + "mutable_notes": "可以增加临时队友,但四人核心功能不要被替换。", + "status": "active", + }, + { + "canon_type": "world_rule", + "title": "正典与线索渐进登记规则", + "public_facts": "初始模板只固定主角团、灰卡机制、第一案起点和不可变边界。后续副本的大纲、真相、角色死亡、关键反转和读者反馈调整,必须在剧情确定后再登记为正典或线索。", + "hidden_truth": "第七档案局的记录本身会被污染;过早写死未来副本会削弱连载调整空间,也容易造成认知预检误判。", + "lock_level": "strict", + "mutable_notes": "允许保留系列方向,但不把未来副本细节作为初始硬设定。", + "status": "active", + }, + { + "canon_type": "world_rule", + "title": "每章戏剧任务规则", + "public_facts": 
"每章生成前必须明确:角色想要什么、谁阻碍、读者期待什么、信息发生了什么变化、人物关系发生了什么变化、结尾留下什么钩子。正文只围绕这些任务推进。", + "hidden_truth": "", + "lock_level": "strict", + "mutable_notes": "用于约束 PP 章节生成,避免散、顺、假和全知解释。", + "status": "active", + }, + { + "canon_type": "world_rule", + "title": "正文信息边界", + "public_facts": "正文只能写角色可感知、可推断、可误解的信息。禁止直接解释旧仪式、异常源本质和作者层真相。恐怖优先来自细节错位、证词互相抵触和日常秩序失效。", + "hidden_truth": "", + "lock_level": "absolute", + "mutable_notes": "可用档案摘录、录音、照片、短信补信息,但也必须受角色认知限制。", + "status": "active", + }, + { + "canon_type": "world_rule", + "title": "调查员属性与技能边界", + "public_facts": "主角团每人有固定属性、固定核心技能和固定职责。技能可因剧情获得临时优势/惩罚,但不随副本自动升级;新能力必须来自明确训练、代价或道具。", + "hidden_truth": "灰卡任务会通过失败和污染改变角色对技能的信心,而不是直接提升数值。", + "lock_level": "strict", + "mutable_notes": "后续可补每个角色的属性卡,但不要把技能写成网游式成长。", + "status": "active", + }, + { + "canon_type": "artifact", + "title": "核心道具与临时道具规则", + "public_facts": "主角团每名成员至少绑定一个固定核心道具。核心道具可带出任务;损坏、遗失、污染必须触发惩罚。非核心道具只能在当前任务内使用,任务结束后不能带出,除非转化为证物并登记。", + "hidden_truth": "核心道具并非单纯装备,而是角色在旧仪式中的位置锚点。道具受损会牵连记忆、关系、判断或下一次任务的安全余量。", + "lock_level": "absolute", + "mutable_notes": "非核心道具由剧情和道具识别自动生成;核心道具必须人工确认后登记。", + "status": "active", + }, + { + "canon_type": "world_rule", + "title": "主角团固定与关系遗忘", + "public_facts": "主角团成员固定,不因单个副本随意换人。成员可能因为认知污染遗忘某段关系、某次救援或某个私人细节,但团队席位与职责保持稳定。", + "hidden_truth": "关系遗忘是旧仪式削弱团队互证能力的方式;被忘记的关系仍会在道具、照片、录音和档案批注里留下痕迹。", + "lock_level": "absolute", + "mutable_notes": "允许失联、受伤、短期不信任,但不要无铺垫替换核心队员。", + "status": "active", + }, + { + "canon_type": "other", + "title": "第一副本:盐雾灯塔", + "public_facts": "任务卡提示雾港东侧旧灯塔恢复亮灯,三名失踪者家属连续收到同一句短信:我在塔上,别让他们熄灯。白雨翔小组必须在涨潮前确认是否仍有生还者。", + "hidden_truth": "灯塔不是单纯建筑,而是召回失败者的仪式标记。失踪者并非普通被困,他们中的一部分正在替下一批调查员守灯。", + "lock_level": "strict", + "mutable_notes": "第一案重点是建立灰卡、时限、误判、代价和主线回钩。", + "status": "active", + }, +] + +_FOG_HARBOR_CLUE_ITEMS: list[dict[str, Any]] = [ + { + "clue_key": "fog-card-text-change", + "clue_text": "灰卡上的任务文字会在无人注视时发生细微改写。", + "visibility": "protagonist_known", + "reveal_chapter": 1, + "known_by": "白雨翔", + "confidence": 
0.9, + "lock_level": "strict", + "status": "active", + "notes": "用于第一章制造任务不可靠感;正文不要解释灰卡来源。", + }, + { + "clue_key": "fog-bureau-hidden-file", + "clue_text": "第七档案局存在未向白雨翔开放的旧任务档案。", + "visibility": "reader_known", + "reveal_chapter": 1, + "known_by": "读者,白雨翔", + "confidence": 0.78, + "lock_level": "soft", + "status": "active", + "notes": "可以通过权限提示、被遮盖页码、上级打断来呈现。", + }, + { + "clue_key": "lighthouse-repeated-sms", + "clue_text": "三名失踪者家属收到同一句短信:我在塔上,别让他们熄灯。", + "visibility": "reader_known", + "reveal_chapter": 1, + "known_by": "白雨翔,许照,读者", + "confidence": 0.86, + "lock_level": "strict", + "status": "active", + "notes": "第一案开篇钩子;短信来源后置,不要开局解释。", + }, + { + "clue_key": "lighthouse-floor-count", + "clue_text": "灯塔熄灯后楼层数量会变化,队员对楼梯段数的记忆互相冲突。", + "visibility": "protagonist_known", + "reveal_chapter": 1, + "known_by": "白雨翔,陈泊舟", + "confidence": 0.75, + "lock_level": "strict", + "status": "active", + "notes": "用体感和对话呈现,不要写成旁白设定说明。", + }, + { + "clue_key": "lighthouse-fourth-footprint", + "clue_text": "灯塔内有第四个人脚印,但失踪者资料只有三人。", + "visibility": "protagonist_known", + "reveal_chapter": 1, + "known_by": "许照,白雨翔", + "confidence": 0.8, + "lock_level": "strict", + "status": "active", + "notes": "推动误判:第四人可能被认为是凶手或守灯人。", + }, + { + "clue_key": "lighthouse-tide-early", + "clue_text": "潮汐时间比公开海事表提前,且提前幅度与灯塔熄灯次数相关。", + "visibility": "reader_known", + "reveal_chapter": 2, + "known_by": "白雨翔,周闻笙,读者", + "confidence": 0.72, + "lock_level": "soft", + "status": "active", + "notes": "用于制造倒计时压力。", + }, + { + "clue_key": "keeper-is-replacement", + "clue_text": "所谓守灯人不是一个固定的人,而是失败调查者被替换后的职位。", + "visibility": "author_only", + "reveal_chapter": None, + "known_by": "作者", + "confidence": 0.92, + "lock_level": "absolute", + "status": "active", + "notes": "作者层真相,第一案只能通过照片、工牌、声音相似暗示。", + }, + { + "clue_key": "witness-ritual-mainline", + "clue_text": "灰卡任务的真正目的不是救援,而是让白雨翔小组完成见证人训练。", + "visibility": "author_only", + "reveal_chapter": None, + "known_by": "作者", + "confidence": 0.95, + "lock_level": 
"absolute", + "status": "active", + "notes": "主线底牌,严禁前期正文直出。", + }, + { + "clue_key": "white-old-memory-sealed", + "clue_text": "白雨翔曾参与一次失败见证,相关记忆被第七档案局封存。", + "visibility": "author_only", + "reveal_chapter": None, + "known_by": "作者", + "confidence": 0.84, + "lock_level": "absolute", + "status": "active", + "notes": "可先写成白雨翔对某些档案照片产生陌生熟悉感。", + }, + { + "clue_key": "bone-compass-cost", + "clue_text": "骨罗盘能指向异常叙事中心,但使用后会短暂遗忘一个熟人的细节。", + "visibility": "protagonist_known", + "reveal_chapter": 2, + "known_by": "周闻笙,白雨翔", + "confidence": 0.76, + "lock_level": "strict", + "status": "active", + "notes": "用于把道具代价落到人物关系,而不是数值。", + }, + { + "clue_key": "core-prop-loss-penalty", + "clue_text": "核心道具损坏、遗失或被污染时,惩罚优先落在记忆、关系、判断或下一次任务安全余量上。", + "visibility": "protagonist_known", + "reveal_chapter": 1, + "known_by": "白雨翔,许照,周闻笙,陈泊舟", + "confidence": 0.82, + "lock_level": "strict", + "status": "active", + "notes": "用于约束道具账本;不要把核心道具写成普通消耗品。", + }, + { + "clue_key": "temporary-props-cannot-exit", + "clue_text": "副本内取得的非核心道具不能直接带出任务,除非被第七档案局转化为证物并登记。", + "visibility": "protagonist_known", + "reveal_chapter": 1, + "known_by": "白雨翔,陈泊舟", + "confidence": 0.78, + "lock_level": "strict", + "status": "active", + "notes": "非核心道具仍可由剧情自动发现,但任务结束要清理或证物化。", + }, + { + "clue_key": "relationship-memory-loss", + "clue_text": "主角团不会轻易换人,但可能遗忘彼此之间的某段共同经历或私人细节。", + "visibility": "reader_known", + "reveal_chapter": 2, + "known_by": "白雨翔,读者", + "confidence": 0.74, + "lock_level": "strict", + "status": "active", + "notes": "关系遗忘要通过动作、称呼、旧照片和录音残留呈现。", + }, + { + "clue_key": "future-cases-progressive-registration", + "clue_text": "后续副本的大纲、真相、核心线索和读者反馈调整,需要在剧情确定后再登记。", + "visibility": "author_only", + "reveal_chapter": None, + "known_by": "作者", + "confidence": 0.88, + "lock_level": "strict", + "status": "active", + "notes": "这是创作流程约束,不是正文线索;防止初始模板过早锁死未来副本。", + }, +] + +_FOG_HARBOR_PROP_ITEMS: list[dict[str, Any]] = [ + { + "name": "灰卡", + "category": "任务道具", + "status": "可用;文字会自行改写", + "current_holder": 
"白雨翔", + "current_location": "随身证件夹", + "first_seen_chapter": 1, + "last_seen_chapter": 1, + "importance": "major", + "description": "灰白色硬卡,无编号,背面偶尔浮出任务文字。它负责把小组送入异常现场。", + "notes": "写到灰卡时必须保持不解释来源、不发奖励、不像游戏系统面板。", + }, + { + "name": "第七档案局封缄章", + "category": "封缄工具", + "status": "封存于外勤装备箱;需两人确认才能启用", + "current_holder": "陈泊舟", + "current_location": "第七档案局外勤装备箱", + "first_seen_chapter": 1, + "last_seen_chapter": 1, + "importance": "major", + "description": "铜质印章,能短暂封住异常入口,但不能处理异常源。", + "notes": "使用前要交代限制;使用后留下现实侧代价或记录污染。", + }, + { + "name": "盐蚀日志", + "category": "档案证物", + "status": "缺页;靠近海雾时纸页返潮", + "current_holder": "白雨翔", + "current_location": "第七档案局临时档案袋", + "first_seen_chapter": 1, + "last_seen_chapter": 1, + "importance": "major", + "description": "旧守灯人日志,部分页码被盐渍糊住,能记录灯塔案的失败版本。", + "notes": "适合在章末增加新字迹,作为下章钩子。", + }, + { + "name": "骨罗盘", + "category": "异常工具", + "status": "未启用;使用会造成短时记忆缺口", + "current_holder": "周闻笙", + "current_location": "黑布包", + "first_seen_chapter": 2, + "last_seen_chapter": 2, + "importance": "major", + "description": "指针由不明骨片制成,能指向异常叙事中心。", + "notes": "每次使用必须付出具体记忆代价,不能当万能定位器。", + }, + { + "name": "黑线录音笔", + "category": "证物", + "status": "可录音;会录到现场不存在的第五人声音", + "current_holder": "许照", + "current_location": "证物包", + "first_seen_chapter": 1, + "last_seen_chapter": 1, + "importance": "major", + "description": "外壳缠着黑色绝缘线的录音笔,用于保存证词和异常声纹。", + "notes": "录音内容可以制造误导,但不能直接说出作者真相。", + }, + { + "name": "白雨翔的旧记者证", + "category": "个人核心道具", + "status": "边角磨损;照片背面有被刮掉的采访日期", + "current_holder": "白雨翔", + "current_location": "钱包夹层", + "first_seen_chapter": 1, + "last_seen_chapter": 1, + "importance": "major", + "description": "白雨翔离开新闻行业前留下的证件,提醒他仍会本能追问证词来源。", + "notes": "白雨翔固定核心道具。若损坏、遗失或污染,会触发记者时期记忆缺口,或让他误判证词来源。", + }, + { + "name": "空白证物袋", + "category": "共享核心道具", + "status": "未使用;每次任务限带三只", + "current_holder": "第七档案局", + "current_location": "外勤补给柜", + "first_seen_chapter": 1, + "last_seen_chapter": 1, + "importance": "major", + "description": 
"用于把副本内的临时道具转化为可带出现实的证物。封口后会自动生成档案编号。", + "notes": "不归属个人。若用完或污染,非核心道具不能带出任务。", + }, + { + "name": "雾港档案钥匙", + "category": "共享核心道具", + "status": "封存在局内;只在需要查阅封存档案时启用", + "current_holder": "第七档案局", + "current_location": "档案局内库", + "first_seen_chapter": 1, + "last_seen_chapter": 1, + "importance": "major", + "description": "能打开部分被封存的旧案柜,但每次开启都会留下查阅者姓名和一段无法删除的批注。", + "notes": "不归属个人。适合在中后段用于换取关键信息,同时暴露调查痕迹。", + }, + { + "name": "未命名黑匣", + "category": "共享核心道具", + "status": "禁止单独开启;当前封存在第七档案局", + "current_holder": "第七档案局", + "current_location": "负二层证物库", + "first_seen_chapter": 1, + "last_seen_chapter": 1, + "importance": "major", + "description": "可短暂保存异常声音、照片或文字的原始状态,避免证据在现实中被改写。", + "notes": "不归属个人。若损坏,会导致一次已记录证据被现实侧覆盖。", + }, +] diff --git a/application/analyst/services/continuity_overview_service.py b/application/analyst/services/continuity_overview_service.py new file mode 100644 index 000000000..24897eff1 --- /dev/null +++ b/application/analyst/services/continuity_overview_service.py @@ -0,0 +1,1164 @@ +"""连续性总览服务 + +为作者工作台提供轻量的连续性提醒聚合: +- 角色掉线提醒 +- 时间线覆盖情况 +- 文风漂移告警 +- 关系聚焦摘要 +- 关系变化追踪 +- 大纲与正文偏离提醒 +""" +from __future__ import annotations + +from dataclasses import dataclass +import re +from typing import Any, Optional +from uuid import uuid4 + +from domain.novel.value_objects.novel_id import NovelId + + +@dataclass +class _CharacterAppearanceStat: + character_id: str + last_appearance_chapter: int + appearance_count: int + + +class ContinuityOverviewService: + """聚合现有 v1.0.4 数据源的连续性提醒。""" + + def __init__( + self, + *, + bible_service, + chapter_service, + voice_drift_service, + timeline_repository, + db_connection, + ) -> None: + self.bible_service = bible_service + self.chapter_service = chapter_service + self.voice_drift_service = voice_drift_service + self.timeline_repository = timeline_repository + self.db_connection = db_connection + + def get_overview( + self, + novel_id: str, + chapter_number: Optional[int] = None, + *, + dropout_gap: int = 5, + 
max_dropouts: int = 6, + max_timeline_events: int = 5, + max_relationships: int = 6, + ) -> dict[str, Any]: + chapters = self.chapter_service.list_chapters_by_novel(novel_id) + latest_chapter_number = max((chapter.number for chapter in chapters), default=0) + current_chapter_number = chapter_number or latest_chapter_number + + bible = self.bible_service.get_bible_by_novel(novel_id) + drift_report = self.voice_drift_service.get_drift_report(novel_id) + timeline_registry = self.timeline_repository.get_by_novel_id(NovelId(novel_id)) + + appearance_stats = self._load_character_appearance_stats(novel_id, current_chapter_number) + dropouts = self._build_character_dropouts( + bible=bible, + current_chapter_number=current_chapter_number, + appearance_stats=appearance_stats, + dropout_gap=dropout_gap, + max_dropouts=max_dropouts, + ) + timeline = self._build_timeline_summary( + timeline_registry=timeline_registry, + current_chapter_number=current_chapter_number, + max_timeline_events=max_timeline_events, + ) + relationships = self._build_relationship_spotlights( + bible=bible, + max_relationships=max_relationships, + ) + chapter_context = self._load_current_chapter_context(novel_id, current_chapter_number) + relationship_tracking = self._build_relationship_tracking( + novel_id=novel_id, + bible=bible, + current_chapter_number=current_chapter_number, + max_relationships=max_relationships, + chapter_context=chapter_context, + ) + dropouts = self._attach_dropout_relationship_context( + dropouts=dropouts, + bible=bible, + relationship_tracking=relationship_tracking, + ) + outline_deviation = self._build_outline_deviation( + chapter_context=chapter_context, + ) + + scores = list(drift_report.get("scores", []) or []) + latest_score = scores[-1] if scores else None + + return { + "novel_id": novel_id, + "chapter_number": current_chapter_number, + "latest_chapter_number": latest_chapter_number, + "character_dropouts": dropouts, + "relationship_spotlights": relationships, + 
"relationship_tracking": relationship_tracking, + "voice_drift": { + "drift_alert": bool(drift_report.get("drift_alert", False)), + "latest_similarity_score": latest_score.get("similarity_score") if latest_score else None, + "scored_chapters": len(scores), + "alert_threshold": drift_report.get("alert_threshold", 0.75), + "alert_consecutive": drift_report.get("alert_consecutive", 5), + }, + "timeline": timeline, + "outline_deviation": outline_deviation, + } + + def record_relationship_event(self, novel_id: str, payload: dict[str, Any]) -> dict[str, Any]: + event_id = str(uuid4()) + self.db_connection.execute( + """ + INSERT INTO continuity_relationship_events ( + id, novel_id, chapter_number, source_character, target_character, + relation, event_type, description, evidence, severity + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + """, + ( + event_id, + novel_id, + int(payload.get("chapter_number") or 0), + str(payload.get("source_character") or "").strip(), + str(payload.get("target_character") or "").strip(), + str(payload.get("relation") or "关系").strip() or "关系", + str(payload.get("event_type") or "update").strip() or "update", + str(payload.get("description") or "").strip(), + str(payload.get("evidence") or "").strip(), + str(payload.get("severity") or "info").strip() or "info", + ), + ) + return { + "id": event_id, + "novel_id": novel_id, + "chapter_number": int(payload.get("chapter_number") or 0), + "source_character": str(payload.get("source_character") or "").strip(), + "target_character": str(payload.get("target_character") or "").strip(), + "relation": str(payload.get("relation") or "关系").strip() or "关系", + "event_type": str(payload.get("event_type") or "update").strip() or "update", + "description": str(payload.get("description") or "").strip(), + "evidence": str(payload.get("evidence") or "").strip(), + "severity": str(payload.get("severity") or "info").strip() or "info", + } + + def upsert_outline_node_status(self, novel_id: str, payload: dict[str, Any]) -> 
def upsert_outline_node_status(self, novel_id: str, payload: dict[str, Any]) -> dict[str, Any]:
    """Insert or update one outline-node status row and return the stored record.

    On conflict (same novel / chapter / node_key) SQLite keeps the existing
    row's id, so the id is re-read after the upsert instead of trusting the
    freshly generated uuid.

    Args:
        novel_id: Owning novel identifier.
        payload: Raw node fields; blank/missing values fall back to defaults
            (status -> "pending").
    """

    def _text(key: str, default: str = "") -> str:
        # Treat None / missing / whitespace-only all as "not provided".
        value = str(payload.get(key) or "").strip()
        return value or default

    candidate_id = str(uuid4())
    chapter_number = int(payload.get("chapter_number") or 0)
    node_key = _text("node_key")
    # Normalize once so the UPSERT parameters and the returned record
    # can never drift apart (the original built both independently).
    record = {
        "novel_id": novel_id,
        "chapter_number": chapter_number,
        "node_key": node_key,
        "outline_text": _text("outline_text"),
        "status": _text("status", "pending"),
        "note": _text("note"),
        "evidence": _text("evidence"),
    }
    self.db_connection.execute(
        """
        INSERT INTO outline_node_statuses (
            id, novel_id, chapter_number, node_key, outline_text, status, note, evidence
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
        ON CONFLICT(novel_id, chapter_number, node_key) DO UPDATE SET
            outline_text = excluded.outline_text,
            status = excluded.status,
            note = excluded.note,
            evidence = excluded.evidence,
            updated_at = CURRENT_TIMESTAMP
        """,
        (
            candidate_id,
            record["novel_id"],
            record["chapter_number"],
            record["node_key"],
            record["outline_text"],
            record["status"],
            record["note"],
            record["evidence"],
        ),
    )
    existing = self.db_connection.execute(
        """
        SELECT id
        FROM outline_node_statuses
        WHERE novel_id = ?
          AND chapter_number = ?
          AND node_key = ?
        LIMIT 1
        """,
        (novel_id, chapter_number, node_key),
    ).fetchone()
    return {"id": str(existing["id"] if existing else candidate_id), **record}
+ outline_count = self._auto_record_outline_statuses( + novel_id=novel_id, + chapter_number=chapter_number, + outline=outline, + content=content, + ) + return { + "relationship_events": relationship_count, + "outline_nodes": outline_count, + } + + def _get_table_columns(self, table_name: str) -> set[str]: + try: + rows = self.db_connection.execute(f"PRAGMA table_info({table_name})").fetchall() + except Exception: + return set() + return {str(row["name"]) for row in rows if row["name"]} + + def _load_current_chapter_context(self, novel_id: str, chapter_number: int) -> dict[str, str]: + context = { + "novel_id": novel_id, + "chapter_number": str(chapter_number), + "content": "", + "outline": "", + "summary": "", + "key_events": "", + "open_threads": "", + "consistency_note": "", + "review_memo": "", + } + if chapter_number <= 0: + return context + + chapter_row = self.db_connection.execute( + """ + SELECT + ch.content AS chapter_content, + ch.outline AS chapter_outline, + sn.outline AS story_outline + FROM chapters ch + LEFT JOIN story_nodes sn + ON sn.novel_id = ch.novel_id + AND sn.node_type = 'chapter' + AND sn.number = ch.number + WHERE ch.novel_id = ? + AND ch.number = ? + LIMIT 1 + """, + (novel_id, chapter_number), + ).fetchone() + if chapter_row: + context["content"] = str(chapter_row["chapter_content"] or "") + context["outline"] = str(chapter_row["story_outline"] or chapter_row["chapter_outline"] or "") + + summary_columns = self._get_table_columns("chapter_summaries") + if summary_columns: + select_columns = ["cs.summary"] + optional_columns = [] + for column in ("key_events", "open_threads", "consistency_note"): + if column in summary_columns: + select_columns.append(f"cs.{column}") + optional_columns.append(column) + + summary_row = self.db_connection.execute( + f""" + SELECT {", ".join(select_columns)} + FROM chapter_summaries cs + JOIN knowledge k ON k.id = cs.knowledge_id + WHERE k.novel_id = ? + AND cs.chapter_number = ? 
def _load_character_appearance_stats(
    self,
    novel_id: str,
    current_chapter_number: int,
) -> dict[str, _CharacterAppearanceStat]:
    """Aggregate, per character, the latest chapter it appeared in and how often.

    Returns an empty mapping when there is no chapter to look at yet.
    """
    if current_chapter_number <= 0:
        return {}

    cursor = self.db_connection.execute(
        """
        SELECT
            ce.element_id AS character_id,
            MAX(sn.number) AS last_appearance_chapter,
            COUNT(DISTINCT sn.number) AS appearance_count
        FROM chapter_elements ce
        JOIN story_nodes sn
            ON sn.id = ce.chapter_id
            AND sn.novel_id = ?
            AND sn.node_type = 'chapter'
        WHERE ce.element_type = 'character'
            AND ce.relation_type = 'appears'
            AND sn.number <= ?
        GROUP BY ce.element_id
        """,
        (novel_id, current_chapter_number),
    )
    return {
        row["character_id"]: _CharacterAppearanceStat(
            character_id=row["character_id"],
            last_appearance_chapter=int(row["last_appearance_chapter"] or 0),
            appearance_count=int(row["appearance_count"] or 0),
        )
        for row in cursor.fetchall()
    }
+ GROUP BY ce.element_id + """, + (novel_id, current_chapter_number), + ) + + stats: dict[str, _CharacterAppearanceStat] = {} + for row in cursor.fetchall(): + stats[row["character_id"]] = _CharacterAppearanceStat( + character_id=row["character_id"], + last_appearance_chapter=int(row["last_appearance_chapter"] or 0), + appearance_count=int(row["appearance_count"] or 0), + ) + return stats + + def _build_character_dropouts( + self, + *, + bible, + current_chapter_number: int, + appearance_stats: dict[str, _CharacterAppearanceStat], + dropout_gap: int, + max_dropouts: int, + ) -> list[dict[str, Any]]: + if not bible or current_chapter_number <= 0: + return [] + + dropouts: list[dict[str, Any]] = [] + for character in bible.characters: + stat = appearance_stats.get(character.id) + if stat is None or stat.last_appearance_chapter <= 0: + continue + + chapters_absent = current_chapter_number - stat.last_appearance_chapter + if chapters_absent < dropout_gap: + continue + + severity = "high" if chapters_absent >= 10 else "medium" if chapters_absent >= 7 else "low" + dropouts.append( + { + "character_id": character.id, + "character_name": character.name, + "last_appearance_chapter": stat.last_appearance_chapter, + "chapters_absent": chapters_absent, + "appearance_count": stat.appearance_count, + "severity": severity, + } + ) + + dropouts.sort( + key=lambda item: ( + -item["chapters_absent"], + -item["appearance_count"], + item["character_name"], + ) + ) + return dropouts[:max_dropouts] + + def _build_timeline_summary( + self, + *, + timeline_registry, + current_chapter_number: int, + max_timeline_events: int, + ) -> dict[str, Any]: + if timeline_registry is None: + return { + "total_events": 0, + "current_chapter_has_event": False, + "current_chapter_events": [], + "recent_events": [], + "conflicts": [], + "conflict_count": 0, + } + + events = list(timeline_registry.get_all_events_sorted()) + current_events = [event for event in events if event.chapter_number == 
current_chapter_number] + recent_events = [event for event in events if event.chapter_number <= current_chapter_number][-max_timeline_events:] + conflicts = self._detect_timeline_conflicts(current_events, recent_events) + + def _serialize(event) -> dict[str, Any]: + return { + "id": event.id, + "chapter_number": event.chapter_number, + "event": event.event, + "timestamp": event.timestamp, + "timestamp_type": event.timestamp_type, + } + + return { + "total_events": len(events), + "current_chapter_has_event": len(current_events) > 0, + "current_chapter_events": [_serialize(event) for event in current_events], + "recent_events": [_serialize(event) for event in recent_events], + "conflicts": conflicts, + "conflict_count": len(conflicts), + } + + def _detect_timeline_conflicts(self, current_events: list[Any], recent_events: list[Any]) -> list[dict[str, Any]]: + conflicts: list[dict[str, Any]] = [] + absolute_current = [ + event for event in current_events + if str(getattr(event, "timestamp_type", "") or "").lower() == "absolute" + and str(getattr(event, "timestamp", "") or "").strip() + ] + absolute_values = {str(event.timestamp).strip() for event in absolute_current} + if len(absolute_values) > 1: + conflicts.append( + { + "type": "multiple_absolute_times", + "severity": "warning", + "description": "同一章节出现多个绝对时间锚点,建议确认是否为闪回或时间跳切。", + "evidence": ";".join(sorted(absolute_values)), + } + ) + + seen: dict[str, Any] = {} + for event in recent_events: + timestamp = str(getattr(event, "timestamp", "") or "").strip() + if not timestamp or str(getattr(event, "timestamp_type", "") or "").lower() == "vague": + continue + previous = seen.get(timestamp) + if previous and previous.chapter_number != event.chapter_number: + conflicts.append( + { + "type": "reused_time_anchor", + "severity": "info", + "description": "多个章节复用同一时间锚点,若不是同日并行线,需要补充时间推进说明。", + "evidence": f"第{previous.chapter_number}章 / 第{event.chapter_number}章:{timestamp}", + } + ) + break + seen[timestamp] = event + 
return conflicts[:5] + + def _build_relationship_spotlights( + self, + *, + bible, + max_relationships: int, + ) -> list[dict[str, Any]]: + if not bible: + return [] + + items: list[dict[str, Any]] = [] + for character in bible.characters: + relationships = list(getattr(character, "relationships", []) or []) + for relationship in relationships: + if isinstance(relationship, dict): + target_name = ( + relationship.get("target") + or relationship.get("target_name") + or relationship.get("character") + or "" + ) + relation = ( + relationship.get("relation") + or relationship.get("type") + or relationship.get("label") + or relationship.get("status") + or "关系" + ) + description = relationship.get("description") or "" + else: + target_name = "" + relation = str(relationship) + description = "" + + if not relation.strip(): + continue + + items.append( + { + "source_character": character.name, + "target_character": target_name, + "relation": relation, + "description": description, + } + ) + + if len(items) >= max_relationships: + return items + return items + + def _auto_record_relationship_events( + self, + *, + novel_id: str, + chapter_number: int, + content: str, + bible, + ) -> int: + if not bible or not content: + return 0 + + character_names = [ + str(getattr(character, "name", "") or "").strip() + for character in list(getattr(bible, "characters", []) or []) + if str(getattr(character, "name", "") or "").strip() + ] + if len(character_names) < 2: + return 0 + + markers = { + "关系升温": ("和解", "信任", "默契", "亲近", "依赖", "联手", "暧昧", "升温"), + "关系趋紧": ("裂痕", "疏远", "争执", "冲突", "敌意", "不信任", "决裂", "对峙"), + } + recorded = 0 + seen_pairs: set[tuple[str, str, str]] = set() + for line in re.split(r"[。!?\n]+", content): + text = line.strip() + if not text: + continue + present = [name for name in character_names if name in text] + if len(present) < 2: + continue + event_type = "" + severity = "info" + for label, words in markers.items(): + if any(word in text for word in words): + 
event_type = label + severity = "success" if label == "关系升温" else "warning" + break + if not event_type: + continue + source, target = sorted(present[:2]) + key = (source, target, event_type) + if key in seen_pairs: + continue + seen_pairs.add(key) + self.record_relationship_event( + novel_id, + { + "chapter_number": chapter_number, + "source_character": source, + "target_character": target, + "relation": "自动关系事件", + "event_type": event_type, + "description": f"章后自动识别:{event_type}", + "evidence": text[:160], + "severity": severity, + }, + ) + recorded += 1 + if recorded >= 5: + break + return recorded + + def _auto_record_outline_statuses( + self, + *, + novel_id: str, + chapter_number: int, + outline: str, + content: str, + ) -> int: + segments = self._split_outline_segments(outline) + if not segments: + return 0 + normalized_content = self._normalize_text(content) + recorded = 0 + for index, segment in enumerate(segments[:8], start=1): + normalized_segment = self._normalize_text(segment) + status = "matched" if normalized_segment and normalized_segment in normalized_content else "pending" + self.upsert_outline_node_status( + novel_id, + { + "chapter_number": chapter_number, + "node_key": f"auto-segment-{index}", + "outline_text": segment, + "status": status, + "note": "章后自动沉淀", + "evidence": segment if status == "matched" else "", + }, + ) + recorded += 1 + return recorded + + def _build_relationship_tracking( + self, + *, + novel_id: str, + bible, + current_chapter_number: int, + max_relationships: int, + chapter_context: dict[str, str], + ) -> dict[str, Any]: + structured_events = self._load_structured_relationship_events( + novel_id=novel_id, + current_chapter_number=current_chapter_number, + max_relationships=max_relationships, + ) if current_chapter_number > 0 else [] + + if not bible or current_chapter_number <= 0: + return { + "source": "structured" if structured_events else "heuristic", + "tracked_pairs": 0, + "active_signals": structured_events, + 
"stale_pairs": [], + } + + name_to_id = { + str(character.name or ""): str(character.id or "") + for character in bible.characters + if getattr(character, "name", None) and getattr(character, "id", None) + } + active_signals: list[dict[str, Any]] = [] + stale_pairs: list[dict[str, Any]] = [] + tracked_pairs = 0 + text_bundle = "\n".join( + [ + chapter_context.get("summary", ""), + chapter_context.get("key_events", ""), + chapter_context.get("open_threads", ""), + chapter_context.get("consistency_note", ""), + chapter_context.get("review_memo", ""), + chapter_context.get("content", "")[:600], + ] + ) + + for character in bible.characters: + source_name = str(getattr(character, "name", "") or "").strip() + source_id = str(getattr(character, "id", "") or "").strip() + relationships = list(getattr(character, "relationships", []) or []) + + for relationship in relationships: + if not source_name or not source_id: + continue + target_name = "" + relation = "关系" + description = "" + if isinstance(relationship, dict): + target_name = str( + relationship.get("target") + or relationship.get("target_name") + or relationship.get("character") + or "" + ).strip() + relation = str( + relationship.get("relation") + or relationship.get("type") + or relationship.get("label") + or relationship.get("status") + or "关系" + ).strip() or "关系" + description = str(relationship.get("description") or "").strip() + else: + relation = str(relationship).strip() or "关系" + + tracked_pairs += 1 + target_id = name_to_id.get(target_name, "") + joint_stat = self._load_joint_appearance_stat( + novel_id=novel_id, + current_chapter_number=current_chapter_number, + source_id=source_id, + target_id=target_id, + ) if target_id else None + + last_joint_chapter = int((joint_stat or {}).get("last_joint_chapter") or 0) + joint_appearance_count = int((joint_stat or {}).get("joint_appearance_count") or 0) + chapters_since_joint = ( + current_chapter_number - last_joint_chapter if last_joint_chapter > 0 else None + 
) + change_signal, severity = self._infer_relationship_signal( + text_bundle=text_bundle, + source_name=source_name, + target_name=target_name, + ) + signal_excerpt = self._extract_signal_excerpt(text_bundle, source_name, target_name) + + if last_joint_chapter == current_chapter_number or change_signal: + active_signals.append( + { + "source_character": source_name, + "target_character": target_name, + "relation": relation, + "description": description, + "last_joint_chapter": last_joint_chapter, + "joint_appearance_count": joint_appearance_count, + "change_signal": change_signal or "本章有关系推进", + "signal_excerpt": signal_excerpt, + "severity": severity or "info", + "source": "heuristic", + } + ) + elif chapters_since_joint is not None and chapters_since_joint >= 5: + stale_pairs.append( + { + "source_character": source_name, + "target_character": target_name, + "relation": relation, + "description": description, + "last_joint_chapter": last_joint_chapter, + "chapters_since_joint": chapters_since_joint, + "severity": "warning" if chapters_since_joint >= 8 else "info", + } + ) + + active_signals.sort( + key=lambda item: ( + item["severity"] != "warning", + -(item["last_joint_chapter"] or 0), + item["source_character"], + ) + ) + stale_pairs.sort( + key=lambda item: ( + -item["chapters_since_joint"], + item["source_character"], + ) + ) + if structured_events: + seen_pairs = { + ( + item["source_character"], + item["target_character"], + item["change_signal"], + ) + for item in structured_events + } + active_signals = structured_events + [ + item + for item in active_signals + if ( + item["source_character"], + item["target_character"], + item["change_signal"], + ) not in seen_pairs + ] + + return { + "source": "structured" if structured_events else "heuristic", + "tracked_pairs": tracked_pairs, + "active_signals": active_signals[:max_relationships], + "stale_pairs": stale_pairs[:max_relationships], + } + + def _load_structured_relationship_events( + self, + *, + 
novel_id: str, + current_chapter_number: int, + max_relationships: int, + ) -> list[dict[str, Any]]: + columns = self._get_table_columns("continuity_relationship_events") + required = { + "novel_id", + "chapter_number", + "source_character", + "target_character", + "relation", + "event_type", + "description", + "evidence", + "severity", + } + if not required.issubset(columns): + return [] + + rows = self.db_connection.execute( + """ + SELECT + source_character, + target_character, + relation, + event_type, + description, + evidence, + severity, + chapter_number + FROM continuity_relationship_events + WHERE novel_id = ? + AND chapter_number <= ? + ORDER BY chapter_number DESC, updated_at DESC, created_at DESC + LIMIT ? + """, + (novel_id, current_chapter_number, max_relationships), + ).fetchall() + + events: list[dict[str, Any]] = [] + for row in rows: + chapter_number = int(row["chapter_number"] or current_chapter_number) + events.append( + { + "source_character": str(row["source_character"] or ""), + "target_character": str(row["target_character"] or ""), + "relation": str(row["relation"] or "关系"), + "description": str(row["description"] or ""), + "last_joint_chapter": chapter_number, + "joint_appearance_count": 0, + "change_signal": str(row["event_type"] or "update"), + "signal_excerpt": str(row["evidence"] or row["description"] or ""), + "severity": str(row["severity"] or "info"), + "source": "structured", + } + ) + return events + + def _attach_dropout_relationship_context( + self, + *, + dropouts: list[dict[str, Any]], + bible, + relationship_tracking: dict[str, Any], + ) -> list[dict[str, Any]]: + if not dropouts: + return dropouts + + tracked_map = self._build_character_relationship_map(bible) + stale_map: dict[str, list[str]] = {} + + for item in relationship_tracking.get("stale_pairs", []) or []: + source_name = str(item.get("source_character") or "").strip() + target_name = str(item.get("target_character") or "").strip() + if source_name and target_name: 
+ stale_map.setdefault(source_name, []).append(target_name) + stale_map.setdefault(target_name, []).append(source_name) + + enriched: list[dict[str, Any]] = [] + for item in dropouts: + name = str(item.get("character_name") or "").strip() + tracked_targets = list(dict.fromkeys(tracked_map.get(name, []))) + stale_targets = list(dict.fromkeys(stale_map.get(name, []))) + + if stale_targets: + dropout_scope = "linked" + elif tracked_targets: + dropout_scope = "tracked" + else: + dropout_scope = "solo" + + enriched.append( + { + **item, + "tracked_relationship_count": len(tracked_targets), + "stale_relationship_count": len(stale_targets), + "stale_relationship_targets": stale_targets, + "dropout_scope": dropout_scope, + } + ) + return enriched + + def _build_character_relationship_map(self, bible) -> dict[str, list[str]]: + if not bible: + return {} + + relationship_map: dict[str, list[str]] = {} + for character in bible.characters: + source_name = str(getattr(character, "name", "") or "").strip() + if not source_name: + continue + + for relationship in list(getattr(character, "relationships", []) or []): + if isinstance(relationship, dict): + target_name = str( + relationship.get("target") + or relationship.get("target_name") + or relationship.get("character") + or "" + ).strip() + else: + target_name = "" + + if not target_name: + continue + relationship_map.setdefault(source_name, []).append(target_name) + relationship_map.setdefault(target_name, []).append(source_name) + return relationship_map + + def _load_joint_appearance_stat( + self, + *, + novel_id: str, + current_chapter_number: int, + source_id: str, + target_id: str, + ) -> Optional[dict[str, int]]: + row = self.db_connection.execute( + """ + SELECT + MAX(sn.number) AS last_joint_chapter, + COUNT(DISTINCT sn.number) AS joint_appearance_count + FROM story_nodes sn + JOIN chapter_elements ce1 + ON ce1.chapter_id = sn.id + AND ce1.element_type = 'character' + AND ce1.relation_type = 'appears' + AND 
ce1.element_id = ? + JOIN chapter_elements ce2 + ON ce2.chapter_id = sn.id + AND ce2.element_type = 'character' + AND ce2.relation_type = 'appears' + AND ce2.element_id = ? + WHERE sn.novel_id = ? + AND sn.node_type = 'chapter' + AND sn.number <= ? + """, + (source_id, target_id, novel_id, current_chapter_number), + ).fetchone() + if not row: + return None + return { + "last_joint_chapter": int(row["last_joint_chapter"] or 0), + "joint_appearance_count": int(row["joint_appearance_count"] or 0), + } + + def _infer_relationship_signal( + self, + *, + text_bundle: str, + source_name: str, + target_name: str, + ) -> tuple[str, str]: + if not source_name or not target_name: + return "", "" + normalized = self._normalize_text(text_bundle) + if source_name not in normalized or target_name not in normalized: + return "", "" + + conflict_markers = ("裂痕", "疏远", "争执", "冲突", "敌意", "不信任", "决裂", "紧张", "对峙") + warm_markers = ("和解", "靠近", "信任", "默契", "亲近", "依赖", "联手", "暧昧", "升温") + + if any(marker in normalized for marker in conflict_markers): + return "关系趋紧", "warning" + if any(marker in normalized for marker in warm_markers): + return "关系升温", "success" + return "本章有关系推进", "info" + + def _extract_signal_excerpt(self, text_bundle: str, source_name: str, target_name: str) -> str: + for raw_line in re.split(r"[。!?\n]+", text_bundle): + line = raw_line.strip() + if not line: + continue + if source_name in line and target_name in line: + return line[:80] + return "" + + def _build_outline_deviation(self, *, chapter_context: dict[str, str]) -> dict[str, Any]: + outline = str(chapter_context.get("outline", "") or "").strip() + summary_text = str(chapter_context.get("summary", "") or "").strip() + basis_text = summary_text or str(chapter_context.get("key_events", "") or "").strip() + if not basis_text: + basis_text = str(chapter_context.get("content", "") or "").strip()[:180] + + structured_outline = self._load_structured_outline_statuses( + novel_id=str(chapter_context.get("novel_id", 
"") or ""), + chapter_number=int(chapter_context.get("chapter_number", 0) or 0), + ) + if structured_outline: + return self._build_structured_outline_deviation( + outline=outline, + basis_text=basis_text, + outline_nodes=structured_outline, + ) + + if not outline: + return { + "source": "heuristic", + "status": "unavailable", + "overlap_score": None, + "outline_excerpt": "", + "summary_excerpt": basis_text, + "warning_reasons": ["当前章节还没有可用大纲"], + "outline_nodes": [], + } + if not basis_text: + return { + "source": "heuristic", + "status": "unavailable", + "overlap_score": None, + "outline_excerpt": outline[:120], + "summary_excerpt": "", + "warning_reasons": ["当前章节缺少可用于比对的正文摘要"], + "outline_nodes": [], + } + + outline_segments = self._split_outline_segments(outline) + normalized_basis = self._normalize_text(basis_text) + matched_segments = [ + segment + for segment in outline_segments + if self._normalize_text(segment) and self._normalize_text(segment) in normalized_basis + ] + overlap_score = ( + round(len(matched_segments) / len(outline_segments), 2) + if outline_segments + else 0.0 + ) + + review_memo = str(chapter_context.get("review_memo", "") or "") + warning_reasons: list[str] = [] + if self._contains_outline_drift_marker(review_memo): + warning_reasons.append("审阅备注提示可能偏离大纲") + if outline_segments and overlap_score < 0.34: + warning_reasons.append("章节摘要与章节大纲重合度偏低") + elif outline_segments and overlap_score < 0.55: + warning_reasons.append("章节摘要只覆盖了部分大纲节点") + + if not warning_reasons: + status = "aligned" + elif any("偏离大纲" in reason or "重合度偏低" in reason for reason in warning_reasons): + status = "warning" + else: + status = "watch" + + return { + "source": "heuristic", + "status": status, + "overlap_score": overlap_score, + "outline_excerpt": outline[:120], + "summary_excerpt": basis_text[:120], + "warning_reasons": warning_reasons, + "outline_nodes": [ + { + "node_key": f"segment-{index + 1}", + "outline_text": segment, + "status": "matched" if segment in 
matched_segments else "pending", + "note": "", + "evidence": "", + } + for index, segment in enumerate(outline_segments) + ], + } + + def _load_structured_outline_statuses( + self, + *, + novel_id: str, + chapter_number: int, + ) -> list[dict[str, str]]: + columns = self._get_table_columns("outline_node_statuses") + required = { + "novel_id", + "chapter_number", + "node_key", + "outline_text", + "status", + "note", + "evidence", + } + if not novel_id or chapter_number <= 0 or not required.issubset(columns): + return [] + + rows = self.db_connection.execute( + """ + SELECT node_key, outline_text, status, note, evidence + FROM outline_node_statuses + WHERE novel_id = ? + AND chapter_number = ? + ORDER BY node_key ASC, created_at ASC + """, + (novel_id, chapter_number), + ).fetchall() + return [ + { + "node_key": str(row["node_key"] or ""), + "outline_text": str(row["outline_text"] or ""), + "status": str(row["status"] or "pending"), + "note": str(row["note"] or ""), + "evidence": str(row["evidence"] or ""), + } + for row in rows + ] + + def _build_structured_outline_deviation( + self, + *, + outline: str, + basis_text: str, + outline_nodes: list[dict[str, str]], + ) -> dict[str, Any]: + total = len(outline_nodes) + completed_statuses = {"completed", "matched", "done"} + risk_statuses = {"changed", "missing", "blocked", "deviated"} + completed_count = sum(1 for item in outline_nodes if item.get("status") in completed_statuses) + risk_nodes = [item for item in outline_nodes if item.get("status") in risk_statuses] + overlap_score = round(completed_count / total, 2) if total else None + + warning_reasons: list[str] = [] + if risk_nodes: + warning_reasons.append("结构化大纲节点存在变更或缺失") + if total and completed_count == 0: + warning_reasons.append("结构化大纲节点尚未确认完成") + elif total and overlap_score is not None and overlap_score < 0.55: + warning_reasons.append("结构化大纲节点完成比例偏低") + + if any(item.get("status") in {"missing", "blocked", "deviated"} for item in risk_nodes): + status = 
"warning" + elif risk_nodes or warning_reasons: + status = "watch" + else: + status = "aligned" + + return { + "source": "structured", + "status": status, + "overlap_score": overlap_score, + "outline_excerpt": outline[:120], + "summary_excerpt": basis_text[:120], + "warning_reasons": warning_reasons, + "outline_nodes": outline_nodes, + } + + def _split_outline_segments(self, outline: str) -> list[str]: + segments: list[str] = [] + seen: set[str] = set() + for raw in re.split(r"[,。;:、\n]+", outline): + segment = raw.strip() + segment = re.sub(r"^(并且|并|随后|然后|最终|最后|接着)", "", segment) + if len(segment) < 4: + continue + if segment in seen: + continue + seen.add(segment) + segments.append(segment) + return segments + + def _contains_outline_drift_marker(self, text: str) -> bool: + normalized = self._normalize_text(text) + if not normalized: + return False + markers = ("偏离", "不一致", "不完全一致", "新增设定", "跳出大纲", "脱纲", "失控") + return any(marker in normalized for marker in markers) + + def _normalize_text(self, text: str) -> str: + return re.sub(r"\s+", "", str(text or "")) diff --git a/application/analyst/services/novelpro_ai_suggestion_service.py b/application/analyst/services/novelpro_ai_suggestion_service.py new file mode 100644 index 000000000..e04a0ec33 --- /dev/null +++ b/application/analyst/services/novelpro_ai_suggestion_service.py @@ -0,0 +1,220 @@ +"""NovelPro form-field suggestion service powered by the active PP AI.""" +from __future__ import annotations + +import json +import re +from typing import Any, Dict, List, Optional + +from domain.ai.services.llm_service import GenerationConfig, LLMService +from domain.ai.value_objects.prompt import Prompt + + +SUGGESTION_TYPE_LABELS = { + "voice_anchor": "角色口吻/OOC 锚点", + "voice_sample": "作者样本对", + "relationship_event": "关系变化事件", + "outline_node": "大纲节点状态", + "power_rules": "战力规则", + "power_profile": "角色战力档案", + "power_event": "战斗/升级事件", +} + + +class NovelProAISuggestionService: + """Generate editable suggestions for 
manual NovelPro forms.""" + + def __init__( + self, + *, + llm_service: LLMService, + knowledge_service, + bible_service, + continuity_service, + power_system_service, + ) -> None: + self.llm_service = llm_service + self.knowledge_service = knowledge_service + self.bible_service = bible_service + self.continuity_service = continuity_service + self.power_system_service = power_system_service + + async def suggest_fields( + self, + *, + novel_id: str, + suggestion_type: str, + fields: List[str], + chapter_number: Optional[int] = None, + target: Optional[Dict[str, Any]] = None, + current_values: Optional[Dict[str, Any]] = None, + instruction: str = "", + ) -> Dict[str, Any]: + fields = [str(field).strip() for field in fields if str(field).strip()] + if not fields: + raise ValueError("fields is required") + + prompt = Prompt( + system=( + "你是 PlotPilot NovelPro 内部填表助手。" + "你只为作者生成可编辑建议,不要自动保存,不要写正文。" + "必须输出 JSON,不要 Markdown。" + ), + user=self._build_user_prompt( + novel_id=novel_id, + suggestion_type=suggestion_type, + fields=fields, + chapter_number=chapter_number, + target=target or {}, + current_values=current_values or {}, + instruction=instruction, + ), + ) + result = await self.llm_service.generate( + prompt, + GenerationConfig(max_tokens=1600, temperature=0.35), + ) + parsed = self._parse_json_object(result.content) + suggested_fields = parsed.get("fields", parsed) + if not isinstance(suggested_fields, dict): + suggested_fields = {} + clean_fields = { + field: self._coerce_field_value(suggested_fields.get(field, "")) + for field in fields + if suggested_fields.get(field) is not None + } + return { + "suggestion_type": suggestion_type, + "fields": clean_fields, + "rationale": str(parsed.get("rationale") or "").strip(), + } + + def _build_user_prompt( + self, + *, + novel_id: str, + suggestion_type: str, + fields: List[str], + chapter_number: Optional[int], + target: Dict[str, Any], + current_values: Dict[str, Any], + instruction: str, + ) -> str: + context = 
self._build_context(novel_id, chapter_number) + return "\n".join( + [ + f"小说 ID:{novel_id}", + f"建议类型:{SUGGESTION_TYPE_LABELS.get(suggestion_type, suggestion_type)}", + f"关注章节:{chapter_number or '最新章节'}", + "", + "【作品上下文】", + context, + "", + "【目标对象】", + json.dumps(target, ensure_ascii=False, indent=2), + "", + "【当前表单值】", + json.dumps(current_values, ensure_ascii=False, indent=2), + "", + "【需要生成的字段】", + ", ".join(fields), + "", + "【额外要求】", + instruction.strip() or "根据初始设定、长期记忆、连续性提醒和战力规范给出稳妥建议。", + "", + "【输出 JSON 格式】", + json.dumps( + { + "fields": {field: "这里填建议值" for field in fields}, + "rationale": "一句话说明为什么这样建议", + }, + ensure_ascii=False, + ), + ] + ) + + def _build_context(self, novel_id: str, chapter_number: Optional[int]) -> str: + chunks: List[str] = [] + knowledge = self._safe_call(lambda: self.knowledge_service.get_knowledge(novel_id)) + if knowledge: + premise = str(getattr(knowledge, "premise_lock", "") or "").strip() + if premise: + chunks.append(f"全书基调:{premise}") + facts = getattr(knowledge, "facts", []) or [] + if facts: + chunks.append("长期事实:") + for fact in facts[:12]: + chunks.append( + f"- {getattr(fact, 'subject', '')} / {getattr(fact, 'predicate', '')} / {getattr(fact, 'object', '')}" + ) + chapters = sorted(getattr(knowledge, "chapters", []) or [], key=lambda item: item.chapter_id) + if chapters: + selected = [item for item in chapters if item.chapter_id == chapter_number] or chapters[-3:] + chunks.append("章节记忆:") + for chapter in selected: + summary = str(getattr(chapter, "summary", "") or "").strip() + key_events = str(getattr(chapter, "key_events", "") or "").strip() + chunks.append(f"- 第{chapter.chapter_id}章:{summary or key_events}") + + bible = self._safe_call(lambda: self.bible_service.get_bible_by_novel(novel_id)) + if bible: + for attr, label in (("theme", "主题"), ("genre", "类型"), ("worldview", "世界观")): + value = str(getattr(bible, attr, "") or "").strip() + if value: + chunks.append(f"{label}:{value[:500]}") + + continuity = 
self._safe_call(lambda: self.continuity_service.get_overview(novel_id, chapter_number)) or {} + if continuity: + dropouts = continuity.get("character_dropouts") or [] + outline = continuity.get("outline_deviation") or {} + chunks.append( + "连续性提醒:" + f"掉线角色 {len(dropouts)};" + f"大纲状态 {outline.get('status') or 'unknown'};" + f"原因 {';'.join(outline.get('warning_reasons') or [])}" + ) + + power = self._safe_call(lambda: self.power_system_service.get_overview(novel_id)) or {} + if power: + warnings = power.get("warnings") or [] + rules = power.get("rules") or {} + chunks.append( + "战力约束:" + f"{str(rules.get('tier_schema') or '')[:300]};" + f"提醒 {';'.join(str(item.get('title') or '') for item in warnings[:5])}" + ) + + return "\n".join(chunk for chunk in chunks if str(chunk).strip()) or "暂无结构化上下文。" + + @staticmethod + def _safe_call(factory): + try: + return factory() + except Exception: + return None + + @staticmethod + def _parse_json_object(text: str) -> Dict[str, Any]: + raw = str(text or "").strip() + if raw.startswith("```"): + raw = re.sub(r"^```(?:json)?", "", raw).strip() + raw = re.sub(r"```$", "", raw).strip() + try: + value = json.loads(raw) + return value if isinstance(value, dict) else {} + except json.JSONDecodeError: + match = re.search(r"\{.*\}", raw, re.DOTALL) + if not match: + return {} + try: + value = json.loads(match.group(0)) + return value if isinstance(value, dict) else {} + except json.JSONDecodeError: + return {} + + @staticmethod + def _coerce_field_value(value: Any) -> Any: + if isinstance(value, (int, float, bool)) or value is None: + return value + if isinstance(value, (list, dict)): + return json.dumps(value, ensure_ascii=False) + return str(value).strip() diff --git a/application/analyst/services/novelpro_monitor_service.py b/application/analyst/services/novelpro_monitor_service.py new file mode 100644 index 000000000..47531c67c --- /dev/null +++ b/application/analyst/services/novelpro_monitor_service.py @@ -0,0 +1,318 @@ +"""NovelPro 
自动监控聚合服务。""" +from __future__ import annotations + +from typing import Any, Optional + + +RELATIONSHIP_MARKERS = ( + "关系", + "敌", + "友", + "同盟", + "盟友", + "师", + "徒", + "亲", + "父", + "母", + "兄", + "姐", + "妹", + "爱", + "恨", + "追随", + "背叛", + "保护", + "隶属", +) + + +class NovelProMonitorService: + """把 Obsidian 主记忆、连续性和战力风险收束成右侧监控中心。""" + + def __init__( + self, + *, + knowledge_service, + obsidian_memory_service, + obsidian_sync_service=None, + continuity_service, + power_system_service, + ) -> None: + self.knowledge_service = knowledge_service + self.obsidian_memory_service = obsidian_memory_service + self.obsidian_sync_service = obsidian_sync_service or obsidian_memory_service + self.continuity_service = continuity_service + self.power_system_service = power_system_service + + def get_overview(self, novel_id: str, chapter_number: Optional[int] = None) -> dict[str, Any]: + knowledge = self._safe_call(lambda: self.knowledge_service.get_knowledge(novel_id)) + obsidian_knowledge = self._safe_call(lambda: self.obsidian_memory_service.load_knowledge(novel_id)) + continuity = self._safe_call( + lambda: self.continuity_service.get_overview(novel_id, chapter_number) + ) or {} + power = self._safe_call(lambda: self.power_system_service.get_overview(novel_id)) or {} + + effective_knowledge = obsidian_knowledge or knowledge + alerts = [] + + obsidian = self._build_obsidian_summary(novel_id, obsidian_knowledge) + knowledge_graph = self._build_knowledge_graph_summary(effective_knowledge) + continuity_summary = self._build_continuity_summary(continuity) + power_summary = self._build_power_summary(power) + + alerts.extend(self._build_obsidian_alerts(obsidian, knowledge_graph)) + alerts.extend(self._build_continuity_alerts(continuity)) + alerts.extend(self._build_power_alerts(power)) + + health = self._build_health(alerts) + return { + "novel_id": novel_id, + "chapter_number": int(continuity.get("chapter_number") or chapter_number or 0), + "health": health, + "obsidian": obsidian, + 
"knowledge_graph": knowledge_graph, + "continuity": continuity_summary, + "power": power_summary, + "alerts": alerts, + } + + def sync_obsidian_chapter(self, novel_id: str, chapter_number: int) -> dict[str, Any]: + sync_chapter = getattr(self.obsidian_sync_service, "sync_chapter", None) + if not callable(sync_chapter): + return { + "synced": False, + "reason": "obsidian memory service unavailable", + } + return sync_chapter(novel_id, chapter_number) + + def _build_obsidian_summary(self, novel_id: str, knowledge: Any) -> dict[str, Any]: + graph_path = "" + get_graph_path = getattr(self.obsidian_memory_service, "get_relationship_graph_path", None) + if callable(get_graph_path): + graph_path = str(get_graph_path(novel_id)) + is_installed = getattr(self.obsidian_memory_service, "is_obsidian_installed", None) + is_configured = getattr(self.obsidian_memory_service, "is_vault_configured", None) + return { + "primary_memory": knowledge is not None, + "premise_locked": bool(getattr(knowledge, "premise_lock", "") if knowledge else ""), + "fact_count": len(getattr(knowledge, "facts", []) or []) if knowledge else 0, + "chapter_count": len(getattr(knowledge, "chapters", []) or []) if knowledge else 0, + "relationship_graph_path": graph_path, + "vault_path": str(getattr(self.obsidian_memory_service, "vault_root", "") or ""), + "vault_configured": bool(is_configured()) if callable(is_configured) else False, + "obsidian_app_installed": bool(is_installed()) if callable(is_installed) else False, + } + + def _build_knowledge_graph_summary(self, knowledge: Any) -> dict[str, Any]: + facts = list(getattr(knowledge, "facts", []) or []) if knowledge else [] + entities = set() + relationship_count = 0 + for fact in facts: + subject = str(getattr(fact, "subject", "") or "").strip() + obj = str(getattr(fact, "object", "") or "").strip() + if subject: + entities.add(subject) + if obj: + entities.add(obj) + if self._is_relationship_fact(fact): + relationship_count += 1 + return { + 
"fact_count": len(facts), + "relationship_count": relationship_count, + "entity_count": len(entities), + } + + @staticmethod + def _build_continuity_summary(continuity: dict[str, Any]) -> dict[str, Any]: + relationship_tracking = continuity.get("relationship_tracking") or {} + voice_drift = continuity.get("voice_drift") or {} + timeline = continuity.get("timeline") or {} + outline = continuity.get("outline_deviation") or {} + return { + "dropout_count": len(continuity.get("character_dropouts") or []), + "stale_relationship_count": len(relationship_tracking.get("stale_pairs") or []), + "active_relationship_signal_count": len(relationship_tracking.get("active_signals") or []), + "voice_drift_alert": bool(voice_drift.get("drift_alert", False)), + "timeline_conflict_count": int(timeline.get("conflict_count") or 0), + "current_chapter_has_timeline_event": bool(timeline.get("current_chapter_has_event", False)), + "outline_status": str(outline.get("status") or "unavailable"), + } + + @staticmethod + def _build_power_summary(power: dict[str, Any]) -> dict[str, Any]: + return { + "profile_count": len(power.get("profiles") or []), + "warning_count": len(power.get("warnings") or []), + } + + def _build_obsidian_alerts( + self, + obsidian: dict[str, Any], + knowledge_graph: dict[str, Any], + ) -> list[dict[str, str]]: + alerts = [] + if obsidian["primary_memory"]: + alerts.append(self._alert( + "info", + "obsidian", + "Obsidian 主记忆已接管", + "当前 PP 知识读取会优先使用 Obsidian vault,并把结果同步回 PP 缓存。", + "继续写作时会自动导出章节摘要、事实锁和关系图。", + )) + else: + alerts.append(self._alert( + "warning", + "obsidian", + "Obsidian 主记忆尚未建立", + "还没有检测到可回读的 Obsidian 长期记忆,建议先保存或采纳一章触发章后管线。", + "写入章节后等待自动同步,或检查 PLOTPILOT_OBSIDIAN_VAULT 配置。", + )) + if knowledge_graph["relationship_count"] == 0: + alerts.append(self._alert( + "warning", + "knowledge", + "关系图缺少结构化关系", + "当前知识三元组里没有明显角色/故事关系,关系图和掉线监控会偏弱。", + "在设定或正文中补充角色关系,章后管线会自动沉淀到 Obsidian。", + )) + return alerts + + def _build_continuity_alerts(self, continuity: 
dict[str, Any]) -> list[dict[str, str]]: + alerts = [] + for item in continuity.get("character_dropouts") or []: + name = str(item.get("character_name") or item.get("character_id") or "未知角色") + alerts.append(self._alert( + self._normalize_severity(item.get("severity"), default="warning"), + "continuity", + f"角色掉线:{name}", + f"{name} 已缺席 {int(item.get('chapters_absent') or 0)} 章,可能需要回收人物线或说明离场。", + "可在连续性巡检中创建候选改稿任务。", + )) + relationship_tracking = continuity.get("relationship_tracking") or {} + for item in relationship_tracking.get("stale_pairs") or []: + source = str(item.get("source_character") or "") + target = str(item.get("target_character") or "") + alerts.append(self._alert( + self._normalize_severity(item.get("severity"), default="warning"), + "continuity", + f"关系线沉默:{source} / {target}", + f"这条关系线已沉默 {int(item.get('chapters_since_joint') or 0)} 章。", + "继续写作前建议安排互动、冲突或明确暂时搁置。", + )) + voice_drift = continuity.get("voice_drift") or {} + if voice_drift.get("drift_alert"): + alerts.append(self._alert( + "warning", + "continuity", + "文风漂移提醒", + "最近章节低于口吻相似度阈值,建议先做口吻锁定或精细改稿。", + "打开口吻锁定面板校准作者样本和角色锚点。", + )) + timeline = continuity.get("timeline") or {} + timeline_conflict_count = int(timeline.get("conflict_count") or 0) + if timeline_conflict_count > 0: + has_current_event = bool(timeline.get("current_chapter_has_event")) + is_hard_blocker = timeline_conflict_count >= 3 or not has_current_event + severity = "error" if is_hard_blocker else "warning" + title = "时间线冲突提醒" if is_hard_blocker else "时间线需确认" + message = ( + "当前时间线存在冲突或顺序异常,需要先确认事件先后再继续写。" + if is_hard_blocker + else "当前时间线有可疑冲突,但本章已登记时间事件,优先作为内容复核项处理。" + ) + action = ( + "打开连续性巡检查看冲突证据。" + if is_hard_blocker + else "打开连续性巡检核对时间锚点;确认无误后可继续写作。" + ) + alerts.append(self._alert( + severity, + "continuity", + title, + message, + action, + )) + elif not timeline.get("current_chapter_has_event") and int(continuity.get("chapter_number") or 0) > 0: + alerts.append(self._alert( + "warning", + "continuity", + 
"当前章节缺少时间锚点", + "本章没有进入时间线注册表,后续容易出现时间漂移。", + "若本章发生了时间推进,请补充时间线事件。", + )) + outline = continuity.get("outline_deviation") or {} + if outline.get("status") in {"warning", "watch"}: + reasons = ";".join(outline.get("warning_reasons") or []) + alerts.append(self._alert( + "warning" if outline.get("status") == "warning" else "info", + "continuity", + "大纲偏离提醒", + reasons or "当前章节和大纲节点覆盖不完整。", + "打开连续性巡检生成结构化修稿方案。", + )) + return alerts + + def _build_power_alerts(self, power: dict[str, Any]) -> list[dict[str, str]]: + alerts = [] + for item in power.get("warnings") or []: + alerts.append(self._alert( + self._normalize_severity(item.get("severity"), default="warning"), + "power", + str(item.get("title") or "战力系统提醒"), + str(item.get("message") or "战力规则存在待确认项。"), + "打开战力系统面板补齐规则、角色限制或升级代价。", + )) + return alerts + + @staticmethod + def _build_health(alerts: list[dict[str, str]]) -> dict[str, Any]: + error_count = sum(1 for alert in alerts if alert["severity"] == "error") + warning_count = sum(1 for alert in alerts if alert["severity"] == "warning") + status = "error" if error_count else "warning" if warning_count else "ok" + score = max(0, 100 - error_count * 30 - warning_count * 12) + return { + "status": status, + "score": score, + "error_count": error_count, + "warning_count": warning_count, + "alert_count": len(alerts), + } + + @staticmethod + def _safe_call(factory): + try: + return factory() + except Exception: + return None + + @staticmethod + def _is_relationship_fact(fact: Any) -> bool: + text = " ".join( + [ + str(getattr(fact, "predicate", "") or ""), + " ".join(getattr(fact, "tags", []) or []), + ] + ) + return any(marker in text for marker in RELATIONSHIP_MARKERS) + + @staticmethod + def _normalize_severity(value: Any, default: str = "info") -> str: + severity = str(value or default).lower() + if severity == "high": + return "error" + if severity == "medium": + return "warning" + if severity in {"info", "success", "warning", "error"}: + return severity + 
return default + + @staticmethod + def _alert(severity: str, source: str, title: str, message: str, action: str) -> dict[str, str]: + return { + "severity": severity, + "source": source, + "title": title, + "message": message, + "action": action, + } diff --git a/application/analyst/services/power_system_service.py b/application/analyst/services/power_system_service.py new file mode 100644 index 000000000..8ecbde149 --- /dev/null +++ b/application/analyst/services/power_system_service.py @@ -0,0 +1,177 @@ +"""战力系统守恒服务。""" +from __future__ import annotations + +from typing import Any, Optional + + +SYSTEM_GAME_STANDARD = """系统文 / 游戏文战力规范: +1. 战力来自明确资源:等级、属性、技能熟练度、装备、血脉、职业或系统任务奖励。 +2. 升级必须有代价或条件:经验、冷却、材料、任务、风险、消耗、负面状态至少占一项。 +3. 越级胜利必须解释机制:克制、环境、情报差、一次性底牌、队友配合或对方限制。 +4. 数值只服务剧情,不得随章临时发明新规则;新系统必须先登记规则再生效。 +5. Boss 与副本要有门槛、奖励和失败代价,避免无损刷级导致战力通胀。""" + + +DEFAULT_TIER_SCHEMA = """凡人/新手 < 入门/黑铁 < 熟练/青铜 < 精英/白银 < 统领/黄金 < 领主/铂金 < 传奇/钻石 < 超凡/史诗 < 神话""" + + +class PowerSystemService: + """管理战力规则、角色战力档案和崩坏风险提示。""" + + def __init__(self, repository): + self.repository = repository + + def get_overview(self, novel_id: str) -> dict[str, Any]: + rules = self.repository.get_rules(novel_id) + profiles = self.repository.list_profiles(novel_id) + events = self.repository.list_events(novel_id, limit=30) + warnings = self._build_warnings(rules, profiles, events) + + return { + "novel_id": novel_id, + "standard": SYSTEM_GAME_STANDARD, + "rules": rules or self._default_rules(novel_id), + "profiles": profiles, + "recent_events": events, + "warnings": warnings, + } + + def upsert_rules( + self, + *, + novel_id: str, + genre_type: str = "system_game", + tier_schema: str = "", + core_rules: str = "", + taboo_rules: str = "", + escalation_rules: str = "", + ) -> dict[str, Any]: + return self.repository.upsert_rules( + novel_id=novel_id, + genre_type=genre_type or "system_game", + tier_schema=tier_schema or DEFAULT_TIER_SCHEMA, + core_rules=core_rules or SYSTEM_GAME_STANDARD, + 
taboo_rules=taboo_rules or "禁止无代价越级、禁止临时新增隐藏规则、禁止同阶数值忽高忽低。", + escalation_rules=escalation_rules or "每次大升级必须有铺垫、代价、验证战与后遗症记录。", + ) + + def upsert_profile( + self, + *, + novel_id: str, + character_name: str, + tier: str = "", + rank_score: int = 0, + abilities: str = "", + limitations: str = "", + growth_stage: str = "", + last_verified_chapter: Optional[int] = None, + notes: str = "", + ) -> dict[str, Any]: + return self.repository.upsert_profile( + novel_id=novel_id, + character_name=character_name.strip(), + tier=tier.strip(), + rank_score=int(rank_score), + abilities=abilities.strip(), + limitations=limitations.strip(), + growth_stage=growth_stage.strip(), + last_verified_chapter=last_verified_chapter, + notes=notes.strip(), + ) + + def create_event( + self, + *, + novel_id: str, + chapter_number: int, + character_name: str, + event_type: str = "battle", + opponent: str = "", + outcome: str = "", + power_delta: int = 0, + evidence: str = "", + ) -> dict[str, Any]: + return self.repository.create_event( + novel_id=novel_id, + chapter_number=int(chapter_number), + character_name=character_name.strip(), + event_type=event_type.strip() or "battle", + opponent=opponent.strip(), + outcome=outcome.strip(), + power_delta=int(power_delta), + evidence=evidence.strip(), + ) + + @staticmethod + def _default_rules(novel_id: str) -> dict[str, Any]: + return { + "id": "", + "novel_id": novel_id, + "genre_type": "system_game", + "tier_schema": DEFAULT_TIER_SCHEMA, + "core_rules": SYSTEM_GAME_STANDARD, + "taboo_rules": "禁止无代价越级、禁止临时新增隐藏规则、禁止同阶数值忽高忽低。", + "escalation_rules": "每次大升级必须有铺垫、代价、验证战与后遗症记录。", + "created_at": "", + "updated_at": "", + } + + def _build_warnings( + self, + rules: Optional[dict[str, Any]], + profiles: list[dict[str, Any]], + events: list[dict[str, Any]], + ) -> list[dict[str, Any]]: + warnings: list[dict[str, Any]] = [] + + if not rules: + warnings.append({ + "severity": "warning", + "title": "尚未固化战力规则", + "message": 
"建议先保存境界/等级表、升级代价和禁忌规则,否则系统文或游戏文后期容易临时加设定。", + }) + else: + for key, title in ( + ("tier_schema", "境界/等级表为空"), + ("core_rules", "核心战力规则为空"), + ("taboo_rules", "禁忌规则为空"), + ): + if not str(rules.get(key) or "").strip(): + warnings.append({ + "severity": "warning", + "title": title, + "message": "请补齐后再让外部模型或自动生成大规模战斗章节。", + }) + + for profile in profiles: + if int(profile.get("rank_score") or 0) >= 80 and not str(profile.get("limitations") or "").strip(): + warnings.append({ + "severity": "error", + "title": f"{profile.get('character_name')} 缺少高战力限制", + "message": "高战力角色必须登记弱点、消耗、冷却或行动约束,否则容易无解化。", + }) + + for event in events: + delta = int(event.get("power_delta") or 0) + evidence = str(event.get("evidence") or "") + outcome = str(event.get("outcome") or "") + if delta >= 3: + warnings.append({ + "severity": "error", + "title": f"第{event.get('chapter_number')}章战力跳升过快", + "message": f"{event.get('character_name')} 单次变化 +{delta},建议补铺垫、代价或拆成多章成长。", + }) + if ("胜" in outcome or "击败" in outcome) and delta >= 2 and not self._has_cost_marker(evidence): + warnings.append({ + "severity": "warning", + "title": f"第{event.get('chapter_number')}章疑似无代价越级", + "message": "胜利事件缺少代价/限制/克制说明,建议补充受伤、消耗、底牌或环境优势。", + }) + + return warnings[:12] + + @staticmethod + def _has_cost_marker(text: str) -> bool: + markers = ("代价", "消耗", "冷却", "受伤", "重伤", "反噬", "克制", "环境", "底牌", "队友", "限制", "失败") + return any(marker in text for marker in markers) diff --git a/application/analyst/services/prop_ledger_service.py b/application/analyst/services/prop_ledger_service.py new file mode 100644 index 000000000..bf0a5261d --- /dev/null +++ b/application/analyst/services/prop_ledger_service.py @@ -0,0 +1,361 @@ +"""道具账本服务。""" +from __future__ import annotations + +import re +from typing import Any, Optional + + +DISCOVERABLE_PROP_CATEGORIES: dict[str, str] = { + "录音笔": "证物", + "钥匙": "钥匙", + "信": "信物", + "信件": "信物", + "遗书": "信物", + "照片": "证物", + "相片": "证物", + "玉佩": "信物", + "戒指": "信物", + "项链": "信物", + "芯片": 
"证物", + "U盘": "证物", + "u盘": "证物", + "账本": "证物", + "名单": "证物", + "合同": "证物", + "药瓶": "药物", + "药剂": "药物", + "匕首": "武器", + "短刀": "武器", + "手枪": "武器", + "令牌": "信物", + "徽章": "信物", + "地图": "线索", + "档案": "证物", + "盒子": "容器", + "匣子": "容器", +} + + +class PropLedgerService: + """管理关键道具的当前状态与历史事件。""" + + def __init__(self, repository): + self.repository = repository + + def get_overview(self, novel_id: str) -> dict[str, Any]: + items = self.repository.list_items(novel_id) + events = self.repository.list_events(novel_id, limit=50) + return { + "novel_id": novel_id, + "items": items, + "recent_events": events, + "warnings": self._build_warnings(items), + } + + def upsert_item( + self, + *, + novel_id: str, + name: str, + category: str = "", + status: str = "", + current_holder: str = "", + current_location: str = "", + first_seen_chapter: Optional[int] = None, + last_seen_chapter: Optional[int] = None, + importance: str = "normal", + description: str = "", + notes: str = "", + ) -> dict[str, Any]: + clean_name = name.strip() + if not clean_name: + raise ValueError("prop name is required") + first_seen = self._positive_or_none(first_seen_chapter) + last_seen = self._positive_or_none(last_seen_chapter) or first_seen + return self.repository.upsert_item( + novel_id=novel_id, + name=clean_name, + category=category.strip(), + status=status.strip(), + current_holder=current_holder.strip(), + current_location=current_location.strip(), + first_seen_chapter=first_seen, + last_seen_chapter=last_seen, + importance=self._normalize_importance(importance), + description=description.strip(), + notes=notes.strip(), + ) + + def create_event( + self, + *, + novel_id: str, + prop_name: str, + chapter_number: int, + event_type: str = "mention", + holder: str = "", + location: str = "", + status: str = "", + evidence: str = "", + notes: str = "", + ) -> dict[str, Any]: + clean_name = prop_name.strip() + if not clean_name: + raise ValueError("prop name is required") + chapter = int(chapter_number) + 
if chapter < 1: + raise ValueError("chapter_number must be greater than 0") + item = self.repository.get_item_by_name(novel_id, clean_name) + if item is None: + item = self.upsert_item( + novel_id=novel_id, + name=clean_name, + status=status, + current_holder=holder, + current_location=location, + first_seen_chapter=chapter, + last_seen_chapter=chapter, + ) + return self.repository.create_event( + novel_id=novel_id, + prop_id=item["id"], + prop_name=item["name"], + chapter_number=chapter, + event_type=event_type.strip() or "mention", + holder=holder.strip(), + location=location.strip(), + status=status.strip(), + evidence=evidence.strip(), + notes=notes.strip(), + ) + + def suggest_events_from_chapter( + self, + *, + novel_id: str, + chapter_number: int, + content: str, + ) -> list[dict[str, Any]]: + """从章节正文中提示可能需要人工确认的道具事件。""" + chapter = int(chapter_number) + if chapter < 1: + raise ValueError("chapter_number must be greater than 0") + clean_content = (content or "").strip() + if not clean_content: + return [] + + suggestions: list[dict[str, Any]] = [] + items = self.repository.list_items(novel_id) + known_names: set[str] = set() + for item in items: + name = str(item.get("name") or "").strip() + known_names.add(name) + if not name or name not in clean_content: + continue + evidence = self._build_evidence_snippet(clean_content, name) + event_type, status, reason, confidence = self._classify_event(evidence) + suggestions.append({ + "prop_name": name, + "chapter_number": chapter, + "event_type": event_type, + "status": status or str(item.get("status") or ""), + "holder": "", + "location": self._extract_location(evidence), + "evidence": evidence, + "reason": reason, + "confidence": confidence, + "is_new_prop": False, + "category": str(item.get("category") or ""), + "importance": str(item.get("importance") or "normal"), + }) + suggestions.extend(self._discover_new_prop_suggestions( + content=clean_content, + chapter_number=chapter, + known_names=known_names, + )) + 
return suggestions[:12] + + @staticmethod + def _positive_or_none(value: Optional[int]) -> Optional[int]: + if value is None: + return None + parsed = int(value) + return parsed if parsed > 0 else None + + @staticmethod + def _normalize_importance(value: str) -> str: + return value if value in {"major", "normal", "minor"} else "normal" + + @staticmethod + def _build_evidence_snippet(content: str, prop_name: str, radius: int = 36) -> str: + index = content.find(prop_name) + if index < 0: + return "" + start = max(0, index - radius) + end = min(len(content), index + len(prop_name) + radius) + return content[start:end].strip() + + @staticmethod + def _classify_event(evidence: str) -> tuple[str, str, str, float]: + rules = [ + ( + "sealed", + "被封存", + 0.82, + "正文出现已登记道具,并命中封存/证物相关表达。", + ("封存", "证物袋", "证物柜", "保险柜", "锁进", "收押"), + ), + ( + "lost_or_broken", + "疑似丢失/损坏", + 0.78, + "正文出现已登记道具,并命中丢失/损坏相关表达。", + ("丢失", "不见", "遗失", "摔碎", "碎裂", "折断", "损坏", "烧毁"), + ), + ( + "transfer", + "疑似转交", + 0.74, + "正文出现已登记道具,并命中转交相关表达。", + ("递给", "交给", "交到", "递到", "塞给", "转交", "给了"), + ), + ( + "use", + "已使用", + 0.70, + "正文出现已登记道具,并命中使用相关表达。", + ("使用", "打开", "启动", "点燃", "按下", "照亮", "刺入", "割开", "解开"), + ), + ( + "acquire", + "被取得", + 0.68, + "正文出现已登记道具,并命中取得/带走相关表达。", + ("拿到", "拿起", "取出", "接过", "收下", "获得", "捡起", "握住", "攥住", "带走"), + ), + ] + for event_type, status, confidence, reason, keywords in rules: + if any(keyword in evidence for keyword in keywords): + return event_type, status, reason, confidence + return "mention", "", "正文提到已登记道具,建议确认当前状态是否变化。", 0.48 + + @classmethod + def _discover_new_prop_suggestions( + cls, + *, + content: str, + chapter_number: int, + known_names: set[str], + ) -> list[dict[str, Any]]: + suggestions: list[dict[str, Any]] = [] + seen: set[str] = set() + for sentence in cls._split_sentences(content): + event_type, status, _, confidence = cls._classify_event(sentence) + if event_type == "mention": + continue + for prop_name, category in 
cls._extract_plot_prop_names(sentence): + if prop_name in known_names or prop_name in seen: + continue + seen.add(prop_name) + suggestions.append({ + "prop_name": prop_name, + "chapter_number": chapter_number, + "event_type": event_type, + "status": status, + "holder": "", + "location": cls._extract_location(sentence), + "evidence": sentence, + "reason": cls._new_prop_reason(event_type), + "confidence": round(max(0.5, confidence - 0.06), 2), + "is_new_prop": True, + "category": category, + "importance": "major" if category in {"证物", "钥匙", "信物", "武器"} else "normal", + }) + return suggestions + + @staticmethod + def _split_sentences(content: str) -> list[str]: + return [ + part.strip() + for part in re.findall(r"[^。!?!?\n]+[。!?!?]?", content) + if part.strip() + ] + + @staticmethod + def _extract_plot_prop_names(sentence: str) -> list[tuple[str, str]]: + results: list[tuple[str, str]] = [] + ordered_items = sorted( + DISCOVERABLE_PROP_CATEGORIES.items(), + key=lambda item: len(item[0]), + reverse=True, + ) + for noun, category in ordered_items: + escaped = re.escape(noun) + patterns = ( + rf"((?:[\u4e00-\u9fff]{{1,6}}的){escaped})", + rf"([\u4e00-\u9fff]{{1,4}}{escaped})", + rf"({escaped})", + ) + for pattern in patterns: + for match in re.finditer(pattern, sentence): + name = PropLedgerService._clean_discovered_name(match.group(1)) + if name and not any(existing == name for existing, _ in results): + results.append((name, category)) + if results and results[-1][0].endswith(noun): + break + return results + + @staticmethod + def _clean_discovered_name(name: str) -> str: + cleaned = re.sub(r"^[她他它我你们的了把将又再正刚才却并便就那这一个一只一枚一张一支一把一封]+", "", name) + return cleaned.strip(" ,。;;、") + + @staticmethod + def _new_prop_reason(event_type: str) -> str: + labels = { + "sealed": "封存/证物", + "lost_or_broken": "丢失/损坏", + "transfer": "转交", + "use": "使用", + "acquire": "取得/带走", + } + label = labels.get(event_type, "剧情动作") + return f"正文出现疑似关键新道具,并命中{label}相关表达。" + + @staticmethod + def 
_extract_location(evidence: str) -> str: + patterns = [ + r"(警局证物柜|证物柜|保险柜|证物袋)", + r"(?:锁进|放进|装进|塞进|收入|放入)([^,。;;、\s]{0,20}(?:柜|箱|袋|盒|室|库|房|抽屉))", + ] + matches: list[str] = [] + for pattern in patterns: + matches.extend(match.group(1) for match in re.finditer(pattern, evidence)) + if not matches: + return "" + return max(matches, key=len) + + @staticmethod + def _build_warnings(items: list[dict[str, Any]]) -> list[dict[str, str]]: + warnings: list[dict[str, str]] = [] + if not items: + warnings.append({ + "severity": "info", + "title": "尚未登记关键道具", + "message": "把钥匙、信物、武器、证物、一次性底牌等先登记,后续章节更不容易写丢。", + }) + return warnings + for item in items: + if item.get("importance") == "major" and not item.get("last_seen_chapter"): + warnings.append({ + "severity": "warning", + "title": f"{item.get('name')} 缺少最近章节", + "message": "重要道具建议记录首次/最近出现章节,方便后续回收或再次使用。", + }) + if item.get("importance") == "major" and not item.get("current_holder") and not item.get("current_location"): + warnings.append({ + "severity": "warning", + "title": f"{item.get('name')} 去向不明", + "message": "重要道具最好至少登记持有人或当前位置。", + }) + return warnings[:8] diff --git a/application/audit/services/cliche_scanner.py b/application/audit/services/cliche_scanner.py index ab0385ee3..92cd68d28 100644 --- a/application/audit/services/cliche_scanner.py +++ b/application/audit/services/cliche_scanner.py @@ -45,6 +45,23 @@ class ClicheHit: # 一抹系列 (r"(嘴角|脸上|眼中).{0,3}(浮现|闪过|掠过)一抹", "一抹系列"), + + # 氛围凝固系列 + (r"(空气|时间).{0,4}(仿佛|好像|像是)?(凝固|停止|静止)", "氛围凝固系列"), + + # 模糊心理系列 + (r"(某种|一种).{0,8}(说不清|难以言喻|无法形容).{0,8}(情绪|感觉|东西)", "模糊心理系列"), + + # 宿命总结系列 + (r"(再也回不去|改变了.{0,8}命运|命运的齿轮|一切才刚刚开始)", "宿命总结系列"), + + # 抽象转折系列 + (r"(有什么东西|某些东西).{0,6}(碎了|变了|坍塌了|崩塌了)", "抽象转折系列"), + + # 压缩表达系列 + (r"(一番|经过).{0,6}(交谈|解释|讨论|沟通|交流)后", "压缩表达系列"), + (r"(很快|最终|随后).{0,8}(达成共识|说明了情况|解释清楚|解决了问题)", "压缩表达系列"), + (r"(简单|简短|大致).{0,4}(说明|解释|交代).{0,6}(情况|经过|缘由|来龙去脉)", "压缩表达系列"), ] diff --git a/application/blueprint/services/story_structure_service.py 
b/application/blueprint/services/story_structure_service.py index 69ee1bca4..ab6341d28 100644 --- a/application/blueprint/services/story_structure_service.py +++ b/application/blueprint/services/story_structure_service.py @@ -89,6 +89,9 @@ def _enrich_flat_chapter_nodes(self, novel_id: str, nodes: List[Dict[str, Any]]) async def get_tree(self, novel_id: str) -> Dict[str, Any]: """获取小说的完整结构树""" + # 同步 chapters 表中缺失的章节节点到 story_nodes 表 + await self._sync_orphan_chapters_to_nodes(novel_id) + tree = await self.repository.get_tree(novel_id) data = tree.to_tree_dict() self._enrich_chapter_nodes_from_chapters_table(novel_id, data.get("nodes") or []) @@ -97,6 +100,80 @@ async def get_tree(self, novel_id: str) -> Dict[str, Any]: "tree": data, } + async def _sync_orphan_chapters_to_nodes(self, novel_id: str) -> None: + """将 chapters 表中存在但 story_nodes 表中缺失的章节同步到 story_nodes 表""" + if not self._chapter_repository: + return + + try: + # 获取所有章节 + chapters = self._chapter_repository.list_by_novel(NovelId(novel_id)) + if not chapters: + return + + # 获取现有的章节节点 + all_nodes = await self.repository.get_by_novel(novel_id) + existing_chapter_nums = { + n.number for n in all_nodes if n.node_type.value == "chapter" and n.number is not None + } + + # 为缺失的章节创建节点 + orphan_chapters = [c for c in chapters if c.number not in existing_chapter_nums] + if not orphan_chapters: + return + + # 找到合适的父节点:优先找最后一个幕,其次最后一个卷,最后为 None(顶级) + act_nodes = sorted( + [n for n in all_nodes if n.node_type.value == "act"], + key=lambda n: n.number or 0 + ) + volume_nodes = sorted( + [n for n in all_nodes if n.node_type.value == "volume"], + key=lambda n: n.number or 0 + ) + + # 根据章节号分配到合适的幕 + from domain.structure.story_node import StoryNode, NodeType, PlanningStatus, PlanningSource + import logging + logger = logging.getLogger(__name__) + + for chapter in sorted(orphan_chapters, key=lambda c: c.number): + # 尝试找到包含这个章节号的幕节点 + parent_id = None + for act in act_nodes: + if act.chapter_start and act.chapter_end: + if 
act.chapter_start <= chapter.number <= act.chapter_end: + parent_id = act.id + break + + # 如果没有找到匹配的幕,放到最后一个幕下面 + if parent_id is None and act_nodes: + parent_id = act_nodes[-1].id + elif parent_id is None and volume_nodes: + parent_id = volume_nodes[-1].id + + node_id = f"chapter-{novel_id}-chapter-{chapter.number}" + node = StoryNode( + id=node_id, + novel_id=novel_id, + parent_id=parent_id, + node_type=NodeType.CHAPTER, + number=chapter.number, + title=chapter.title or f"第{chapter.number}章", + description="", + order_index=chapter.number - 1, + planning_status=PlanningStatus.CONFIRMED, + planning_source=PlanningSource.MANUAL, + word_count=chapter.word_count.value if hasattr(chapter.word_count, "value") else chapter.word_count, + status=chapter.status.value if hasattr(chapter.status, "value") else chapter.status, + ) + await self.repository.save(node) + logger.info(f"[StoryStructure] 已同步孤儿章节到 story_nodes: 第{chapter.number}章") + + except Exception as e: + import logging + logging.getLogger(__name__).warning(f"_sync_orphan_chapters_to_nodes 失败: {e}") + async def get_children(self, novel_id: str, parent_id: Optional[str] = None) -> List[Dict[str, Any]]: """获取子节点(用于渐进式加载)""" nodes = await self.repository.get_children(parent_id) diff --git a/application/codex/chronicles_service.py b/application/codex/chronicles_service.py index edd3cf3a0..bfd2ad1ba 100644 --- a/application/codex/chronicles_service.py +++ b/application/codex/chronicles_service.py @@ -8,6 +8,7 @@ logger = logging.getLogger(__name__) _CHAPTER_IN_TEXT = re.compile(r"第\s*(\d+)\s*章") +_CANDIDATE_ACCEPT_IN_NAME = re.compile(r"^\[候选稿采纳\]\s*第\s*\d+\s*章\s*[·•]\s*(.+?)\s*$") def infer_chapter_from_texts(*parts: str) -> Optional[int]: @@ -28,6 +29,20 @@ def anchor_chapter_from_pointers( return max(nums) if nums else None +def infer_snapshot_origin(name: str) -> Dict[str, Optional[str]]: + raw_name = (name or "").strip() + match = _CANDIDATE_ACCEPT_IN_NAME.match(raw_name) + if match: + return { + "origin_type": 
"candidate_accept", + "candidate_source": match.group(1).strip() or None, + } + return { + "origin_type": "snapshot", + "candidate_source": None, + } + + def build_chronicles_rows( timeline_notes: List[Tuple[str, str, str, str]], snapshots: List[Dict[str, Any]], @@ -77,6 +92,7 @@ def ensure(ch: int) -> Dict[str, List]: "created_at": snap.get("created_at"), "description": (snap.get("description") or "").strip() or None, "anchor_chapter": anchor, + **infer_snapshot_origin(snap.get("name") or ""), } ) diff --git a/application/core/dtos/chapter_candidate_draft_dto.py b/application/core/dtos/chapter_candidate_draft_dto.py new file mode 100644 index 000000000..2b501ad1d --- /dev/null +++ b/application/core/dtos/chapter_candidate_draft_dto.py @@ -0,0 +1,36 @@ +"""Chapter candidate draft DTO.""" +from dataclasses import dataclass +from typing import Any, Dict + + +@dataclass +class ChapterCandidateDraftDTO: + id: str + novel_id: str + chapter_number: int + branch_name: str + source: str + status: str + title: str + content: str + rationale: str + metadata: Dict[str, Any] + created_at: str + updated_at: str + + @classmethod + def from_dict(cls, data: Dict[str, Any]) -> "ChapterCandidateDraftDTO": + return cls( + id=data["id"], + novel_id=data["novel_id"], + chapter_number=int(data["chapter_number"]), + branch_name=data.get("branch_name") or "main", + source=data["source"], + status=data.get("status") or "draft", + title=data.get("title") or "", + content=data.get("content") or "", + rationale=data.get("rationale") or "", + metadata=data.get("metadata") or {}, + created_at=data.get("created_at") or "", + updated_at=data.get("updated_at") or "", + ) diff --git a/application/core/dtos/novel_dto.py b/application/core/dtos/novel_dto.py index bc76dbac0..c51088427 100644 --- a/application/core/dtos/novel_dto.py +++ b/application/core/dtos/novel_dto.py @@ -75,6 +75,7 @@ class NovelDTO: premise: str chapters: List[ChapterDTO] total_word_count: int + slug: str = "" has_bible: bool = 
False has_outline: bool = False autopilot_status: str = "stopped" @@ -105,6 +106,7 @@ def from_domain(cls, novel: 'Novel') -> 'NovelDTO': return cls( id=novel.novel_id.value, + slug=getattr(novel, 'slug', novel.novel_id.value) or novel.novel_id.value, title=novel.title, author=novel.author, target_chapters=novel.target_chapters, diff --git a/application/core/services/chapter_candidate_draft_service.py b/application/core/services/chapter_candidate_draft_service.py new file mode 100644 index 000000000..7911ea9f2 --- /dev/null +++ b/application/core/services/chapter_candidate_draft_service.py @@ -0,0 +1,268 @@ +"""Chapter candidate draft service.""" +from __future__ import annotations + +from difflib import SequenceMatcher +from typing import Any, Dict, List, Optional + +from application.core.dtos.chapter_candidate_draft_dto import ChapterCandidateDraftDTO +from domain.shared.exceptions import EntityNotFoundError + + +class ChapterCandidateDraftService: + def __init__(self, repository, chapter_service): + self.repository = repository + self.chapter_service = chapter_service + + def create_draft( + self, + *, + novel_id: str, + chapter_number: int, + source: str, + title: str, + content: str, + rationale: str = "", + metadata: Optional[Dict[str, Any]] = None, + branch_name: str = "main", + ) -> ChapterCandidateDraftDTO: + draft = self.repository.create( + novel_id=novel_id, + chapter_number=chapter_number, + source=source, + title=title, + content=content, + rationale=rationale, + metadata=metadata or {}, + branch_name=branch_name, + ) + return ChapterCandidateDraftDTO.from_dict(draft) + + def list_drafts( + self, + novel_id: str, + chapter_number: int, + *, + branch_name: Optional[str] = None, + ) -> List[ChapterCandidateDraftDTO]: + return [ + ChapterCandidateDraftDTO.from_dict(item) + for item in self.repository.list_by_chapter( + novel_id, + chapter_number, + branch_name=branch_name, + ) + ] + + def get_draft(self, draft_id: str) -> ChapterCandidateDraftDTO: + 
draft = self.repository.get(draft_id) + if not draft: + raise EntityNotFoundError("ChapterCandidateDraft", draft_id) + return ChapterCandidateDraftDTO.from_dict(draft) + + def reject_draft(self, draft_id: str) -> ChapterCandidateDraftDTO: + draft = self.repository.get(draft_id) + if not draft: + raise EntityNotFoundError("ChapterCandidateDraft", draft_id) + updated = self.repository.update_status(draft_id, "rejected") + return ChapterCandidateDraftDTO.from_dict(updated) + + def compare_with_primary(self, draft_id: str) -> Dict[str, Any]: + draft = self.repository.get(draft_id) + if not draft: + raise EntityNotFoundError("ChapterCandidateDraft", draft_id) + + chapter = self.chapter_service.get_chapter_by_novel_and_number( + draft["novel_id"], + int(draft["chapter_number"]), + ) + primary_content = getattr(chapter, "content", "") if chapter else "" + candidate_content = str(draft.get("content") or "") + primary_paragraphs = self._split_paragraphs(primary_content) + candidate_paragraphs = self._split_paragraphs(candidate_content) + + return { + "draft": ChapterCandidateDraftDTO.from_dict(draft), + "primary_word_count": len(primary_content.strip()), + "candidate_word_count": len(candidate_content.strip()), + "similarity": round(SequenceMatcher(None, primary_content, candidate_content).ratio(), 3) + if primary_content or candidate_content + else 1.0, + "paragraphs": self._build_paragraph_compare(primary_paragraphs, candidate_paragraphs), + } + + def list_branch_summaries(self, novel_id: str, chapter_number: int) -> List[Dict[str, Any]]: + return [ + { + "branch_name": row.get("branch_name") or "main", + "draft_count": int(row.get("draft_count") or 0), + "accepted_count": int(row.get("accepted_count") or 0), + "updated_at": row.get("updated_at") or "", + } + for row in self.repository.list_branches(novel_id, chapter_number) + ] + + def merge_branch_to_candidate( + self, + *, + novel_id: str, + chapter_number: int, + source_branch: str, + target_branch: str = "main", + 
rule: str = "latest_candidate", + ) -> ChapterCandidateDraftDTO: + source_branch = (source_branch or "").strip() + target_branch = (target_branch or "main").strip() or "main" + if not source_branch: + raise ValueError("source_branch is required") + if source_branch == target_branch: + raise ValueError("source_branch and target_branch must be different") + + source = self.repository.get_latest_by_branch(novel_id, chapter_number, source_branch) + if not source: + raise EntityNotFoundError("BranchCandidateDraft", source_branch) + + merged = self.repository.create( + novel_id=novel_id, + chapter_number=chapter_number, + source="branch-merge", + title=f"合并 {source_branch} → {target_branch}:{source.get('title') or f'第{chapter_number}章'}", + content=source.get("content") or "", + rationale=( + f"按规则「{rule}」从分支「{source_branch}」合并到「{target_branch}」。" + "合并结果仍作为候选稿,需要作者采纳后才进入主稿。" + ), + metadata={ + **(source.get("metadata") or {}), + "merge_rule": rule, + "merge_source_branch": source_branch, + "merge_target_branch": target_branch, + "merge_source_draft_id": source.get("id") or "", + }, + branch_name=target_branch, + ) + return ChapterCandidateDraftDTO.from_dict(merged) + + def build_branch_memory_diff( + self, + *, + novel_id: str, + chapter_number: int, + source_branch: str, + target_branch: str = "main", + ) -> Dict[str, Any]: + source_drafts = self.repository.list_by_chapter( + novel_id, + chapter_number, + branch_name=source_branch, + ) + target_drafts = self.repository.list_by_chapter( + novel_id, + chapter_number, + branch_name=target_branch, + ) + source_latest = source_drafts[0] if source_drafts else None + target_latest = target_drafts[0] if target_drafts else None + source_content = str((source_latest or {}).get("content") or "") + target_content = str((target_latest or {}).get("content") or "") + + return { + "novel_id": novel_id, + "chapter_number": chapter_number, + "source_branch": source_branch, + "target_branch": target_branch, + "source_draft_count": 
len(source_drafts), + "target_draft_count": len(target_drafts), + "source_latest_draft_id": (source_latest or {}).get("id") or "", + "target_latest_draft_id": (target_latest or {}).get("id") or "", + "similarity": round(SequenceMatcher(None, source_content, target_content).ratio(), 3) + if source_content or target_content + else 1.0, + "memory_impacts": self._infer_memory_impacts(source_drafts, source_content, target_content), + } + + def accept_draft_as_primary(self, draft_id: str) -> Dict[str, Any]: + draft = self.repository.get(draft_id) + if not draft: + raise EntityNotFoundError("ChapterCandidateDraft", draft_id) + + chapter_title = (draft.get("title") or "").strip() or f"第{draft['chapter_number']}章" + self.chapter_service.ensure_chapter( + draft["novel_id"], + int(draft["chapter_number"]), + chapter_title, + ) + chapter = self.chapter_service.update_chapter_by_novel_and_number( + draft["novel_id"], + int(draft["chapter_number"]), + draft["content"], + ) + updated_draft = self.repository.update_status(draft_id, "accepted") or {} + merged_draft = {**draft, **updated_draft} + return { + "draft": ChapterCandidateDraftDTO.from_dict(merged_draft), + "chapter": chapter, + } + + @staticmethod + def _split_paragraphs(content: str) -> List[str]: + return [part.strip() for part in str(content or "").split("\n\n") if part.strip()] + + @staticmethod + def _build_paragraph_compare(primary: List[str], candidate: List[str]) -> List[Dict[str, Any]]: + items: List[Dict[str, Any]] = [] + max_len = max(len(primary), len(candidate)) + for index in range(max_len): + left = primary[index] if index < len(primary) else "" + right = candidate[index] if index < len(candidate) else "" + if left and right: + similarity = round(SequenceMatcher(None, left, right).ratio(), 3) + change_type = "unchanged" if similarity >= 0.98 else "modified" + elif right: + similarity = 0.0 + change_type = "added" + else: + similarity = 0.0 + change_type = "removed" + items.append( + { + "index": index, + 
"type": change_type, + "primary": left, + "candidate": right, + "similarity": similarity, + } + ) + return items + + @staticmethod + def _infer_memory_impacts( + drafts: List[Dict[str, Any]], + source_content: str, + target_content: str, + ) -> List[Dict[str, str]]: + metadata_keys = { + str(key) + for draft in drafts + for key in (draft.get("metadata") or {}).keys() + } + text = f"{source_content}\n{target_content}" + impacts: List[Dict[str, str]] = [] + + def add(label: str, level: str, detail: str) -> None: + impacts.append({"label": label, "level": level, "detail": detail}) + + if "external_model" in metadata_keys or "external_prompt" in metadata_keys: + add("外部模型稿", "info", "该分支包含外部/直连模型产出的候选稿,需要确认事实与口吻。") + if "partial_source_draft_id" in metadata_keys: + add("部分采纳", "warning", "该分支包含部分采纳稿,建议确认段落衔接和上下文连续。") + if "rewrite_task_id" in metadata_keys: + add("改稿任务结果", "info", "该分支包含按任务生成的改稿结果,建议核对任务约束是否完整落实。") + if any(token in text for token in ("关系", "和解", "决裂", "信任", "背叛", "暧昧")): + add("角色关系", "warning", "文本疑似涉及关系变化,合并前建议检查连续性关系事件。") + if any(token in text for token in ("伏笔", "秘密", "真相", "线索", "预言")): + add("伏笔状态", "warning", "文本疑似涉及伏笔或线索,合并前建议检查伏笔账本。") + if any(token in text for token in ("突破", "升级", "境界", "战力", "技能")): + add("战力状态", "warning", "文本疑似涉及战力变化,合并前建议检查战力系统。") + if not impacts: + add("正文事实", "info", "未检测到显著结构化风险,仍建议按候选稿 diff 逐段确认。") + return impacts diff --git a/application/core/services/chapter_service.py b/application/core/services/chapter_service.py index df68db7d0..fbb4c4064 100644 --- a/application/core/services/chapter_service.py +++ b/application/core/services/chapter_service.py @@ -218,6 +218,17 @@ def save_chapter_review( if chapter is None: raise EntityNotFoundError("Chapter", f"{novel_id}/chapter-{chapter_number}") + # 同步更新章节状态:approved -> completed, reviewed -> reviewing + status_to_chapter_status = { + "approved": ChapterStatus.COMPLETED, + "reviewed": ChapterStatus.REVIEWING, + "draft": ChapterStatus.DRAFT, + } + new_chapter_status = 
status_to_chapter_status.get(status) + if new_chapter_status and chapter.status != new_chapter_status: + chapter.status = new_chapter_status + self.chapter_repository.save(chapter) + # 使用数据库 repository if self.chapter_review_repository: return self.chapter_review_repository.upsert( diff --git a/application/core/services/novel_service.py b/application/core/services/novel_service.py index 862d2ed3c..af0227be6 100644 --- a/application/core/services/novel_service.py +++ b/application/core/services/novel_service.py @@ -174,6 +174,13 @@ def _check_has_bible(self, novel_id: str) -> bool: return False def _check_has_outline(self, novel_id: str) -> bool: + storage = getattr(self.novel_repository, "storage", None) + if storage is not None and hasattr(storage, "exists"): + try: + return bool(storage.exists(f"novels/{novel_id}/outline.json")) + except Exception: + pass + if not self.story_node_repository: return False try: @@ -190,7 +197,13 @@ def list_novels(self) -> List[NovelDTO]: NovelDTO 列表 """ novels = self.novel_repository.list_all() - return [NovelDTO.from_domain(self._hydrate_chapters(novel)) for novel in novels] + dtos = [] + for novel in novels: + dto = NovelDTO.from_domain(self._hydrate_chapters(novel)) + dto.has_bible = self._check_has_bible(novel.novel_id.value) + dto.has_outline = self._check_has_outline(novel.novel_id.value) + dtos.append(dto) + return dtos def delete_novel(self, novel_id: str) -> None: """删除小说 diff --git a/application/engine/services/autopilot_daemon.py b/application/engine/services/autopilot_daemon.py index ec60a3244..66f5d72b7 100644 --- a/application/engine/services/autopilot_daemon.py +++ b/application/engine/services/autopilot_daemon.py @@ -330,6 +330,13 @@ async def _handle_act_planning(self, novel: Novel): novel_id = novel.novel_id.value target_act_number = novel.current_act + 1 # 1-indexed + # 提前计算结构推荐参数,供后续多处使用(避免动态幕生成失败时变量未定义) + from application.blueprint.services.continuous_planning_service import calculate_structure_params + 
target_chapters = novel.target_chapters or 100 + struct_params = calculate_structure_params(target_chapters) + rec_chapters_per_act = struct_params["chapters_per_act"] + rec_acts_per_volume = struct_params["acts_per_volume"] + all_nodes = await self.story_node_repo.get_by_novel(novel_id) act_nodes = sorted( [n for n in all_nodes if n.node_type.value == "act"], @@ -346,13 +353,6 @@ async def _handle_act_planning(self, novel: Novel): key=lambda n: n.number ) - # 使用结构计算引擎获取推荐参数(替代硬编码的 // 3) - from application.blueprint.services.continuous_planning_service import calculate_structure_params - target_chapters = novel.target_chapters or 100 - struct_params = calculate_structure_params(target_chapters) - rec_chapters_per_act = struct_params["chapters_per_act"] - rec_acts_per_volume = struct_params["acts_per_volume"] - # 智能父卷选择:优先让当前卷填满(达到 rec_acts_per_volume 幕),再跳下一卷 parent_volume = self._find_parent_volume_for_new_act( volume_nodes=volume_nodes, @@ -653,7 +653,11 @@ async def _handle_writing(self, novel: Novel): voice_anchors=voice_anchors, chapter_draft_so_far=chapter_content, ) - max_tokens = int(beat.target_words * 1.5) + # 字数控制策略: + # - prompt 中要求目标的 75%(在 context_builder 中处理) + # - max_tokens = prompt 目标 × 1.1(硬性上限,超出会被截断) + # - 最终输出应接近 prompt 目标,略低于原始目标 + max_tokens = int(beat.target_words * 1.1) cfg = GenerationConfig(max_tokens=max_tokens, temperature=0.85) beat_content = await self._stream_llm_with_stop_watch(prompt, cfg, novel=novel) else: @@ -668,6 +672,10 @@ async def _handle_writing(self, novel: Novel): ) if beat_content.strip(): + # V8: 截断检测与自动续写(软着陆) + beat_content = await self._ensure_complete_ending( + beat_content, beat, outline, chapter_content, novel + ) chapter_content += ("\n\n" if chapter_content else "") + beat_content await self._upsert_chapter_content(novel, next_chapter_node, chapter_content, status="draft") @@ -683,7 +691,11 @@ async def _handle_writing(self, novel: Novel): novel.current_beat_index = i + 1 self._flush_novel(novel) - 
logger.info(f"[{novel.novel_id}] ✅ 节拍 {i+1}/{len(beats)} 完成: {len(beat_content)} 字") + actual_len = len(beat_content) + target_len = beat.target_words + ratio = actual_len / target_len if target_len > 0 else 0 + warning = f" ⚠️ 超出 {int((ratio - 1) * 100)}%" if ratio > 1.1 else "" + logger.info(f"[{novel.novel_id}] ✅ 节拍 {i+1}/{len(beats)} 完成: {actual_len} 字 (目标 {target_len}){warning}") else: # 降级:无节拍,一次生成 if not self._is_still_running(novel): @@ -726,7 +738,26 @@ async def _handle_writing(self, novel: Novel): except Exception as e: logger.warning(f"post_process_generated_chapter 失败(仍落库):{e}") - # 7. 章节完成,标记 completed + # 7. 章节完成,标记 completed(带字数验证) + actual_word_count = len(chapter_content.strip()) + target_word_count = int(getattr(novel, "target_words_per_chapter", None) or 2500) + + # 字数警告:低于目标 60% 或超出 120% 时发出警告 + if actual_word_count < target_word_count * 0.6: + logger.warning( + f"[{novel.novel_id}] ⚠️ 第 {chapter_num} 章字数不足:{actual_word_count} 字 " + f"(目标 {target_word_count} 字,低于 60%)" + ) + elif actual_word_count > target_word_count * 1.2: + logger.warning( + f"[{novel.novel_id}] ⚠️ 第 {chapter_num} 章字数超出:{actual_word_count} 字 " + f"(目标 {target_word_count} 字,超出 {int((actual_word_count / target_word_count - 1) * 100)}%)" + ) + else: + logger.info( + f"[{novel.novel_id}] 第 {chapter_num} 章字数:{actual_word_count} 字 (目标 {target_word_count})" + ) + await self._upsert_chapter_content(novel, next_chapter_node, chapter_content, status="completed") # 8. 
更新计数器,重置节拍索引 @@ -736,7 +767,10 @@ async def _handle_writing(self, novel: Novel): novel.current_stage = NovelStage.AUDITING self._flush_novel(novel) - logger.info(f"[{novel.novel_id}] 🎉 第 {chapter_num} 章完成:{len(chapter_content)} 字 (共 {novel.current_auto_chapters}/{novel.target_chapters} 章)") + logger.info( + f"[{novel.novel_id}] 🎉 第 {chapter_num} 章完成:{actual_word_count} 字 " + f"(目标 {target_word_count} 字,共 {novel.current_auto_chapters}/{novel.target_chapters} 章)" + ) def _latest_completed_chapter_number(self, novel_id: NovelId) -> Optional[int]: """已完结章节的最大章节号(与故事树全局章节号一致)。 @@ -758,6 +792,7 @@ async def _handle_auditing(self, novel: Novel): chapter_num = self._latest_completed_chapter_number(NovelId(novel.novel_id.value)) if chapter_num is None: novel.current_stage = NovelStage.WRITING + self._flush_novel(novel) return chapter = self.chapter_repository.get_by_novel_and_number( @@ -765,11 +800,16 @@ async def _handle_auditing(self, novel: Novel): ) if not chapter: novel.current_stage = NovelStage.WRITING + self._flush_novel(novel) return content = chapter.content or "" chapter_id = ChapterId(chapter.id) + # 审计阶段:保存进度以便前端能看到 + novel.audit_progress = "voice_check" + self._flush_novel(novel) + # 1. 先做文风预检;若严重偏离则定向改写,最多两轮,再执行章后管线,避免分析结果与最终正文错位 drift_result = await self._score_voice_only( novel.novel_id.value, @@ -784,6 +824,9 @@ async def _handle_auditing(self, novel: Novel): ) # 2. 统一章后管线:叙事/向量、文风(一次)、KG 推断;三元组与伏笔在叙事同步单次 LLM 中落库 + novel.audit_progress = "aftermath_pipeline" + self._flush_novel(novel) + if self.aftermath_pipeline: try: drift_result = await self.aftermath_pipeline.run_after_chapter_saved( @@ -806,6 +849,9 @@ async def _handle_auditing(self, novel: Novel): ) # 2. 
张力打分(轻量 LLM 调用,~200 token) + novel.audit_progress = "tension_scoring" + self._flush_novel(novel) + tension = await self._score_tension(content) novel.last_chapter_tension = tension # 保存张力值到章节(用于张力曲线图) @@ -851,6 +897,7 @@ async def _handle_auditing(self, novel: Novel): ) novel.current_stage = NovelStage.WRITING + novel.audit_progress = None # 清除审计进度 # 5. 全书完成检测 chapters = self.chapter_repository.list_by_novel(NovelId(novel.novel_id.value)) @@ -1280,6 +1327,82 @@ async def _push_streaming_chunk(self, novel_id: str, chunk: str): from application.engine.services.streaming_bus import streaming_bus streaming_bus.publish(novel_id, chunk) + async def _ensure_complete_ending( + self, + content: str, + beat: "Beat", + outline: str, + chapter_draft_so_far: str, + novel=None, + ) -> str: + """V8: 截断检测与自动续写(软着陆) + + 检测内容是否被截断(没有以句号等结束符结尾), + 如果被截断,自动发起续写请求完成收尾。 + + Args: + content: 已生成的内容 + beat: 当前节拍对象 + outline: 章节大纲 + chapter_draft_so_far: 本章已生成的正文 + novel: 小说对象 + + Returns: + 完整的内容(可能包含续写部分) + """ + import re + + if not content or not content.strip(): + return content + + # 检测是否以句子结束符结尾 + # 中文句号、英文句号、叹号、问号、引号、省略号 + ending_pattern = r'[。!?…)】》"\'』」]$' + stripped = content.rstrip() + + if re.search(ending_pattern, stripped): + # 结尾完整,无需续写 + return content + + # 检测是否被截断 + logger.warning(f"[截断检测] 内容未以结束符结尾,可能被截断,发起自动续写") + + # 构建续写 Prompt + continuation_prompt = Prompt( + system="你是小说续写助手。你的任务是为被截断的段落提供一个简短、自然的结尾。" + "不要重复已有内容,只需在 150 字以内完成收尾,让段落有完整的结尾。", + user=f"""以下段落被截断了,请续写一个简短的结尾(150字以内)让它完整结束: + +---截断的内容--- +{stripped[-500:]} + +---续写要求--- +1. 承接上文,给出自然的收尾 +2. 不要重复已有内容 +3. 必须以句号结束 +4. 
字数控制在 150 字以内 + +请直接续写,不要解释:""" + ) + + try: + config = GenerationConfig(max_tokens=300, temperature=0.7) + continuation = await self._stream_llm_with_stop_watch( + continuation_prompt, config, novel=novel + ) + + if continuation and continuation.strip(): + # 拼接续写内容 + result = stripped + continuation.strip() + logger.info(f"[截断续写] 成功续写 {len(continuation.strip())} 字") + return result + + except Exception as e: + logger.warning(f"[截断续写] 续写失败: {e}") + + # 续写失败,返回原内容(至少加个句号让它看起来完整) + return stripped + "。" + async def _stream_one_beat( self, outline, @@ -1320,30 +1443,61 @@ async def _stream_one_beat( user_parts.append(f"\n{beat_prompt}") user_parts.append("\n\n开始撰写:") - max_tokens = int(beat.target_words * 1.5) if beat else 3000 + # 字数控制策略(与主流程一致) + max_tokens = int(beat.target_words * 1.1) if beat else 3000 prompt = Prompt(system=system, user="\n".join(user_parts)) config = GenerationConfig(max_tokens=max_tokens, temperature=0.85) return await self._stream_llm_with_stop_watch(prompt, config, novel=novel) async def _upsert_chapter_content(self, novel, chapter_node, content: str, status: str): - """最小事务:只更新章节内容,不涉及其他表""" + """最小事务:只更新章节内容,不涉及其他表 + + 安全规则: + 1. 空内容不能将状态更新为 completed(防止空章节被标记为完成) + 2. 
空内容不会覆盖已有内容(防止意外清空) + """ from domain.novel.entities.chapter import Chapter, ChapterStatus from domain.novel.value_objects.novel_id import NovelId + content_str = (content or "").strip() + existing = self.chapter_repository.get_by_novel_and_number( NovelId(novel.novel_id.value), chapter_node.number ) if existing: - # 防御:避免意外用空串覆盖已有正文(例如并发/异常分支写入空内容) - if (not (content or "").strip()) and (existing.content or "").strip(): - existing.status = ChapterStatus(status) - self.chapter_repository.save(existing) + existing_content = (existing.content or "").strip() + + # 安全检查:空内容不能标记为 completed + if not content_str and status == "completed": + logger.warning( + f"[{novel.novel_id}] 拒绝将章节 {chapter_node.number} 标记为 completed:内容为空" + ) return + + # 防御:避免意外用空串覆盖已有正文 + if not content_str: + # 空内容:只允许更新状态为 draft(不能覆盖已有内容,不能标记为 completed) + if status == "draft" and existing_content: + logger.debug( + f"[{novel.novel_id}] 章节 {chapter_node.number} 内容为空,仅更新状态为 draft(保留已有内容)" + ) + existing.status = ChapterStatus.DRAFT + self.chapter_repository.save(existing) + return + + # 正常更新:有内容时才更新 existing.update_content(content) existing.status = ChapterStatus(status) self.chapter_repository.save(existing) else: + # 新建章节:空内容只能创建为 draft + if not content_str and status == "completed": + logger.warning( + f"[{novel.novel_id}] 拒绝创建空的 completed 章节 {chapter_node.number}" + ) + return + chapter = Chapter( id=chapter_node.id, novel_id=NovelId(novel.novel_id.value), @@ -1351,7 +1505,7 @@ async def _upsert_chapter_content(self, novel, chapter_node, content: str, statu title=chapter_node.title, content=content, outline=chapter_node.outline or "", - status=ChapterStatus(status) + status=ChapterStatus(status if content_str else "draft") ) self.chapter_repository.save(chapter) diff --git a/application/engine/services/autopilot_runtime_state.py b/application/engine/services/autopilot_runtime_state.py new file mode 100644 index 000000000..801180c70 --- /dev/null +++ 
b/application/engine/services/autopilot_runtime_state.py @@ -0,0 +1,44 @@ +"""Process-local runtime state for the managed autopilot daemon. + +The API process owns the daemon child process. Routes use this lightweight +state to avoid marking novels as running when the daemon is disabled or absent. +""" +from __future__ import annotations + +from dataclasses import dataclass +from threading import Lock +from typing import Optional + + +@dataclass(frozen=True) +class AutopilotRuntimeState: + running: bool = False + pid: Optional[int] = None + disabled: bool = False + reason: str = "" + + +_state = AutopilotRuntimeState(reason="守护进程尚未初始化") +_lock = Lock() + + +def set_autopilot_runtime_state( + *, + running: bool, + pid: Optional[int] = None, + disabled: bool = False, + reason: str = "", +) -> None: + global _state + with _lock: + _state = AutopilotRuntimeState( + running=running, + pid=pid, + disabled=disabled, + reason=reason, + ) + + +def get_autopilot_runtime_state() -> AutopilotRuntimeState: + with _lock: + return _state diff --git a/application/engine/services/background_task_service.py b/application/engine/services/background_task_service.py index 7b99aa8e9..db775896a 100644 --- a/application/engine/services/background_task_service.py +++ b/application/engine/services/background_task_service.py @@ -61,6 +61,7 @@ def __init__( chapter_repository=None, plot_arc_repository=None, narrative_event_repository=None, + obsidian_memory_service=None, ): self.voice_drift_service = voice_drift_service self.llm_service = llm_service @@ -72,6 +73,7 @@ def __init__( self.chapter_repository = chapter_repository self.plot_arc_repository = plot_arc_repository self.narrative_event_repository = narrative_event_repository + self.obsidian_memory_service = obsidian_memory_service self._queue = queue.Queue(maxsize=200) # 防队列无限增长 self._worker = threading.Thread(target=self._worker_loop, daemon=True, name="bg-task-worker") @@ -193,4 +195,14 @@ def _handle_extract_bundle(self, task): ) 
finally: loop.close() + if self.obsidian_memory_service: + try: + self.obsidian_memory_service.sync_chapter(task.novel_id.value, chapter_number) + except Exception as e: + logger.warning( + "[BG] Obsidian 长期记忆同步失败:novel=%s ch=%s err=%s", + task.novel_id.value, + chapter_number, + e, + ) logger.info(f"[BG] extract_bundle 完成:第 {chapter_number} 章") diff --git a/application/engine/services/chapter_aftermath_pipeline.py b/application/engine/services/chapter_aftermath_pipeline.py index aa0cdc9d3..e45888ccc 100644 --- a/application/engine/services/chapter_aftermath_pipeline.py +++ b/application/engine/services/chapter_aftermath_pipeline.py @@ -66,6 +66,7 @@ def __init__( chapter_repository: Any = None, plot_arc_repository: Any = None, narrative_event_repository: Any = None, + obsidian_memory_service: Any = None, ) -> None: self._knowledge = knowledge_service self._indexing = chapter_indexing_service @@ -77,6 +78,7 @@ def __init__( self._chapter_repository = chapter_repository self._plot_arc_repository = plot_arc_repository self._narrative_event_repository = narrative_event_repository + self._obsidian_memory = obsidian_memory_service async def run_after_chapter_saved( self, @@ -95,6 +97,8 @@ async def run_after_chapter_saved( "vector_stored": False, "foreshadow_stored": False, "triples_extracted": False, + "obsidian_memory_synced": False, + "obsidian_memory_path": None, } if not content or not str(content).strip(): @@ -163,4 +167,13 @@ async def run_after_chapter_saved( # 3) 结构树 KG 推断 await infer_kg_from_chapter(novel_id, chapter_number) + # 4) Obsidian 长期记忆镜像:SQLite Knowledge 仍是唯一权威数据源。 + if self._obsidian_memory: + try: + sync_result = self._obsidian_memory.sync_chapter(novel_id, chapter_number) + out["obsidian_memory_synced"] = bool(sync_result.get("synced")) + out["obsidian_memory_path"] = sync_result.get("chapter_note") + except Exception as e: + logger.warning("Obsidian 长期记忆同步失败 novel=%s ch=%s: %s", novel_id, chapter_number, e) + return out diff --git 
a/application/engine/services/context_budget_allocator.py b/application/engine/services/context_budget_allocator.py index 9d7d83ac9..74dd09a1e 100644 --- a/application/engine/services/context_budget_allocator.py +++ b/application/engine/services/context_budget_allocator.py @@ -159,8 +159,9 @@ class ContextBudgetAllocator: MAX_VECTOR_RECALL_TOKENS = 5000 # 最近章节槽位:紧邻上一章侧重章末承接;更早章节仅章首短预览以省预算 - PREV_CHAPTER_BRIDGE_HEAD_CHARS = 250 - PREV_CHAPTER_BRIDGE_TAIL_CHARS = 1200 + # V8 优化:增加章末保留量,提升章节间连贯性 + PREV_CHAPTER_BRIDGE_HEAD_CHARS = 300 # 章首略览 + PREV_CHAPTER_BRIDGE_TAIL_CHARS = 2000 # 章末完整保留(原 1200 → 2000) OLDER_CHAPTER_HEAD_PREVIEW_CHARS = 500 def __init__( diff --git a/application/engine/services/context_builder.py b/application/engine/services/context_builder.py index a4e189c0d..270b7f8b0 100644 --- a/application/engine/services/context_builder.py +++ b/application/engine/services/context_builder.py @@ -269,14 +269,18 @@ def magnify_outline_to_beats(self, chapter_number: int, outline: str, target_cha ), ] - # 调整字数分配 + # 调整字数分配:尽量贴近目标字数,避免系统性写不满 total_words = sum(b.target_words for b in beats) + prompt_target_ratio = 0.95 if total_words != target_chapter_words: - ratio = target_chapter_words / total_words + ratio = (target_chapter_words * prompt_target_ratio) / total_words for beat in beats: beat.target_words = int(beat.target_words * ratio) - logger.info(f"节拍放大器:将大纲拆分为 {len(beats)} 个节拍") + logger.info( + f"节拍放大器:将大纲拆分为 {len(beats)} 个节拍," + f"prompt 目标 {sum(b.target_words for b in beats)} 字(实际目标 {target_chapter_words} 字的 {int(prompt_target_ratio * 100)}%)" + ) return beats # 节拍聚焦指令已迁移至 prompts_defaults.json (id=beat-focus-instructions) diff --git a/application/reader/__init__.py b/application/reader/__init__.py new file mode 100644 index 000000000..fee2ec493 --- /dev/null +++ b/application/reader/__init__.py @@ -0,0 +1,11 @@ +"""读者模拟 Agent 模块 + +模拟不同类型读者(硬核粉、休闲读者、挑刺党)阅读每章后的反馈, +输出悬疑保持度、爽感评分、劝退风险、情感共鸣度等多维度评估。 + +核心组件: +- ReaderSimulationService: 读者模拟分析服务(LLM 驱动) +- 
ReaderPersona: 读者人设枚举(硬核粉/休闲读者/挑刺党) +- ReaderFeedback: 单个读者视角的反馈结果 +- ChapterReaderReport: 章节级的综合读者报告 +""" diff --git a/application/reader/dtos/__init__.py b/application/reader/dtos/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/application/reader/dtos/reader_feedback_dto.py b/application/reader/dtos/reader_feedback_dto.py new file mode 100644 index 000000000..3fd090474 --- /dev/null +++ b/application/reader/dtos/reader_feedback_dto.py @@ -0,0 +1,99 @@ +"""读者模拟反馈 DTO — 面向 API 层的序列化模型。""" +from __future__ import annotations + +from dataclasses import dataclass, field +from datetime import datetime +from typing import Any, Dict, List, Optional + + +@dataclass +class ReaderDimensionScoresDTO: + """四维度评分""" + suspense_retention: float = 50.0 + thrill_score: float = 50.0 + churn_risk: float = 30.0 + emotional_resonance: float = 50.0 + + def to_dict(self) -> Dict[str, float]: + return { + "suspense_retention": round(self.suspense_retention, 1), + "thrill_score": round(self.thrill_score, 1), + "churn_risk": round(self.churn_risk, 1), + "emotional_resonance": round(self.emotional_resonance, 1), + } + + +@dataclass +class ReaderFeedbackDTO: + """单个读者人设的反馈""" + persona: str # hardcore / casual / nitpicker + persona_label: str # 硬核粉 / 休闲读者 / 挑刺党 + scores: ReaderDimensionScoresDTO + one_line_verdict: str = "" + highlights: List[str] = field(default_factory=list) + pain_points: List[str] = field(default_factory=list) + suggestions: List[str] = field(default_factory=list) + + def to_dict(self) -> Dict[str, Any]: + return { + "persona": self.persona, + "persona_label": self.persona_label, + "scores": self.scores.to_dict(), + "one_line_verdict": self.one_line_verdict, + "highlights": self.highlights, + "pain_points": self.pain_points, + "suggestions": self.suggestions, + } + + +PERSONA_LABELS = { + "hardcore": "硬核粉", + "casual": "休闲读者", + "nitpicker": "挑刺党", +} + + +@dataclass +class ChapterReaderReportDTO: + """章节级读者模拟报告""" + novel_id: str + 
chapter_number: int + feedbacks: List[ReaderFeedbackDTO] = field(default_factory=list) + overall_readability: float = 50.0 + chapter_hook_strength: str = "medium" + pacing_verdict: str = "" + analyzed_at: Optional[datetime] = None + # 降级标识:True 表示 LLM 调用失败或解析失败,所有评分为默认值 + # 该字段用于 API 层判断是否持久化、返回什么 HTTP 状态码 + is_fallback: bool = False + # 降级原因(仅 is_fallback=True 时填充) + error_message: str = "" + + def to_dict(self) -> Dict[str, Any]: + return { + "novel_id": self.novel_id, + "chapter_number": self.chapter_number, + "feedbacks": [f.to_dict() for f in self.feedbacks], + "overall_readability": round(self.overall_readability, 1), + "chapter_hook_strength": self.chapter_hook_strength, + "pacing_verdict": self.pacing_verdict, + "analyzed_at": self.analyzed_at.isoformat() if self.analyzed_at else None, + # 便捷聚合:三个读者的平均分 + "avg_scores": self._compute_avg_scores(), + "is_fallback": self.is_fallback, + "error_message": self.error_message, + } + + def _compute_avg_scores(self) -> Dict[str, float]: + if not self.feedbacks: + return { + "suspense_retention": 0, "thrill_score": 0, + "churn_risk": 0, "emotional_resonance": 0, + } + n = len(self.feedbacks) + return { + "suspense_retention": round(sum(f.scores.suspense_retention for f in self.feedbacks) / n, 1), + "thrill_score": round(sum(f.scores.thrill_score for f in self.feedbacks) / n, 1), + "churn_risk": round(sum(f.scores.churn_risk for f in self.feedbacks) / n, 1), + "emotional_resonance": round(sum(f.scores.emotional_resonance for f in self.feedbacks) / n, 1), + } diff --git a/application/reader/schema.py b/application/reader/schema.py new file mode 100644 index 000000000..b88b7b988 --- /dev/null +++ b/application/reader/schema.py @@ -0,0 +1,138 @@ +"""读者模拟 Agent — LLM 输出的 Pydantic 结构化模型。 + +与 prompt 约定的 JSON 字段一致;额外字段忽略。 +""" +from __future__ import annotations + +from typing import List, Optional + +from pydantic import BaseModel, ConfigDict, Field, field_validator + + +class ReaderDimensionScores(BaseModel): + 
"""单个读者视角的四维度评分""" + + model_config = ConfigDict(extra="ignore") + + suspense_retention: float = Field( + default=50.0, + description="悬疑保持度 (0-100): 本章是否让读者产生「接下来会怎样」的好奇", + ) + thrill_score: float = Field( + default=50.0, + description="爽感评分 (0-100): 本章是否提供了令人满足的情绪高潮或反转", + ) + churn_risk: float = Field( + default=30.0, + description="劝退风险 (0-100): 读者在本章后弃书的概率,越低越好", + ) + emotional_resonance: float = Field( + default=50.0, + description="情感共鸣度 (0-100): 本章是否触动读者情感", + ) + + @field_validator( + "suspense_retention", "thrill_score", "churn_risk", "emotional_resonance", + mode="before", + ) + @classmethod + def clamp_score(cls, value: object) -> float: + """将评分归一到 0-100 范围。""" + if value is None: + return 50.0 + try: + v = float(value) + except (TypeError, ValueError): + return 50.0 + return max(0.0, min(100.0, v)) + + +class SingleReaderFeedbackPayload(BaseModel): + """单个读者人设的 LLM 输出""" + + model_config = ConfigDict(extra="ignore") + + persona: str = Field(description="读者人设标识: hardcore / casual / nitpicker") + scores: ReaderDimensionScores + one_line_verdict: str = Field( + default="", + description="一句话总评(口语化,带该读者的语气特色)", + ) + highlights: List[str] = Field( + default_factory=list, + description="本章亮点(该读者视角)", + ) + pain_points: List[str] = Field( + default_factory=list, + description="本章痛点 / 劝退点", + ) + suggestions: List[str] = Field( + default_factory=list, + description="改进建议", + ) + + @field_validator("persona", mode="before") + @classmethod + def normalize_persona(cls, value: object) -> str: + if value is None: + return "casual" + raw = str(value).strip().lower() + mapping = { + "hardcore": "hardcore", + "硬核粉": "hardcore", + "硬核": "hardcore", + "casual": "casual", + "休闲读者": "casual", + "休闲": "casual", + "nitpicker": "nitpicker", + "挑刺党": "nitpicker", + "挑刺": "nitpicker", + } + return mapping.get(raw, raw) + + +class ReaderSimulationLlmPayload(BaseModel): + """完整的 LLM 输出——包含三个读者视角的反馈""" + + model_config = ConfigDict(extra="ignore") + + feedbacks: 
List[SingleReaderFeedbackPayload] = Field( + default_factory=list, + description="三个读者人设的反馈列表", + ) + overall_readability: float = Field( + default=50.0, + description="综合可读性 (0-100)", + ) + chapter_hook_strength: str = Field( + default="medium", + description="章末钩子强度: weak / medium / strong", + ) + pacing_verdict: str = Field( + default="", + description="节奏总评(一句话)", + ) + + @field_validator("overall_readability", mode="before") + @classmethod + def clamp_readability(cls, value: object) -> float: + if value is None: + return 50.0 + try: + v = float(value) + except (TypeError, ValueError): + return 50.0 + return max(0.0, min(100.0, v)) + + @field_validator("chapter_hook_strength", mode="before") + @classmethod + def normalize_hook(cls, value: object) -> str: + if value is None: + return "medium" + raw = str(value).strip().lower() + mapping = { + "weak": "weak", "弱": "weak", "w": "weak", + "medium": "medium", "中": "medium", "m": "medium", + "strong": "strong", "强": "strong", "s": "strong", + } + return mapping.get(raw, "medium") diff --git a/application/reader/services/__init__.py b/application/reader/services/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/application/reader/services/reader_simulation_service.py b/application/reader/services/reader_simulation_service.py new file mode 100644 index 000000000..aa850b36a --- /dev/null +++ b/application/reader/services/reader_simulation_service.py @@ -0,0 +1,373 @@ +"""读者模拟 Agent 服务 — 模拟三类读者视角评估章节质量 + +核心流程: +1. 加载章节正文 + 上下文(前一章摘要、大纲) +2. 构建包含三个读者人设的 Prompt +3. 调用 LLM 获取结构化 JSON 反馈 +4. 
解析并转为 DTO 返回 + +三类读者人设: +- 硬核粉 (hardcore): 深度追更、关注伏笔/世界观一致性、不容忍逻辑漏洞 +- 休闲读者 (casual): 碎片时间阅读、追求爽感和节奏、耐心有限 +- 挑刺党 (nitpicker): 关注文笔/表达、指出陈词滥调、对重复描写敏感 +""" +from __future__ import annotations + +import logging +from datetime import datetime +from typing import Dict, List, Optional + +from application.ai.structured_json_pipeline import ( + parse_and_repair_json, + sanitize_llm_output, + validate_json_schema, +) +from application.reader.schema import ( + ReaderSimulationLlmPayload, + SingleReaderFeedbackPayload, +) +from application.reader.dtos.reader_feedback_dto import ( + ChapterReaderReportDTO, + ReaderDimensionScoresDTO, + ReaderFeedbackDTO, + PERSONA_LABELS, +) +from domain.novel.repositories.chapter_repository import ChapterRepository +from domain.novel.value_objects.novel_id import NovelId + +logger = logging.getLogger(__name__) + +_CHAPTER_EXCERPT_MAX_CHARS = 6000 +_DEFAULT_MAX_TOKENS = 4096 +_DEFAULT_TEMPERATURE = 0.4 + + +def _excerpt(text: str, max_chars: int = _CHAPTER_EXCERPT_MAX_CHARS) -> str: + """截断过长正文,保留头尾。""" + stripped = (text or "").strip() + if len(stripped) <= max_chars: + return stripped + half = max_chars // 2 + return stripped[:half] + "\n…(正文过长,已截取首尾)…\n" + stripped[-half:] + + +class ReaderSimulationService: + """读者模拟 Agent 服务""" + + def __init__( + self, + chapter_repository: ChapterRepository, + llm_client, + knowledge_repository=None, + ) -> None: + self._chapter_repo = chapter_repository + self._llm_client = llm_client + self._knowledge_repo = knowledge_repository + + async def simulate( + self, + novel_id: str, + chapter_number: int, + ) -> ChapterReaderReportDTO: + """对指定章节运行三类读者模拟。 + + Args: + novel_id: 小说 ID + chapter_number: 章节号 + + Returns: + ChapterReaderReportDTO 包含三个读者视角的反馈 + """ + novel_id_vo = NovelId(value=novel_id) + chapters = self._chapter_repo.list_by_novel(novel_id_vo) + current = next((c for c in chapters if c.number == chapter_number), None) + if current is None: + return self._empty_report(novel_id, chapter_number, 
"章节不存在") + + content = (current.content or "").strip() + if not content: + return self._empty_report(novel_id, chapter_number, "章节内容为空") + + # 收集上下文 + prev_chapter = next((c for c in chapters if c.number == chapter_number - 1), None) + next_chapter = next((c for c in chapters if c.number == chapter_number + 1), None) + + # 尝试获取章节摘要 + prev_summary = "" + if self._knowledge_repo and prev_chapter: + try: + knowledge = self._knowledge_repo.get_by_novel_id(novel_id) + if knowledge: + ch_sum = knowledge.get_chapter(chapter_number - 1) + if ch_sum: + prev_summary = ch_sum.summary or "" + except Exception: + pass + + context = self._build_context( + current_content=content, + current_outline=current.outline or "", + current_title=current.title or f"第{chapter_number}章", + chapter_number=chapter_number, + prev_summary=prev_summary, + prev_content=_excerpt(prev_chapter.content, 2000) if prev_chapter else "", + next_exists=next_chapter is not None, + tension_score=current.tension_score, + ) + + prompt = self._build_prompt(context) + + # LLM 调用隔离:网络错误/超时/认证失败等均转为降级报告, + # 让上层 API 明确感知 LLM 失败而非被通用 500 掩盖。 + try: + response = await self._llm_client.generate(prompt) + except Exception as e: + logger.error( + "读者模拟 LLM 调用失败 novel=%s ch=%d: %s", + novel_id, chapter_number, e, + ) + return self._empty_report( + novel_id, chapter_number, + f"LLM 调用失败: {type(e).__name__}: {e}", + ) + + report = self._parse_response(novel_id, chapter_number, response) + return report + + def _build_context( + self, + current_content: str, + current_outline: str, + current_title: str, + chapter_number: int, + prev_summary: str, + prev_content: str, + next_exists: bool, + tension_score: float, + ) -> Dict[str, str]: + """组装供 prompt 使用的上下文数据。""" + parts = { + "chapter_title": current_title, + "chapter_number": str(chapter_number), + "chapter_content": _excerpt(current_content), + "chapter_outline": current_outline, + "tension_score": f"{tension_score:.0f}", + "has_next": "是" if next_exists else 
"否(本章是最新章)", + } + if prev_summary: + parts["prev_summary"] = prev_summary + elif prev_content: + parts["prev_summary"] = prev_content + return parts + + def _build_prompt(self, ctx: Dict[str, str]) -> str: + prev_block = "" + if ctx.get("prev_summary"): + prev_block = f"\n上一章摘要/片段:\n{ctx['prev_summary']}\n" + + outline_block = "" + if ctx.get("chapter_outline"): + outline_block = f"\n本章大纲:\n{ctx['chapter_outline']}\n" + + return f"""你是一位专业的小说质量分析师,需要模拟三种不同类型的读者来评估章节质量。 + +=== 三种读者人设 === + +1. **硬核粉 (hardcore)** + - 从第一章追到现在的深度读者 + - 关注伏笔回收、世界观一致性、角色成长合理性 + - 不容忍逻辑漏洞和人设崩塌 + - 对「填坑」和「前后呼应」特别敏感 + - 语气:认真、细致、偶尔兴奋 + +2. **休闲读者 (casual)** + - 碎片时间阅读,可能跳着看 + - 追求「爽感」和情节推进速度 + - 耐心有限——3段无高潮就想划走 + - 对信息密度过高、铺垫过长容易疲倦 + - 语气:随意、直接、"看得爽就行" + +3. **挑刺党 (nitpicker)** + - 文笔鉴赏家,关注遣词造句质量 + - 对陈词滥调("不由自主"、"嘴角上扬")过敏 + - 指出重复描写、水字数、逻辑硬伤 + - 会对比同类作品打分 + - 语气:犀利、挑剔、有理有据 + +=== 待评估章节 === + +标题: {ctx['chapter_title']}(第{ctx['chapter_number']}章) +系统张力评分: {ctx['tension_score']}/100 +是否有下一章: {ctx['has_next']} +{prev_block}{outline_block} +正文: +{ctx['chapter_content']} + +=== 评估要求 === + +请从三个读者视角分别评分并给出反馈。 + +**四个维度** (每项 0-100): +- **suspense_retention** (悬疑保持度): 读完本章后是否想知道"接下来会怎样" +- **thrill_score** (爽感评分): 本章是否提供了令人满足的情绪高潮、反转或爽点 +- **churn_risk** (劝退风险): 读者在本章后放弃此书的概率(0=绝不弃书, 100=必弃) +- **emotional_resonance** (情感共鸣度): 本章是否触动了读者情感 + +另外给出: +- **overall_readability** (综合可读性 0-100) +- **chapter_hook_strength** (章末钩子强度: weak/medium/strong) +- **pacing_verdict** (节奏总评,一句话) + +请以 JSON 格式返回: +{{ + "feedbacks": [ + {{ + "persona": "hardcore", + "scores": {{ + "suspense_retention": 75, + "thrill_score": 60, + "churn_risk": 15, + "emotional_resonance": 70 + }}, + "one_line_verdict": "一句话总评(带该读者的口吻)", + "highlights": ["亮点1", "亮点2"], + "pain_points": ["痛点1"], + "suggestions": ["建议1"] + }}, + {{ + "persona": "casual", + "scores": {{ ... }}, + "one_line_verdict": "...", + "highlights": [...], + "pain_points": [...], + "suggestions": [...] + }}, + {{ + "persona": "nitpicker", + "scores": {{ ... 
}}, + "one_line_verdict": "...", + "highlights": [...], + "pain_points": [...], + "suggestions": [...] + }} + ], + "overall_readability": 72, + "chapter_hook_strength": "strong", + "pacing_verdict": "节奏总评一句话" +}}""" + + def _parse_response( + self, + novel_id: str, + chapter_number: int, + response: str, + ) -> ChapterReaderReportDTO: + """解析 LLM 响应为 DTO。""" + cleaned = sanitize_llm_output(response) + data, parse_errors = parse_and_repair_json(cleaned) + + if data is None: + logger.warning( + "读者模拟 JSON 解析失败 novel=%s ch=%d: %s", + novel_id, chapter_number, "; ".join(parse_errors[:4]), + ) + return self._empty_report( + novel_id, chapter_number, + "LLM 返回无法解析: " + "; ".join(parse_errors[:2]), + ) + + payload, schema_errors = validate_json_schema( + data, ReaderSimulationLlmPayload, + ) + + if payload is None: + logger.warning( + "读者模拟 Schema 校验失败 novel=%s ch=%d: %s", + novel_id, chapter_number, "; ".join(schema_errors[:4]), + ) + return self._empty_report( + novel_id, chapter_number, + "JSON 结构校验失败: " + "; ".join(schema_errors[:2]), + ) + + # 空响应保护:LLM 可能返回空对象、空字符串或拒答, + # 这种情况下 payload.feedbacks 为空但 schema 能过(所有字段都有默认值)。 + # 此时应判定为降级而非假成功,避免 API 层返回空报告却宣称成功。 + if not payload.feedbacks: + logger.warning( + "读者模拟 LLM 返回空 feedbacks novel=%s ch=%d(可能是密钥缺失/模型拒答)", + novel_id, chapter_number, + ) + preview = (response or "").strip()[:200] or "(空响应)" + return self._empty_report( + novel_id, chapter_number, + f"LLM 返回无有效读者反馈(响应预览: {preview})", + ) + + feedbacks = [] + for fb in payload.feedbacks: + feedbacks.append(ReaderFeedbackDTO( + persona=fb.persona, + persona_label=PERSONA_LABELS.get(fb.persona, fb.persona), + scores=ReaderDimensionScoresDTO( + suspense_retention=fb.scores.suspense_retention, + thrill_score=fb.scores.thrill_score, + churn_risk=fb.scores.churn_risk, + emotional_resonance=fb.scores.emotional_resonance, + ), + one_line_verdict=fb.one_line_verdict, + highlights=list(fb.highlights), + pain_points=list(fb.pain_points), + suggestions=list(fb.suggestions), + 
)) + + # 确保三个人设都有(缺失时填默认) + existing_personas = {f.persona for f in feedbacks} + for persona_key in ("hardcore", "casual", "nitpicker"): + if persona_key not in existing_personas: + feedbacks.append(ReaderFeedbackDTO( + persona=persona_key, + persona_label=PERSONA_LABELS.get(persona_key, persona_key), + scores=ReaderDimensionScoresDTO(), + one_line_verdict="(该读者视角的反馈未能生成)", + )) + + return ChapterReaderReportDTO( + novel_id=novel_id, + chapter_number=chapter_number, + feedbacks=feedbacks, + overall_readability=payload.overall_readability, + chapter_hook_strength=payload.chapter_hook_strength, + pacing_verdict=payload.pacing_verdict, + analyzed_at=datetime.utcnow(), + ) + + @staticmethod + def _empty_report( + novel_id: str, + chapter_number: int, + reason: str, + ) -> ChapterReaderReportDTO: + """生成空报告(用于异常/降级分支)。 + + 所有降级分支(章节不存在、LLM 失败、JSON 解析失败、Schema 校验失败) + 均走此入口,标记 is_fallback=True 让 API 层能精准识别并拒绝持久化 + 假数据。 + """ + feedbacks = [] + for persona_key in ("hardcore", "casual", "nitpicker"): + feedbacks.append(ReaderFeedbackDTO( + persona=persona_key, + persona_label=PERSONA_LABELS.get(persona_key, persona_key), + scores=ReaderDimensionScoresDTO(), + one_line_verdict=reason, + )) + return ChapterReaderReportDTO( + novel_id=novel_id, + chapter_number=chapter_number, + feedbacks=feedbacks, + pacing_verdict=reason, + analyzed_at=datetime.utcnow(), + is_fallback=True, + error_message=reason, + ) diff --git a/application/services/novel_service.py b/application/services/novel_service.py new file mode 100644 index 000000000..5bbc539b5 --- /dev/null +++ b/application/services/novel_service.py @@ -0,0 +1,5 @@ +"""Compatibility import for legacy tests and integrations.""" + +from application.core.services.novel_service import NovelService + +__all__ = ["NovelService"] diff --git a/application/style_bible/__init__.py b/application/style_bible/__init__.py new file mode 100644 index 000000000..599965a2b --- /dev/null +++ b/application/style_bible/__init__.py @@ -0,0 +1 @@ 
+"""写作手法知识库应用层。""" diff --git a/application/style_bible/dtos.py b/application/style_bible/dtos.py new file mode 100644 index 000000000..89eedb437 --- /dev/null +++ b/application/style_bible/dtos.py @@ -0,0 +1,117 @@ +"""写作手法知识库 DTO。""" +from __future__ import annotations + +from dataclasses import dataclass, field +from typing import Any, Optional + + +@dataclass +class StyleSampleImportRequestDTO: + title: str + content: str + source_type: str = "reference" + genre: str = "" + scene_type: str = "" + pov: str = "" + allowed_for_generation: bool = False + novel_id: str = "" + profile_id: str = "" + create_profile: bool = False + profile_name: str = "" + + +@dataclass +class StyleProfileGenerateRequestDTO: + novel_id: str = "" + name: str = "写作手法档案" + description: str = "" + sample_ids: list[str] = field(default_factory=list) + use_llm: bool = False + llm_profile_id: str = "" + + +@dataclass +class StyleSampleDTO: + id: str + title: str + content: str + source_type: str + genre: str + scene_type: str + pov: str + allowed_for_generation: bool + novel_id: str + profile_id: str + content_hash: str + char_count: int + + +@dataclass +class StyleChunkDTO: + id: str + sample_id: str + chunk_type: str + sequence: int + chapter_number: int + title: str + content: str + char_count: int + metrics: dict[str, Any] = field(default_factory=dict) + + +@dataclass +class StyleTechniqueCardDTO: + id: str + profile_id: str + title: str + category: str + scene_type: str + rule_text: str + example_summary: str + prompt_instruction: str + enabled: bool + weight: float + + +@dataclass +class StyleProfileDTO: + id: str + name: str + description: str + status: str + novel_id: str + profile: dict[str, Any] + metrics: dict[str, Any] + rules: list[Any] + forbidden_patterns: list[str] + version: int + + +@dataclass +class StyleSampleImportResultDTO: + sample: StyleSampleDTO + chunks: list[StyleChunkDTO] + profile: Optional[StyleProfileDTO] = None + cards: list[StyleTechniqueCardDTO] = 
field(default_factory=list) + + +@dataclass +class StyleProfileGenerateResultDTO: + profile: StyleProfileDTO + cards: list[StyleTechniqueCardDTO] + + +@dataclass +class StyleProfileMatchReportDTO: + profile_id: str + score: float + metrics: dict[str, Any] = field(default_factory=dict) + issues: list[str] = field(default_factory=list) + + +@dataclass +class StylePromptOverlayDTO: + prompt: str + profile_id: str = "" + profile_name: str = "" + card_ids: list[str] = field(default_factory=list) diff --git a/application/style_bible/services/__init__.py b/application/style_bible/services/__init__.py new file mode 100644 index 000000000..cfeaa9ad2 --- /dev/null +++ b/application/style_bible/services/__init__.py @@ -0,0 +1 @@ +"""写作手法知识库应用服务。""" diff --git a/application/style_bible/services/style_metric_analyzer.py b/application/style_bible/services/style_metric_analyzer.py new file mode 100644 index 000000000..f77c56f2a --- /dev/null +++ b/application/style_bible/services/style_metric_analyzer.py @@ -0,0 +1,247 @@ +"""写作手法知识库风格指标分析。""" +from __future__ import annotations + +import re +from typing import Any + +from application.audit.services.cliche_scanner import ClicheScanner + + +class StyleMetricAnalyzer: + """用确定性启发式提取可用于提示词的写作指标。""" + + SENTENCE_RE = re.compile(r"[^。!?!?]+[。!?!?]?") + QUOTED_DIALOGUE_RE = re.compile(r"[“「『](.*?)[”」』]") + BLANK_LINE_RE = re.compile(r"\n\s*\n+") + + DIALOGUE_MARKERS = ("说", "问", "答", "喊", "低声", "开口", "道") + ACTION_MARKERS = ("走", "推", "拉", "抬", "转", "握", "冲", "停", "看", "拿") + PSYCHOLOGY_MARKERS = ("想", "觉得", "意识到", "心里", "脑海", "明白", "害怕", "犹豫") + ENVIRONMENT_MARKERS = ("雨", "风", "灯", "门", "窗", "街", "夜", "屋", "楼", "光", "影") + + def __init__(self, cliche_scanner: ClicheScanner | None = None): + self.cliche_scanner = cliche_scanner or ClicheScanner() + + def analyze(self, text: str) -> dict[str, Any]: + content = (text or "").strip() + if not content: + return self._empty_metrics(sample_count=1) + + paragraphs = 
self._split_paragraphs(content) + sentences = self._split_sentences(content) + sentence_count = len(sentences) + char_count = len(re.sub(r"\s+", "", content)) + dialogue_chars = self._dialogue_char_count(content, sentences) + cliche_hits = self.cliche_scanner.scan_cliches(content) + + return { + "sample_count": 1, + "char_count": char_count, + "sentence_count": sentence_count, + "paragraph_count": len(paragraphs), + "avg_sentence_length": round(char_count / sentence_count, 2) + if sentence_count + else 0.0, + "avg_paragraph_length": round(char_count / len(paragraphs), 2) + if paragraphs + else 0.0, + "dialogue_ratio": self._ratio(dialogue_chars, char_count), + "action_ratio": self._sentence_ratio(sentences, self.ACTION_MARKERS), + "psychology_ratio": self._sentence_ratio(sentences, self.PSYCHOLOGY_MARKERS), + "environment_ratio": self._sentence_ratio(sentences, self.ENVIRONMENT_MARKERS), + "cliche_hit_count": len(cliche_hits), + "cliche_patterns": sorted({hit.pattern for hit in cliche_hits}), + "hook_score": self._hook_score(sentences), + } + + def aggregate(self, metrics_list: list[dict[str, Any]]) -> dict[str, Any]: + valid_metrics = [metrics for metrics in metrics_list if metrics] + if not valid_metrics: + return self._empty_metrics(sample_count=0) + + sample_count = len(valid_metrics) + char_count = sum(int(metrics.get("char_count") or 0) for metrics in valid_metrics) + sentence_count = sum( + int(metrics.get("sentence_count") or 0) for metrics in valid_metrics + ) + paragraph_count = sum( + int(metrics.get("paragraph_count") or 0) for metrics in valid_metrics + ) + cliche_patterns: set[str] = set() + for metrics in valid_metrics: + cliche_patterns.update(str(item) for item in metrics.get("cliche_patterns") or []) + + return { + "sample_count": sample_count, + "char_count": char_count, + "sentence_count": sentence_count, + "paragraph_count": paragraph_count, + "avg_sentence_length": round(char_count / sentence_count, 2) + if sentence_count + else 0.0, + 
"avg_paragraph_length": round(char_count / paragraph_count, 2) + if paragraph_count + else 0.0, + "dialogue_ratio": self._weighted_average(valid_metrics, "dialogue_ratio"), + "action_ratio": self._weighted_average(valid_metrics, "action_ratio"), + "psychology_ratio": self._weighted_average(valid_metrics, "psychology_ratio"), + "environment_ratio": self._weighted_average(valid_metrics, "environment_ratio"), + "cliche_hit_count": sum( + int(metrics.get("cliche_hit_count") or 0) for metrics in valid_metrics + ), + "cliche_patterns": sorted(cliche_patterns), + "hook_score": self._weighted_average(valid_metrics, "hook_score"), + } + + def match_profile( + self, + text: str, + profile_metrics: dict[str, Any], + forbidden_patterns: list[str] | None = None, + ) -> dict[str, Any]: + """计算正文与风格档案的基础匹配度。""" + metrics = self.analyze(text) + issues: list[str] = [] + score = 100.0 + + score -= self._metric_penalty( + metrics, + profile_metrics, + "avg_sentence_length", + tolerance=0.35, + label="平均句长", + issues=issues, + ) + score -= self._metric_penalty( + metrics, + profile_metrics, + "avg_paragraph_length", + tolerance=0.45, + label="段落长度", + issues=issues, + ) + score -= self._metric_penalty( + metrics, + profile_metrics, + "dialogue_ratio", + tolerance=0.4, + label="对白占比", + issues=issues, + max_penalty=14.0, + ) + + if metrics["cliche_hit_count"] > 0: + penalty = min(18.0, metrics["cliche_hit_count"] * 4.0) + score -= penalty + issues.append(f"AI 套话命中 {metrics['cliche_hit_count']} 处") + + forbidden_hits = self._forbidden_hits(text, forbidden_patterns or []) + if forbidden_hits: + score -= min(20.0, len(forbidden_hits) * 5.0) + issues.append("命中禁用表达:" + "、".join(forbidden_hits[:5])) + + return { + "score": round(max(0.0, min(100.0, score)), 1), + "metrics": metrics, + "issues": issues, + "forbidden_hits": forbidden_hits, + } + + def _split_paragraphs(self, text: str) -> list[str]: + return [ + paragraph.strip() + for paragraph in self.BLANK_LINE_RE.split(text.strip()) + if 
paragraph.strip() + ] + + def _split_sentences(self, text: str) -> list[str]: + return [ + sentence.strip() + for sentence in self.SENTENCE_RE.findall(text) + if sentence.strip() + ] + + def _dialogue_char_count(self, text: str, sentences: list[str]) -> int: + quoted_count = sum(len(match.group(1)) for match in self.QUOTED_DIALOGUE_RE.finditer(text)) + marker_count = sum( + len(sentence) + for sentence in sentences + if any(marker in sentence for marker in self.DIALOGUE_MARKERS) + ) + return max(quoted_count, marker_count) + + def _sentence_ratio(self, sentences: list[str], markers: tuple[str, ...]) -> float: + if not sentences: + return 0.0 + count = sum(1 for sentence in sentences if any(marker in sentence for marker in markers)) + return round(count / len(sentences), 4) + + def _hook_score(self, sentences: list[str]) -> float: + if not sentences: + return 0.0 + ending = sentences[-1] + markers = ("?", "?", "忽然", "却", "不是", "门", "人影", "声音") + return 1.0 if any(marker in ending for marker in markers) else 0.0 + + @staticmethod + def _ratio(part: int, whole: int) -> float: + return round(part / whole, 4) if whole else 0.0 + + @staticmethod + def _weighted_average(metrics_list: list[dict[str, Any]], key: str) -> float: + total_weight = sum(int(metrics.get("char_count") or 0) for metrics in metrics_list) + if total_weight <= 0: + return 0.0 + total = sum( + float(metrics.get(key) or 0.0) * int(metrics.get("char_count") or 0) + for metrics in metrics_list + ) + return round(total / total_weight, 4) + + @staticmethod + def _metric_penalty( + metrics: dict[str, Any], + baseline: dict[str, Any], + key: str, + *, + tolerance: float, + label: str, + issues: list[str], + max_penalty: float = 16.0, + ) -> float: + current = float(metrics.get(key) or 0.0) + expected = float(baseline.get(key) or 0.0) + if current <= 0 or expected <= 0: + return 0.0 + drift = abs(current - expected) / max(expected, 1e-6) + if drift <= tolerance: + return 0.0 + issues.append(f"{label}偏离样本:当前 
{current:.2f},目标 {expected:.2f}") + return min(max_penalty, (drift - tolerance) * 30.0) + + @staticmethod + def _forbidden_hits(text: str, patterns: list[str]) -> list[str]: + hits: list[str] = [] + for pattern in patterns: + item = str(pattern or "").strip() + if item and item in text and item not in hits: + hits.append(item) + return hits + + @staticmethod + def _empty_metrics(sample_count: int) -> dict[str, Any]: + return { + "sample_count": sample_count, + "char_count": 0, + "sentence_count": 0, + "paragraph_count": 0, + "avg_sentence_length": 0.0, + "avg_paragraph_length": 0.0, + "dialogue_ratio": 0.0, + "action_ratio": 0.0, + "psychology_ratio": 0.0, + "environment_ratio": 0.0, + "cliche_hit_count": 0, + "cliche_patterns": [], + "hook_score": 0.0, + } diff --git a/application/style_bible/services/style_profile_service.py b/application/style_bible/services/style_profile_service.py new file mode 100644 index 000000000..03c22b8ad --- /dev/null +++ b/application/style_bible/services/style_profile_service.py @@ -0,0 +1,429 @@ +"""写作手法档案生成服务。""" +from __future__ import annotations + +import logging +from typing import Any, Callable, Optional + +from application.style_bible.dtos import ( + StyleChunkDTO, + StyleProfileDTO, + StyleProfileGenerateRequestDTO, + StyleProfileGenerateResultDTO, + StyleProfileMatchReportDTO, + StyleSampleDTO, + StyleSampleImportRequestDTO, + StyleSampleImportResultDTO, + StyleTechniqueCardDTO, +) +from application.style_bible.services.style_metric_analyzer import StyleMetricAnalyzer +from application.style_bible.services.text_splitter import StyleTextSplitter +from domain.style_bible.entities import ( + StyleProfile, + StyleSample, + StyleTechniqueCard, +) +from domain.style_bible.repositories import StyleBibleRepository + + +LlmExtractor = Callable[[list[StyleSample], dict[str, Any], str], dict[str, Any]] +logger = logging.getLogger(__name__) + + +class StyleProfileService: + """协调样本导入、指标分析和风格档案生成。""" + + def __init__( + self, + 
class StyleProfileService:
    """Coordinates sample import, metric analysis and style-profile generation."""

    def __init__(
        self,
        repository: StyleBibleRepository,
        splitter: Optional[StyleTextSplitter] = None,
        analyzer: Optional[StyleMetricAnalyzer] = None,
        llm_extractor: Optional[LlmExtractor] = None,
    ):
        # Collaborators default to local deterministic implementations so the
        # service works even without an LLM backend.
        self.repository = repository
        self.splitter = splitter or StyleTextSplitter()
        self.analyzer = analyzer or StyleMetricAnalyzer()
        self.llm_extractor = llm_extractor

    def import_sample(
        self,
        request: StyleSampleImportRequestDTO,
    ) -> StyleSampleImportResultDTO:
        """Persist a reference sample, analyze its chunks and optionally derive
        a style profile from it in one step."""
        sample = StyleSample(
            title=request.title,
            content=request.content,
            source_type=request.source_type,
            genre=request.genre,
            scene_type=request.scene_type,
            pov=request.pov,
            allowed_for_generation=request.allowed_for_generation,
            novel_id=request.novel_id,
            profile_id=request.profile_id,
        )
        chunks = self.splitter.split(sample.id, sample.content)
        for chunk in chunks:
            chunk.metrics = self.analyzer.analyze(chunk.content)

        saved_sample = self.repository.save_sample(sample, chunks)

        profile_result: StyleProfileGenerateResultDTO | None = None
        if request.create_profile:
            # Metrics-only profile (no LLM pass) seeded from this one sample.
            generate_request = StyleProfileGenerateRequestDTO(
                novel_id=request.novel_id,
                name=request.profile_name or request.title,
                sample_ids=[saved_sample.id],
                use_llm=False,
            )
            profile_result = self.generate_profile_from_samples(generate_request)

        has_profile = profile_result is not None
        return StyleSampleImportResultDTO(
            sample=self._sample_to_dto(saved_sample),
            chunks=[self._chunk_to_dto(chunk) for chunk in chunks],
            profile=profile_result.profile if has_profile else None,
            cards=profile_result.cards if has_profile else [],
        )

    def generate_profile_from_samples(
        self,
        request: StyleProfileGenerateRequestDTO,
    ) -> StyleProfileGenerateResultDTO:
        """Aggregate sample metrics into a persisted profile plus technique cards.

        Prefers the LLM-extracted payload when available; otherwise falls back
        to deterministic, metrics-driven defaults.
        """
        samples = self._resolve_samples(request)
        per_sample_metrics = [self.analyzer.analyze(sample.content) for sample in samples]
        metrics = self.analyzer.aggregate(per_sample_metrics)

        payload = self._extract_llm_payload(request, samples, metrics)
        if not payload:
            summary = self._fallback_summary(metrics)
            rhythm_rules = self._fallback_rules(metrics)
            forbidden_patterns = self._fallback_forbidden_patterns(metrics)
            cards = self._fallback_cards("", metrics, samples)
        else:
            summary = payload["profile_summary"]
            rhythm_rules = payload["rhythm_rules"]
            forbidden_patterns = payload["forbidden_patterns"]
            cards = self._cards_from_payload("", payload["technique_cards"])

        profile = StyleProfile(
            name=request.name,
            description=request.description or summary,
            novel_id=request.novel_id,
            profile={
                "summary": summary,
                "source_sample_ids": [sample.id for sample in samples],
            },
            metrics=metrics,
            rules=rhythm_rules,
            forbidden_patterns=forbidden_patterns,
        )
        saved_profile = self.repository.save_profile(profile)

        # Re-home the provisional cards onto the saved profile's id.
        rebound_cards = [
            StyleTechniqueCard(
                id=card.id,
                profile_id=saved_profile.id,
                title=card.title,
                category=card.category,
                scene_type=card.scene_type,
                rule_text=card.rule_text,
                example_summary=card.example_summary,
                prompt_instruction=card.prompt_instruction,
                enabled=card.enabled,
                weight=card.weight,
                created_at=card.created_at,
                updated_at=card.updated_at,
            )
            for card in cards
        ]
        saved_cards = self.repository.save_technique_cards(saved_profile.id, rebound_cards)
        return StyleProfileGenerateResultDTO(
            profile=self._profile_to_dto(saved_profile),
            cards=[self._card_to_dto(card) for card in saved_cards],
        )

    def match_text(
        self,
        profile_id: str,
        content: str,
        novel_id: str = "",
    ) -> StyleProfileMatchReportDTO:
        """Score how well *content* matches the given style profile.

        Raises ValueError when the profile is missing; a profile owned by a
        different novel is treated the same as a missing one.
        """
        profile = self.repository.get_profile(profile_id)
        if profile is None:
            raise ValueError("style profile not found")
        if novel_id and profile.novel_id and profile.novel_id != novel_id:
            raise ValueError("style profile not found")

        report = self.analyzer.match_profile(
            content,
            profile.metrics,
            profile.forbidden_patterns,
        )
        return StyleProfileMatchReportDTO(
            profile_id=profile.id,
            score=float(report["score"]),
            metrics=report["metrics"],
            issues=report["issues"],
        )

    def normalize_llm_profile_payload(self, payload: dict[str, Any]) -> dict[str, Any]:
        """Coerce a raw LLM payload into the canonical profile-payload shape.

        Cards missing any of title / rule_text / prompt_instruction are
        dropped; a card's category defaults to "pacing".
        """
        normalized_cards: list[dict[str, str]] = []
        for item in self._as_list(payload.get("technique_cards")):
            if not isinstance(item, dict):
                continue
            card = {
                "title": self._as_text(item.get("title")),
                "category": self._as_text(item.get("category")) or "pacing",
                "scene_type": self._as_text(item.get("scene_type")),
                "rule_text": self._as_text(item.get("rule_text")),
                "example_summary": self._as_text(item.get("example_summary")),
                "prompt_instruction": self._as_text(item.get("prompt_instruction")),
            }
            if card["title"] and card["rule_text"] and card["prompt_instruction"]:
                normalized_cards.append(card)

        rhythm_rules: list[str] = []
        for item in self._as_list(payload.get("rhythm_rules")):
            text = self._as_text(item)
            if text:
                rhythm_rules.append(text)

        forbidden_patterns: list[str] = []
        for item in self._as_list(payload.get("forbidden_patterns")):
            text = self._as_text(item)
            if text:
                forbidden_patterns.append(text)

        return {
            "profile_summary": self._as_text(payload.get("profile_summary")),
            "rhythm_rules": rhythm_rules,
            "forbidden_patterns": forbidden_patterns,
            "technique_cards": normalized_cards,
        }

    def _resolve_samples(
        self,
        request: StyleProfileGenerateRequestDTO,
    ) -> list[StyleSample]:
        """Look up requested samples, falling back to every sample of the novel."""
        samples = [
            sample
            for sample in (
                self.repository.get_sample(sample_id)
                for sample_id in request.sample_ids
            )
            if sample is not None
        ]
        if not samples and request.novel_id:
            samples = self.repository.list_samples(novel_id=request.novel_id)
        if not samples:
            raise ValueError("No style samples available for profile generation")
        return samples

    def _extract_llm_payload(
        self,
        request: StyleProfileGenerateRequestDTO,
        samples: list[StyleSample],
        metrics: dict[str, Any],
    ) -> dict[str, Any] | None:
        """Run the optional LLM extractor.

        Returns None on any failure or incomplete payload so the caller falls
        back to deterministic defaults.
        """
        if not request.use_llm or self.llm_extractor is None:
            return None
        try:
            payload = self.llm_extractor(samples, metrics, request.llm_profile_id)
        except Exception as exc:
            logger.warning("Style Bible LLM extraction failed: %s", exc, exc_info=True)
            return None
        if not isinstance(payload, dict):
            logger.warning(
                "Style Bible LLM extraction returned non-object payload: %s",
                type(payload).__name__,
            )
            return None
        normalized = self.normalize_llm_profile_payload(payload)
        if normalized["profile_summary"] and normalized["technique_cards"]:
            return normalized
        logger.warning(
            "Style Bible LLM extraction payload incomplete: keys=%s summary=%s cards=%s",
            sorted(payload.keys()),
            bool(normalized["profile_summary"]),
            len(normalized["technique_cards"]),
        )
        return None

    def _fallback_summary(self, metrics: dict[str, Any]) -> str:
        """Deterministic one-line profile summary built from metrics."""
        sentence_len = metrics.get("avg_sentence_length") or 0
        dialogue_ratio = metrics.get("dialogue_ratio") or 0
        return (
            f"平均句长约 {sentence_len:.1f} 字,对白占比约 {dialogue_ratio:.0%},"
            "以可执行的节奏和动作细节学习样本写法。"
        )

    def _fallback_rules(self, metrics: dict[str, Any]) -> list[str]:
        """Rhythm rules derived from the averaged sentence/paragraph lengths."""
        avg_paragraph = int(metrics.get("avg_paragraph_length") or 0)
        avg_sentence = int(metrics.get("avg_sentence_length") or 0)
        rules = [
            f"平均句长控制在 {max(8, avg_sentence - 4)}-{max(12, avg_sentence + 4)} 字附近",
            f"段落以 {max(40, avg_paragraph - 80)}-{max(80, avg_paragraph + 80)} 字为主",
            "每 600-900 字至少出现一次信息、关系或目标变化",
        ]
        if metrics.get("dialogue_ratio", 0) > 0:
            rules.append("对白必须承担试探、冲突或信息推进,不写空泛寒暄")
        return rules

    def _fallback_forbidden_patterns(self, metrics: dict[str, Any]) -> list[str]:
        """Detected cliché hits first, then built-in defaults, de-duplicated."""
        detected = [str(item) for item in metrics.get("cliche_patterns") or []]
        defaults = ["五味杂陈", "眼中闪过一丝复杂", "空气仿佛凝固"]
        result: list[str] = []
        for item in detected + defaults:
            if item and item not in result:
                result.append(item)
        return result

    def _fallback_cards(
        self,
        profile_id: str,
        metrics: dict[str, Any],
        samples: list[StyleSample],
    ) -> list[StyleTechniqueCard]:
        """Deterministic default technique cards when no LLM payload exists."""
        sample_scene_type = next(
            (sample.scene_type for sample in samples if sample.scene_type), ""
        )
        owner = profile_id or "pending"
        return [
            StyleTechniqueCard(
                profile_id=owner,
                title="节奏推进",
                category="pacing",
                scene_type=sample_scene_type,
                rule_text="用短目标、动作和信息变化推动段落。",
                example_summary="从样本句长和段落长度提取节奏边界。",
                prompt_instruction="每 600-900 字安排一次信息、关系或目标变化,避免连续解释。",
                weight=1.0,
            ),
            StyleTechniqueCard(
                profile_id=owner,
                title="对白试探",
                category="dialogue",
                scene_type=sample_scene_type,
                rule_text="对白必须带有试探、反问、隐瞒或信息交换。",
                example_summary="根据样本对白占比生成对白约束。",
                prompt_instruction="对白不要只表达态度,每两轮对白释放一个新信息或改变关系压力。",
                weight=0.9 if metrics.get("dialogue_ratio", 0) > 0 else 0.55,
            ),
            StyleTechniqueCard(
                profile_id=owner,
                title="去AI味禁用",
                category="anti_ai",
                scene_type="",
                rule_text="禁用总结式抒情和常见套话。",
                example_summary="结合样本或系统俗套扫描器形成禁用项。",
                prompt_instruction="不要写五味杂陈、眼神复杂、空气凝固等套话;情绪必须落到动作、选择和对白上。",
                weight=1.0 if metrics.get("cliche_hit_count", 0) > 0 else 0.7,
            ),
            StyleTechniqueCard(
                profile_id=owner,
                title="章尾钩子",
                category="hook",
                scene_type=sample_scene_type,
                rule_text="章尾保留未解信息或关系压力。",
                example_summary="根据样本末句疑问、转折或新信息形成钩子规则。",
                prompt_instruction="结尾保留一个未确认事实、异常细节或关系压力,不要用总结收束。",
                weight=0.85,
            ),
        ]

    def _cards_from_payload(
        self,
        profile_id: str,
        cards_payload: list[dict[str, str]],
    ) -> list[StyleTechniqueCard]:
        """Materialize technique-card entities from a normalized payload."""
        owner = profile_id or "pending"
        return [
            StyleTechniqueCard(
                profile_id=owner,
                title=item["title"],
                category=item["category"],
                scene_type=item["scene_type"],
                rule_text=item["rule_text"],
                example_summary=item["example_summary"],
                prompt_instruction=item["prompt_instruction"],
            )
            for item in cards_payload
        ]

    @staticmethod
    def _as_text(value: Any) -> str:
        """Best-effort conversion of arbitrary LLM output values to a string."""
        if value is None:
            return ""
        if isinstance(value, str):
            return value.strip()
        if isinstance(value, dict):
            parts = (str(item).strip() for item in value.values())
            return ";".join(part for part in parts if part)
        if isinstance(value, (list, tuple, set)):
            parts = (str(item).strip() for item in value)
            return ";".join(part for part in parts if part)
        return str(value).strip()

    @staticmethod
    def _as_list(value: Any) -> list[Any]:
        """Normalize arbitrary payload values into a plain list."""
        if value is None:
            return []
        if isinstance(value, list):
            return value
        if isinstance(value, (tuple, set)):
            return list(value)
        if isinstance(value, dict):
            return list(value.values())
        if isinstance(value, str):
            text = value.strip()
            return [text] if text else []
        return [value]

    @staticmethod
    def _sample_to_dto(sample: StyleSample) -> StyleSampleDTO:
        """Map a StyleSample entity onto its transport DTO."""
        return StyleSampleDTO(
            id=sample.id,
            title=sample.title,
            content=sample.content,
            source_type=sample.source_type,
            genre=sample.genre,
            scene_type=sample.scene_type,
            pov=sample.pov,
            allowed_for_generation=sample.allowed_for_generation,
            novel_id=sample.novel_id,
            profile_id=sample.profile_id,
            content_hash=sample.content_hash,
            char_count=sample.char_count,
        )

    @staticmethod
    def _chunk_to_dto(chunk) -> StyleChunkDTO:
        """Map a sample chunk entity onto its transport DTO."""
        return StyleChunkDTO(
            id=chunk.id,
            sample_id=chunk.sample_id,
            chunk_type=chunk.chunk_type,
            sequence=chunk.sequence,
            chapter_number=chunk.chapter_number,
            title=chunk.title,
            content=chunk.content,
            char_count=chunk.char_count,
            metrics=chunk.metrics,
        )

    @staticmethod
    def _profile_to_dto(profile: StyleProfile) -> StyleProfileDTO:
        """Map a StyleProfile entity onto its transport DTO."""
        return StyleProfileDTO(
            id=profile.id,
            name=profile.name,
            description=profile.description,
            status=profile.status,
            novel_id=profile.novel_id,
            profile=profile.profile,
            metrics=profile.metrics,
            rules=profile.rules,
            forbidden_patterns=profile.forbidden_patterns,
            version=profile.version,
        )

    @staticmethod
    def _card_to_dto(card: StyleTechniqueCard) -> StyleTechniqueCardDTO:
        """Map a StyleTechniqueCard entity onto its transport DTO."""
        return StyleTechniqueCardDTO(
            id=card.id,
            profile_id=card.profile_id,
            title=card.title,
            category=card.category,
            scene_type=card.scene_type,
            rule_text=card.rule_text,
            example_summary=card.example_summary,
            prompt_instruction=card.prompt_instruction,
            enabled=card.enabled,
            weight=card.weight,
        )
class StylePromptOverlayService:
    """Compress a style profile into a prompt fragment for chapter generation."""

    def __init__(self, repository: StyleBibleRepository):
        self.repository = repository

    def build_overlay(
        self,
        novel_id: str,
        style_profile_id: str,
        *,
        scene_type: str = "",
        max_cards: int = 6,
    ) -> StylePromptOverlayDTO:
        """Build the prompt overlay for *style_profile_id*.

        Returns an empty overlay (prompt="") when the profile id is blank,
        the profile is missing or inactive, or it belongs to another novel.
        """
        profile_id = (style_profile_id or "").strip()
        if not profile_id:
            return StylePromptOverlayDTO(prompt="")

        profile = self.repository.get_profile(profile_id)
        if profile is None or profile.status != "active":
            return StylePromptOverlayDTO(prompt="")
        if novel_id and profile.novel_id and profile.novel_id != novel_id:
            return StylePromptOverlayDTO(prompt="")

        cards = self._rank_cards(
            self.repository.list_technique_cards(profile.id, enabled=True),
            scene_type,
        )[: max(1, int(max_cards or 6))]
        prompt = self._render_prompt(profile, cards)
        return StylePromptOverlayDTO(
            prompt=prompt,
            profile_id=profile.id,
            profile_name=profile.name,
            card_ids=[card.id for card in cards],
        )

    def _render_prompt(
        self,
        profile: StyleProfile,
        cards: list[StyleTechniqueCard],
    ) -> str:
        """Render the overlay text: rhythm constraints, technique cards,
        optional style anchors, forbidden patterns and execution rules."""
        metrics = profile.metrics or {}
        sentence_len = metrics.get("avg_sentence_length")
        paragraph_len = metrics.get("avg_paragraph_length")
        rhythm_lines: list[str] = []
        if sentence_len:
            rhythm_lines.append(f"- 平均句长靠近 {float(sentence_len):.1f} 字,关键动作可短句单独成段")
        if paragraph_len:
            rhythm_lines.append(f"- 段落以 {int(float(paragraph_len))} 字附近为主,避免连续长段解释")
        for rule in profile.rules[:3]:
            rhythm_lines.append(f"- {rule}")
        if not rhythm_lines:
            rhythm_lines.append("- 用动作、对白和信息变化推动节奏,避免解释性空转")

        card_lines = [
            f"- {card.prompt_instruction}"
            for card in cards
            if card.prompt_instruction
        ]
        if not card_lines:
            card_lines.append("- 每个场景至少产生一次信息、关系或目标变化")

        forbidden_lines = [
            f"- {item}" for item in profile.forbidden_patterns[:6] if str(item).strip()
        ]
        if not forbidden_lines:
            forbidden_lines.append("- 总结式抒情、空泛心理、套路化氛围句")
        anchor_lines = self._collect_style_anchor_lines(profile)

        blocks = [
            "【写作手法库】",
            f"使用风格包:{profile.name}",
            "",
            "节奏约束:",
            *rhythm_lines,
            "",
            "技法卡:",
            *card_lines,
        ]
        if anchor_lines:
            blocks.extend(
                [
                    "",
                    "风格锚点(检索,不可复刻原句):",
                    *anchor_lines,
                ]
            )
        blocks.extend(
            [
                "",
                "禁用项:",
                *forbidden_lines,
                "",
                "执行要求:",
                "- 只学习写法和节奏,不复刻样本文字、角色、设定或专有表达。",
                "- 风格锚点只能学“句法动作与节奏手法”,不得抄词复用。",
                "- 本章必须服从当前小说 Bible、章节大纲和连续性约束。",
            ]
        )
        return "\n".join(blocks)

    def _collect_style_anchor_lines(self, profile: StyleProfile) -> list[str]:
        """Pick up to ``anchor_max`` short anchor lines from the profile's
        source samples (only samples flagged allowed_for_generation)."""
        raw_max = (profile.profile or {}).get("anchor_max")
        try:
            max_anchors = int(raw_max) if raw_max is not None else 6
        except (TypeError, ValueError):
            max_anchors = 6
        # Clamp to [0, 10]; zero disables anchors entirely.
        max_anchors = max(0, min(10, max_anchors))
        if max_anchors <= 0:
            return []

        sample_ids: list[str] = []
        for item in (profile.profile or {}).get("source_sample_ids", []) or []:
            sid = str(item or "").strip()
            if sid and sid not in sample_ids:
                sample_ids.append(sid)
        if not sample_ids:
            return []

        anchors: list[str] = []
        seen_norms: set[str] = set()
        for sample_id in sample_ids:
            sample = self.repository.get_sample(sample_id)
            if sample is None:
                continue
            if not bool(getattr(sample, "allowed_for_generation", False)):
                continue
            for line in self._extract_anchor_candidates(sample.content):
                # De-duplicate on a whitespace-stripped normal form.
                norm = re.sub(r"\s+", "", line)
                if norm in seen_norms:
                    continue
                seen_norms.add(norm)
                anchors.append(f"- {line}")
                if len(anchors) >= max_anchors:
                    return anchors
        return anchors

    @staticmethod
    def _extract_anchor_candidates(text: str) -> list[str]:
        """Extract short, high-signal anchor lines from sample text."""
        source = str(text or "").strip()
        if not source:
            return []
        # Prefer dialogue and short action sentences over summary passages.
        raw = re.split(r"[\n\r。!?!?;;]", source)
        # FIX: this list holds (score, line) tuples, not bare strings —
        # the annotation previously (and wrongly) said list[str].
        candidates: list[tuple[int, str]] = []
        for chunk in raw:
            line = chunk.strip(" \t\"'“”‘’")
            if not line:
                continue
            length = len(line)
            if length < 14 or length > 56:
                continue
            score = 0
            # Quoted speech / a speaker colon strongly suggests dialogue.
            if "“" in chunk or "”" in chunk or ":" in chunk:
                score += 3
            # Concrete physical action verbs.
            if re.search(r"(抬|看|停|停住|拧|攥|贴|压|拽|敲|咬|盯|笑|喘|退|靠|转|转身|沉默|没说话)", line):
                score += 2
            # Summary / connective phrasing is penalized.
            if re.search(r"(于是|然后|最终|总之|事实上|可以看出|显然)", line):
                score -= 2
            if score <= 0:
                continue
            candidates.append((score, line))
        candidates.sort(key=lambda item: (-item[0], len(item[1])))
        return [line for _, line in candidates[:16]]

    @staticmethod
    def _rank_cards(
        cards: list[StyleTechniqueCard],
        scene_type: Optional[str],
    ) -> list[StyleTechniqueCard]:
        """Sort cards: scene matches first, then by weight, then title."""
        wanted_scene = (scene_type or "").strip()

        def key(card: StyleTechniqueCard) -> tuple[int, float, str]:
            scene_match = 1 if wanted_scene and card.scene_type == wanted_scene else 0
            return (scene_match, card.weight, card.title)

        return sorted(cards, key=key, reverse=True)
class StyleTextSplitter:
    """Split reference text into chapter, scene and paragraph chunks."""

    # Matches a whole heading line such as “第十二章 标题”.
    CHAPTER_HEADING_RE = re.compile(
        r"^\s*(第(?P<num>[一二三四五六七八九十百千万零〇两0-9]+)章[^\n]*)\s*$"
    )
    BLANK_LINE_RE = re.compile(r"\n\s*\n+")
    # Sentence boundary used by _split_long_paragraph, compiled once.
    # FIX: the trailing branch is `.+` instead of `.+$` — `.` never matches
    # "\n" and `$` (without re.M) only anchors at the end of the string, so
    # with `.+$` an unpunctuated segment followed by a newline was silently
    # dropped from the result.
    SENTENCE_SPLIT_RE = re.compile(r".+?[。!?!?]|.+")

    def __init__(
        self,
        scene_min_chars: int = 1200,
        scene_max_chars: int = 2500,
        long_paragraph_chars: int = 900,
    ):
        # Clamp to sane minimums; scene_max can never fall below scene_min.
        self.scene_min_chars = max(1, int(scene_min_chars or 1200))
        self.scene_max_chars = max(self.scene_min_chars, int(scene_max_chars or 2500))
        self.long_paragraph_chars = max(100, int(long_paragraph_chars or 900))

    def split(self, sample_id: str, text: str) -> list[StyleSampleChunk]:
        """Split *text* into chapter / paragraph / scene chunks for *sample_id*.

        Chunks share one monotonically increasing sequence across all types.
        Returns an empty list for blank input.
        """
        content = (text or "").strip()
        if not content:
            return []

        chunks: list[StyleSampleChunk] = []
        sequence = 1
        for chapter_number, title, chapter_text in self._split_chapters(content):
            chapter = StyleSampleChunk(
                sample_id=sample_id,
                chunk_type="chapter",
                sequence=sequence,
                chapter_number=chapter_number,
                title=title,
                content=chapter_text,
            )
            chunks.append(chapter)
            sequence += 1

            paragraphs = self._split_paragraphs(chapter_text)
            for paragraph in paragraphs:
                chunks.append(
                    StyleSampleChunk(
                        sample_id=sample_id,
                        chunk_type="paragraph",
                        sequence=sequence,
                        chapter_number=chapter_number,
                        content=paragraph,
                    )
                )
                sequence += 1

            for scene in self._split_scenes(paragraphs):
                chunks.append(
                    StyleSampleChunk(
                        sample_id=sample_id,
                        chunk_type="scene",
                        sequence=sequence,
                        chapter_number=chapter_number,
                        content=scene,
                    )
                )
                sequence += 1

        return chunks

    def _split_chapters(self, text: str) -> list[tuple[int, str, str]]:
        """Partition *text* on chapter headings.

        Returns (chapter_number, title, body) triples; without headings the
        whole text becomes a single chapter titled "全文".
        """
        lines = text.splitlines()
        headings: list[tuple[int, int, str]] = []
        for index, line in enumerate(lines):
            match = self.CHAPTER_HEADING_RE.match(line)
            if match:
                headings.append(
                    (index, self._parse_chapter_number(match.group("num")), match.group(1).strip())
                )

        if not headings:
            return [(1, "全文", text)]

        chapters: list[tuple[int, str, str]] = []
        for item_index, (line_index, chapter_number, title) in enumerate(headings):
            next_line_index = (
                headings[item_index + 1][0] if item_index + 1 < len(headings) else len(lines)
            )
            chapter_text = "\n".join(lines[line_index + 1 : next_line_index]).strip()
            if chapter_text:
                chapters.append((chapter_number, title, chapter_text))
        return chapters or [(1, "全文", text)]

    def _split_paragraphs(self, text: str) -> list[str]:
        """Split on blank lines; over-long paragraphs are split on sentences."""
        paragraphs = [
            paragraph.strip()
            for paragraph in self.BLANK_LINE_RE.split(text.strip())
            if paragraph.strip()
        ]
        result: list[str] = []
        for paragraph in paragraphs or [text.strip()]:
            if len(paragraph) <= self.long_paragraph_chars:
                result.append(paragraph)
                continue
            result.extend(self._split_long_paragraph(paragraph))
        return result

    def _split_long_paragraph(self, paragraph: str) -> list[str]:
        """Break an over-long paragraph into sentence-aligned pieces, each at
        most roughly long_paragraph_chars characters."""
        sentences = [
            item.strip()
            for item in self.SENTENCE_SPLIT_RE.findall(paragraph)
            if item.strip()
        ]
        result: list[str] = []
        buffer = ""
        for sentence in sentences:
            if buffer and len(buffer) + len(sentence) > self.long_paragraph_chars:
                result.append(buffer)
                buffer = sentence
            else:
                buffer = f"{buffer}{sentence}" if buffer else sentence
        if buffer:
            result.append(buffer)
        return result

    def _split_scenes(self, paragraphs: list[str]) -> list[str]:
        """Group paragraphs into scenes of scene_min_chars..scene_max_chars."""
        scenes: list[str] = []
        buffer: list[str] = []
        buffer_chars = 0
        for paragraph in paragraphs:
            paragraph_len = len(paragraph)
            # Flush only once the buffer reaches the minimum and adding the
            # next paragraph would overflow the maximum.
            should_flush = (
                buffer
                and buffer_chars >= self.scene_min_chars
                and buffer_chars + paragraph_len > self.scene_max_chars
            )
            if should_flush:
                scenes.append("\n\n".join(buffer))
                buffer = []
                buffer_chars = 0
            buffer.append(paragraph)
            buffer_chars += paragraph_len
        if buffer:
            scenes.append("\n\n".join(buffer))
        return scenes

    @classmethod
    def _parse_chapter_number(cls, value: str) -> int:
        """Parse an Arabic or Chinese chapter number; never below 1."""
        text = (value or "").strip()
        if text.isdigit():
            return max(1, int(text))
        return max(1, cls._parse_chinese_number(text))

    @staticmethod
    def _parse_chinese_number(text: str) -> int:
        """Convert a Chinese numeral (e.g. 一百二十三, 两万) to an int."""
        digits = {"零": 0, "〇": 0, "一": 1, "二": 2, "两": 2, "三": 3, "四": 4,
                  "五": 5, "六": 6, "七": 7, "八": 8, "九": 9}
        units = {"十": 10, "百": 100, "千": 1000, "万": 10000}
        total = 0
        section = 0
        number = 0
        for char in text:
            if char in digits:
                number = digits[char]
            elif char in units:
                unit = units[char]
                if unit == 10000:
                    # 万 closes the current section and scales it.
                    section = (section + number) * unit
                    total += section
                    section = 0
                else:
                    # A bare unit (e.g. 十) counts as one of that unit.
                    section += (number or 1) * unit
                number = 0
        return total + section + number
@dataclass
class TopicGenerateRequestDTO:
    """Request payload for topic-idea generation."""

    brief: str = ""
    genre: str = ""
    world_preset: str = ""
    length_tier: str = ""
    keywords: list[str] = field(default_factory=list)
    desired_selling_points: list[str] = field(default_factory=list)
    avoid_patterns: list[str] = field(default_factory=list)
    market_signals: list[dict[str, Any]] = field(default_factory=list)
    count: int = 3

    def normalized_count(self) -> int:
        """Clamp the requested candidate count into the supported 3..5 range."""
        requested = int(self.count or 3)
        return max(3, min(requested, 5))

    def to_source_brief(self) -> dict[str, Any]:
        """Snapshot this request as a plain dict (stored with each idea)."""
        return asdict(self)


@dataclass
class TopicIdeaDTO:
    """Transport DTO for one topic-idea candidate."""

    id: str
    title: str
    status: str
    genre: str = ""
    world_preset: str = ""
    length_tier: str = ""
    logline: str = ""
    premise: str = ""
    protagonist_hook: str = ""
    core_conflict: str = ""
    opening_hook: str = ""
    selling_points: list[str] = field(default_factory=list)
    long_term_potential: str = ""
    risk_notes: list[str] = field(default_factory=list)
    market_tags: list[str] = field(default_factory=list)
    score: int = 0
    adopted_novel_id: Optional[str] = None
    source_brief: dict[str, Any] = field(default_factory=dict)
    development_notes: dict[str, Any] = field(default_factory=dict)
    evaluation: dict[str, Any] = field(default_factory=dict)
    created_at: str = ""
    updated_at: str = ""

    @classmethod
    def from_domain(cls, idea: TopicIdea) -> "TopicIdeaDTO":
        """Map a domain TopicIdea entity onto this DTO."""
        status = idea.status.value if hasattr(idea.status, "value") else str(idea.status)

        def _iso(value: Any) -> str:
            # datetimes become ISO-8601 strings; anything else is stringified,
            # with falsy values collapsing to "".
            if hasattr(value, "isoformat"):
                return value.isoformat()
            return str(value or "")

        return cls(
            id=idea.id,
            title=idea.title,
            status=status,
            genre=idea.genre,
            world_preset=idea.world_preset,
            length_tier=idea.length_tier,
            logline=idea.logline,
            premise=idea.premise,
            protagonist_hook=idea.protagonist_hook,
            core_conflict=idea.core_conflict,
            opening_hook=idea.opening_hook,
            selling_points=list(idea.selling_points),
            long_term_potential=idea.long_term_potential,
            risk_notes=list(idea.risk_notes),
            market_tags=list(idea.market_tags),
            score=idea.score,
            adopted_novel_id=idea.adopted_novel_id,
            source_brief=idea.source_brief,
            development_notes=idea.development_notes,
            evaluation=idea.evaluation,
            created_at=_iso(idea.created_at),
            updated_at=_iso(idea.updated_at),
        )


@dataclass
class CompareTopicIdeasRequestDTO:
    """Request to compare a set of topic ideas."""

    topic_ids: list[str] = field(default_factory=list)


@dataclass
class TopicIdeaRankingDTO:
    """One ranked entry in a topic-comparison result."""

    topic_id: str
    title: str
    score: int
    reason: str
    risks: list[str] = field(default_factory=list)


@dataclass
class TopicIdeaCompareResultDTO:
    """Result of comparing several topic ideas."""

    recommended_topic_id: str
    summary: str
    rankings: list[TopicIdeaRankingDTO] = field(default_factory=list)


@dataclass
class TopicMarketSignalImportRequestDTO:
    """Request to import market observations from pasted text."""

    raw_text: str = ""
    source: str = "手动观察"


@dataclass
class TopicMarketSignalCollectRequestDTO:
    """Request to collect signals from public sources."""

    source_keys: list[str] = field(default_factory=list)
    limit_per_source: int = 10


@dataclass
class TopicMarketSignalSourceConnectionDTO:
    """Connectivity-check result for one market-signal source."""

    source_key: str
    source_name: str
    ok: bool = False
    count: int = 0
    message: str = ""
    sample_titles: list[str] = field(default_factory=list)


@dataclass
class TopicMarketSignalSourceHealthDTO:
    """Collection health status for one market-signal source."""

    source_key: str
    source_name: str
    status: str = "unknown"
    last_run_at: str = ""
    last_success_at: str = ""
    last_count: int = 0
    last_error: str = ""
    next_run_at: str = ""


@dataclass
class TopicMarketSignalSourceDTO:
    """Configuration of a market-signal source."""

    key: str
    name: str
    url: str
    category: str = "novel"
    source_type: str = "public_page"
    requires_auth: bool = False
    rank_urls: dict[str, str] = field(default_factory=dict)


@dataclass
class TopicMarketSignalDTO:
    """One collected or imported market signal."""

    id: str
    source: str
    title: str = ""
    genre: str = ""
    tags: list[str] = field(default_factory=list)
    summary: str = ""
    raw_text: str = ""
    created_at: str = ""


@dataclass
class TopicMarketSignalSummaryDTO:
    """Aggregated view over recent market signals."""

    total: int = 0
    source_counts: dict[str, int] = field(default_factory=dict)
    genre_counts: dict[str, int] = field(default_factory=dict)
    tag_counts: dict[str, int] = field(default_factory=dict)
    category_counts: dict[str, int] = field(default_factory=dict)
    window_days: int = 30
    weighted_source_scores: dict[str, float] = field(default_factory=dict)
    weighted_genre_scores: dict[str, float] = field(default_factory=dict)
    weighted_tag_scores: dict[str, float] = field(default_factory=dict)
    comic_opportunities: list[str] = field(default_factory=list)
    daily_counts: list[dict[str, Any]] = field(default_factory=list)
    recent_samples: list[TopicMarketSignalDTO] = field(default_factory=list)


@dataclass
class TopicMarketSignalAutomationSettingsDTO:
    """Settings for automated market-signal collection."""

    enabled: bool = False
    interval_minutes: int = 180
    limit_per_source: int = 8
    lookback_days: int = 30
    source_weights: dict[str, float] = field(default_factory=dict)
    selected_source_keys: list[str] = field(default_factory=list)
    last_run_at: str = ""
    last_status: str = "idle"
    last_error: str = ""
    updated_at: str = ""


@dataclass
class TopicMarketSignalSourceCredentialDTO:
    """Source credentials; plaintext values live server-side only."""

    source_key: str
    api_key: str = ""
    cookie: str = ""
    endpoint_url: str = ""
    headers: dict[str, str] = field(default_factory=dict)
    updated_at: str = ""


@dataclass
class TopicMarketSignalSourceCredentialStatusDTO:
    """Redacted credential status for one source (no secret values)."""

    source_key: str
    api_key_configured: bool = False
    cookie_configured: bool = False
    endpoint_configured: bool = False
    header_keys: list[str] = field(default_factory=list)
    updated_at: str = ""
logger = logging.getLogger(__name__)

# Fields an enrichment pass may overwrite on an idea.
ENRICHMENT_FIELDS = {
    "premise",
    "protagonist_hook",
    "core_conflict",
    "opening_hook",
    "selling_points",
    "long_term_potential",
    "risk_notes",
    "market_tags",
    "score",
    "development_notes",
    "evaluation",
}
# Subsets of ENRICHMENT_FIELDS grouped by expected value shape.
TEXT_ENRICHMENT_FIELDS = {
    "premise",
    "protagonist_hook",
    "core_conflict",
    "opening_hook",
    "long_term_potential",
}
LIST_ENRICHMENT_FIELDS = {"selling_points", "risk_notes", "market_tags"}
DICT_ENRICHMENT_FIELDS = {"development_notes", "evaluation"}


class TopicIdeaGenerationError(RuntimeError):
    """Raised when the LLM topic-generation call fails."""


class TopicIdeaService:
    """Core use-cases of the topic ideation pool."""

    def __init__(
        self,
        repository: TopicIdeaRepository,
        llm_service: Optional[LLMService] = None,
        novel_service: Optional[NovelService] = None,
        fetch_text: Any = None,
    ):
        self._repository = repository
        self._llm = llm_service
        self._novel_service = novel_service
        # Network access is injectable for tests; defaults to a urllib fetch.
        self._fetch_text = fetch_text or self._fetch_url_text
        self._collectors = build_market_signal_collectors()

    async def generate(self, request: TopicGenerateRequestDTO) -> list[TopicIdeaDTO]:
        """Generate topic candidates; local fallbacks pad any LLM shortfall."""
        count = request.normalized_count()
        if self._llm:
            raw_items = await self._generate_with_llm(request)
        else:
            raw_items = []
        ideas = self._build_ideas(raw_items, request, count)
        for idea in ideas:
            self._repository.save(idea)
        return [TopicIdeaDTO.from_domain(idea) for idea in ideas]

    def import_market_signals(
        self,
        request: TopicMarketSignalImportRequestDTO,
    ) -> list[TopicMarketSignalDTO]:
        """Import market-observation signals from pasted text, one per line."""
        signals: list[TopicMarketSignalDTO] = []
        for line in (request.raw_text or "").splitlines():
            if line.strip():
                signals.append(self._market_signal_from_line(line, request.source))
        if not signals:
            raise ValueError("No market signal text provided")
        self._repository.save_market_signals(signals)
        return signals

    def collect_market_signals(
        self,
        request: TopicMarketSignalCollectRequestDTO,
    ) -> list[TopicMarketSignalDTO]:
        """Manually trigger collection of market signals from public sources."""
        source_keys = request.source_keys or list(MARKET_SIGNAL_SOURCES)
        # Clamp the per-source limit into 1..30.
        limit = min(max(int(request.limit_per_source or 10), 1), 30)
        credentials_by_source = self._market_signal_credentials_by_source()
        signals: list[TopicMarketSignalDTO] = []
        for source_key in source_keys:
            source = MARKET_SIGNAL_SOURCES.get(str(source_key).strip())
            if source is None:
                continue
            credentials = credentials_by_source.get(source.key)
            collected = collect_market_signals_from_source(
                source=self._source_with_credentials(source, credentials),
                fetch_text=self._fetch_text,
                limit=limit,
                collectors=self._collectors,
                credentials=credentials,
            )
            signals.extend(collected)
            # Health is recorded per source, even when nothing was collected.
            self._record_market_signal_source_health(source, collected)
        if not signals:
            raise ValueError("No market signals collected")
        self._repository.save_market_signals(signals)
        return signals

    def test_market_signal_sources(
        self,
        request: TopicMarketSignalCollectRequestDTO,
    ) -> list[TopicMarketSignalSourceConnectionDTO]:
        """Probe market-signal sources without persisting anything."""
        source_keys = request.source_keys or list(MARKET_SIGNAL_SOURCES)
        # Keep probes cheap: at most 5 items per source.
        limit = min(max(int(request.limit_per_source or 1), 1), 5)
        credentials_by_source = self._market_signal_credentials_by_source()
        results: list[TopicMarketSignalSourceConnectionDTO] = []
        for source_key in source_keys:
            key = str(source_key or "").strip()
            source = MARKET_SIGNAL_SOURCES.get(key)
            if source is None:
                results.append(
                    TopicMarketSignalSourceConnectionDTO(
                        source_key=key,
                        source_name=key,
                        ok=False,
                        message=f"Unknown source: {key}",
                    )
                )
                continue
            credentials = credentials_by_source.get(source.key)
            signals = collect_market_signals_from_source(
                source=self._source_with_credentials(source, credentials),
                fetch_text=self._fetch_text,
                limit=limit,
                collectors=self._collectors,
                credentials=credentials,
            )
            results.append(
                TopicMarketSignalSourceConnectionDTO(
                    source_key=source.key,
                    source_name=source.name,
                    ok=bool(signals),
                    count=len(signals),
                    message="ok" if signals else "No signals collected",
                    sample_titles=[signal.title or signal.summary for signal in signals[:3]],
                )
            )
        return results

    def list_market_signals(self, limit: int = 20) -> list[TopicMarketSignalDTO]:
        """Return recent market signals, with *limit* clamped into 1..100."""
        safe_limit = min(max(int(limit or 20), 1), 100)
        return self._repository.list_market_signals(safe_limit)

    def list_market_signal_source_health(self) -> list[TopicMarketSignalSourceHealthDTO]:
        """Merge persisted per-source health records with the static source list."""
        getter = getattr(self._repository, "list_market_signal_source_health", None)
        saved = getter() if callable(getter) else []
        saved_by_key = {
            item.source_key: item
            for item in saved
            if isinstance(item, TopicMarketSignalSourceHealthDTO)
        }
        settings = self.get_market_signal_settings()
        return [
            self._source_health_for(source, saved_by_key.get(source_key), settings)
            for source_key, source in MARKET_SIGNAL_SOURCES.items()
        ]
weighted_tag_scores=self._rounded_scores(weighted_tag_scores), + comic_opportunities=self._merge_unique([], comic_opportunities)[:6], + daily_counts=[ + {"date": date, "count": daily_counts[date]} + for date in sorted(daily_counts) + ], + recent_samples=signals[:10], + ) + + def list_market_signal_sources(self) -> list[TopicMarketSignalSourceDTO]: + return list(MARKET_SIGNAL_SOURCES.values()) + + def get_market_signal_settings(self) -> TopicMarketSignalAutomationSettingsDTO: + getter = getattr(self._repository, "get_market_signal_settings", None) + settings = getter() if callable(getter) else None + if not isinstance(settings, TopicMarketSignalAutomationSettingsDTO): + settings = TopicMarketSignalAutomationSettingsDTO() + return self._normalize_market_signal_settings(settings) + + def update_market_signal_settings( + self, + changes: dict[str, Any], + ) -> TopicMarketSignalAutomationSettingsDTO: + settings = self.get_market_signal_settings() + for key, value in (changes or {}).items(): + if hasattr(settings, key): + setattr(settings, key, value) + normalized = self._normalize_market_signal_settings(settings) + saver = getattr(self._repository, "save_market_signal_settings", None) + if callable(saver): + return saver(normalized) + return normalized + + def list_market_signal_source_credentials(self) -> list[TopicMarketSignalSourceCredentialStatusDTO]: + by_source = self._market_signal_credentials_by_source() + return [ + self._credential_status_for( + by_source.get(source_key) + or TopicMarketSignalSourceCredentialDTO(source_key=source_key) + ) + for source_key in MARKET_SIGNAL_SOURCES + ] + + def _market_signal_credentials_by_source(self) -> dict[str, TopicMarketSignalSourceCredentialDTO]: + getter = getattr(self._repository, "list_market_signal_credentials", None) + credentials = getter() if callable(getter) else [] + return { + credential.source_key: credential + for credential in credentials + if isinstance(credential, TopicMarketSignalSourceCredentialDTO) + } + 
+ def update_market_signal_source_credentials( + self, + source_key: str, + changes: dict[str, Any], + ) -> TopicMarketSignalSourceCredentialStatusDTO: + key = str(source_key or "").strip() + if key not in MARKET_SIGNAL_SOURCES: + raise ValueError(f"Unknown market signal source: {source_key}") + existing = self._market_signal_credentials_by_source().get(key) + api_key = ( + str(changes.get("api_key") or "").strip() + if "api_key" in changes + else (existing.api_key if existing else "") + ) + cookie = ( + str(changes.get("cookie") or "").strip() + if "cookie" in changes + else (existing.cookie if existing else "") + ) + endpoint_url = ( + str(changes.get("endpoint_url") or "").strip() + if "endpoint_url" in changes + else (existing.endpoint_url if existing else "") + ) + headers = ( + self._normalize_credential_headers(changes.get("headers") or {}) + if "headers" in changes + else (existing.headers if existing else {}) + ) + credentials = TopicMarketSignalSourceCredentialDTO( + source_key=key, + api_key=api_key, + cookie=cookie, + endpoint_url=endpoint_url, + headers=headers, + updated_at=datetime.now(timezone.utc).isoformat(), + ) + saver = getattr(self._repository, "save_market_signal_credentials", None) + if callable(saver): + credentials = saver(credentials) + return self._credential_status_for(credentials) + + def list(self, status: str | None = None) -> list[TopicIdeaDTO]: + return [ + TopicIdeaDTO.from_domain(idea) + for idea in self._repository.list(status) + ] + + def get(self, idea_id: str) -> Optional[TopicIdeaDTO]: + idea = self._repository.get_by_id(idea_id) + return TopicIdeaDTO.from_domain(idea) if idea else None + + def update_status( + self, + idea_id: str, + status: str, + adopted_novel_id: Optional[str] = None, + ) -> Optional[TopicIdeaDTO]: + idea = self._repository.update_status(idea_id, status, adopted_novel_id) + return TopicIdeaDTO.from_domain(idea) if idea else None + + def update(self, idea_id: str, changes: dict[str, Any]) -> 
Optional[TopicIdeaDTO]: + idea = self._repository.get_by_id(idea_id) + if idea is None: + return None + + editable = { + "title", + "genre", + "world_preset", + "length_tier", + "logline", + "premise", + "protagonist_hook", + "core_conflict", + "opening_hook", + "selling_points", + "long_term_potential", + "risk_notes", + "market_tags", + "score", + "development_notes", + "evaluation", + } + for key, value in changes.items(): + if key in editable: + setattr(idea, key, value) + if "status" in changes: + idea.update_status(changes["status"]) + idea.__post_init__() + idea.updated_at = datetime.now(timezone.utc) + updated = self._repository.update(idea) + return TopicIdeaDTO.from_domain(updated) + + async def deepen(self, idea_id: str) -> TopicIdeaDTO: + """深化单条选题,补齐立项案核心字段。""" + idea = self._get_required(idea_id) + payload = await self._enrich_with_llm(idea, "deepen") if self._llm else None + if payload is None: + payload = self._fallback_deepen_payload(idea) + updated = self._apply_enrichment(idea, payload, fill_missing=False) + return TopicIdeaDTO.from_domain(updated) + + async def evaluate(self, idea_id: str) -> TopicIdeaDTO: + """评估单条选题,把结果落到现有立项字段。""" + idea = self._get_required(idea_id) + payload = await self._enrich_with_llm(idea, "evaluate") if self._llm else None + if payload is None: + payload = self._fallback_evaluate_payload(idea) + payload = self._merge_market_evaluation(idea, payload) + updated = self._apply_enrichment(idea, payload, fill_missing=False) + return TopicIdeaDTO.from_domain(updated) + + def compare(self, topic_ids: list[str]) -> TopicIdeaCompareResultDTO: + """对比多个选题,不落库。""" + clean_ids = [] + for topic_id in topic_ids: + clean_id = str(topic_id).strip() + if clean_id and clean_id not in clean_ids: + clean_ids.append(clean_id) + if len(clean_ids) < 2: + raise ValueError("At least two topic_ids are required") + if len(clean_ids) > 5: + raise ValueError("At most five topic_ids are supported") + + ideas = [self._get_required(topic_id) for 
topic_id in clean_ids] + rankings = sorted( + (self._ranking_for(idea) for idea in ideas), + key=lambda item: item.score, + reverse=True, + ) + recommended = rankings[0] + summary = ( + f"推荐《{recommended.title}》优先立项:综合评分 {recommended.score}," + f"{recommended.reason}" + ) + return TopicIdeaCompareResultDTO( + recommended_topic_id=recommended.topic_id, + summary=summary, + rankings=rankings, + ) + + def adopt(self, idea_id: str, author: str = "未知作者") -> Any: + """采纳选题创建小说;已采纳过则返回既有小说,避免重复创建。""" + if self._novel_service is None: + raise ValueError("NovelService is required to adopt a topic idea") + + idea = self._repository.get_by_id(idea_id) + if idea is None: + raise ValueError(f"Topic idea not found: {idea_id}") + + if idea.status == TopicIdeaStatus.ADOPTED and idea.adopted_novel_id: + existing = self._novel_service.get_novel(idea.adopted_novel_id) + if existing is not None: + return existing + + novel_id = f"novel-{uuid4().hex}" + dto = self._novel_service.create_novel( + novel_id=novel_id, + title=idea.title, + author=author, + target_chapters=self._target_chapters_for(idea.length_tier), + premise=self._compose_premise(idea), + genre=idea.genre, + world_preset=idea.world_preset, + length_tier=idea.length_tier or None, + ) + self._repository.update_status( + idea.id, + TopicIdeaStatus.ADOPTED, + adopted_novel_id=getattr(dto, "id", novel_id), + ) + return dto + + async def _generate_with_llm( + self, + request: TopicGenerateRequestDTO, + ) -> list[dict[str, Any]]: + prompt = Prompt( + system=( + "你是华语类型小说选题编辑。请输出严格 JSON,根字段为 topic_ideas。" + "每个候选包含 title、genre、world_preset、length_tier、logline、premise、" + "protagonist_hook、core_conflict、opening_hook、selling_points、" + "long_term_potential、risk_notes、market_tags、score、development_notes、evaluation。" + "development_notes 必须包含 立项定位、首卷大纲、前三章切入、角色关系、连载策略;" + "evaluation 必须包含 综合评分、市场匹配度、开篇钩子、大纲完整度、主要风险。" + "score 和 综合评分 必须是 0-100 的整数。不要创建 Bible 或章节正文。" + ), + user=json.dumps( + { + **request.to_source_brief(), + 
"brief_text": self._brief_text(request), + }, + ensure_ascii=False, + ), + ) + config = GenerationConfig( + max_tokens=4096, + temperature=0.8, + response_format={"type": "json_object"}, + ) + try: + result = await self._llm.generate(prompt, config) + except Exception as exc: + raise TopicIdeaGenerationError("选题生成调用失败,请检查模型配置或稍后重试") from exc + + try: + data = parse_json_from_response(result.content) + except Exception as exc: + logger.warning("topic idea JSON parse failed, using fallback: %s", exc) + return [] + items = data.get("topic_ideas") if isinstance(data, dict) else None + return items if isinstance(items, list) else [] + + async def _enrich_with_llm(self, idea: TopicIdea, mode: str) -> Optional[dict[str, Any]]: + action = "深化" if mode == "deepen" else "评估" + prompt = Prompt( + system=( + f"你是华语类型小说选题编辑。请对给定选题做{action},输出严格 JSON。" + "允许字段:premise、protagonist_hook、core_conflict、opening_hook、" + "selling_points、long_term_potential、risk_notes、market_tags、score、" + "development_notes、evaluation。" + "score 必须是 0-100 的整数。不要创建 Bible 或章节正文。" + ), + user=json.dumps( + { + "mode": mode, + "topic_idea": TopicIdeaDTO.from_domain(idea).__dict__, + }, + ensure_ascii=False, + ), + ) + config = GenerationConfig( + max_tokens=3072, + temperature=0.55 if mode == "evaluate" else 0.75, + response_format={"type": "json_object"}, + ) + try: + result = await self._llm.generate(prompt, config) + except Exception as exc: + raise TopicIdeaGenerationError(f"选题{action}调用失败,请检查模型配置或稍后重试") from exc + + try: + data = parse_json_from_response(result.content) + except Exception as exc: + logger.warning("topic idea %s JSON parse failed, using fallback: %s", mode, exc) + return None + if not isinstance(data, dict): + return None + root_fields = ENRICHMENT_FIELDS.intersection(data) + if root_fields - {"evaluation"} or (root_fields and len(data) > 1): + return data + for key in ("topic_idea", "evaluation", "result"): + nested = data.get(key) + if isinstance(nested, dict): + if 
ENRICHMENT_FIELDS.intersection(nested): + return nested + if key == "evaluation": + return {"evaluation": nested} + return None + + def _build_ideas( + self, + raw_items: list[dict[str, Any]], + request: TopicGenerateRequestDTO, + count: int, + ) -> list[TopicIdea]: + ideas: list[TopicIdea] = [] + for item in raw_items: + if not isinstance(item, dict): + continue + try: + ideas.append(self._idea_from_payload(item, request)) + except ValueError: + continue + if len(ideas) >= count: + return ideas + + for item in self._fallback_payloads(request): + if len(ideas) >= count: + break + ideas.append(self._idea_from_payload(item, request)) + return ideas[:count] + + def _get_required(self, idea_id: str) -> TopicIdea: + idea = self._repository.get_by_id(idea_id) + if idea is None: + raise ValueError(f"Topic idea not found: {idea_id}") + return idea + + def _apply_enrichment( + self, + idea: TopicIdea, + payload: dict[str, Any], + fill_missing: bool, + ) -> TopicIdea: + for key in ENRICHMENT_FIELDS: + if key not in payload: + continue + value = self._normalize_enrichment_value(key, payload[key]) + if fill_missing and getattr(idea, key): + continue + setattr(idea, key, value) + idea.__post_init__() + idea.updated_at = datetime.now(timezone.utc) + return self._repository.update(idea) + + @staticmethod + def _normalize_enrichment_value(key: str, value: Any) -> Any: + if key in TEXT_ENRICHMENT_FIELDS: + return TopicIdeaService._format_report_value(value) + if key in LIST_ENRICHMENT_FIELDS: + if isinstance(value, list): + return [ + text + for item in value + for text in [TopicIdeaService._format_report_value(item)] + if text + ] + text = TopicIdeaService._format_report_value(value) + return [text] if text else [] + if key in DICT_ENRICHMENT_FIELDS: + return value if isinstance(value, dict) else {} + return value + + @staticmethod + def _fallback_deepen_payload(idea: TopicIdea) -> dict[str, Any]: + genre = idea.genre or "类型小说" + base = idea.premise or idea.logline or 
f"《{idea.title}》围绕一次高压选择展开。" + protagonist = idea.protagonist_hook or "主角带着一个被低估的能力或身份缺口入局。" + conflict = idea.core_conflict or "主角的自我证明与既有秩序、利益集团持续碰撞。" + opening = idea.opening_hook or "开篇用一次失败交易、公开误判或迫近危机把主角推到台前。" + selling_points = TopicIdeaService._merge_unique( + idea.selling_points, + ["高压开局", "成长反馈明确", "冲突可连续升级"], + ) + market_tags = TopicIdeaService._merge_unique( + idea.market_tags, + [genre, "强钩子", "可系列化"], + ) + risks = TopicIdeaService._merge_unique( + idea.risk_notes, + ["需要尽早明确核心规则与代价,避免设定空转。"], + ) + return { + "premise": ( + f"{base} 立项可从主角被迫接下一个看似无解的局面切入," + f"通过连续选择展示能力边界、关系压力和世界规则。中段以资源争夺与身份反转扩大格局," + f"长期主线则落在主角能否把个人优势转化为改变秩序的筹码。" + ), + "protagonist_hook": protagonist, + "core_conflict": conflict, + "opening_hook": opening, + "selling_points": selling_points, + "long_term_potential": idea.long_term_potential + or "适合从个人危机扩展到组织、规则和终局真相三层结构,具备连载延展空间。", + "risk_notes": risks, + "market_tags": market_tags, + "score": max(idea.score, TopicIdeaService._fallback_score(idea) + 6), + "development_notes": { + "立项定位": f"{genre}方向,优先突出主角入局压力和连续升级反馈。", + "首卷抓手": [ + opening, + "用一次阶段性胜利确认主角能力边界。", + "在胜利后追加更高代价,推动进入长期主线。", + ], + "角色关系": "围绕主角的短期盟友、利益对手和隐藏规则知情者建立张力。", + "连载策略": "每个阶段保留一个未兑现承诺,并用新规则或新债务推动追读。", + }, + } + + @staticmethod + def _fallback_evaluate_payload(idea: TopicIdea) -> dict[str, Any]: + score = TopicIdeaService._fallback_score(idea) + risks: list[str] = [] + if not idea.opening_hook: + risks.append("开篇钩子不足,需要补一个能在第一章成立的强事件。") + if not idea.core_conflict: + risks.append("核心冲突还不够具体,容易变成泛泛升级。") + if len(idea.selling_points) < 2: + risks.append("卖点数量偏少,建议补足情绪爽点和长期追读点。") + if not risks: + risks.append("需控制信息密度,避免前期解释多于行动。") + return { + "score": score, + "risk_notes": TopicIdeaService._merge_unique(idea.risk_notes, risks), + "market_tags": TopicIdeaService._merge_unique( + idea.market_tags, + [idea.genre or "类型小说", "立项评估", "可打磨"], + ), + "selling_points": TopicIdeaService._merge_unique( + idea.selling_points, + ["核心钩子可视化", "追读目标清晰"], + ), + 
"long_term_potential": idea.long_term_potential + or "可通过阶段目标、对手升级和规则揭示维持中长篇连载张力。", + "evaluation": { + "综合评分": score, + "开篇钩子": "已具备" if idea.opening_hook else "需补强", + "核心冲突": "已具备" if idea.core_conflict else "需具体化", + "卖点密度": "充足" if len(idea.selling_points) >= 2 else "偏少", + "主要风险": risks, + }, + } + + def _ranking_for(self, idea: TopicIdea) -> TopicIdeaRankingDTO: + completeness_bonus = TopicIdeaService._completeness_bonus(idea) + market_fit = self._market_fit_score_from_evaluation(idea.evaluation) + market_bonus = max(-3, min(8, round((market_fit - 50) / 10))) if market_fit is not None else 0 + score = max(0, min(100, int(idea.score) + completeness_bonus + market_bonus)) + strengths = [] + if idea.opening_hook: + strengths.append("开篇钩子明确") + if idea.selling_points: + strengths.append("卖点可见") + if idea.long_term_potential: + strengths.append("长线空间较清楚") + if idea.development_notes: + strengths.append("立项案更完整") + if idea.evaluation: + strengths.append("评估维度已补齐") + if market_fit is not None and market_fit >= 65: + strengths.append("市场趋势贴合") + reason = "、".join(strengths) if strengths else "基础信息仍需补齐" + return TopicIdeaRankingDTO( + topic_id=idea.id, + title=idea.title, + score=score, + reason=reason, + risks=list(idea.risk_notes), + ) + + @staticmethod + def _fallback_score(idea: TopicIdea) -> int: + score = int(idea.score or 0) + score = max(score, 52) + score += TopicIdeaService._completeness_bonus(idea) + if len(idea.selling_points) >= 2: + score += 6 + if idea.risk_notes: + score -= min(10, len(idea.risk_notes) * 3) + return max(0, min(100, score)) + + @staticmethod + def _completeness_bonus(idea: TopicIdea) -> int: + fields = [ + idea.premise, + idea.protagonist_hook, + idea.core_conflict, + idea.opening_hook, + idea.long_term_potential, + ] + filled = sum(1 for value in fields if value) + filled += min(len(idea.selling_points), 3) + filled += min(len(idea.market_tags), 2) + if idea.development_notes: + filled += min(len(idea.development_notes), 3) + if 
idea.evaluation: + filled += min(len(idea.evaluation), 3) + return min(24, filled * 3) + + @staticmethod + def _merge_unique(existing: list[str], additions: list[str]) -> list[str]: + result: list[str] = [] + for value in [*existing, *additions]: + text = str(value or "").strip() + if text and text not in result: + result.append(text) + return result + + @staticmethod + def _increment_count(counts: dict[str, int], key: str) -> None: + text = str(key or "").strip() + if text: + counts[text] = counts.get(text, 0) + 1 + + @staticmethod + def _increment_float(counts: dict[str, float], key: str, value: float) -> None: + text = str(key or "").strip() + if text: + counts[text] = counts.get(text, 0.0) + float(value or 0.0) + + @staticmethod + def _rounded_scores(values: dict[str, float]) -> dict[str, float]: + return { + key: round(value, 2) + for key, value in values.items() + } + + @staticmethod + def _infer_market_signal_category(signal: TopicMarketSignalDTO) -> str: + source = signal.source or "" + content = " ".join( + [ + source, + signal.title or "", + signal.genre or "", + signal.summary or "", + " ".join(signal.tags or []), + ] + ) + if "漫画" in content or any(word in source for word in ("快看", "动漫")): + return "comic" + if signal.title or signal.genre or signal.summary: + return "novel" + return "unknown" + + @staticmethod + def _idea_from_payload( + item: dict[str, Any], + request: TopicGenerateRequestDTO, + ) -> TopicIdea: + idea = TopicIdea( + title=str(item.get("title") or "未命名选题"), + genre=str(item.get("genre") or request.genre or ""), + world_preset=str(item.get("world_preset") or request.world_preset or ""), + length_tier=str(item.get("length_tier") or request.length_tier or ""), + logline=str(item.get("logline") or ""), + premise=str(item.get("premise") or ""), + protagonist_hook=str(item.get("protagonist_hook") or ""), + core_conflict=str(item.get("core_conflict") or ""), + opening_hook=str(item.get("opening_hook") or ""), + 
selling_points=item.get("selling_points") or [], + long_term_potential=str(item.get("long_term_potential") or ""), + risk_notes=item.get("risk_notes") or [], + market_tags=item.get("market_tags") or [], + score=item.get("score") or 0, + source_brief=request.to_source_brief(), + development_notes=item.get("development_notes") or {}, + evaluation=item.get("evaluation") or {}, + ) + if not idea.development_notes: + idea.development_notes = TopicIdeaService._generated_development_notes(idea, request) + if not idea.evaluation: + idea.evaluation = TopicIdeaService._generated_evaluation(idea, request) + idea.__post_init__() + return idea + + @staticmethod + def _generated_development_notes( + idea: TopicIdea, + request: TopicGenerateRequestDTO, + ) -> dict[str, Any]: + genre = idea.genre or request.genre or "类型小说" + opening = idea.opening_hook or "第一章用一次公开误判、交易崩盘或突发追捕把主角推到台前。" + conflict = idea.core_conflict or "主角的短期求生目标与既有规则、利益集团持续碰撞。" + long_term = idea.long_term_potential or "从个人危机扩展到组织、规则和终局真相三层结构。" + market_basis = TopicIdeaService._market_basis_from_request(request) + positioning_suffix = f"参考市场信号:{'、'.join(market_basis[:3])}。" if market_basis else "参考当前类型市场的强钩子与追读反馈。" + return { + "立项定位": f"{genre}方向,优先突出可视化开局、明确代价和连续升级反馈。{positioning_suffix}", + "首卷大纲": [ + f"第1-3章:{opening}", + f"第4-10章:围绕「{conflict}」制造第一次可见胜利,同时暴露更高代价。", + "第11-20章:引入关键对手或同盟,让主角优势从单点能力变成可复用策略。", + f"卷末:用一次选择把短期目标推向长期主线,落到「{long_term}」。", + ], + "前三章切入": [ + "第一章先给具体事件和损失,不先解释设定。", + "第二章让主角做一次带代价的判断,验证核心钩子。", + "第三章给出榜单/组织/对手的反馈,明确追读目标。", + ], + "角色关系": "围绕主角建立短期盟友、利益对手、规则知情者三角张力。", + "连载策略": "每 3-5 章兑现一个小目标,同时保留一个未兑现承诺或新风险。", + } + + @staticmethod + def _generated_evaluation( + idea: TopicIdea, + request: TopicGenerateRequestDTO, + ) -> dict[str, Any]: + score = TopicIdeaService._fallback_score(idea) + market_basis = TopicIdeaService._market_basis_from_request(request) + market_score = max(55, min(92, score + min(10, len(market_basis) * 3))) + risks = list(idea.risk_notes or []) + if not 
risks: + risks = ["需要避免只复用榜单表层标签,必须落到主角目标、代价和第一章事件。"] + return { + "综合评分": score, + "市场匹配度": { + "score": market_score, + "level": "高" if market_score >= 78 else "中高" if market_score >= 65 else "中", + "依据": market_basis[:5] or ["类型钩子完整度", "开篇事件可视化", "连载延展性"], + }, + "开篇钩子": "已具备" if idea.opening_hook else "需补一个第一章可直接呈现的强事件", + "大纲完整度": "已生成首卷大纲和前三章切入,可继续深化为章节节拍", + "主要风险": risks, + } + + @staticmethod + def _market_basis_from_request(request: TopicGenerateRequestDTO) -> list[str]: + basis: list[str] = [] + for signal in request.market_signals or []: + if not isinstance(signal, dict): + continue + title = str(signal.get("title") or "").strip() + tags = signal.get("tags") or [] + if title: + basis.append(title) + if isinstance(tags, list): + basis.extend(str(tag).strip() for tag in tags if str(tag).strip()) + return TopicIdeaService._merge_unique([], basis) + + @staticmethod + def _fallback_payloads(request: TopicGenerateRequestDTO) -> list[dict[str, Any]]: + genre = request.genre or "类型小说" + world = request.world_preset or "可扩展世界观" + brief = TopicIdeaService._manual_brief_text(request) or "一个具备商业潜力的新故事" + signal_payloads = TopicIdeaService._fallback_payloads_from_signals(request) + base_payloads = [ + { + "title": "逆风开局的隐藏王牌", + "genre": genre, + "world_preset": world, + "length_tier": request.length_tier, + "logline": f"主角因{brief}被推入危局,只能用被低估的能力撬动更大的秩序。", + "premise": f"围绕「{brief}」展开,从一次失败任务切入,逐步揭开资源、身份与规则的真相。", + "protagonist_hook": "主角拥有一个看似鸡肋、实则能改变局面的优势。", + "core_conflict": "个体求生与既有秩序之间的持续对抗。", + "opening_hook": "第一章以误判、追捕或交易崩盘开场。", + "selling_points": ["强危机开局", "成长反馈明确", "可连续升级"], + "long_term_potential": "可扩展为势力、规则与终局真相三条长期线。", + "risk_notes": ["需要避免金手指过早失控。"], + "market_tags": [genre, "逆袭", "悬念"], + "score": 72, + }, + { + "title": "规则裂缝中的调查者", + "genre": genre, + "world_preset": world, + "length_tier": request.length_tier, + "logline": f"主角追查{brief}背后的异常,发现世界运行规则被人为改写。", + "premise": "以调查推进爽点,每个真相都带来新的利益交换和更高层级敌人。", + "protagonist_hook": 
"主角能从普通证据里看见别人忽略的因果断点。", + "core_conflict": "追求真相的人与维护黑箱的人之间的博弈。", + "opening_hook": "一份本不该存在的记录出现在主角手里。", + "selling_points": ["谜团推进", "反转空间", "角色智性爽点"], + "long_term_potential": "适合做单元案件到主线阴谋的递进结构。", + "risk_notes": ["需要控制信息密度,避免前期解释过多。"], + "market_tags": [genre, "调查", "阴谋"], + "score": 70, + }, + { + "title": "被选中的失败样本", + "genre": genre, + "world_preset": world, + "length_tier": request.length_tier, + "logline": f"所有人都认为主角是{brief}中的失败者,只有他知道失败本身藏着新路线。", + "premise": "主角从低评价标签出发,把缺陷转化为独特路线,持续打破评价体系。", + "protagonist_hook": "主角的失败记录反而是理解底层规则的钥匙。", + "core_conflict": "标签化评价与自我证明之间的冲突。", + "opening_hook": "主角在公开淘汰现场发现评判标准被操纵。", + "selling_points": ["废柴逆袭", "体系突破", "情绪代偿"], + "long_term_potential": "可持续展开学院、组织、赛场或门派阶梯。", + "risk_notes": ["需要让配角反应真实,避免单纯降智衬托。"], + "market_tags": [genre, "废柴流", "升级"], + "score": 68, + }, + { + "title": "长夜档案管理员", + "genre": genre, + "world_preset": world, + "length_tier": request.length_tier, + "logline": f"主角整理{brief}相关旧档案时,发现每份记录都在预告一场尚未发生的灾难。", + "premise": "主角原本只负责整理边缘档案,却发现旧记录会提前写出现实中的异常事件。为了避免灾难,也为了查清档案来源,主角被迫在记录、现实和幕后势力之间穿梭,逐渐意识到自己也是某份档案里的角色。", + "protagonist_hook": "主角能读出档案中被抹去的空白注脚。", + "core_conflict": "试图改写结局的记录者与制造既定命运的幕后系统对抗。", + "opening_hook": "主角在一份十年前的档案里,看见了明天自己的死亡时间。", + "selling_points": ["预告式悬念", "命运反抗", "单元事件推进"], + "long_term_potential": "可由单份档案扩展到城市、时代和世界底层记录系统。", + "risk_notes": ["预言机制要有明确代价,避免万能预知。"], + "market_tags": [genre, "档案", "命运"], + "score": 74, + }, + { + "title": "临界点上的继承人", + "genre": genre, + "world_preset": world, + "length_tier": request.length_tier, + "logline": f"主角继承了与{brief}有关的危险遗产,也继承了所有追债者和仇人。", + "premise": "主角在最不合适的时候继承一份无人敢接的遗产:它既是资源入口,也是灾祸源头。围绕遗产的争夺让主角迅速卷入多方势力,而遗产本身隐藏的使用规则,决定了主角能否把负债变成筹码。", + "protagonist_hook": "主角不是天选之人,只是唯一愿意接手烂摊子的人。", + "core_conflict": "继承危险遗产的新人 vs 想瓜分遗产的旧势力。", + "opening_hook": "遗产交接当天,所有债主比亲友更早抵达灵堂。", + "selling_points": ["遗产争夺", "负债开局", "势力博弈"], + "long_term_potential": "遗产可逐层解锁,每层引出新的债务、盟友和敌人。", + "risk_notes": ["遗产能力需要分阶段开放,避免开局资源过满。"], + "market_tags": 
[genre, "继承", "势力"], + "score": 76, + }, + ] + return [*signal_payloads, *base_payloads] + + @staticmethod + def _fallback_payloads_from_signals(request: TopicGenerateRequestDTO) -> list[dict[str, Any]]: + payloads: list[dict[str, Any]] = [] + for signal in request.market_signals or []: + if not isinstance(signal, dict): + continue + title = str(signal.get("title") or "").strip() + summary = str(signal.get("summary") or "").strip() + tags = [str(tag).strip() for tag in (signal.get("tags") or []) if str(tag).strip()] + if not title and not summary: + continue + signal_name = title or summary[:18] + genre = TopicIdeaService._genre_from_signal(signal, request) + opportunity = TopicIdeaService._signal_opportunity_text(signal_name, tags, summary) + payloads.append( + { + "title": f"榜单反推:{signal_name}的小说机会", + "genre": genre, + "world_preset": request.world_preset or "市场热点反推", + "length_tier": request.length_tier, + "logline": f"参考《{signal_name}》的{TopicIdeaService._compact_tags(tags)}信号,改写成主角在高压关系中切割旧秩序、夺回主动权的连载开局。", + "premise": opportunity, + "protagonist_hook": "主角不是被动受害者,而是能把公开羞辱、背叛或榜单压力转化为反击筹码的人。", + "core_conflict": "主角的自我翻盘与旧关系、旧评价体系、旧利益网络之间的正面碰撞。", + "opening_hook": TopicIdeaService._opening_from_signal(signal_name, summary), + "selling_points": TopicIdeaService._merge_unique( + ["榜单热点反推", "第一章冲突明确", "情绪代偿强"], + tags[:3], + ), + "long_term_potential": "可从第一场关系切割扩展到事业、身份、资源和幕后规则的多层反击线。", + "risk_notes": ["不能照搬原作品设定,只保留市场信号里的情绪结构和冲突模型。"], + "market_tags": TopicIdeaService._merge_unique([genre], tags), + "score": 78, + } + ) + if len(payloads) >= 5: + break + return payloads + + @staticmethod + def _manual_brief_text(request: TopicGenerateRequestDTO) -> str: + parts = [] + if request.brief.strip(): + parts.append(request.brief.strip()) + if request.keywords: + parts.append("关键词:" + "、".join(request.keywords)) + if request.desired_selling_points: + parts.append("目标爽点:" + "、".join(request.desired_selling_points)) + if request.avoid_patterns: + parts.append("避雷套路:" + 
"、".join(request.avoid_patterns)) + return ";".join(parts) + + @staticmethod + def _genre_from_signal(signal: dict[str, Any], request: TopicGenerateRequestDTO) -> str: + if request.genre: + return request.genre + content = " ".join( + str(value or "") + for value in ( + signal.get("title"), + signal.get("genre"), + signal.get("summary"), + " ".join(str(tag) for tag in (signal.get("tags") or [])), + ) + ) + if any(word in content for word in ("渣男", "未婚夫", "告白", "丈夫", "淑女", "闺女")): + return "现代言情" + if any(word in content for word in ("亡灵", "仙子", "鼎炉", "怪物", "罪恶之城")): + return "玄幻奇幻" + if any(word in content for word in ("检察官", "档案", "罪", "调查")): + return "都市悬疑" + return str(signal.get("genre") or "类型小说") + + @staticmethod + def _signal_opportunity_text(signal_name: str, tags: list[str], summary: str) -> str: + clue = summary or f"围绕《{signal_name}》暴露出的市场情绪展开。" + return ( + f"从《{signal_name}》提炼市场机会:保留“{TopicIdeaService._compact_tags(tags)}”背后的情绪张力," + f"把「{clue[:90]}」转译为小说第一章的公开冲突。主角先遭遇关系或身份层面的失衡," + "随后用一次明确选择切断旧评价体系,并引出更大的利益网络。" + ) + + @staticmethod + def _opening_from_signal(signal_name: str, summary: str) -> str: + if "渣男" in signal_name or "未婚夫" in summary or "怀孕" in summary: + return "第一章从未婚夫带着另一个女人公开归来写起,主角当场切割关系并抛出第一张证据。" + if "亡灵" in signal_name or "怪物" in signal_name: + return "第一章从主角获得被所有人误判为废物的危险能力写起,立刻用它解决一场近身危机。" + if "检察官" in signal_name or "罪" in signal_name: + return "第一章从一份足以翻案的证据突然落到主角手里写起,对手当天就开始灭口。" + return f"第一章以《{signal_name}》对应的高压事件开场,让主角在众目睽睽下做出不可回头的选择。" + + @staticmethod + def _compact_tags(tags: list[str]) -> str: + return "、".join(tags[:3]) if tags else "快速上榜" + + @staticmethod + def _compose_premise(idea: TopicIdea) -> str: + parts = [ + idea.logline, + idea.premise, + f"主角钩子:{idea.protagonist_hook}" if idea.protagonist_hook else "", + f"核心冲突:{idea.core_conflict}" if idea.core_conflict else "", + f"开篇钩子:{idea.opening_hook}" if idea.opening_hook else "", + ] + development_notes = TopicIdeaService._format_report_block( + "立项案", + 
idea.development_notes, + ) + evaluation = TopicIdeaService._format_report_block( + "立项评估", + idea.evaluation, + ) + if development_notes: + parts.append(development_notes) + if evaluation: + parts.append(evaluation) + return "\n\n".join(part for part in parts if part) + + @staticmethod + def _format_report_block(title: str, data: dict[str, Any]) -> str: + if not isinstance(data, dict) or not data: + return "" + lines = [] + for key, value in data.items(): + text = TopicIdeaService._format_report_value(value) + if text: + lines.append(f"- {key}:{text}") + if not lines: + return "" + return f"{title}:\n" + "\n".join(lines) + + @staticmethod + def _format_report_value(value: Any) -> str: + if value is None: + return "" + if isinstance(value, str): + return value.strip() + if isinstance(value, (int, float, bool)): + return str(value) + if isinstance(value, list): + return ";".join( + item + for item in ( + TopicIdeaService._format_report_value(item) + for item in value + ) + if item + ) + if isinstance(value, dict): + return ";".join( + f"{key}={text}" + for key, item in value.items() + for text in [TopicIdeaService._format_report_value(item)] + if text + ) + return str(value).strip() + + @staticmethod + def _brief_text(request: TopicGenerateRequestDTO) -> str: + parts = [] + if request.brief.strip(): + parts.append(request.brief.strip()) + if request.keywords: + parts.append("关键词:" + "、".join(request.keywords)) + if request.desired_selling_points: + parts.append("目标爽点:" + "、".join(request.desired_selling_points)) + if request.avoid_patterns: + parts.append("避雷套路:" + "、".join(request.avoid_patterns)) + signal_texts = [] + for signal in request.market_signals or []: + if not isinstance(signal, dict): + continue + fields = [] + title = str(signal.get("title") or "").strip() + genre = str(signal.get("genre") or "").strip() + summary = str(signal.get("summary") or "").strip() + tags = signal.get("tags") or [] + if title: + fields.append(title) + if genre: + 
fields.append(f"类型={genre}") + if isinstance(tags, list) and tags: + fields.append("标签=" + "、".join(str(tag).strip() for tag in tags if str(tag).strip())) + if summary: + fields.append(summary) + if fields: + signal_texts.append(";".join(fields)) + if signal_texts: + parts.append("市场观察:" + " | ".join(signal_texts[:8])) + return ";".join(parts) + + @staticmethod + def _target_chapters_for(length_tier: str) -> int: + return { + "short": 30, + "standard": 100, + "epic": 200, + }.get((length_tier or "").strip(), 100) + + @staticmethod + def _market_signal_from_line(line: str, source: str) -> TopicMarketSignalDTO: + text = line.strip() + parts = [part.strip() for part in text.split("|")] + title = parts[0] if len(parts) >= 4 else "" + genre = parts[1] if len(parts) >= 4 else "" + tags = TopicIdeaService._split_tags(parts[2]) if len(parts) >= 4 else [] + summary = parts[3] if len(parts) >= 4 else text + return TopicMarketSignalDTO( + id=f"signal-{uuid4().hex}", + source=(source or "手动观察").strip() or "手动观察", + title=title, + genre=genre, + tags=tags, + summary=summary, + raw_text=text, + created_at=datetime.now(timezone.utc).isoformat(), + ) + + @staticmethod + def _split_tags(value: str) -> list[str]: + tags: list[str] = [] + for raw in (value or "").replace(",", ",").replace("、", ",").split(","): + tag = raw.strip() + if tag and tag not in tags: + tags.append(tag) + return tags + + @staticmethod + def _fetch_url_text(url: str, headers: dict[str, str] | None = None) -> str: + request_headers = { + "User-Agent": ( + "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) " + "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120 Safari/537.36" + ) + } + request_headers.update(headers or {}) + request = Request( + url, + headers=request_headers, + ) + try: + with urlopen(request, timeout=10) as response: + return TopicIdeaService._decode_response_body( + response.read(), + response.headers.get_content_charset(), + ) + except HTTPError as exc: + body = exc.read() + if body: + return 
TopicIdeaService._decode_response_body( + body, + exc.headers.get_content_charset() if exc.headers else None, + ) + raise + + + @staticmethod + def _decode_response_body(body: bytes, charset: str | None) -> str: + candidates = [charset] if charset else [] + candidates.extend(["utf-8", "gb18030"]) + best_text = "" + best_errors = 10**9 + for candidate in candidates: + if not candidate: + continue + text = body.decode(candidate, errors="replace") + errors = text.count("\ufffd") + if errors < best_errors: + best_text = text + best_errors = errors + if errors == 0: + break + return best_text + + + def _merge_market_evaluation(self, idea: TopicIdea, payload: dict[str, Any]) -> dict[str, Any]: + evaluation = payload.get("evaluation") + base_evaluation = dict(evaluation) if isinstance(evaluation, dict) else {} + market_fit = self._build_market_fit_snapshot(idea, payload) + if market_fit: + base_evaluation.update(market_fit) + payload["evaluation"] = base_evaluation + market_score = self._market_fit_score_from_evaluation(base_evaluation) + if market_score is not None: + payload["score"] = max( + int(payload.get("score") or idea.score or 0), + max(0, min(100, int(round((payload.get("score") or idea.score or 0) * 0.7 + market_score * 0.3)))), + ) + return payload + + def _build_market_fit_snapshot( + self, + idea: TopicIdea, + payload: dict[str, Any], + ) -> dict[str, Any]: + settings = self.get_market_signal_settings() + signals = self._recent_market_signals(limit=200, lookback_days=settings.lookback_days) + if not signals: + return {} + genre = str(payload.get("genre") or idea.genre or "").strip() + market_tags = self._merge_unique( + list(idea.market_tags), + [str(item).strip() for item in payload.get("market_tags") or []], + ) + selling_points = self._merge_unique( + list(idea.selling_points), + [str(item).strip() for item in payload.get("selling_points") or []], + ) + tokens = self._market_fit_tokens(genre, market_tags, selling_points, idea.title, idea.logline, 
idea.premise) + if not tokens and not genre: + return {} + matched_sources: dict[str, float] = {} + matched_tags: list[str] = [] + comic_opportunities: list[str] = [] + total_weight = 0.0 + for signal in signals: + signal_text = " ".join( + [signal.title, signal.genre, signal.summary, " ".join(signal.tags or [])] + ) + token_matches = [token for token in tokens if token and token in signal_text] + genre_match = bool(genre and genre in signal_text) + if not token_matches and not genre_match: + continue + weight = self._source_weight_for_signal(signal, settings) + total_weight += weight + min(len(token_matches) * 0.25, 1.0) + matched_sources[signal.source] = matched_sources.get(signal.source, 0.0) + weight + for tag in signal.tags: + if tag in tokens and tag not in matched_tags: + matched_tags.append(tag) + if self._infer_market_signal_category(signal) == "comic": + comic_opportunities.extend( + self._comic_opportunities_for_signal(signal, genre or idea.genre or "当前题材") + ) + if total_weight <= 0: + return {} + fit_score = max(35, min(95, int(round(48 + total_weight * 12 + len(matched_tags) * 2)))) + fit_level = "高" if fit_score >= 78 else "中高" if fit_score >= 65 else "中" + return { + "市场匹配度": { + "score": fit_score, + "level": fit_level, + "window_days": settings.lookback_days, + "matched_tags": matched_tags[:6], + "matched_sources": self._top_weighted_sources(matched_sources), + }, + "平台权重摘要": self._top_weighted_sources(matched_sources), + "趋势窗口": f"近 {settings.lookback_days} 天", + "漫画转题机会": comic_opportunities[:3], + } + + def _recent_market_signals(self, limit: int, lookback_days: int) -> list[TopicMarketSignalDTO]: + signals = self._repository.list_market_signals(max(1, min(int(limit or 100), 500))) + if lookback_days <= 0: + return signals + cutoff = datetime.now(timezone.utc).timestamp() - lookback_days * 86400 + result = [] + for signal in signals: + dt = self._parse_signal_datetime(signal.created_at) + if dt and dt.timestamp() >= cutoff: + 
result.append(signal) + return result or signals + + def _record_market_signal_source_health( + self, + source: TopicMarketSignalSourceDTO, + signals: list[TopicMarketSignalDTO], + ) -> None: + saver = getattr(self._repository, "save_market_signal_source_health", None) + if not callable(saver): + return + now = datetime.now(timezone.utc).isoformat() + count = len(signals) + saver( + TopicMarketSignalSourceHealthDTO( + source_key=source.key, + source_name=source.name, + status="success" if count > 0 else "error", + last_run_at=now, + last_success_at=now if count > 0 else "", + last_count=count, + last_error="" if count > 0 else "No signals collected", + ) + ) + + def _source_health_for( + self, + source: TopicMarketSignalSourceDTO, + saved: TopicMarketSignalSourceHealthDTO | None, + settings: TopicMarketSignalAutomationSettingsDTO, + ) -> TopicMarketSignalSourceHealthDTO: + health = saved or TopicMarketSignalSourceHealthDTO( + source_key=source.key, + source_name=source.name, + ) + return TopicMarketSignalSourceHealthDTO( + source_key=source.key, + source_name=source.name, + status=str(health.status or "unknown"), + last_run_at=str(health.last_run_at or ""), + last_success_at=str(health.last_success_at or ""), + last_count=max(0, int(health.last_count or 0)), + last_error=str(health.last_error or ""), + next_run_at=self._next_run_at_for_source(source.key, settings), + ) + + @staticmethod + def _next_run_at_for_source( + source_key: str, + settings: TopicMarketSignalAutomationSettingsDTO, + ) -> str: + if not settings.enabled or source_key not in settings.selected_source_keys: + return "" + last_run_at = str(settings.last_run_at or "").strip() + if not last_run_at: + return "" + try: + last_dt = datetime.fromisoformat(last_run_at) + except ValueError: + return "" + if last_dt.tzinfo is None: + last_dt = last_dt.replace(tzinfo=timezone.utc) + next_dt = last_dt.astimezone(timezone.utc) + timedelta( + minutes=max(15, int(settings.interval_minutes or 180)) + ) + return 
next_dt.isoformat() + + def _normalize_market_signal_settings( + self, + settings: TopicMarketSignalAutomationSettingsDTO, + ) -> TopicMarketSignalAutomationSettingsDTO: + selected_source_keys = [ + key for key in settings.selected_source_keys + if key in MARKET_SIGNAL_SOURCES + ] or list(MARKET_SIGNAL_SOURCES.keys()) + source_weights = dict(DEFAULT_MARKET_SIGNAL_SOURCE_WEIGHTS) + for key, value in (settings.source_weights or {}).items(): + if key in MARKET_SIGNAL_SOURCES: + try: + source_weights[key] = max(0.1, min(float(value), 3.0)) + except (TypeError, ValueError): + continue + return TopicMarketSignalAutomationSettingsDTO( + enabled=bool(settings.enabled), + interval_minutes=max(15, min(int(settings.interval_minutes or 180), 24 * 60)), + limit_per_source=max(1, min(int(settings.limit_per_source or 8), 30)), + lookback_days=max(1, min(int(settings.lookback_days or 30), 90)), + source_weights=source_weights, + selected_source_keys=selected_source_keys, + last_run_at=str(settings.last_run_at or ""), + last_status=str(settings.last_status or "idle"), + last_error=str(settings.last_error or ""), + updated_at=str(settings.updated_at or ""), + ) + + @staticmethod + def _normalize_credential_headers(values: dict[str, Any]) -> dict[str, str]: + result: dict[str, str] = {} + for key, value in (values or {}).items(): + name = str(key or "").strip() + text = str(value or "").strip() + if name and text: + result[name] = text + return result + + @staticmethod + def _credential_status_for( + credentials: TopicMarketSignalSourceCredentialDTO, + ) -> TopicMarketSignalSourceCredentialStatusDTO: + headers = TopicIdeaService._normalize_credential_headers(credentials.headers) + return TopicMarketSignalSourceCredentialStatusDTO( + source_key=credentials.source_key, + api_key_configured=bool(str(credentials.api_key or "").strip()), + cookie_configured=bool(str(credentials.cookie or "").strip()), + endpoint_configured=bool(str(credentials.endpoint_url or "").strip()), + 
header_keys=sorted(headers.keys()), + updated_at=str(credentials.updated_at or ""), + ) + + @staticmethod + def _source_with_credentials( + source: TopicMarketSignalSourceDTO, + credentials: TopicMarketSignalSourceCredentialDTO | None, + ) -> TopicMarketSignalSourceDTO: + endpoint_url = str(credentials.endpoint_url or "").strip() if credentials else "" + if not endpoint_url: + return source + return TopicMarketSignalSourceDTO( + key=source.key, + name=source.name, + url=endpoint_url, + category=source.category, + source_type="api", + requires_auth=source.requires_auth, + ) + + @staticmethod + def _signal_date(signal: TopicMarketSignalDTO) -> str: + dt = TopicIdeaService._parse_signal_datetime(signal.created_at) + return dt.date().isoformat() if dt else "" + + @staticmethod + def _parse_signal_datetime(value: str) -> Optional[datetime]: + text = str(value or "").strip() + if not text: + return None + try: + dt = datetime.fromisoformat(text) + except ValueError: + return None + if dt.tzinfo is None: + return dt.replace(tzinfo=timezone.utc) + return dt.astimezone(timezone.utc) + + @staticmethod + def _source_key_for_signal_name(name: str) -> str: + for key, source in MARKET_SIGNAL_SOURCES.items(): + if source.name == name: + return key + return str(name or "").strip() + + def _source_weight_for_signal( + self, + signal: TopicMarketSignalDTO, + settings: TopicMarketSignalAutomationSettingsDTO, + ) -> float: + source_key = self._source_key_for_signal_name(signal.source) + return float(settings.source_weights.get(source_key, DEFAULT_MARKET_SIGNAL_SOURCE_WEIGHTS.get(source_key, 0.8))) + + @staticmethod + def _market_fit_tokens( + genre: str, + market_tags: list[str], + selling_points: list[str], + *texts: str, + ) -> list[str]: + tokens: list[str] = [] + for value in [genre, *market_tags, *selling_points]: + text = str(value or "").strip() + if len(text) >= 2 and text not in tokens: + tokens.append(text) + for text in texts: + for token in 
re.findall(r"[\u4e00-\u9fffA-Za-z]{2,8}", str(text or "")): + if len(token) >= 2 and token not in tokens: + tokens.append(token) + if len(tokens) >= 12: + return tokens + return tokens + + @staticmethod + def _comic_opportunities_for_signal(signal: TopicMarketSignalDTO, target_genre: str) -> list[str]: + opportunities: list[str] = [] + signal_text = " ".join( + [signal.title or "", signal.genre or "", signal.summary or "", " ".join(signal.tags or [])] + ) + for keywords, opportunity in ( + ( + ("总裁", "豪门", "霸总", "职场"), + f"可转译为{target_genre}里的总裁职场线:用权力差、职业目标和情感误判制造连续拉扯。", + ), + ( + ("错撩", "误会", "替身", "白月光"), + f"可转译为{target_genre}里的错撩误会钩子:让第一章关系误判直接触发利益冲突。", + ), + ( + ("重生", "穿越", "改命", "逆袭"), + f"可转译为{target_genre}里的重生改命线:把视觉爽点换成选择代价和阶段性翻盘。", + ), + ( + ("契约", "婚约", "先婚", "联姻"), + f"可转译为{target_genre}里的契约关系线:用外部绑定制造同盟、试探和背叛成本。", + ), + ( + ("萌宝", "团宠", "幼崽", "公主"), + f"可转译为{target_genre}里的亲缘守护线:用萌点入口承载身份秘密和阵营选择。", + ), + ): + if any(keyword in signal_text for keyword in keywords): + opportunities.append(opportunity) + for tag in signal.tags or []: + text = str(tag or "").strip() + if text and text not in {"漫画", "人气榜", "新作榜", "飙升榜", "畅销榜", "韩漫榜", "日漫榜", "恋爱榜", "剧情榜", "投稿榜", "完结榜", "免费榜", "等免榜", "月票榜", "漫画榜"}: + opportunities.append(f"可把漫画热词“{text}”转译成{target_genre}里的强关系或高代价主线。") + if signal.title: + opportunities.append(f"参考《{signal.title}》的视觉冲突感,改写成{target_genre}里的开篇爆点。") + return TopicIdeaService._merge_unique([], opportunities) + + @staticmethod + def _top_weighted_sources(values: dict[str, float]) -> list[str]: + return [ + f"{key}({round(value, 2)})" + for key, value in sorted(values.items(), key=lambda item: item[1], reverse=True)[:3] + ] + + @staticmethod + def _market_fit_score_from_evaluation(evaluation: dict[str, Any]) -> Optional[int]: + market_fit = evaluation.get("市场匹配度") if isinstance(evaluation, dict) else None + if not isinstance(market_fit, dict): + return None + score = market_fit.get("score") + try: + return max(0, min(100, int(score))) + 
except (TypeError, ValueError): + return None diff --git a/application/topic/services/topic_signal_automation_service.py b/application/topic/services/topic_signal_automation_service.py new file mode 100644 index 000000000..963d89af1 --- /dev/null +++ b/application/topic/services/topic_signal_automation_service.py @@ -0,0 +1,107 @@ +"""市场信号自动采集后台服务。""" +from __future__ import annotations + +import logging +import threading +from datetime import datetime, timezone + +from application.topic.dtos import TopicMarketSignalCollectRequestDTO + +logger = logging.getLogger(__name__) + + +class TopicSignalAutomationService: + """轻量后台线程:按配置定时抓取市场信号。""" + + def __init__(self, topic_service, poll_interval_seconds: int = 60): + self._topic_service = topic_service + self._poll_interval_seconds = max(10, int(poll_interval_seconds or 60)) + self._stop_event = threading.Event() + self._worker: threading.Thread | None = None + self._lock = threading.Lock() + + def start(self) -> None: + with self._lock: + if self._worker and self._worker.is_alive(): + return + self._stop_event.clear() + self._worker = threading.Thread( + target=self._worker_loop, + daemon=True, + name="topic-signal-automation", + ) + self._worker.start() + logger.info("TopicSignalAutomationService worker thread started") + + def stop(self) -> None: + with self._lock: + self._stop_event.set() + worker = self._worker + self._worker = None + if worker and worker.is_alive(): + worker.join(timeout=2.0) + + def run_pending_once(self, force: bool = False) -> bool: + settings = self._topic_service.get_market_signal_settings() + if not settings.enabled and not force: + return False + if not force and not self._is_due(settings): + return False + now = datetime.now(timezone.utc).isoformat() + try: + self._topic_service.collect_market_signals( + TopicMarketSignalCollectRequestDTO( + source_keys=settings.selected_source_keys, + limit_per_source=settings.limit_per_source, + ) + ) + self._topic_service.update_market_signal_settings( 
+ { + "last_run_at": now, + "last_status": "success", + "last_error": "", + } + ) + return True + except Exception as exc: + logger.warning("topic signal automation run failed: %s", exc) + self._topic_service.update_market_signal_settings( + { + "last_run_at": now, + "last_status": "error", + "last_error": str(exc), + } + ) + return False + + def _worker_loop(self) -> None: + self.run_forever(stop_event=self._stop_event, run_immediately=False) + + def run_forever(self, stop_event: threading.Event | None = None, run_immediately: bool = True) -> None: + """按配置持续采集,供独立守护进程复用。""" + active_stop_event = stop_event or self._stop_event + if run_immediately: + try: + self.run_pending_once(force=False) + except Exception as exc: + logger.warning("topic signal automation loop failed: %s", exc) + + while not active_stop_event.wait(self._poll_interval_seconds): + try: + self.run_pending_once(force=False) + except Exception as exc: + logger.warning("topic signal automation loop failed: %s", exc) + + @staticmethod + def _is_due(settings) -> bool: + last_run_at = str(getattr(settings, "last_run_at", "") or "").strip() + if not last_run_at: + return True + try: + last_dt = datetime.fromisoformat(last_run_at) + except ValueError: + return True + if last_dt.tzinfo is None: + last_dt = last_dt.replace(tzinfo=timezone.utc) + delta_seconds = (datetime.now(timezone.utc) - last_dt.astimezone(timezone.utc)).total_seconds() + return delta_seconds >= max(15, int(getattr(settings, "interval_minutes", 180) or 180)) * 60 diff --git a/application/topic/services/topic_signal_collectors.py b/application/topic/services/topic_signal_collectors.py new file mode 100644 index 000000000..0b3189e50 --- /dev/null +++ b/application/topic/services/topic_signal_collectors.py @@ -0,0 +1,1211 @@ +"""市场信号采集器抽象。""" +from __future__ import annotations + +import json +import logging +import re +from dataclasses import replace +from datetime import datetime, timezone +from html import unescape +import inspect +from 
typing import Callable +from uuid import uuid4 + +from application.topic.dtos import ( + TopicMarketSignalDTO, + TopicMarketSignalSourceCredentialDTO, + TopicMarketSignalSourceDTO, +) + +logger = logging.getLogger(__name__) + +_FANQIE_TEXT_FONT_MAP = { + "58670": "0", "58413": "1", "58678": "2", "58371": "3", "58353": "4", "58480": "5", + "58359": "6", "58449": "7", "58540": "8", "58692": "9", "58712": "a", "58542": "b", + "58575": "c", "58626": "d", "58691": "e", "58561": "f", "58362": "g", "58619": "h", + "58430": "i", "58531": "j", "58588": "k", "58440": "l", "58681": "m", "58631": "n", + "58376": "o", "58429": "p", "58555": "q", "58498": "r", "58518": "s", "58453": "t", + "58397": "u", "58356": "v", "58435": "w", "58514": "x", "58482": "y", "58529": "z", + "58515": "A", "58688": "B", "58709": "C", "58344": "D", "58656": "E", "58381": "F", + "58576": "G", "58516": "H", "58463": "I", "58649": "J", "58571": "K", "58558": "L", + "58433": "M", "58517": "N", "58387": "O", "58687": "P", "58537": "Q", "58541": "R", + "58458": "S", "58390": "T", "58466": "U", "58386": "V", "58697": "W", "58519": "X", + "58511": "Y", "58634": "Z", "58611": "的", "58590": "一", "58398": "是", "58422": "了", + "58657": "我", "58666": "不", "58562": "人", "58345": "在", "58510": "他", "58496": "有", + "58654": "这", "58441": "个", "58493": "上", "58714": "们", "58618": "来", "58528": "到", + "58620": "时", "58403": "大", "58461": "地", "58481": "为", "58700": "子", "58708": "中", + "58503": "你", "58442": "说", "58639": "生", "58506": "国", "58663": "年", "58436": "着", + "58563": "就", "58391": "那", "58357": "和", "58354": "要", "58695": "她", "58372": "出", + "58696": "也", "58551": "得", "58445": "里", "58408": "后", "58599": "自", "58424": "以", + "58394": "会", "58348": "家", "58426": "可", "58673": "下", "58417": "而", "58556": "过", + "58603": "天", "58565": "去", "58604": "能", "58522": "对", "58632": "小", "58622": "多", + "58350": "然", "58605": "于", "58617": "心", "58401": "学", "58637": "么", "58684": "之", + "58382": "都", "58464": 
"好", "58487": "看", "58693": "起", "58608": "发", "58392": "当", + "58474": "没", "58601": "成", "58355": "只", "58573": "如", "58499": "事", "58469": "把", + "58361": "还", "58698": "用", "58489": "第", "58711": "样", "58457": "道", "58635": "想", + "58492": "作", "58647": "种", "58623": "开", "58521": "美", "58609": "总", "58530": "从", + "58665": "无", "58652": "情", "58676": "已", "58456": "面", "58581": "最", "58509": "女", + "58488": "但", "58363": "现", "58685": "前", "58396": "些", "58523": "所", "58471": "同", + "58485": "日", "58613": "手", "58533": "又", "58589": "行", "58527": "意", "58593": "动", + "58699": "方", "58707": "期", "58414": "它", "58596": "头", "58570": "经", "58660": "长", + "58364": "儿", "58526": "回", "58501": "位", "58638": "分", "58404": "爱", "58677": "老", + "58535": "因", "58629": "很", "58577": "给", "58606": "名", "58497": "法", "58662": "间", + "58479": "斯", "58532": "知", "58380": "世", "58385": "什", "58405": "两", "58644": "次", + "58578": "使", "58505": "身", "58564": "者", "58412": "被", "58686": "高", "58624": "已", + "58667": "亲", "58607": "其", "58616": "进", "58368": "此", "58427": "话", "58423": "常", + "58633": "与", "58525": "活", "58543": "正", "58418": "感", "58597": "见", "58683": "明", + "58507": "问", "58621": "力", "58703": "理", "58438": "尔", "58536": "点", "58384": "文", + "58484": "几", "58539": "定", "58554": "本", "58421": "公", "58347": "特", "58569": "做", + "58710": "外", "58574": "孩", "58375": "相", "58645": "西", "58592": "果", "58572": "走", + "58388": "将", "58370": "月", "58399": "十", "58651": "实", "58546": "向", "58504": "声", + "58419": "车", "58407": "全", "58672": "信", "58675": "重", "58538": "三", "58465": "机", + "58374": "工", "58579": "物", "58402": "气", "58702": "每", "58553": "并", "58360": "别", + "58389": "真", "58560": "打", "58690": "太", "58473": "新", "58512": "比", "58653": "才", + "58704": "便", "58545": "夫", "58641": "再", "58475": "书", "58583": "部", "58472": "水", + "58478": "像", "58664": "眼", "58586": "等", "58568": "体", "58674": "却", "58490": "加", + "58476": "电", "58346": "主", "58630": "界", 
"58595": "门", "58502": "利", "58713": "海", + "58587": "受", "58548": "听", "58351": "表", "58547": "徳", "58443": "少", "58460": "克", + "58636": "代", "58585": "员", "58625": "许", "58694": "稜", "58428": "先", "58640": "口", + "58628": "由", "58612": "死", "58446": "安", "58468": "写", "58410": "性", "58508": "马", + "58594": "光", "58483": "白", "58544": "或", "58495": "住", "58450": "难", "58643": "望", + "58486": "教", "58406": "命", "58447": "花", "58669": "结", "58415": "乐", "58444": "色", + "58549": "更", "58494": "拉", "58409": "东", "58658": "神", "58557": "记", "58602": "处", + "58559": "让", "58610": "母", "58513": "父", "58500": "应", "58378": "直", "58680": "字", + "58352": "场", "58383": "平", "58454": "报", "58671": "友", "58668": "关", "58452": "放", + "58627": "至", "58400": "张", "58455": "认", "58416": "接", "58552": "告", "58614": "入", + "58582": "笑", "58534": "内", "58701": "英", "58349": "军", "58491": "侯", "58467": "民", + "58365": "岁", "58598": "往", "58425": "何", "58462": "度", "58420": "山", "58661": "觉", + "58615": "路", "58648": "带", "58470": "万", "58377": "男", "58520": "边", "58646": "风", + "58600": "解", "58431": "叫", "58715": "任", "58524": "金", "58439": "快", "58566": "原", + "58477": "吃", "58642": "妈", "58437": "变", "58411": "通", "58451": "师", "58395": "立", + "58369": "象", "58706": "数", "58705": "四", "58379": "失", "58567": "满", "58373": "战", + "58448": "远", "58659": "格", "58434": "士", "58679": "音", "58432": "轻", "58689": "目", + "58591": "条", "58682": "呢", +} + + +class MarketSignalCollector: + """统一采集器接口。""" + + source_type = "" + + def collect( + self, + source: TopicMarketSignalSourceDTO, + fetch_text: Callable[[str], str], + limit: int, + credentials: TopicMarketSignalSourceCredentialDTO | None = None, + ) -> list[TopicMarketSignalDTO]: + raise NotImplementedError + + +class PublicPageMarketSignalCollector(MarketSignalCollector): + """公开页面抓取采集器。""" + + source_type = "public_page" + + def collect( + self, + source: TopicMarketSignalSourceDTO, + fetch_text: Callable[[str], str], + limit: int, + 
credentials: TopicMarketSignalSourceCredentialDTO | None = None, + ) -> list[TopicMarketSignalDTO]: + rank_urls = _rank_urls_for_source(source) + if rank_urls: + signals: list[TopicMarketSignalDTO] = [] + seen: set[tuple[str, str]] = set() + for rank_label, rank_url in rank_urls: + rank_source = replace(source, url=rank_url) + for signal in self._collect_single_url( + rank_source, + fetch_text, + limit, + credentials, + ): + signal = _signal_with_rank_label(signal, rank_label) + signal_key = (rank_label, signal.title or signal.summary) + if signal_key in seen: + continue + seen.add(signal_key) + signals.append(signal) + return signals + return self._collect_single_url(source, fetch_text, limit, credentials) + + def _collect_single_url( + self, + source: TopicMarketSignalSourceDTO, + fetch_text: Callable[[str], str], + limit: int, + credentials: TopicMarketSignalSourceCredentialDTO | None = None, + ) -> list[TopicMarketSignalDTO]: + try: + html = _fetch_with_optional_headers( + fetch_text, + source.url, + _headers_from_credentials(credentials), + ) + except Exception as exc: + logger.warning("market signal collection failed for %s: %s", source.key, exc) + return [] + if source.key == "qidian_rank": + signals = _signals_from_qidian_rank_html(source, html, limit) + if signals: + return signals + if source.key == "jjwxc_rank": + signals = _signals_from_jjwxc_rank_html(source, html, limit) + if signals: + return signals + if source.key == "qimao_rank": + signals = _signals_from_qimao_rank_html(source, html, limit) + if signals: + return signals + if source.key == "fanqie_rank": + signals = _signals_from_fanqie_rank_html(source, html, limit) + if signals: + return signals + if source.key == "tencent_comic_rank": + signals = _signals_from_tencent_comic_rank_html(source, html, limit) + if signals: + return signals + if source.key == "kuaikan_comic": + signals = _signals_from_kuaikan_comic_rank_html(source, html, limit) + if signals: + return signals + return 
_signals_from_html(source, html, limit) + + +class ApiMarketSignalCollector(MarketSignalCollector): + """API 采集器。""" + + source_type = "api" + + def collect( + self, + source: TopicMarketSignalSourceDTO, + fetch_text: Callable[[str], str], + limit: int, + credentials: TopicMarketSignalSourceCredentialDTO | None = None, + ) -> list[TopicMarketSignalDTO]: + if not _has_credentials(credentials): + logger.info("market signal api source %s is not configured yet", source.key) + return [] + rank_urls = _rank_urls_for_source(source) + if rank_urls: + signals: list[TopicMarketSignalDTO] = [] + seen: set[tuple[str, str]] = set() + for rank_label, rank_url in rank_urls: + rank_source = replace(source, url=rank_url) + for signal in self._collect_single_url( + rank_source, + fetch_text, + limit, + credentials, + ): + signal = _signal_with_rank_label(signal, rank_label) + signal_key = (rank_label, signal.title or signal.summary) + if signal_key in seen: + continue + seen.add(signal_key) + signals.append(signal) + return signals + + return self._collect_single_url(source, fetch_text, limit, credentials) + + def _collect_single_url( + self, + source: TopicMarketSignalSourceDTO, + fetch_text: Callable[[str], str], + limit: int, + credentials: TopicMarketSignalSourceCredentialDTO | None = None, + ) -> list[TopicMarketSignalDTO]: + try: + text = _fetch_with_optional_headers( + fetch_text, + source.url, + _headers_from_credentials(credentials), + ) + except Exception as exc: + logger.warning("market signal api collection failed for %s: %s", source.key, exc) + return [] + signals = _signals_from_api_json(source, text, limit) + if signals: + return signals + return _signals_from_html(source, text, limit) + + +class AuthenticatedSourceMarketSignalCollector(MarketSignalCollector): + """未来登录态采集器占位。""" + + source_type = "authenticated_source" + + def collect( + self, + source: TopicMarketSignalSourceDTO, + fetch_text: Callable[[str], str], + limit: int, + credentials: 
TopicMarketSignalSourceCredentialDTO | None = None, + ) -> list[TopicMarketSignalDTO]: + if not _has_credentials(credentials): + logger.info("market signal authenticated source %s is not configured yet", source.key) + return [] + try: + text = _fetch_with_optional_headers( + fetch_text, + source.url, + _headers_from_credentials(credentials), + ) + except Exception as exc: + logger.warning("market signal authenticated collection failed for %s: %s", source.key, exc) + return [] + return _signals_from_html(source, text, limit) + + +def build_market_signal_collectors() -> dict[str, MarketSignalCollector]: + return { + PublicPageMarketSignalCollector.source_type: PublicPageMarketSignalCollector(), + ApiMarketSignalCollector.source_type: ApiMarketSignalCollector(), + AuthenticatedSourceMarketSignalCollector.source_type: AuthenticatedSourceMarketSignalCollector(), + } + + +def collect_market_signals_from_source( + source: TopicMarketSignalSourceDTO, + fetch_text: Callable[[str], str], + limit: int, + collectors: dict[str, MarketSignalCollector], + credentials: TopicMarketSignalSourceCredentialDTO | None = None, +) -> list[TopicMarketSignalDTO]: + collector = collectors.get(str(source.source_type or "").strip()) + if collector is None: + logger.info( + "market signal source %s skipped: unsupported source_type=%s", + source.key, + source.source_type, + ) + return [] + if source.requires_auth and not _has_credentials(credentials): + logger.info("market signal source %s skipped: requires auth", source.key) + return [] + return collector.collect( + source=source, + fetch_text=fetch_text, + limit=limit, + credentials=credentials, + ) + + +def _has_credentials(credentials: TopicMarketSignalSourceCredentialDTO | None) -> bool: + return bool( + credentials + and ( + str(credentials.api_key or "").strip() + or str(credentials.cookie or "").strip() + or str(credentials.endpoint_url or "").strip() + or credentials.headers + ) + ) + + +def _rank_urls_for_source(source: 
TopicMarketSignalSourceDTO) -> list[tuple[str, str]]: + return [ + (str(label).strip(), str(url).strip()) + for label, url in (source.rank_urls or {}).items() + if str(label).strip() and str(url).strip() + ] + + +def _signal_with_rank_label(signal: TopicMarketSignalDTO, rank_label: str) -> TopicMarketSignalDTO: + tags = list(signal.tags or []) + if rank_label and rank_label not in tags: + tags.append(rank_label) + summary = str(signal.summary or "") + if rank_label and rank_label not in summary: + summary = f"{rank_label}:{summary}" if summary else rank_label + return TopicMarketSignalDTO( + id=signal.id, + source=signal.source, + title=signal.title, + genre=signal.genre, + tags=tags[:5], + summary=summary, + raw_text=signal.raw_text, + created_at=signal.created_at, + ) + + +def _headers_from_credentials( + credentials: TopicMarketSignalSourceCredentialDTO | None, +) -> dict[str, str]: + headers = { + str(key).strip(): str(value).strip() + for key, value in ((credentials.headers if credentials else {}) or {}).items() + if str(key).strip() and str(value).strip() + } + if credentials and credentials.api_key and "Authorization" not in headers: + headers["Authorization"] = f"Bearer {credentials.api_key}" + if credentials and credentials.cookie and "Cookie" not in headers: + headers["Cookie"] = credentials.cookie + return headers + + +def _fetch_with_optional_headers( + fetch_text: Callable[[str], str], + url: str, + headers: dict[str, str], +) -> str: + try: + parameters = inspect.signature(fetch_text).parameters + accepts_headers = any( + parameter.kind == inspect.Parameter.VAR_POSITIONAL + for parameter in parameters.values() + ) or len(parameters) >= 2 + except (TypeError, ValueError): + accepts_headers = False + if accepts_headers: + return fetch_text(url, headers) + return fetch_text(url) + + +def _signals_from_api_json( + source: TopicMarketSignalSourceDTO, + payload: str, + limit: int, +) -> list[TopicMarketSignalDTO]: + try: + data = json.loads(payload or "") + 
except (TypeError, json.JSONDecodeError): + return [] + rank_name = _json_first_text_recursive(data, ("rankName", "rank_name", "榜单", "榜名")) + candidates = _json_candidate_items(data) + signals: list[TopicMarketSignalDTO] = [] + seen: set[str] = set() + for item in candidates: + if not isinstance(item, dict): + continue + title = _json_first_text( + item, + ("title", "bookName", "book_name", "name", "comicName", "comic_name"), + ) + if not _is_signal_title(title) or title in seen: + continue + seen.add(title) + genre = _json_genre(item) + tags = _json_tags(item, rank_name) + summary = _json_summary(source, title, item, rank_name, len(signals) + 1) + signals.append( + TopicMarketSignalDTO( + id=f"signal-{uuid4().hex}", + source=source.name, + title=title, + genre=genre or ("漫画" if source.category == "comic" else ""), + tags=tags, + summary=summary, + raw_text=json.dumps(item, ensure_ascii=False), + created_at=datetime.now(timezone.utc).isoformat(), + ) + ) + if len(signals) >= limit: + break + return signals + + +def _json_candidate_items(value: object) -> list[dict[str, object]]: + if isinstance(value, list): + return [item for item in value if isinstance(item, dict)] + if not isinstance(value, dict): + return [] + for key in ("books", "items", "list", "records", "data", "result", "rankList"): + nested = value.get(key) + if isinstance(nested, list): + return [item for item in nested if isinstance(item, dict)] + if isinstance(nested, dict): + items = _json_candidate_items(nested) + if items: + return items + for nested in value.values(): + items = _json_candidate_items(nested) + if items: + return items + return [] + + +def _json_first_text(value: object, keys: tuple[str, ...]) -> str: + if not isinstance(value, dict): + return "" + for key in keys: + text = str(value.get(key) or "").strip() + if text: + return re.sub(r"\s+", " ", text) + return "" + + +def _json_first_text_recursive(value: object, keys: tuple[str, ...]) -> str: + text = _json_first_text(value, keys) 
+ if text: + return text + if isinstance(value, dict): + for nested in value.values(): + text = _json_first_text_recursive(nested, keys) + if text: + return text + if isinstance(value, list): + for nested in value: + text = _json_first_text_recursive(nested, keys) + if text: + return text + return "" + + +def _json_tags(item: dict[str, object], rank_name: str) -> list[str]: + tags: list[str] = [] + for key in ("categoryName", "category_name", "tags", "tag", "keywords", "labels", "categories"): + value = item.get(key) + if isinstance(value, list): + candidates = value + else: + candidates = re.split(r"[,,/、\s]+", str(value or "")) + for candidate in candidates: + text = _json_tag_text(candidate) + if text and text not in {"小说", "漫画"} and text not in tags: + tags.append(text) + if rank_name and rank_name not in tags: + tags.append(rank_name) + return tags[:5] + + +def _json_genre(item: dict[str, object]) -> str: + genre = _json_first_text( + item, + ("category", "categoryName", "category_name", "genre", "type", "className", "class_name"), + ) + if genre and not genre.isdigit(): + return genre + categories = item.get("categories") + if isinstance(categories, list): + names = [ + _json_tag_text(category) + for category in categories + if isinstance(category, dict) + ] + names = [name for name in names if name and name not in {"小说", "漫画"}] + return names[0] if names else "" + return "" + + +def _json_tag_text(value: object) -> str: + if isinstance(value, dict): + return _json_first_text(value, ("name", "shortName", "short_name", "title", "label")) + return str(value or "").strip() + + +def _json_summary( + source: TopicMarketSignalSourceDTO, + title: str, + item: dict[str, object], + rank_name: str, + fallback_rank: int, +) -> str: + rank = _json_first_text(item, ("rank", "ranking", "rankNo", "rank_no")) or str(fallback_rank) + intro = _json_first_text(item, ("intro", "description", "summary", "desc")) + heat = _json_first_text(item, ("heat", "hot", "popularity", 
"score", "metric")) + prefix = f"{source.name} {rank_name}第{rank}名:{title}" if rank_name else f"{source.name} API 信号:{title}" + parts = [prefix] + if heat: + parts.append(heat) + if intro: + parts.append(intro) + return ";".join(parts)[:240] + + +def _signals_from_html( + source: TopicMarketSignalSourceDTO, + html: str, + limit: int, +) -> list[TopicMarketSignalDTO]: + text = re.sub(r"<(script|style).*?", " ", html or "", flags=re.I | re.S) + candidates = re.findall(r"]*>(.*?)", text, flags=re.I | re.S) + if not candidates: + candidates = re.findall(r'title=["\']([^"\']{2,60})["\']', text, flags=re.I) + plain = unescape(re.sub(r"<[^>]+>", "\n", text)) + lines = [line.strip() for line in plain.splitlines() if line.strip()] + if not candidates: + candidates = lines + + seen = set() + signals: list[TopicMarketSignalDTO] = [] + for candidate in candidates: + title = unescape(re.sub(r"<[^>]+>", "", candidate)).strip() + title = re.sub(r"\s+", " ", title) + if not _is_signal_title(title) or title in seen: + continue + seen.add(title) + summary = _summary_for_collected_title(title, lines) + signals.append( + TopicMarketSignalDTO( + id=f"signal-{uuid4().hex}", + source=source.name, + title=title, + genre="漫画" if source.category == "comic" else "", + tags=["漫画"] if source.category == "comic" else [], + summary=summary or f"{source.name} 公开页面出现:{title}", + raw_text=title, + created_at=datetime.now(timezone.utc).isoformat(), + ) + ) + if len(signals) >= limit: + break + return signals + + +def _signals_from_qidian_rank_html( + source: TopicMarketSignalSourceDTO, + html: str, + limit: int, +) -> list[TopicMarketSignalDTO]: + blocks = re.findall( + r']+class=["\'][^"\']*book-mid-info[^"\']*["\'][^>]*>(.*?)', + html or "", + flags=re.I | re.S, + ) + signals: list[TopicMarketSignalDTO] = [] + seen: set[str] = set() + for block in blocks: + title = _first_link_text(block, r"]*>(.*?)") + if not _is_signal_title(title) or title in seen: + continue + seen.add(title) + metadata = 
_qidian_metadata(block) + summary = _qidian_intro(block) or f"{source.name} 公开页面出现:{title}" + signals.append( + TopicMarketSignalDTO( + id=f"signal-{uuid4().hex}", + source=source.name, + title=title, + genre=metadata[0] if metadata else "", + tags=metadata[:2], + summary=summary, + raw_text=title, + created_at=datetime.now(timezone.utc).isoformat(), + ) + ) + if len(signals) >= limit: + break + if signals: + return signals + return _signals_from_qidian_mobile_rank_html(source, html, limit) + + +def _signals_from_qidian_mobile_rank_html( + source: TopicMarketSignalSourceDTO, + html: str, + limit: int, +) -> list[TopicMarketSignalDTO]: + item_matches = list( + re.finditer( + r']+href=["\'][^"\']*m\.qidian\.com/book/\d+/?[^"\']*["\'][^>]*>.*?', + html or "", + flags=re.I | re.S, + ) + ) + signals: list[TopicMarketSignalDTO] = [] + seen: set[str] = set() + for item_match in item_matches: + block = item_match.group(0) + title_match = re.search(r"]*>(.*?)", block, flags=re.I | re.S) + title = _clean_html_text(title_match.group(1)) if title_match else "" + if not _is_signal_title(title) or title in seen: + continue + seen.add(title) + rank_name = _nearest_qidian_mobile_rank_title(html or "", item_match.start()) or "起点榜单" + rank = _qidian_mobile_rank_text(block) or str(len(signals) + 1) + metadata = _qidian_mobile_metadata(block) + tags = ([metadata[0]] if metadata else []) + [rank_name] + signals.append( + TopicMarketSignalDTO( + id=f"signal-{uuid4().hex}", + source=source.name, + title=title, + genre=metadata[0] if metadata else rank_name, + tags=tags[:3], + summary=f"{source.name} {rank_name}第{rank}名:{title}", + raw_text=title, + created_at=datetime.now(timezone.utc).isoformat(), + ) + ) + if len(signals) >= limit: + break + return signals + + +def _signals_from_jjwxc_rank_html( + source: TopicMarketSignalSourceDTO, + html: str, + limit: int, +) -> list[TopicMarketSignalDTO]: + rank_name = _jjwxc_rank_name(html) or "晋江榜单" + signals: list[TopicMarketSignalDTO] = [] + 
seen: set[str] = set() + for link_match in re.finditer( + r']+href=["\'][^"\']*/book2/\d+[^"\']*["\'][^>]*>(.*?)', + html or "", + flags=re.I | re.S, + ): + title = _clean_html_text(link_match.group(1)) + if not _is_signal_title(title) or title in seen: + continue + seen.add(title) + rank = len(signals) + 1 + signals.append( + TopicMarketSignalDTO( + id=f"signal-{uuid4().hex}", + source=source.name, + title=title, + genre=rank_name, + tags=["晋江", rank_name], + summary=f"{source.name} {rank_name}第{rank}名:{title}", + raw_text=title, + created_at=datetime.now(timezone.utc).isoformat(), + ) + ) + if len(signals) >= limit: + break + return signals + + +def _signals_from_qimao_rank_html( + source: TopicMarketSignalSourceDTO, + html: str, + limit: int, +) -> list[TopicMarketSignalDTO]: + rank_name = _qimao_rank_name(html) or "七猫榜单" + item_matches = re.finditer( + r']+class=["\'][^"\']*rank-list-item[^"\']*["\'][^>]*>.*?', + html or "", + flags=re.I | re.S, + ) + signals: list[TopicMarketSignalDTO] = [] + seen: set[str] = set() + for item_match in item_matches: + block = item_match.group(0) + title = _qimao_title(block) + if not _is_signal_title(title) or title in seen: + continue + seen.add(title) + tags = _qimao_tags(block) + rank = _qimao_rank_text(block) or str(len(signals) + 1) + metric = _qimao_metric(block) + intro = _qimao_intro(block) + summary_parts = [f"{source.name} {rank_name}第{rank}名:{title}"] + if metric: + summary_parts.append(metric) + if intro: + summary_parts.append(intro) + signals.append( + TopicMarketSignalDTO( + id=f"signal-{uuid4().hex}", + source=source.name, + title=title, + genre=tags[0] if tags else rank_name, + tags=(tags + [rank_name])[:3], + summary=";".join(summary_parts), + raw_text=title, + created_at=datetime.now(timezone.utc).isoformat(), + ) + ) + if len(signals) >= limit: + break + return signals + + +def _signals_from_fanqie_rank_html( + source: TopicMarketSignalSourceDTO, + html: str, + limit: int, +) -> list[TopicMarketSignalDTO]: + 
card_starts = list( + re.finditer( + r'<(?:div|li|article)[^>]+class=["\'][^"\']*(?:muye-rank-book-item|rank-book-item|book-card|book-item)[^"\']*["\'][^>]*>', + html or "", + flags=re.I | re.S, + ) + ) + blocks = [ + (html or "")[ + match.start():card_starts[index + 1].start() + if index + 1 < len(card_starts) + else min(len(html or ""), match.start() + 2500) + ] + for index, match in enumerate(card_starts) + ] + if not blocks: + blocks = [ + (html or "")[max(0, match.start() - 500):min(len(html or ""), match.end() + 1200)] + for match in re.finditer( + r']+href=["\'][^"\']*/page/\d+[^"\']*["\'][^>]*>.*?', + html or "", + flags=re.I | re.S, + ) + ] + + signals: list[TopicMarketSignalDTO] = [] + seen: set[str] = set() + for block in blocks: + title = _fanqie_title(block) + if not _is_signal_title(title) or title in seen: + continue + seen.add(title) + tags = _fanqie_tags(block, title) + summary = _fanqie_summary(block) or f"{source.name} 公开页面出现:{title}" + signals.append( + TopicMarketSignalDTO( + id=f"signal-{uuid4().hex}", + source=source.name, + title=title, + genre=tags[0] if tags else "", + tags=tags[:3], + summary=summary, + raw_text=title, + created_at=datetime.now(timezone.utc).isoformat(), + ) + ) + if len(signals) >= limit: + break + return signals + + +def _signals_from_tencent_comic_rank_html( + source: TopicMarketSignalSourceDTO, + html: str, + limit: int, +) -> list[TopicMarketSignalDTO]: + item_matches = re.finditer(r"]*>.*?", html or "", flags=re.I | re.S) + signals: list[TopicMarketSignalDTO] = [] + seen: set[str] = set() + for item_match in item_matches: + block = item_match.group(0) + if "mod-rank-name" not in block: + continue + link_match = re.search( + r'(]+class=["\'][^"\']*mod-rank-name[^"\']*["\'][^>]*>)(.*?)', + block, + flags=re.I | re.S, + ) + if not link_match: + continue + title_attr = re.search(r'title=["\']([^"\']+)["\']', link_match.group(1), flags=re.I) + title = _clean_html_text(title_attr.group(1) if title_attr else 
link_match.group(2)) + if not _is_signal_title(title) or title in seen: + continue + seen.add(title) + rank = _tencent_rank_text(block) + rank_name = _nearest_tencent_rank_title(html or "", item_match.start()) or "漫画榜" + metric = _tencent_rank_metric(block) + summary = f"{source.name} {rank_name}第{rank}名:{title}" + if metric: + summary = f"{summary},{metric}" + signals.append( + TopicMarketSignalDTO( + id=f"signal-{uuid4().hex}", + source=source.name, + title=title, + genre="漫画", + tags=["漫画", rank_name], + summary=summary, + raw_text=title, + created_at=datetime.now(timezone.utc).isoformat(), + ) + ) + if len(signals) >= limit: + break + return signals + + +def _signals_from_kuaikan_comic_rank_html( + source: TopicMarketSignalSourceDTO, + html: str, + limit: int, +) -> list[TopicMarketSignalDTO]: + item_starts = list( + re.finditer( + r']+class=["\'][^"\']*listItem[^"\']*["\'][^>]*>', + html or "", + flags=re.I | re.S, + ) + ) + if not item_starts: + item_starts = list( + re.finditer( + r']+class=["\'][^"\']*IdItems[^"\']*["\'][^>]*>', + html or "", + flags=re.I | re.S, + ) + ) + blocks = [ + (html or "")[ + match.start():item_starts[index + 1].start() + if index + 1 < len(item_starts) + else min(len(html or ""), match.start() + 3500) + ] + for index, match in enumerate(item_starts) + ] + + signals: list[TopicMarketSignalDTO] = [] + seen: set[str] = set() + for index, block in enumerate(blocks): + title = _kuaikan_title(block) + if not _is_signal_title(title) or title in seen: + continue + seen.add(title) + rank_name = _nearest_kuaikan_rank_title(html or "", item_starts[index].start()) or "快看榜单" + rank = _kuaikan_rank_text(block) or str(index + 1) + author = _kuaikan_author(block) + description = _kuaikan_description(block) + update = _kuaikan_update(block) + summary_parts = [f"{source.name} {rank_name}第{rank}名:{title}"] + if author: + summary_parts.append(author) + if description: + summary_parts.append(description) + if update: + summary_parts.append(update) + 
signals.append( + TopicMarketSignalDTO( + id=f"signal-{uuid4().hex}", + source=source.name, + title=title, + genre="漫画", + tags=["漫画", rank_name], + summary=";".join(summary_parts), + raw_text=title, + created_at=datetime.now(timezone.utc).isoformat(), + ) + ) + if len(signals) >= limit: + break + return signals + + +def _first_link_text(block: str, container_pattern: str) -> str: + container_match = re.search(container_pattern, block or "", flags=re.I | re.S) + text = container_match.group(1) if container_match else block + link_match = re.search(r"]*>(.*?)", text, flags=re.I | re.S) + if link_match: + text = link_match.group(1) + return _clean_html_text(text) + + +def _jjwxc_rank_name(html: str) -> str: + channel_match = re.search( + r']+class=["\'][^"\']*channellink[^"\']*["\'][^>]*>(.*?)', + html or "", + flags=re.I | re.S, + ) + if channel_match: + return _normalize_rank_name(channel_match.group(1)) + heading_match = re.search(r"]*>(.*?)", html or "", flags=re.I | re.S) + return _normalize_rank_name(heading_match.group(1)) if heading_match else "" + + +def _normalize_rank_name(value: str) -> str: + name = _clean_html_text(value) + name = re.sub(r">>+$", "", name).strip() + return name[:20] + + +def _qimao_rank_name(html: str) -> str: + heading_match = re.search( + r']+class=["\'][^"\']*header-txt[^"\']*["\'][^>]*>(.*?)', + html or "", + flags=re.I | re.S, + ) + return _clean_html_text(heading_match.group(1)) if heading_match else "" + + +def _qimao_title(block: str) -> str: + title_match = re.search( + r']+class=["\'][^"\']*s-book-title[^"\']*["\'][^>]*>(.*?)', + block or "", + flags=re.I | re.S, + ) + return _clean_html_text(title_match.group(1)) if title_match else "" + + +def _qimao_tags(block: str) -> list[str]: + info_match = re.search( + r']+class=["\'][^"\']*s-book-info[^"\']*["\'][^>]*>(.*?)', + block or "", + flags=re.I | re.S, + ) + if not info_match: + return [] + link_texts = [ + _clean_html_text(item) + for item in re.findall(r"]*>(.*?)", 
info_match.group(1), flags=re.I | re.S) + ] + tags = [item for item in link_texts[1:] if item] + return tags[:2] + + +def _qimao_rank_text(block: str) -> str: + rank_match = re.search( + r']+class=["\'][^"\']*rank-number[^"\']*["\'][^>]*>(.*?)', + block or "", + flags=re.I | re.S, + ) + return _clean_html_text(rank_match.group(1)) if rank_match else "" + + +def _qimao_intro(block: str) -> str: + intro_match = re.search( + r']+class=["\'][^"\']*s-book-intro[^"\']*["\'][^>]*>(.*?)', + block or "", + flags=re.I | re.S, + ) + return _clean_html_text(intro_match.group(1)) if intro_match else "" + + +def _qimao_metric(block: str) -> str: + metric_match = re.search( + r']+class=["\'][^"\']*rank-change-num[^"\']*["\'][^>]*>(.*?)', + block or "", + flags=re.I | re.S, + ) + return _clean_html_text(metric_match.group(1)) if metric_match else "" + + +def _fanqie_title(block: str) -> str: + title_match = re.search( + r'<[^>]+class=["\'][^"\']*(?:book-name|book-title|title)[^"\']*["\'][^>]*>(.*?)]+>', + block or "", + flags=re.I | re.S, + ) + if title_match: + return _decode_fanqie_text(_clean_html_text(title_match.group(1))) + page_link_match = re.search( + r']+href=["\'][^"\']*/page/\d+[^"\']*["\'][^>]*>(.*?)', + block or "", + flags=re.I | re.S, + ) + return _decode_fanqie_text(_clean_html_text(page_link_match.group(1))) if page_link_match else "" + + +def _fanqie_tags(block: str, title: str) -> list[str]: + tag_container = re.search( + r'<(?Pdiv|p)[^>]+class=["\'][^"\']*(?:tag|category|label)[^"\']*["\'][^>]*>(.*?)', + block or "", + flags=re.I | re.S, + ) + if not tag_container: + return [] + tag_html = tag_container.group(2) + raw_items = re.findall(r"<(?:span|a)[^>]*>(.*?)", tag_html, flags=re.I | re.S) + stop_words = {"连载", "连载中", "完结", "已完结", "作者", title} + tags: list[str] = [] + for raw_item in raw_items: + item = _decode_fanqie_text(_clean_html_text(raw_item)) + if not item or item in stop_words or item in tags: + continue + if len(item) > 12: + continue + 
tags.append(item) + return tags[:3] + + +def _fanqie_summary(block: str) -> str: + summary_match = re.search( + r'<(?Pdiv|p)[^>]+class=["\'][^"\']*(?:book-desc|desc|intro|abstract)[^"\']*["\'][^>]*>(.*?)', + block or "", + flags=re.I | re.S, + ) + return _decode_fanqie_text(_clean_html_text(summary_match.group(2))) if summary_match else "" + + +def _decode_fanqie_text(text: str) -> str: + if not text: + return "" + return "".join(_FANQIE_TEXT_FONT_MAP.get(str(ord(char)), char) for char in text) + + +def _tencent_rank_text(block: str) -> str: + rank_match = re.search(r"]*>(.*?)", block or "", flags=re.I | re.S) + rank = _clean_html_text(rank_match.group(1)) if rank_match else "" + return rank or "?" + + +def _nearest_tencent_rank_title(html: str, offset: int) -> str: + headings = list( + re.finditer( + r']+class=["\'][^"\']*ran-rank-title[^"\']*["\'][^>]*>(.*?)', + html[:offset], + flags=re.I | re.S, + ) + ) + if not headings: + return "" + return _clean_html_text(headings[-1].group(1)) + + +def _tencent_rank_metric(block: str) -> str: + metric_match = re.search( + r']+class=["\'][^"\']*mod-rank-num[^"\']*["\'][^>]*>(.*?)', + block or "", + flags=re.I | re.S, + ) + return _clean_html_text(metric_match.group(1)) if metric_match else "" + + +def _nearest_kuaikan_rank_title(html: str, offset: int) -> str: + active_nav_match = re.search( + r']+title=["\']([^"\']+)["\'][^>]+class=["\'][^"\']*active[^"\']*["\']', + html or "", + flags=re.I | re.S, + ) + if active_nav_match: + return _clean_html_text(active_nav_match.group(1)) + headings = list( + re.finditer( + r']+class=["\'][^"\']*title[^"\']*["\'][^>]*>(.*?)', + html[:offset], + flags=re.I | re.S, + ) + ) + if not headings: + return "" + name = _clean_html_text(headings[-1].group(1)) + return name.replace("更多", "").strip()[:20] + + +def _kuaikan_title(block: str) -> str: + span_texts = [ + _clean_html_text(item) + for item in re.findall(r"]*>([^<>]+)", block or "", flags=re.I | re.S) + ] + for text in span_texts: + if 
text and not text.isdigit() and _is_signal_title(text): + return text + normal_match = re.search( + r']+class=["\'][^"\']*title[^"\']*["\'][^>]*>(.*?)', + block or "", + flags=re.I | re.S, + ) + if normal_match: + return _clean_html_text(normal_match.group(1)) + hover_match = re.search( + r']+class=["\'][^"\']*title[^"\']*["\'][^>]*>.*?]*>(.*?)', + block or "", + flags=re.I | re.S, + ) + return _clean_html_text(hover_match.group(1)) if hover_match else "" + + +def _kuaikan_rank_text(block: str) -> str: + rank_match = re.search( + r']+class=["\'][^"\']*(?:top|iconText)[^"\']*["\'][^>]*>(.*?)', + block or "", + flags=re.I | re.S, + ) + return _clean_html_text(rank_match.group(1)) if rank_match else "" + + +def _kuaikan_author(block: str) -> str: + author_match = re.search( + r'<(?:span|p|div)[^>]+class=["\'][^"\']*author[^"\']*["\'][^>]*>(.*?)', + block or "", + flags=re.I | re.S, + ) + return _clean_html_text(author_match.group(1)) if author_match else "" + + +def _kuaikan_description(block: str) -> str: + description_match = re.search( + r'<(?:p|div)[^>]+class=["\'][^"\']*(?:description|desc)[^"\']*["\'][^>]*>(.*?)', + block or "", + flags=re.I | re.S, + ) + return _clean_html_text(description_match.group(1)) if description_match else "" + + +def _kuaikan_update(block: str) -> str: + update_match = re.search( + r']+class=["\'][^"\']*update[^"\']*["\'][^>]*>(.*?)', + block or "", + flags=re.I | re.S, + ) + return _clean_html_text(update_match.group(1)) if update_match else "" + + +def _qidian_metadata(block: str) -> list[str]: + author_match = re.search( + r']+class=["\'][^"\']*author[^"\']*["\'][^>]*>(.*?)

', + block or "", + flags=re.I | re.S, + ) + if not author_match: + return [] + link_texts = [ + _clean_html_text(item) + for item in re.findall(r"]*>(.*?)", author_match.group(1), flags=re.I | re.S) + ] + metadata = [item for item in link_texts[1:] if item] + return metadata[:2] + + +def _qidian_intro(block: str) -> str: + intro_match = re.search( + r']+class=["\'][^"\']*intro[^"\']*["\'][^>]*>(.*?)

', + block or "", + flags=re.I | re.S, + ) + return _clean_html_text(intro_match.group(1)) if intro_match else "" + + +def _nearest_qidian_mobile_rank_title(html: str, offset: int) -> str: + headings = list( + re.finditer( + r']+class=["\'][^"\']*_rankTitle_[^"\']*["\'][^>]*>.*?]*>(.*?)', + html[:offset], + flags=re.I | re.S, + ) + ) + if not headings: + return "" + return _clean_html_text(headings[-1].group(1)) + + +def _qidian_mobile_rank_text(block: str) -> str: + rank_match = re.search( + r']+class=["\'][^"\']*_ranking_[^"\']*["\'][^>]*>(.*?)', + block or "", + flags=re.I | re.S, + ) + return _clean_html_text(rank_match.group(1)) if rank_match else "" + + +def _qidian_mobile_metadata(block: str) -> list[str]: + subtitle_match = re.search(r"]*>(.*?)

", block or "", flags=re.I | re.S) + subtitle = _clean_html_text(subtitle_match.group(1)) if subtitle_match else "" + parts = [part.strip() for part in subtitle.split("·") if part.strip()] + metadata = [part for part in parts[1:] if not re.search(r"\d", part)] + return metadata[:2] + + +def _clean_html_text(value: str) -> str: + text = unescape(re.sub(r"<[^>]+>", "", value or "")) + return re.sub(r"\s+", " ", text).strip() + + +def _is_signal_title(title: str) -> bool: + if not title or len(title) < 2 or len(title) > 40: + return False + noise = ["排行榜", "登录", "注册", "更多", "首页", "客户端", "App", "APP"] + return not any(word in title for word in noise) + + +def _summary_for_collected_title(title: str, lines: list[str]) -> str: + for index, line in enumerate(lines): + if title in line: + context = [line] + context.extend(lines[index + 1:index + 3]) + return ";".join(item for item in context if item)[:240] + return "" diff --git a/application/topic/services/topic_signal_sources.py b/application/topic/services/topic_signal_sources.py new file mode 100644 index 000000000..b30f197ec --- /dev/null +++ b/application/topic/services/topic_signal_sources.py @@ -0,0 +1,109 @@ +"""市场信号来源配置。""" +from __future__ import annotations + +from application.topic.dtos import TopicMarketSignalSourceDTO + + +MARKET_SIGNAL_SOURCES = { + "qidian_rank": TopicMarketSignalSourceDTO( + key="qidian_rank", + name="起点-小说榜", + url="https://m.qidian.com/rank", + category="novel", + source_type="public_page", + requires_auth=False, + rank_urls={ + "热门榜": "https://m.qidian.com/rank/", + "新书榜": "https://m.qidian.com/rank/newbook/", + "快速上榜": "https://m.qidian.com/rank/sign/", + }, + ), + "jjwxc_rank": TopicMarketSignalSourceDTO( + key="jjwxc_rank", + name="晋江-小说榜", + url="https://m.jjwxc.net/rank", + category="novel", + source_type="public_page", + requires_auth=False, + rank_urls={ + "热门榜": "https://m.jjwxc.net/rank/naturalmore/64", + "新书榜": "https://m.jjwxc.net/rank/naturalmore/29", + "快速上榜": 
"https://m.jjwxc.net/rank/naturalmore/36", + }, + ), + "qimao_rank": TopicMarketSignalSourceDTO( + key="qimao_rank", + name="七猫-小说榜", + url="https://www.qimao.com/paihang", + category="novel", + source_type="public_page", + requires_auth=False, + rank_urls={ + "热门榜": "https://www.qimao.com/paihang/boy/hot/date/", + "新书榜": "https://www.qimao.com/paihang/boy/new/date/", + "快速上榜": "https://www.qimao.com/paihang/boy/leap/date/", + }, + ), + "fanqie_rank": TopicMarketSignalSourceDTO( + key="fanqie_rank", + name="番茄-小说榜", + url="https://fanqienovel.com/rank", + category="novel", + source_type="public_page", + requires_auth=False, + rank_urls={ + "热门榜": "https://fanqienovel.com/rank/1_2_1141", + "新书榜": "https://fanqienovel.com/rank/1_1_1141", + "快速上榜": "https://fanqienovel.com/rank/1_2_1140", + }, + ), + "qq_read": TopicMarketSignalSourceDTO( + key="qq_read", + name="腾讯-QQ阅读", + url="https://ubook.reader.qq.com/api/book/rank?columnId=535193&pageIndex=1&pageSize=20", + category="novel", + source_type="api", + requires_auth=False, + rank_urls={ + "热门榜": "https://ubook.reader.qq.com/api/book/rank?columnId=535193&pageIndex=1&pageSize=20", + "新书榜": "https://ubook.reader.qq.com/api/book/rank?columnId=535194&pageIndex=1&pageSize=20", + "快速上榜": "https://ubook.reader.qq.com/api/book/rank?columnId=535189&pageIndex=1&pageSize=20", + }, + ), + "tencent_comic_rank": TopicMarketSignalSourceDTO( + key="tencent_comic_rank", + name="腾讯动漫-漫画榜", + url="https://ac.qq.com/Rank/comicRank/type/pgv", + category="comic", + source_type="public_page", + requires_auth=False, + rank_urls={ + "热门榜": "https://ac.qq.com/Rank/comicRank/type/pgv", + "新书榜": "https://ac.qq.com/Rank/comicRank/type/new", + "快速上榜": "https://ac.qq.com/Rank/comicRank/type/rise", + }, + ), + "kuaikan_comic": TopicMarketSignalSourceDTO( + key="kuaikan_comic", + name="快看漫画-漫画", + url="https://www.kuaikanmanhua.com/ranking/", + category="comic", + source_type="public_page", + requires_auth=False, + rank_urls={ + "热门榜": 
"https://www.kuaikanmanhua.com/ranking/9", + "新书榜": "https://www.kuaikanmanhua.com/ranking/2", + "快速上榜": "https://www.kuaikanmanhua.com/ranking/7", + }, + ), +} + +DEFAULT_MARKET_SIGNAL_SOURCE_WEIGHTS = { + "qidian_rank": 1.0, + "jjwxc_rank": 1.1, + "qimao_rank": 0.95, + "fanqie_rank": 1.05, + "qq_read": 0.9, + "tencent_comic_rank": 0.85, + "kuaikan_comic": 0.7, +} diff --git a/application/workflows/auto_novel_generation_workflow.py b/application/workflows/auto_novel_generation_workflow.py index 06c780476..b98effbef 100644 --- a/application/workflows/auto_novel_generation_workflow.py +++ b/application/workflows/auto_novel_generation_workflow.py @@ -3,7 +3,15 @@ 整合所有子项目组件,实现完整的章节生成流程。 """ import logging +import json +import os +import re from typing import Tuple, Dict, Any, AsyncIterator, Optional, List +from application.ai.llm_json_extract import ( + extract_outer_json_object, + repair_json, + strip_json_fences, +) from application.engine.services.context_builder import ContextBuilder from application.analyst.services.state_extractor import StateExtractor from application.analyst.services.state_updater import StateUpdater @@ -17,7 +25,12 @@ from domain.novel.repositories.plot_arc_repository import PlotArcRepository from domain.bible.repositories.bible_repository import BibleRepository from domain.novel.repositories.foreshadowing_repository import ForeshadowingRepository -from domain.novel.value_objects.consistency_report import ConsistencyReport +from domain.novel.value_objects.consistency_report import ( + ConsistencyReport, + Issue, + IssueType, + Severity, +) from domain.novel.value_objects.chapter_state import ChapterState from domain.novel.value_objects.consistency_context import ConsistencyContext from domain.novel.value_objects.novel_id import NovelId @@ -32,6 +45,18 @@ # 段名与语义对齐,避免「SMART RETRIEVAL」贴在近期正文等历史误标 CHAPTER_CONTEXT_LAYER2_HEADER = "RECENT CHAPTERS" # T2 近期章节正文 CHAPTER_CONTEXT_LAYER3_HEADER = "VECTOR RECALL" # T3 向量召回 +CHAPTER_GENERATION_MAX_TOKENS 
= 8192 +CHAPTER_GENERATION_TEMPERATURE = 0.92 +DEFAULT_CHAPTER_TARGET_WORDS = 2500 +DEFAULT_WORD_TOLERANCE_RATIO = 0.05 +MIN_WORD_TOLERANCE_RATIO = 0.02 +MAX_WORD_TOLERANCE_RATIO = 0.20 +LONG_CHAPTER_NEXT_SETUP_MIN_WORDS = 3800 +NEXT_CHAPTER_SETUP_MAX_CHARS = 260 +SCENE_BUDGET_MAX_SEGMENTS = 6 +SCENE_BUDGET_MIN_SEGMENTS = 2 +LONG_DRAFT_SPLIT_MIN = 2 +LONG_DRAFT_SPLIT_MAX = 4 def assemble_chapter_bundle_context_text(payload: Dict[str, Any]) -> str: @@ -93,6 +118,10 @@ def __init__( voice_fingerprint_service: Optional['VoiceFingerprintService'] = None, cliche_scanner: Optional['ClicheScanner'] = None, memory_engine: Optional['MemoryEngine'] = None, + style_prompt_overlay_service: Optional[Any] = None, + prop_ledger_service: Optional[Any] = None, + coc_canon_service: Optional[Any] = None, + coc_clue_service: Optional[Any] = None, ): """初始化工作流 @@ -110,6 +139,10 @@ def __init__( voice_fingerprint_service: 风格指纹服务(可选) cliche_scanner: 俗套扫描器(可选) memory_engine: V6 记忆引擎(可选,提供 FACT_LOCK / BEATS / CLUES 注入与章后回写) + style_prompt_overlay_service: 写作手法知识库 overlay 服务(可选) + prop_ledger_service: 道具账本服务(可选,用于章节生成前注入当前道具状态) + coc_canon_service: CoC 正典 overlay 服务(可选) + coc_clue_service: CoC 线索边界 overlay 服务(可选) """ self.context_builder = context_builder self.consistency_checker = consistency_checker @@ -154,6 +187,859 @@ def __init__( self.conflict_detection_service = conflict_detection_service self.voice_fingerprint_service = voice_fingerprint_service self.cliche_scanner = cliche_scanner + self.style_prompt_overlay_service = style_prompt_overlay_service + self.prop_ledger_service = prop_ledger_service + self.coc_canon_service = coc_canon_service + self.coc_clue_service = coc_clue_service + self._current_coc_canon_overlay: str = "" + self._current_coc_absolute_titles: list[str] = [] + self._current_coc_clue_overlay: str = "" + self._current_coc_author_only_clue_keys: list[str] = [] + self._current_coc_cognition_overlay: str = "" + self._current_coc_author_truth_snippets: list[str] = [] + 
self._coc_hard_guard_enabled: bool = True + + def precheck_coc_cognition_boundary( + self, + *, + novel_id: str, + chapter_number: int, + outline: str, + ) -> dict[str, Any]: + """章节生成前的 CoC 认知边界预检。""" + text = str(outline or "").strip() + if not text: + return { + "checked": False, + "allow_generate": True, + "risk_level": "none", + "blocking_issues": [], + "warnings": [], + "matched_tokens": [], + } + + canon_layers: dict[str, Any] = {} + clue_layers: dict[str, Any] = {} + if self.coc_canon_service: + try: + layers = self.coc_canon_service.get_cognition_layers(novel_id) or {} + canon_layers = layers if isinstance(layers, dict) else {} + except Exception as e: + logger.warning("coc precheck(canon) unavailable: %s", e) + if self.coc_clue_service: + try: + layers = self.coc_clue_service.get_cognition_layers(novel_id) or {} + clue_layers = layers if isinstance(layers, dict) else {} + except Exception as e: + logger.warning("coc precheck(clue) unavailable: %s", e) + + if not (canon_layers or clue_layers): + return { + "checked": False, + "allow_generate": True, + "risk_level": "none", + "blocking_issues": [], + "warnings": [], + "matched_tokens": [], + } + + blocking_issues: list[str] = [] + warnings: list[str] = [] + matched_tokens: list[str] = [] + + author_truth_snippets = self._extract_coc_author_truth_snippets( + canon_layers, + [str(line or "") for line in (canon_layers.get("author_truth") or []) + (clue_layers.get("author_truth") or [])], + ) + for snippet in author_truth_snippets: + if snippet and snippet in text: + token = snippet[:40] + matched_tokens.append(token) + blocking_issues.append( + f"命中作者真相片段:{token}...(建议改为伏笔/误导表达)" + ) + + author_only_keys: list[str] = [] + for line in clue_layers.get("author_truth") or []: + raw = str(line or "").strip() + if ":" in raw: + key = raw.split(":", 1)[0].strip() + if key and key not in author_only_keys: + author_only_keys.append(key) + for key in author_only_keys: + if key and key in text: + 
matched_tokens.append(key) + blocking_issues.append( + f"命中 author_only 线索键:{key}(大纲不应直接暴露)" + ) + + for line in clue_layers.get("character_known") or []: + raw = str(line or "").strip() + if not raw: + continue + hint = raw.split(":", 1)[0].strip() if ":" in raw else raw[:20] + if hint and hint in text: + warnings.append( + f"命中角色层线索:{hint}(生成时请通过角色视角逐步揭示)" + ) + + allow_generate = len(blocking_issues) == 0 + if blocking_issues: + risk_level = "block" + elif warnings: + risk_level = "warning" + else: + risk_level = "none" + return { + "checked": True, + "allow_generate": allow_generate, + "risk_level": risk_level, + "blocking_issues": blocking_issues, + "warnings": warnings, + "matched_tokens": matched_tokens, + "chapter_number": int(chapter_number), + } + + def rewrite_outline_for_coc_boundary( + self, + *, + novel_id: str, + chapter_number: int, + outline: str, + rewrite_mode: str = "conservative", + rewrite_style: str = "generic", + ) -> dict[str, Any]: + """将命中 CoC 边界阻断的大纲改写为“可生成版本”。""" + mode = str(rewrite_mode or "conservative").strip().lower() + if mode not in {"conservative", "aggressive"}: + mode = "conservative" + style = str(rewrite_style or "generic").strip().lower() + if style not in {"generic", "suspense", "coc"}: + style = "generic" + original = str(outline or "").strip() + precheck = self.precheck_coc_cognition_boundary( + novel_id=novel_id, + chapter_number=chapter_number, + outline=original, + ) + if not original: + return { + "original_outline": original, + "rewritten_outline": original, + "changed": False, + "rewrite_mode": mode, + "rewrite_style": style, + "applied_rules": [], + "precheck_before": precheck, + "precheck_after": precheck, + } + + rewritten = original + applied_rules: list[str] = [] + + tokens = [str(item or "").strip() for item in precheck.get("matched_tokens") or []] + tokens = [item for item in tokens if item] + for token in sorted(set(tokens), key=len, reverse=True): + if token and token in rewritten: + rewritten = 
rewritten.replace(token, "未公开线索") + applied_rules.append(f"替换敏感片段:{token[:24]}") + + replace_pairs = [ + (r"真实身份是", "身份成谜,疑似与"), + (r"其实是", "疑似是"), + (r"真相是", "线索指向"), + (r"确认是", "怀疑是"), + (r"明确写出", "通过异常细节暗示"), + ] + if mode == "aggressive": + replace_pairs.extend( + [ + (r"揭露", "侧面触发"), + (r"揭示", "暗示"), + (r"证明", "疑似指向"), + (r"彻底查明", "暂时逼近"), + (r"最终确定", "阶段性判断"), + (r"直接说明", "以细节带出"), + (r"一口气说出", "话到嘴边又收住"), + ] + ) + if style == "suspense": + replace_pairs.extend( + [ + (r"凶手", "目标人物"), + (r"证据链完整", "证据链出现缺口"), + (r"确认作案", "动机与时机仍有反差"), + (r"全部真相", "关键一环"), + ] + ) + elif style == "coc": + replace_pairs.extend( + [ + (r"神明", "不可名状存在"), + (r"邪神", "高位存在"), + (r"仪式成功", "仪式迹象增强"), + (r"san值归零", "精神状态急坠"), + (r"完全理解", "仅触及表层"), + ] + ) + for pattern, target in replace_pairs: + updated, count = re.subn(pattern, target, rewritten) + if count > 0: + rewritten = updated + applied_rules.append(f"弱化直述表达:{pattern}→{target}") + + rewritten = re.sub(r"未公开线索(?:\s*未公开线索)+", "未公开线索", rewritten).strip() + changed = rewritten != original + postcheck = self.precheck_coc_cognition_boundary( + novel_id=novel_id, + chapter_number=chapter_number, + outline=rewritten, + ) + return { + "original_outline": original, + "rewritten_outline": rewritten, + "changed": changed, + "rewrite_mode": mode, + "rewrite_style": style, + "applied_rules": applied_rules, + "precheck_before": precheck, + "precheck_after": postcheck, + } + + def validate_coc_content_boundary( + self, + *, + novel_id: str, + chapter_number: int, + content: str, + ) -> dict[str, Any]: + """正文级 CoC 边界校验(用于保存前/生成后硬约束)。""" + text = str(content or "").strip() + if not text: + return { + "checked": False, + "allow_save": True, + "risk_level": "none", + "blocking_issues": [], + "warnings": [], + "chapter_number": int(chapter_number), + } + + # 复用 CoC overlay 解析逻辑,确保 author_only / author_truth / absolute 关键词集就绪 + self._current_novel_id = novel_id + self._build_coc_canon_overlay() + self._build_coc_clue_overlay() + 
self._build_coc_cognition_overlay() + + canon_conflicts = self._detect_coc_canon_conflicts(text) + clue_conflicts = self._detect_coc_clue_conflicts(text) + truth_leaks = self._detect_coc_author_truth_leaks(text) + blocking_issues = canon_conflicts + clue_conflicts + truth_leaks + + # strict/absolute 条目附近出现“否定重写词”时也视为阻断 + if self.coc_canon_service: + try: + overview = self.coc_canon_service.get_overview(novel_id) or {} + entries = overview.get("entries") if isinstance(overview, dict) else [] + rewrite_terms = ("不是", "并非", "伪造", "作假", "推翻", "篡改", "虚构") + for item in entries or []: + if not isinstance(item, dict): + continue + lock_level = str(item.get("lock_level") or "").strip().lower() + title = str(item.get("title") or "").strip() + if lock_level not in {"strict", "absolute"} or not title or title not in text: + continue + pattern = ( + rf"(?:{re.escape(title)}[\s\S]{{0,16}}(?:{'|'.join(map(re.escape, rewrite_terms))}))" + rf"|(?:(?:{'|'.join(map(re.escape, rewrite_terms))})[\s\S]{{0,16}}{re.escape(title)})" + ) + if re.search(pattern, text): + blocking_issues.append( + f"CoC正典硬约束冲突:{lock_level} 条目「{title}」附近出现否定重写表达。" + ) + except Exception as e: + logger.warning("coc content validation(canon) unavailable: %s", e) + + blocking_issues = list(dict.fromkeys(blocking_issues)) + return { + "checked": True, + "allow_save": len(blocking_issues) == 0, + "risk_level": "block" if blocking_issues else "none", + "blocking_issues": blocking_issues, + "warnings": [], + "chapter_number": int(chapter_number), + } + + @staticmethod + def _chapter_generation_config() -> GenerationConfig: + """章节首稿需要足够输出空间,避免被 profile 小 token 配置压成摘要稿。""" + return GenerationConfig( + max_tokens=CHAPTER_GENERATION_MAX_TOKENS, + temperature=CHAPTER_GENERATION_TEMPERATURE, + ) + + @staticmethod + def _bounded_word_target(target_word_count: Optional[int]) -> Optional[int]: + if target_word_count is None: + return None + try: + target = int(target_word_count) + except (TypeError, ValueError): + return 
None + return max(800, min(12000, target)) + + @staticmethod + def _effective_word_target(target_word_count: Optional[int]) -> int: + return AutoNovelGenerationWorkflow._bounded_word_target(target_word_count) or DEFAULT_CHAPTER_TARGET_WORDS + + @staticmethod + def _resolve_word_tolerance_ratio(word_tolerance_ratio: Optional[float] = None) -> float: + raw = word_tolerance_ratio + if raw is None: + env_ratio = (os.getenv("PLOTPILOT_WORD_TOLERANCE_RATIO") or "").strip() + env_percent = (os.getenv("PLOTPILOT_WORD_TOLERANCE_PERCENT") or "").strip() + chosen = env_ratio or env_percent + if chosen: + try: + raw = float(chosen) + except (TypeError, ValueError): + raw = None + if raw is None: + return DEFAULT_WORD_TOLERANCE_RATIO + ratio = float(raw) + if ratio > 1: + ratio = ratio / 100.0 + return max(MIN_WORD_TOLERANCE_RATIO, min(MAX_WORD_TOLERANCE_RATIO, ratio)) + + @staticmethod + def _target_word_range( + target_word_count: Optional[int], + word_tolerance_ratio: Optional[float] = None, + ) -> Optional[tuple[int, int]]: + target = AutoNovelGenerationWorkflow._effective_word_target(target_word_count) + tolerance_ratio = AutoNovelGenerationWorkflow._resolve_word_tolerance_ratio(word_tolerance_ratio) + tolerance = max(80, int(target * tolerance_ratio)) + return max(500, target - tolerance), target + tolerance + + @staticmethod + def _config_for_target_words( + target_word_count: Optional[int], + word_tolerance_ratio: Optional[float] = None, + ) -> GenerationConfig: + if target_word_count is None: + return AutoNovelGenerationWorkflow._chapter_generation_config() + + target = AutoNovelGenerationWorkflow._effective_word_target(target_word_count) + tolerance_ratio = AutoNovelGenerationWorkflow._resolve_word_tolerance_ratio(word_tolerance_ratio) + token_ratio = 1.10 + min(0.12, tolerance_ratio * 0.8) + return GenerationConfig( + max_tokens=max(1000, min(CHAPTER_GENERATION_MAX_TOKENS, int(target * token_ratio))), + temperature=CHAPTER_GENERATION_TEMPERATURE, + ) + + 
@staticmethod + def _env_flag(name: str, default: bool = False) -> bool: + raw = (os.getenv(name) or "").strip().lower() + if not raw: + return default + return raw in {"1", "true", "yes", "on"} + + def _is_scene_budget_enforced(self) -> bool: + return self._env_flag("PLOTPILOT_SCENE_BUDGET_ENFORCED", True) + + def _is_ending_closer_enabled(self) -> bool: + return self._env_flag("PLOTPILOT_ENDING_CLOSER_ENABLED", True) + + def _is_style_anchor_rag_enabled(self) -> bool: + return self._env_flag("PLOTPILOT_STYLE_ANCHOR_RAG_ENABLED", True) + + @staticmethod + def _normalize_long_draft_split_count(split_count: Optional[int]) -> int: + try: + count = int(split_count or LONG_DRAFT_SPLIT_MIN) + except (TypeError, ValueError): + count = LONG_DRAFT_SPLIT_MIN + return max(LONG_DRAFT_SPLIT_MIN, min(LONG_DRAFT_SPLIT_MAX, count)) + + def _resolve_scene_budget_plan( + self, + *, + chapter_strategy: Optional[Dict[str, Any]], + target_word_count: Optional[int], + word_tolerance_ratio: Optional[float], + beat_count: int, + ) -> List[Dict[str, Any]]: + if not self._is_scene_budget_enforced(): + return [] + if not isinstance(chapter_strategy, dict): + return [] + raw_scenes = chapter_strategy.get("scene_plan") + if not isinstance(raw_scenes, list): + return [] + + scenes: list[dict[str, Any]] = [] + for index, item in enumerate(raw_scenes[:SCENE_BUDGET_MAX_SEGMENTS], start=1): + if not isinstance(item, dict): + continue + label = str(item.get("label") or item.get("title") or f"场景 {index}").strip() or f"场景 {index}" + task = str(item.get("task") or "推进冲突").strip() or "推进冲突" + resistance = str(item.get("resistance") or "出现阻力").strip() or "出现阻力" + info_shift = str(item.get("info_shift") or "局势发生变化").strip() or "局势发生变化" + relationship_shift = str(item.get("relationship_shift") or "无明显变化").strip() or "无明显变化" + hook = str(item.get("hook") or "留下下一步追问").strip() or "留下下一步追问" + anchor = str(item.get("anchor") or "一个可见动作或道具变化").strip() or "一个可见动作或道具变化" + visible_action = 
str(item.get("visible_action") or anchor or "用具体动作推进").strip() or "用具体动作推进" + subtext_dialogue = str(item.get("subtext_dialogue") or "对白必须有试探、遮掩或信息差").strip() or "对白必须有试探、遮掩或信息差" + unspoken_emotion = str(item.get("unspoken_emotion") or "情绪不能直说").strip() or "情绪不能直说" + object_or_clue_change = str(item.get("object_or_clue_change") or "线索或道具状态必须变化").strip() or "线索或道具状态必须变化" + try: + target = int(item.get("target_words") or 0) + except (TypeError, ValueError): + target = 0 + scenes.append( + { + "label": label, + "task": task, + "resistance": resistance, + "info_shift": info_shift, + "relationship_shift": relationship_shift, + "hook": hook, + "anchor": anchor, + "visible_action": visible_action, + "subtext_dialogue": subtext_dialogue, + "unspoken_emotion": unspoken_emotion, + "object_or_clue_change": object_or_clue_change, + "target_words": target, + } + ) + if len(scenes) < SCENE_BUDGET_MIN_SEGMENTS: + return [] + + target_total = self._effective_word_target(target_word_count) + min_words, max_words = self._target_word_range(target_word_count, word_tolerance_ratio) + tolerance_ratio = self._resolve_word_tolerance_ratio(word_tolerance_ratio) + scene_count = len(scenes) + min_scene = max(260, int(target_total * 0.08)) + max_scene = max(min_scene + 80, min(2200, int(target_total * 0.62))) + fallback_scene = max(min_scene, int(target_total / scene_count)) + + weights: list[int] = [] + for scene in scenes: + raw = int(scene.get("target_words") or 0) + if raw <= 0: + raw = fallback_scene + raw = max(min_scene, min(max_scene, raw)) + weights.append(raw) + total = sum(weights) or 1 + + normalized: list[int] = [] + for raw in weights: + val = int(round(raw * target_total / total)) + val = max(min_scene, min(max_scene, val)) + normalized.append(val) + + drift = target_total - sum(normalized) + cursor = 0 + while drift != 0 and normalized: + i = cursor % len(normalized) + if drift > 0 and normalized[i] < max_scene: + normalized[i] += 1 + drift -= 1 + elif drift < 0 and 
normalized[i] > min_scene: + normalized[i] -= 1 + drift += 1 + cursor += 1 + if cursor > target_total * 2: + break + + plan: list[Dict[str, Any]] = [] + for index, scene in enumerate(scenes): + tw = normalized[index] + per_scene_tolerance = max(45, int(tw * min(0.2, tolerance_ratio * 1.5))) + scene_min = max(180, tw - per_scene_tolerance) + scene_max = min(max_words, tw + per_scene_tolerance) + plan.append( + { + **scene, + "target_words": tw, + "min_words": scene_min, + "max_words": scene_max, + } + ) + + if beat_count > 0 and len(plan) != beat_count: + base_scene = plan + expanded: list[Dict[str, Any]] = [] + for i in range(beat_count): + source = base_scene[min(len(base_scene) - 1, int(i * len(base_scene) / beat_count))] + expanded.append({**source}) + plan = expanded + return plan + + @staticmethod + def _scene_hint_from_budget_plan( + scene_budget_plan: List[Dict[str, Any]], + index: int, + ) -> Optional[Dict[str, Any]]: + if not scene_budget_plan: + return None + if index < 0 or index >= len(scene_budget_plan): + return None + scene = scene_budget_plan[index] + if not isinstance(scene, dict): + return None + return scene + + @staticmethod + def _build_scene_budget_overlay(scene_hint: Optional[Dict[str, Any]]) -> str: + if not scene_hint: + return "" + label = str(scene_hint.get("label") or "场景").strip() or "场景" + task = str(scene_hint.get("task") or "推进冲突").strip() or "推进冲突" + resistance = str(scene_hint.get("resistance") or "出现阻力").strip() or "出现阻力" + info_shift = str(scene_hint.get("info_shift") or "局势变化").strip() or "局势变化" + relation = str(scene_hint.get("relationship_shift") or "无明显变化").strip() or "无明显变化" + anchor = str(scene_hint.get("anchor") or "动作/道具锚点").strip() or "动作/道具锚点" + hook = str(scene_hint.get("hook") or "留下追问").strip() or "留下追问" + visible_action = str(scene_hint.get("visible_action") or "用具体动作推进").strip() or "用具体动作推进" + subtext_dialogue = str(scene_hint.get("subtext_dialogue") or "对白保留潜台词").strip() or "对白保留潜台词" + unspoken_emotion = 
str(scene_hint.get("unspoken_emotion") or "情绪不能直说").strip() or "情绪不能直说" + object_or_clue_change = str(scene_hint.get("object_or_clue_change") or "线索或道具状态变化").strip() or "线索或道具状态变化" + target_words = int(scene_hint.get("target_words") or 0) + min_words = int(scene_hint.get("min_words") or 0) + max_words = int(scene_hint.get("max_words") or 0) + if target_words > 0 and min_words > 0 and max_words > 0: + budget_line = f"本段预算 {target_words} 字,允许 {min_words}-{max_words} 字。" + elif target_words > 0: + budget_line = f"本段预算 {target_words} 字。" + else: + budget_line = "本段预算以当前场景推进为准。" + return ( + "【场景包执行(预算锁定)】\n" + f"- 场景:{label}\n" + f"- 任务:{task}\n" + f"- 阻力:{resistance}\n" + f"- 信息变化:{info_shift}\n" + f"- 关系变化:{relation}\n" + f"- 场景锚点:{anchor}\n" + f"- 可见动作:{visible_action}\n" + f"- 潜台词对白:{subtext_dialogue}\n" + f"- 未说出口的情绪:{unspoken_emotion}\n" + f"- 道具/线索变化:{object_or_clue_change}\n" + f"- 结尾钩子:{hook}\n" + f"- {budget_line}\n" + "- 禁止把本段写成解释总结,必须通过动作/对白/细节推进。" + ) + + @staticmethod + def _story_text_units(text: str) -> int: + return len(re.sub(r"\s+", "", text or "")) + + @staticmethod + def _is_sentence_tail_complete(text: str) -> bool: + sample = (text or "").rstrip() + if not sample: + return True + enders = "。!?!?…" + closers = "”’」』)】\"'" + tail = sample[-1] + if tail in enders: + return True + if tail in closers and len(sample) >= 2 and sample[-2] in enders: + return True + return False + + def _smooth_truncated_tail(self, text: str, *, min_words: int) -> str: + """避免章节在硬上限后停在半句,优先回退到最近完整句。""" + sample = (text or "").rstrip() + if not sample or self._is_sentence_tail_complete(sample): + return sample + + # 优先在末尾窗口内寻找最近的句末边界;只要不明显低于目标下限即可回退。 + fallback = None + for m in re.finditer(r"[。!?!?…](?:[”’」』)】\"'])*", sample): + if len(sample) - m.end() <= 260: + fallback = m.end() + if fallback is not None: + candidate = sample[:fallback].rstrip() + if self._story_text_units(candidate) >= max(500, min_words - 180): + return candidate + + # 找不到合适边界时,最小修复为补句号,避免裸半句。 + 
trimmed = sample.rstrip(",,、;;::") + if not trimmed: + return sample + if trimmed[-1].isspace(): + return trimmed.rstrip() + "。" + # 以“替换末字符”为优先,避免收束动作把字数上限顶穿 +1。 + return trimmed[:-1] + "。" + + @staticmethod + def _remove_repeated_leading_paragraph_block(text: str) -> str: + """删除续写阶段偶发的“从开头重写一遍”段落块。""" + normalized = (text or "").strip() + paragraphs = [part.strip() for part in re.split(r"\n\s*\n", normalized) if part.strip()] + if len(paragraphs) < 6: + return normalized + + first = paragraphs[0] + if len(first) < 12: + return normalized + + for start in range(1, len(paragraphs)): + current = paragraphs[start] + first_index = current.find(first) + if first_index < 0: + continue + + repeat_count = 1 + while ( + start + repeat_count < len(paragraphs) + and repeat_count < len(paragraphs) + and paragraphs[start + repeat_count] == paragraphs[repeat_count] + ): + repeat_count += 1 + + if repeat_count < 4: + continue + + prefix = current[:first_index].strip() + cleaned = paragraphs[:start] + if prefix and (not cleaned or cleaned[-1] != prefix): + cleaned.append(prefix) + cleaned.extend(paragraphs[start + repeat_count:]) + return "\n\n".join(cleaned).strip() + + return normalized + + @staticmethod + def _extract_tail_segment(text: str, *, window_chars: int = 460) -> tuple[str, str]: + source = text or "" + if not source: + return "", "" + start = max(0, len(source) - window_chars) + para_break = source.rfind("\n\n", start) + if para_break >= 0: + start = para_break + 2 + return source[:start], source[start:] + + async def _soft_land_chapter_ending_if_needed( + self, + *, + content: str, + outline: str, + min_words: int, + max_words: int, + force: bool = False, + ) -> str: + """章末软着陆:在不增设定、不突破字数的前提下,让结尾更自然有钩子。""" + draft = (content or "").strip() + if not draft: + return content + total_units = self._story_text_units(draft) + if total_units < 900: + return content + + # 仅在接近上限或被硬截断后触发,避免每章额外调用一次 LLM。 + near_cap = (max_words - total_units) <= 90 + if not force and not 
near_cap: + return content + + head, tail = self._extract_tail_segment(draft, window_chars=520) + tail_units = self._story_text_units(tail) + if tail_units < 120: + return content + + prompt = Prompt( + system=( + "你是中文小说主编。只改写章节最后一段,让结尾自然收束且保留追读钩子。" + "只输出改写后的“最后一段”,不要标题,不要解释。" + ), + user=( + "约束:\n" + "1) 不改变既有事实,不新增角色或世界观设定;\n" + "2) 维持当前悬念方向,不把真相提前说破;\n" + "3) 字数控制在原尾段的 80%-110%,且必须以完整句收尾;\n" + "4) 保留人物语气和现场动作感,避免总结腔。\n\n" + f"【本章大纲】\n{outline}\n\n" + f"【当前尾段】\n{tail}\n\n" + "请输出改写后的尾段:" + ), + ) + try: + result = await self.llm_service.generate( + prompt, + GenerationConfig(max_tokens=max(220, min(900, int(tail_units * 1.35))), temperature=0.72), + ) + rewritten_tail = strip_reasoning_artifacts((result.content or "").strip()) + except Exception as exc: + logger.warning("ending soft-landing skipped: %s", exc) + return content + + if not rewritten_tail: + return content + rewritten_units = self._story_text_units(rewritten_tail) + if rewritten_units < max(80, int(tail_units * 0.8)): + return content + if rewritten_units > int(tail_units * 1.15) + 40: + return content + if not self._is_sentence_tail_complete(rewritten_tail): + rewritten_tail = self._smooth_truncated_tail(rewritten_tail, min_words=max(80, min_words // 4)) + + merged = (head.rstrip() + "\n\n" + rewritten_tail.lstrip()).strip() if head.strip() else rewritten_tail + merged_units = self._story_text_units(merged) + if merged_units > max_words: + merged = self._truncate_to_story_text_units(merged, max_words) + merged = self._smooth_truncated_tail(merged, min_words=min_words) + merged_units = self._story_text_units(merged) + if merged_units < max(500, min_words - 220): + return content + return merged + + @staticmethod + def _truncate_to_story_text_units(text: str, limit: int) -> str: + source = text or "" + if limit <= 0: + return "" + current = 0 + cut_index = len(source) + for idx, ch in enumerate(source): + if not ch.isspace(): + current += 1 + if current >= limit: + cut_index = idx + 1 + break + if 
cut_index >= len(source): + return source.strip() + + lookahead = source[cut_index: cut_index + 120] + stop = re.search(r"[。!?!?…\n]", lookahead) + if stop: + cut_index += stop.start() + 1 + trimmed = source[:cut_index].strip() + while trimmed and AutoNovelGenerationWorkflow._story_text_units(trimmed) > limit: + trimmed = trimmed[:-1].rstrip() + return trimmed + + async def _expand_to_min_word_target( + self, + *, + content: str, + outline: str, + min_words: int, + ) -> str: + current_words = self._story_text_units(content) + if current_words >= min_words: + return content + needed = min_words - current_words + if needed < 120: + return content + + prompt = Prompt( + system=( + "你是中文小说续写编辑。只负责在不改动前文事实的前提下补足章节长度。" + "只输出续写正文,不要解释,不要标题,不要总结。" + ), + user=( + f"目标:在现有正文后补写 {needed}-{needed + 160} 字,让总字数至少达到 {min_words} 字。\n" + "约束:承接前文冲突,不重述已写情节,不新增世界观设定。\n\n" + f"【本章大纲】\n{outline}\n\n" + f"【已写正文】\n{content}\n\n" + "请直接续写:" + ), + ) + try: + max_tokens = max(500, min(2200, int(needed * 1.3))) + result = await self.llm_service.generate( + prompt, + GenerationConfig(max_tokens=max_tokens, temperature=0.88), + ) + appendix = strip_reasoning_artifacts((result.content or "").strip()) + if not appendix: + return content + merged = (content.rstrip() + "\n\n" + appendix.lstrip()).strip() + return self._remove_repeated_leading_paragraph_block(merged) + except Exception as exc: + logger.warning("word target expansion skipped: %s", exc) + return content + + async def _enforce_chapter_word_target( + self, + *, + content: str, + outline: str, + target_word_count: Optional[int], + word_tolerance_ratio: Optional[float] = None, + ) -> str: + if target_word_count is None: + return (content or "").strip() + + min_words, max_words = self._target_word_range(target_word_count, word_tolerance_ratio) + normalized = (content or "").strip() + current_words = self._story_text_units(normalized) + was_trimmed = False + + if current_words > max_words: + normalized = 
self._truncate_to_story_text_units(normalized, max_words) + current_words = self._story_text_units(normalized) + was_trimmed = True + logger.info( + "word target clamp: trimmed to <= %s, now=%s", + max_words, + current_words, + ) + + if current_words < min_words: + normalized = await self._expand_to_min_word_target( + content=normalized, + outline=outline, + min_words=min_words, + ) + current_words = self._story_text_units(normalized) + if current_words > max_words: + normalized = self._truncate_to_story_text_units(normalized, max_words) + current_words = self._story_text_units(normalized) + logger.info( + "word target clamp: expanded to >= %s, now=%s", + min_words, + current_words, + ) + + normalized = self._smooth_truncated_tail(normalized, min_words=min_words) + current_words = self._story_text_units(normalized) + if current_words > max_words: + normalized = self._truncate_to_story_text_units(normalized, max_words) + normalized = self._smooth_truncated_tail(normalized, min_words=min_words) + current_words = self._story_text_units(normalized) + + near_cap = (max_words - current_words) <= 120 + if self._is_ending_closer_enabled() and near_cap: + normalized = await self._soft_land_chapter_ending_if_needed( + content=normalized, + outline=outline, + min_words=min_words, + max_words=max_words, + force=False, + ) + current_words = self._story_text_units(normalized) + if current_words > max_words: + normalized = self._truncate_to_story_text_units(normalized, max_words) + normalized = self._smooth_truncated_tail(normalized, min_words=min_words) + current_words = self._story_text_units(normalized) + + if was_trimmed: + normalized = await self._soft_land_chapter_ending_if_needed( + content=normalized, + outline=outline, + min_words=min_words, + max_words=max_words, + force=True, + ) + current_words = self._story_text_units(normalized) + if current_words > max_words: + normalized = self._truncate_to_story_text_units(normalized, max_words) + normalized = 
self._smooth_truncated_tail(normalized, min_words=min_words) + return normalized def prepare_chapter_generation( self, @@ -262,6 +1148,29 @@ async def post_process_generated_chapter( style_warnings = self._scan_cliches(content) chapter_state = await self._extract_chapter_state(content, chapter_number) consistency_report = self._check_consistency(chapter_state, novel_id) + coc_conflict_warnings = self._detect_coc_canon_conflicts(content) + coc_clue_warnings = self._detect_coc_clue_conflicts(content) + coc_truth_warnings = self._detect_coc_author_truth_leaks(content) + all_coc_warnings = coc_conflict_warnings + coc_clue_warnings + coc_truth_warnings + if self._coc_hard_guard_enabled and all_coc_warnings: + joined = ";".join(all_coc_warnings[:4]) + raise ValueError(f"CoC硬约束阻断:{joined}") + if all_coc_warnings: + merged_warnings = list(consistency_report.warnings) + for warning_text in all_coc_warnings: + merged_warnings.append( + Issue( + type=IssueType.EVENT_LOGIC_ERROR, + severity=Severity.MINOR, + description=warning_text, + location=max(1, chapter_number), + ) + ) + consistency_report = ConsistencyReport( + issues=list(consistency_report.issues), + warnings=merged_warnings, + suggestions=list(consistency_report.suggestions), + ) ghost_annotations = self._detect_conflicts(novel_id, chapter_number, outline, scene_director) if self.state_updater: try: @@ -305,7 +1214,12 @@ async def generate_chapter( chapter_number: int, outline: str, scene_director: Optional[SceneDirectorAnalysis] = None, - enable_beats: bool = True + enable_beats: bool = True, + style_profile_id: str = "", + scene_type: str = "", + chapter_strategy: Optional[Dict[str, Any]] = None, + target_word_count: Optional[int] = None, + word_tolerance_ratio: Optional[float] = None, ) -> GenerationResult: """生成章节(完整工作流) @@ -314,6 +1228,7 @@ async def generate_chapter( chapter_number: 章节号 outline: 章节大纲 scene_director: 可选的场记分析结果,用于过滤角色和地点 + style_profile_id: 可选写作手法档案 ID Returns: GenerationResult 包含内容、一致性报告、上下文和 
token 数 @@ -343,17 +1258,44 @@ async def generate_chapter( ) context = bundle["context"] context_tokens = bundle["context_tokens"] + style_overlay = self._build_style_overlay(novel_id, style_profile_id, scene_type) + next_chapter_bridge = self._build_next_chapter_bridge_overlay( + novel_id=novel_id, + chapter_number=chapter_number, + target_word_count=target_word_count, + chapter_strategy=None, + ) logger.info(f" ✓ 上下文已构建: {len(context)} 字符, 约 {context_tokens} tokens") logger.info("阶段 3: 生成 - 调用 LLM") - config = GenerationConfig() + config = self._config_for_target_words(target_word_count, word_tolerance_ratio) # 如果使用节拍模式,先放大节拍 beats = [] + scene_budget_plan: list[dict[str, Any]] = [] if enable_beats: logger.info(" → 启用节拍模式,拆分大纲为微观节拍") - beats = self.context_builder.magnify_outline_to_beats(chapter_number, outline) + beats = self.context_builder.magnify_outline_to_beats( + chapter_number, + outline, + target_chapter_words=self._effective_word_target(target_word_count), + ) + if not isinstance(beats, list): + logger.warning(" ⚠ 微观节拍拆分返回异常,回退到单段生成") + beats = [] logger.info(f" ✓ 已拆分为 {len(beats)} 个微观节拍") + scene_budget_plan = self._resolve_scene_budget_plan( + chapter_strategy=chapter_strategy, + target_word_count=target_word_count, + word_tolerance_ratio=word_tolerance_ratio, + beat_count=len(beats), + ) + if scene_budget_plan: + logger.info( + " ✓ 场景包预算已生效: %s 段,预算合计 %s 字", + len(scene_budget_plan), + sum(int(item.get("target_words") or 0) for item in scene_budget_plan), + ) # 根据是否使用节拍选择不同的生成策略 if enable_beats and beats: @@ -361,7 +1303,12 @@ async def generate_chapter( content_parts: list[str] = [] for i, beat in enumerate(beats): prior_draft = "\n\n".join(content_parts) + scene_hint = self._scene_hint_from_budget_plan(scene_budget_plan, i) beat_prompt_text = self.context_builder.build_beat_prompt(beat, i, len(beats)) + scene_budget_overlay = self._build_scene_budget_overlay(scene_hint) + if scene_budget_overlay: + beat_prompt_text = 
f"{beat_prompt_text}\n\n{scene_budget_overlay}" + beat_target_words = int(scene_hint.get("target_words")) if scene_hint else int(beat.target_words) logger.info(f"生成节拍 {i+1}/{len(beats)}: {beat.focus} - {beat.description[:50]}...") prompt = self._build_prompt( @@ -373,12 +1320,20 @@ async def generate_chapter( beat_prompt=beat_prompt_text, beat_index=i, total_beats=len(beats), - beat_target_words=beat.target_words, + beat_target_words=beat_target_words, + target_word_count=target_word_count, + word_tolerance_ratio=word_tolerance_ratio, voice_anchors=bundle.get("voice_anchors") or "", chapter_draft_so_far=prior_draft, + style_overlay=style_overlay, + chapter_strategy=chapter_strategy, + next_chapter_bridge=next_chapter_bridge, ) - llm_result = await self.llm_service.generate(prompt, config) + llm_result = await self.llm_service.generate( + prompt, + self._config_for_target_words(beat_target_words, word_tolerance_ratio), + ) beat_content = llm_result.content content_parts.append(beat_content) @@ -393,11 +1348,28 @@ async def generate_chapter( plot_tension=bundle["plot_tension"], style_summary=bundle["style_summary"], voice_anchors=bundle.get("voice_anchors") or "", + style_overlay=style_overlay, + chapter_strategy=chapter_strategy, + next_chapter_bridge=next_chapter_bridge, + target_word_count=target_word_count, + word_tolerance_ratio=word_tolerance_ratio, ) logger.info(f" → 发送请求到 LLM (max_tokens={config.max_tokens}, temperature={config.temperature})") llm_result = await self.llm_service.generate(prompt, config) content = strip_reasoning_artifacts(llm_result.content or "") logger.info(f" ✓ LLM 响应已接收: {len(content)} 字符") + + content = await self._naturalize_ai_flavor_if_needed( + content=content, + outline=outline, + style_overlay=style_overlay, + ) + content = await self._enforce_chapter_word_target( + content=content, + outline=outline, + target_word_count=target_word_count, + word_tolerance_ratio=word_tolerance_ratio, + ) # 保存微观节拍用于后续处理 if beats: @@ -442,7 +1414,16 
@@ async def generate_chapter_stream( chapter_number: int, outline: str, scene_director: Optional[SceneDirectorAnalysis] = None, - enable_beats: bool = True + enable_beats: bool = True, + style_profile_id: str = "", + scene_type: str = "", + direct_writing_mode: bool = False, + direct_light_polish: bool = False, + chapter_strategy: Optional[Dict[str, Any]] = None, + target_word_count: Optional[int] = None, + word_tolerance_ratio: Optional[float] = None, + long_draft_mode: bool = False, + long_draft_split_count: Optional[int] = None, ) -> AsyncIterator[Dict[str, Any]]: """流式生成章节:阶段事件 + 正文 token 流 + 最终 done(含一致性报告)。 @@ -462,27 +1443,202 @@ async def generate_chapter_stream( logger.info(f"开始流式生成章节: 小说={novel_id}, 章节={chapter_number}") logger.info(f"========================================") + split_count = self._normalize_long_draft_split_count(long_draft_split_count) + effective_target_word_count = target_word_count + effective_word_tolerance_ratio = word_tolerance_ratio + effective_outline = outline + if long_draft_mode: + base_target = self._effective_word_target(target_word_count) + effective_target_word_count = int(base_target * split_count) + effective_word_tolerance_ratio = max( + 0.08, + self._resolve_word_tolerance_ratio(word_tolerance_ratio), + ) + effective_outline = ( + f"{outline.strip()}\n\n" + "【长稿母本灰度模式】\n" + f"- 本次先写连续母稿,目标约 {effective_target_word_count} 字,后续将拆分为 {split_count} 章。\n" + "- 文内至少形成与拆章数量一致的自然转折点;每个转折点前要有冲突推进,转折后要有新代价或新目标。\n" + "- 结尾不要封死,要留下可切分的追读钩子。" + ).strip() + yield { + "type": "long_draft_plan", + "enabled": True, + "split_count": split_count, + "target_word_count": effective_target_word_count, + } + yield {"type": "phase", "phase": "planning"} yield {"type": "phase", "phase": "context"} logger.info("阶段 1-2: prepare_chapter_generation(规划 + 结构化上下文)") bundle = self.prepare_chapter_generation( - novel_id, chapter_number, outline, scene_director=scene_director + novel_id, chapter_number, effective_outline, scene_director=scene_director ) 
context = bundle["context"] context_tokens = bundle["context_tokens"] + style_overlay = self._build_style_overlay(novel_id, style_profile_id, scene_type) + next_chapter_bridge = self._build_next_chapter_bridge_overlay( + novel_id=novel_id, + chapter_number=chapter_number, + target_word_count=effective_target_word_count, + chapter_strategy=chapter_strategy, + ) logger.info(f" ✓ 上下文已构建: {len(context)} 字符, 约 {context_tokens} tokens") yield {"type": "phase", "phase": "llm"} logger.info("阶段 3: 生成 - 调用 LLM 流式生成") - config = GenerationConfig() + config = self._config_for_target_words(effective_target_word_count, effective_word_tolerance_ratio) chunk_count = 0 + total_chars = 0 + _, max_words = self._target_word_range(effective_target_word_count, effective_word_tolerance_ratio) + # 流式阶段先做硬上限截断,避免前端先看到超长正文(例如 5k+)再等待章后钳制。 + stream_unit_hard_limit = max(max_words + 120, int(max_words * 1.18)) + emitted_story_units = 0 + hit_stream_hard_limit = False + + def _slice_piece_for_stream_limit(piece: str) -> str: + nonlocal emitted_story_units, hit_stream_hard_limit + if not piece or hit_stream_hard_limit: + return "" + remain = stream_unit_hard_limit - emitted_story_units + if remain <= 0: + hit_stream_hard_limit = True + return "" + piece_units = self._story_text_units(piece) + if piece_units <= remain: + emitted_story_units += piece_units + return piece + # 仅保留剩余配额内的正文片段,防止流式阶段暴涨。 + clipped = self._truncate_to_story_text_units(piece, remain) + emitted_story_units += self._story_text_units(clipped) + hit_stream_hard_limit = True + return clipped + + if direct_writing_mode: + logger.info(" → 直接写作模式:跳过节拍拆分、自然化后处理与章后质检") + prompt = self._build_direct_writing_prompt( + context=context, + outline=effective_outline, + storyline_context=bundle["storyline_context"], + plot_tension=bundle["plot_tension"], + style_summary=bundle["style_summary"], + voice_anchors=bundle.get("voice_anchors") or "", + chapter_strategy=chapter_strategy, + next_chapter_bridge=next_chapter_bridge, + 
target_word_count=effective_target_word_count, + word_tolerance_ratio=effective_word_tolerance_ratio, + ) + parts: list[str] = [] + total_chars = 0 + async for piece in self.llm_service.stream_generate(prompt, config): + clipped_piece = _slice_piece_for_stream_limit(piece) + if clipped_piece: + parts.append(clipped_piece) + chunk_count += 1 + total_chars += len(clipped_piece) + if not clipped_piece: + if hit_stream_hard_limit: + logger.info( + "stream hard limit reached (direct): limit=%s, emitted=%s", + stream_unit_hard_limit, + emitted_story_units, + ) + break + continue + yield { + "type": "chunk", + "text": clipped_piece, + "stats": { + "chars": total_chars, + "chunks": chunk_count, + "estimated_tokens": int(total_chars / 1.5), + }, + } + if hit_stream_hard_limit: + logger.info( + "stream hard limit reached (direct): limit=%s, emitted=%s", + stream_unit_hard_limit, + emitted_story_units, + ) + break + + content = strip_reasoning_artifacts("".join(parts)) + if not content.strip(): + logger.error(" × 模型返回空内容") + yield {"type": "error", "message": "模型返回空内容"} + return + + if direct_light_polish: + yield {"type": "phase", "phase": "polish"} + content = await self._apply_direct_light_polish_if_needed( + content=content, + outline=effective_outline, + ) + content = await self._enforce_chapter_word_target( + content=content, + outline=effective_outline, + target_word_count=effective_target_word_count, + word_tolerance_ratio=effective_word_tolerance_ratio, + ) + + coc_boundary = self.validate_coc_content_boundary( + novel_id=novel_id, + chapter_number=chapter_number, + content=content, + ) + if not coc_boundary.get("allow_save", True): + reasons = coc_boundary.get("blocking_issues") or [] + reason = reasons[0] if reasons else "命中 CoC 硬约束" + yield {"type": "error", "message": f"CoC硬约束阻断:{reason}"} + return + + output_tokens = int(len(content) / 1.5) + total_tokens = context_tokens + output_tokens + yield { + "type": "done", + "content": content, + "consistency_report": 
_consistency_report_to_dict( + ConsistencyReport(issues=[], warnings=[], suggestions=[]) + ), + "token_count": context_tokens, + "output_tokens": output_tokens, + "total_tokens": total_tokens, + "chars": len(content), + "ghost_annotations": [], + "style_warnings": [], + "direct_writing_mode": True, + "direct_light_polish": direct_light_polish, + "long_draft_mode": long_draft_mode, + "long_draft_split_count": split_count if long_draft_mode else None, + } + return # 如果使用节拍模式,先放大节拍 beats = [] + scene_budget_plan: list[dict[str, Any]] = [] if enable_beats: logger.info(" → 启用节拍模式,拆分大纲为微观节拍") - beats = self.context_builder.magnify_outline_to_beats(chapter_number, outline) + beats = self.context_builder.magnify_outline_to_beats( + chapter_number, + effective_outline, + target_chapter_words=self._effective_word_target(effective_target_word_count), + ) + if not isinstance(beats, list): + logger.warning(" ⚠ 微观节拍拆分返回异常,回退到单段生成") + beats = [] logger.info(f" ✓ 已拆分为 {len(beats)} 个微观节拍") + scene_budget_plan = self._resolve_scene_budget_plan( + chapter_strategy=chapter_strategy, + target_word_count=effective_target_word_count, + word_tolerance_ratio=effective_word_tolerance_ratio, + beat_count=len(beats), + ) + if scene_budget_plan: + logger.info( + " ✓ 场景包预算已生效: %s 段,预算合计 %s 字", + len(scene_budget_plan), + sum(int(item.get("target_words") or 0) for item in scene_budget_plan), + ) # 发送节拍信息用于前端展示 yield { @@ -493,7 +1649,8 @@ async def generate_chapter_stream( "target_words": beat.target_words, "focus": beat.focus } for beat in beats - ] + ], + "scene_budget_plan": scene_budget_plan, } # 根据是否使用节拍选择不同的生成策略 @@ -502,67 +1659,129 @@ async def generate_chapter_stream( content_parts: list[str] = [] for i, beat in enumerate(beats): prior_draft = "\n\n".join(content_parts) + scene_hint = self._scene_hint_from_budget_plan(scene_budget_plan, i) beat_prompt_text = self.context_builder.build_beat_prompt(beat, i, len(beats)) + scene_budget_overlay = self._build_scene_budget_overlay(scene_hint) + 
if scene_budget_overlay: + beat_prompt_text = f"{beat_prompt_text}\n\n{scene_budget_overlay}" + beat_target_words = int(scene_hint.get("target_words")) if scene_hint else int(beat.target_words) logger.info(f"生成节拍 {i+1}/{len(beats)}: {beat.focus} - {beat.description[:50]}...") prompt = self._build_prompt( context, - outline, + effective_outline, storyline_context=bundle["storyline_context"], plot_tension=bundle["plot_tension"], style_summary=bundle["style_summary"], beat_prompt=beat_prompt_text, beat_index=i, total_beats=len(beats), - beat_target_words=beat.target_words, + beat_target_words=beat_target_words, voice_anchors=bundle.get("voice_anchors") or "", chapter_draft_so_far=prior_draft, + style_overlay=style_overlay, + chapter_strategy=chapter_strategy, + next_chapter_bridge=next_chapter_bridge, + target_word_count=effective_target_word_count, + word_tolerance_ratio=effective_word_tolerance_ratio, ) beat_content = "" - async for piece in self.llm_service.stream_generate(prompt, config): - chunk_count += 1 - beat_content += piece + async for piece in self.llm_service.stream_generate( + prompt, + self._config_for_target_words(beat_target_words, effective_word_tolerance_ratio), + ): + clipped_piece = _slice_piece_for_stream_limit(piece) + if clipped_piece: + chunk_count += 1 + beat_content += clipped_piece + total_chars += len(clipped_piece) + if not clipped_piece: + if hit_stream_hard_limit: + logger.info( + "stream hard limit reached (beats): limit=%s, emitted=%s", + stream_unit_hard_limit, + emitted_story_units, + ) + break + continue yield { "type": "chunk", - "text": piece, + "text": clipped_piece, "beat_index": i, - "beat_focus": beat.focus + "beat_focus": beat.focus, + "stats": { + "chars": total_chars, + "chunks": chunk_count, + "estimated_tokens": int(total_chars / 1.5), + }, } + if hit_stream_hard_limit: + logger.info( + "stream hard limit reached (beats): limit=%s, emitted=%s", + stream_unit_hard_limit, + emitted_story_units, + ) + break 
content_parts.append(beat_content) yield {"type": "beat_done", "beat_index": i, "beat_content_length": len(beat_content)} + if hit_stream_hard_limit: + break content = strip_reasoning_artifacts("".join(content_parts)) else: # 传统单段生成 prompt = self._build_prompt( context, - outline, + effective_outline, storyline_context=bundle["storyline_context"], plot_tension=bundle["plot_tension"], style_summary=bundle["style_summary"], voice_anchors=bundle.get("voice_anchors") or "", + style_overlay=style_overlay, + chapter_strategy=chapter_strategy, + next_chapter_bridge=next_chapter_bridge, + target_word_count=effective_target_word_count, + word_tolerance_ratio=effective_word_tolerance_ratio, ) logger.info(f" → 发送流式请求到 LLM") parts: list[str] = [] total_chars = 0 async for piece in self.llm_service.stream_generate(prompt, config): - parts.append(piece) - chunk_count += 1 - total_chars += len(piece) + clipped_piece = _slice_piece_for_stream_limit(piece) + if clipped_piece: + parts.append(clipped_piece) + chunk_count += 1 + total_chars += len(clipped_piece) + if not clipped_piece: + if hit_stream_hard_limit: + logger.info( + "stream hard limit reached (single): limit=%s, emitted=%s", + stream_unit_hard_limit, + emitted_story_units, + ) + break + continue # 增强事件:包含累计字数和预估 token(中文约 1.5 字/token,英文约 4 字/token) estimated_tokens = int(total_chars / 1.5) # 简化估算 yield { "type": "chunk", - "text": piece, + "text": clipped_piece, "stats": { "chars": total_chars, "chunks": chunk_count, "estimated_tokens": estimated_tokens, } } + if hit_stream_hard_limit: + logger.info( + "stream hard limit reached (single): limit=%s, emitted=%s", + stream_unit_hard_limit, + emitted_story_units, + ) + break content = strip_reasoning_artifacts("".join(parts)) logger.info(f" ✓ LLM 流式响应完成: {chunk_count} 个块, {len(content)} 字符") @@ -572,10 +1791,22 @@ async def generate_chapter_stream( yield {"type": "error", "message": "模型返回空内容"} return + content = await self._naturalize_ai_flavor_if_needed( + content=content, 
+ outline=effective_outline, + style_overlay=style_overlay, + ) + content = await self._enforce_chapter_word_target( + content=content, + outline=effective_outline, + target_word_count=effective_target_word_count, + word_tolerance_ratio=effective_word_tolerance_ratio, + ) + yield {"type": "phase", "phase": "post"} logger.info("阶段 4: post_process_generated_chapter") post = await self.post_process_generated_chapter( - novel_id, chapter_number, outline, content, scene_director=scene_director + novel_id, chapter_number, effective_outline, content, scene_director=scene_director ) style_warnings = post["style_warnings"] consistency_report = post["consistency_report"] @@ -611,6 +1842,8 @@ async def generate_chapter_stream( } for hit in style_warnings ], + "long_draft_mode": long_draft_mode, + "long_draft_split_count": split_count if long_draft_mode else None, } except ValueError as e: logger.error(f"参数错误: {e}") @@ -649,63 +1882,775 @@ async def suggest_outline(self, novel_id: str, chapter_number: int) -> str: logger.warning("suggest_outline failed: %s", e) return seed - async def generate_chapter_with_review( + async def _naturalize_ai_flavor_if_needed( self, - novel_id: str, - chapter_number: int, - outline: str - ) -> Tuple[str, ConsistencyReport]: - """生成章节并返回一致性审查 - - Args: - novel_id: 小说 ID - chapter_number: 章节号 - outline: 章节大纲 - - Returns: - (content, consistency_report) 元组 - """ - result = await self.generate_chapter(novel_id, chapter_number, outline) - return result.content, result.consistency_report + *, + content: str, + outline: str, + style_overlay: str = "", + ) -> str: + """对生成正文做一次自然化改写,避免只停留在事后告警。""" + draft = (content or "").strip() + if not draft or not self.cliche_scanner: + return content - def _get_storyline_context(self, novel_id: str, chapter_number: int) -> str: - """获取故事线上下文 + try: + initial_hits = self.cliche_scanner.scan_cliches(draft) + except Exception as e: + logger.warning("AI味预扫描失败,跳过自然化改写: %s", e) + return content - Args: - novel_id: 小说 ID 
- chapter_number: 章节号 + # 长正文即使未命中有限正则,也常会被检测器判定为“整齐、解释、模板化”。 + # 因此生产链路默认对长章节做一次编辑型自然化;短文本只在明确命中俗套时处理。 + should_naturalize = bool(initial_hits) or len(draft) >= 500 + if not should_naturalize: + return content - Returns: - 故事线上下文字符串 - """ + rewrite_prompt = self._build_ai_flavor_rewrite_prompt(draft=draft, outline=outline) + max_tokens = max(1024, min(12000, int(len(draft) * 1.4))) try: - # 检查 storyline_manager 是否有 repository 属性 - if not hasattr(self.storyline_manager, 'repository'): - return "Storyline context unavailable" - - # 获取所有活跃的故事线 - storylines = self.storyline_manager.repository.get_by_novel_id(NovelId(novel_id)) - active_storylines = [ - s for s in storylines - if s.status.value == "active" - and s.estimated_chapter_start <= chapter_number <= s.estimated_chapter_end - ] + result = await self.llm_service.generate( + rewrite_prompt, + GenerationConfig(max_tokens=max_tokens, temperature=0.9), + ) + revised = strip_reasoning_artifacts(result.content or "").strip() + except Exception as e: + logger.warning("AI味自然化改写失败,保留原文: %s", e) + return content + + if not revised: + return content + if len(revised) < max(80, len(draft) * 0.45): + logger.warning("AI味自然化改写疑似过度压缩,保留原文") + return content + revised = await self._apply_human_texture_pass_if_needed( + content=revised, + outline=outline, + ) + revised = await self._apply_human_residue_pass_if_needed( + content=revised, + outline=outline, + ) + revised = await self._apply_structural_audit_pass_if_needed( + content=revised, + outline=outline, + ) + revised = await self._apply_style_bible_pass_if_needed( + content=revised, + outline=outline, + style_overlay=style_overlay, + ) + revised = await self._apply_forbidden_pattern_gate_if_needed( + content=revised, + outline=outline, + style_overlay=style_overlay, + ) + return self._soft_cap_detector_motifs(revised) - if not active_storylines: - return "No active storylines for this chapter" + async def _apply_structural_audit_pass_if_needed(self, *, content: str, outline: 
str) -> str: + """把“设定说明连发”改成证据动作链,避免自然化后仍像提纲解释。""" + draft = (content or "").strip() + if not draft or not self._needs_structural_audit_pass(draft): + return content - context_parts = [] - for storyline in active_storylines: - context = self.storyline_manager.get_storyline_context(storyline.id) - context_parts.append(context) + logger.info(" → 触发结构审稿清理:削弱说明流/共识流/概念连发") + prompt = self._build_structural_audit_prompt(draft=draft, outline=outline) + max_tokens = max(1024, min(12000, int(len(draft) * 1.2))) + try: + result = await self.llm_service.generate( + prompt, + GenerationConfig(max_tokens=max_tokens, temperature=0.82), + ) + candidate = strip_reasoning_artifacts(result.content or "").strip() + except Exception as e: + logger.warning("结构审稿清理失败,保留当前正文: %s", e) + return content + + if not candidate: + return content + if len(candidate) < max(80, len(draft) * 0.5): + logger.warning("结构审稿清理疑似过度压缩,保留当前正文") + return content + if ( + self._needs_structural_audit_pass(candidate) + and self._structural_audit_score(candidate) >= self._structural_audit_score(draft) + ): + logger.warning("结构审稿清理未降低说明流风险,保留当前正文") + return content + if self._human_texture_risk_score(candidate) > max(2, self._human_texture_risk_score(draft) + 2): + logger.warning("结构审稿清理引入句法风险,保留当前正文") + return content + return candidate + + async def _apply_style_bible_pass_if_needed(self, *, content: str, outline: str, style_overlay: str) -> str: + """章后将低 AI 味改写稿再贴合选中的写作手法档案。""" + overlay = (style_overlay or "").strip() + draft = (content or "").strip() + if not overlay or not draft: + return content - return "\n\n".join(context_parts) + try: + prompt = self._build_style_bible_rewrite_prompt( + draft=draft, + outline=outline, + style_overlay=overlay, + ) except Exception as e: - logger.warning(f"Failed to get storyline context: {e}") - return "Storyline context unavailable" + logger.warning("Style Bible 章后贴合提示词不可用,保留当前正文: %s", e) + return content - def _get_plot_tension(self, novel_id: str, chapter_number: 
int) -> str: - """获取情节张力信息 + max_tokens = max(1024, min(12000, int(len(draft) * 1.2))) + try: + result = await self.llm_service.generate( + prompt, + GenerationConfig(max_tokens=max_tokens, temperature=0.78), + ) + candidate = strip_reasoning_artifacts(result.content or "").strip() + except Exception as e: + logger.warning("Style Bible 章后贴合失败,保留当前正文: %s", e) + return content + + if not candidate: + return content + if len(candidate) < max(80, len(draft) * 0.55): + logger.warning("Style Bible 章后贴合疑似过度压缩,保留当前正文") + return content + if self._human_texture_risk_score(candidate) > max(2, self._human_texture_risk_score(draft) + 2): + logger.warning("Style Bible 章后贴合引入句法风险,保留当前正文") + return content + return candidate + + async def _apply_human_texture_pass_if_needed(self, *, content: str, outline: str) -> str: + """对外部检测器常判为 AI 的“过度工整精修稿”做一次节奏破整。""" + draft = (content or "").strip() + if not draft or not self._needs_human_texture_pass(draft): + return content + + logger.info(" → 触发人工纹理破整:降低过度工整/对称句式风险") + prompt = self._build_human_texture_rewrite_prompt(draft=draft, outline=outline) + max_tokens = max(1024, min(12000, int(len(draft) * 1.25))) + try: + result = await self.llm_service.generate( + prompt, + GenerationConfig(max_tokens=max_tokens, temperature=0.95), + ) + textured = strip_reasoning_artifacts(result.content or "").strip() + except Exception as e: + logger.warning("人工纹理破整改写失败,保留自然化正文: %s", e) + return content + + if not textured: + return content + if len(textured) < max(80, len(draft) * 0.45): + logger.warning("人工纹理破整疑似过度压缩,保留自然化正文") + return content + if self._is_detector_signature_improved(textured, draft) and not self._needs_human_texture_pass(textured): + return textured + + best = textured if self._is_detector_signature_improved(textured, draft) else draft + if best is textured: + logger.warning("人工纹理破整已降低但仍高风险,继续严格清理检测器敏感句法") + else: + logger.warning("人工纹理破整未降低检测风险,尝试严格清理检测器敏感句法") + strict_textured = await 
self._apply_strict_detector_signature_pass(draft=best, outline=outline) + if self._is_detector_signature_improved(strict_textured, best, strict=True): + return strict_textured + + if best is textured: + logger.warning("严格句法清理仍未达标,保留首轮改善稿") + return textured + + logger.warning("严格句法清理仍未达标,保留自然化正文") + return content + + async def _apply_direct_light_polish_if_needed(self, *, content: str, outline: str) -> str: + """直接写作后的轻修:只小幅局部编辑,不进入 PP 全套后处理。""" + draft = (content or "").strip() + if len(draft) < 500: + return content + + prompt = self._build_direct_light_polish_prompt(draft=draft, outline=outline) + max_tokens = max(2048, min(12000, int(len(draft) * 1.12))) + try: + result = await self.llm_service.generate( + prompt, + GenerationConfig(max_tokens=max_tokens, temperature=0.76), + ) + candidate = strip_reasoning_artifacts(result.content or "").strip() + except Exception as e: + logger.warning("直接写作轻修失败,保留直接稿: %s", e) + return content + + if not candidate: + return content + if len(candidate) < max(120, len(draft) * 0.75): + logger.warning("直接写作轻修疑似过度压缩,保留直接稿") + return content + if self._human_texture_risk_score(candidate) > self._human_texture_risk_score(draft) + 2: + logger.warning("直接写作轻修引入句法风险,保留直接稿") + return content + if self._human_residue_score(candidate) + 2 < self._human_residue_score(draft): + logger.warning("直接写作轻修抹平人工余量,保留直接稿") + return content + return candidate + + async def _apply_human_residue_pass_if_needed(self, *, content: str, outline: str) -> str: + """降低过度统一的母题词复现,给文本留出更像人工取舍的余量。""" + draft = (content or "").strip() + terms = self._detector_repetition_terms(draft) + if not draft or not terms: + return content + + logger.info(" → 触发人工余量降噪:降低母题词过密复现 %s", "/".join(terms[:8])) + prompt = self._build_human_residue_prompt(draft=draft, outline=outline, terms=terms) + max_tokens = max(1024, min(12000, int(len(draft) * 1.2))) + try: + result = await self.llm_service.generate( + prompt, + GenerationConfig(max_tokens=max_tokens, temperature=0.88), + ) + 
candidate = strip_reasoning_artifacts(result.content or "").strip() + except Exception as e: + logger.warning("人工余量降噪失败,保留当前正文: %s", e) + return content + + if not candidate: + return content + if len(candidate) < max(80, len(draft) * 0.55): + logger.warning("人工余量降噪疑似过度压缩,保留当前正文") + return content + if not self._is_motif_repetition_improved(candidate, draft, terms): + logger.warning("人工余量降噪未降低母题重复,保留当前正文") + return content + if self._needs_human_residue_pass(candidate): + logger.warning("人工余量降噪已降低但仍高频,继续严格压低母题词") + strict_candidate = await self._apply_strict_motif_cap_pass( + draft=candidate, + outline=outline, + ) + if strict_candidate and self._is_motif_repetition_improved( + strict_candidate, + candidate, + self._detector_repetition_terms(candidate), + ): + candidate = strict_candidate + if self._human_texture_risk_score(candidate) > max(2, self._human_texture_risk_score(draft) + 2): + logger.warning("人工余量降噪引入句法风险,保留当前正文") + return content + return candidate + + async def _apply_strict_motif_cap_pass(self, *, draft: str, outline: str) -> str: + terms = self._detector_repetition_terms(draft) + if not terms: + return draft + prompt = self._build_strict_motif_cap_prompt(draft=draft, outline=outline, terms=terms) + max_tokens = max(1024, min(12000, int(len(draft) * 1.2))) + try: + result = await self.llm_service.generate( + prompt, + GenerationConfig(max_tokens=max_tokens, temperature=0.78), + ) + candidate = strip_reasoning_artifacts(result.content or "").strip() + except Exception as e: + logger.warning("严格母题压词失败,保留当前正文: %s", e) + return draft + if len(candidate) < max(80, len(draft) * 0.55): + logger.warning("严格母题压词疑似过度压缩,保留当前正文") + return draft + return candidate + + async def _apply_strict_detector_signature_pass(self, *, draft: str, outline: str) -> str: + prompt = self._build_strict_detector_signature_prompt(draft=draft, outline=outline) + max_tokens = max(1024, min(12000, int(len(draft) * 1.25))) + try: + result = await self.llm_service.generate( + prompt, + 
GenerationConfig(max_tokens=max_tokens, temperature=0.82), + ) + return strip_reasoning_artifacts(result.content or "").strip() + except Exception as e: + logger.warning("严格句法清理失败,保留自然化正文: %s", e) + return draft + + @staticmethod + def _needs_human_texture_pass(text: str) -> bool: + """识别外部 AI 检测常抓的过度对称、过度精修行文。""" + return AutoNovelGenerationWorkflow._human_texture_risk_score(text) >= 5 + + @staticmethod + def _needs_human_residue_pass(text: str) -> bool: + return bool(AutoNovelGenerationWorkflow._detector_repetition_terms(text)) + + @staticmethod + def _needs_structural_audit_pass(text: str) -> bool: + return AutoNovelGenerationWorkflow._structural_audit_score(text) >= 10 + + @staticmethod + def _structural_audit_score(text: str) -> int: + """识别“解释/说明/讲完就达成共识”的结构型 AI 味。""" + if len(text or "") < 500: + return 0 + paragraphs = [p.strip() for p in text.splitlines() if p.strip()] + score = 0 + explain_terms = ("解释", "说明", "讲述", "补充", "机制", "规则", "背景", "原理") + score += min(sum(text.count(term) for term in explain_terms), 6) + score += min(text.count("很快达成共识") * 2, 4) + score += min(text.count("明白了") + text.count("听完后"), 4) + score += min(len(re.findall(r"(?:于是|随后|接着).{0,18}(?:解释|说明|补充|讲述)", text)), 4) + if len(paragraphs) >= 8: + dialogue_like = sum(1 for p in paragraphs if "“" in p or '"' in p) + if dialogue_like / len(paragraphs) < 0.12: + score += 2 + return score + + @staticmethod + def _detector_repetition_terms(text: str) -> List[str]: + """检测外部 AI 检测器常抓的高密度母题词复现。""" + if len(text or "") < 500: + return [] + + watch_terms = ( + "呼吸", "虹彩", "拓片", "节点", "坐标", "频率", "节律", "调谐", + "孔洞", "灰白", "甜腻", "结晶", "脐带", "肺泡", "肺叶", + "十七", "十九", "每分钟", "同步", "共振", + "像", "雨", "水", "冷", "潮", "湿", "铁锈味", + ) + counts = {term: text.count(term) for term in watch_terms} + counts.update(AutoNovelGenerationWorkflow._dynamic_motif_counts(text)) + threshold = 8 if len(text) < 3500 else 10 + generic_terms = {"像", "雨", "水", "冷", "潮", "湿", "铁锈味"} + generic_chars = set("雨水冷潮湿") + 
generic_threshold = 24 if len(text) < 3500 else 28 + dynamic_threshold = 14 if len(text) < 3500 else 18 + + def threshold_for(term: str) -> int: + if term in generic_terms or any(ch in generic_chars for ch in term): + return generic_threshold + if term not in watch_terms: + return dynamic_threshold + return threshold + + terms = [ + term for term, count in counts.items() + if count >= threshold_for(term) + ] + if len(terms) >= 3: + return sorted(terms, key=lambda item: counts[item], reverse=True) + if any(count >= threshold_for(term) * 2 for term, count in counts.items()): + return sorted(terms, key=lambda item: counts[item], reverse=True) + if any(term == "像" and count >= generic_threshold for term, count in counts.items()): + return sorted(terms, key=lambda item: counts[item], reverse=True) + return [] + + @staticmethod + def _dynamic_motif_counts(text: str) -> Dict[str, int]: + """从正文中动态抽取疑似题材意象词,避免每个题材都手写词表。""" + if len(text or "") < 500: + return {} + + marker_chars = set( + "雨水冷潮湿雾霜雪风火光影灯血骨肉皮纸票门锁屏电机车" + "剑刀枪刃甲丹田经脉灵气威压符阵石壁墙镜" + ) + stop_chars = set("的一是在了和也就都而及与把被让给对上下中里个这那他她它我你们来去着过没又还只很更最") + stop_terms = { + "他们", "她们", "这个", "那个", "什么", "不是", "没有", "已经", + "一下", "一点", "一声", "一样", "时候", "里面", "外面", + } + counts: Dict[str, int] = {} + for segment in re.findall(r"[\u4e00-\u9fff]{2,}", text): + for size in (2, 3): + if len(segment) < size: + continue + for index in range(0, len(segment) - size + 1): + term = segment[index:index + size] + if term in stop_terms: + continue + if any(ch in stop_chars for ch in term): + continue + if not any(ch in marker_chars for ch in term): + continue + counts[term] = counts.get(term, 0) + 1 + return counts + + @staticmethod + def _is_motif_repetition_improved(candidate: str, baseline: str, terms: List[str]) -> bool: + if not candidate.strip() or not baseline.strip() or not terms: + return False + baseline_score = sum(baseline.count(term) for term in terms) + candidate_score = sum(candidate.count(term) for term in terms) + 
return candidate_score < baseline_score + + @staticmethod + def _soft_cap_detector_motifs(text: str) -> str: + """保留旧入口,但不再做机械字符串替换。 + + 之前的收尾替换会把数字和母题词拼成“三旧值”“上一组读数”等怪异表达; + 母题降噪改由 LLM 编辑型 pass 完成,避免破坏小说正文。 + """ + return text + + @staticmethod + def _is_detector_signature_improved(candidate: str, baseline: str, *, strict: bool = False) -> bool: + candidate = (candidate or "").strip() + baseline = (baseline or "").strip() + if not candidate: + return False + if len(candidate) < max(80, len(baseline) * 0.45): + return False + + candidate_score = AutoNovelGenerationWorkflow._human_texture_risk_score(candidate) + baseline_score = AutoNovelGenerationWorkflow._human_texture_risk_score(baseline) + if candidate_score >= baseline_score: + return False + if candidate.count("不是") > baseline.count("不是"): + return False + + if strict: + max_not = max(4, baseline.count("不是") // 3) + max_like_some = max(1, baseline.count("像某种") // 3) + max_some = max(4, baseline.count("某种") // 2) + if candidate.count("不是") > max_not: + return False + if candidate.count("像某种") > max_like_some: + return False + if candidate.count("某种") > max_some: + return False + + return True + + @staticmethod + def _human_texture_risk_score(text: str) -> int: + """给二次改写前后做同一把尺子的轻量风险评分。""" + if len(text) < 500: + return 0 + + score = 0 + score += min(len(re.findall(r"不是[^。!?\n]{1,28}[,,]是", text)), 4) + score += min(text.count("像某种"), 4) + score += min(text.count("像") // 12, 5) + score += min(text.count("某种") // 4, 3) + score += min(len(re.findall(r"没有[^。!?\n]{1,24}[,,]?(?:只是|而是)", text)), 2) + score += min(len(re.findall(r"不是[^。!?\n]{1,24}(?:而是|而是在)", text)), 2) + score += min(text.count("不是") // 4, 5) + score += min(len(re.findall(r"(?:^|\n)\s*不是", text)), 4) + + paragraphs = [p.strip() for p in text.splitlines() if p.strip()] + if len(paragraphs) >= 18: + short_ratio = sum(1 for p in paragraphs if len(p) <= 18) / len(paragraphs) + if short_ratio >= 0.45: + score += 2 + elif short_ratio >= 0.35: + score += 1 
+ if short_ratio >= 0.28: + score += 1 + starts = [p[:2] for p in paragraphs if len(p) >= 2] + if starts: + most_common_start = max(starts.count(s) for s in set(starts)) + if most_common_start / len(starts) >= 0.16: + score += 1 + + return score + + @staticmethod + def _human_residue_score(text: str) -> int: + """轻量估算正文是否还保留了人工草稿的局部余量。""" + if len(text or "") < 500: + return 0 + + paragraphs = [p.strip() for p in text.splitlines() if p.strip()] + score = 0 + score += min(len(re.findall(r"[“”\"']", text)) // 2, 6) + score += min(len(re.findall(r"[??!!]", text)), 5) + score += min(len(re.findall(r"(?:顿了顿|停了|没接|没问|没说|咽回去|伸手|缩回|低头|抬眼|偏头)", text)), 8) + score += min(len(re.findall(r"(?:半寸|半秒|两步|三步|指节|袖口|杯沿|门缝|纸角|鞋底|抽屉|钥匙|票据|录音笔|证物袋)", text)), 8) + if len(paragraphs) >= 12: + lengths = [len(p) for p in paragraphs] + short = sum(1 for length in lengths if length <= 24) + long = sum(1 for length in lengths if length >= 90) + if short and long: + score += 2 + unique_starts = len({p[:2] for p in paragraphs if len(p) >= 2}) + if unique_starts / len(paragraphs) >= 0.75: + score += 2 + return score + + @staticmethod + def _build_human_texture_rewrite_prompt(*, draft: str, outline: str) -> Prompt: + target_not_count = min(4, max(1, draft.count("不是") // 2)) + variables = { + "draft": draft, + "rhythm_goal": ( + "保留剧情事实和本章大纲,重点削弱过度工整、过度对称、过度镜头化的AI式精修感;" + "减少连续的“不是X,是Y”“像某种”结构,让段落更像人工作者现场取舍后的表达;" + f"全文“不是”不超过{target_not_count}次,“像某种”尽量为0,“某种”不超过4次;" + "普通“像……”比喻不超过每千字3处,重复场景词不要压成同一组雨/水/冷意象;" + "优先改成直接动作、物证变化、短对白或角色误判,不要新增新的排比式否定句。" + ), + } + try: + from infrastructure.ai.prompt_manager import get_prompt_manager + + manager = get_prompt_manager() + manager.ensure_seeded() + rendered = manager.render("rewrite-prose-irregularity", variables) + if rendered and (rendered.get("system") or "").strip() and (rendered.get("user") or "").strip(): + return Prompt( + system=rendered["system"].strip(), + user=rendered["user"].strip(), + ) + except Exception as e: + 
logger.warning("句式节奏破整提示词节点不可用,回退内置提示词: %s", e) + + return Prompt( + system=( + "你是中文小说行文节奏编辑。请保留原剧情事实、人物、时间线和伏笔," + "只削弱过度工整、过度对称、过度解释的AI式精修感。减少连续的固定句式," + "让句长、停顿、段落和细节取舍更像人工作者写作。只输出改写后的正文。" + ), + user=( + f"【本章大纲】\n{outline.strip()}\n\n" + "【需要破整的正文】\n" + f"{draft}\n\n" + "请只输出调整后的小说正文:" + ), + ) + + @staticmethod + def _build_strict_detector_signature_prompt(*, draft: str, outline: str) -> Prompt: + target_not_count = min(4, max(1, draft.count("不是") // 3)) + return Prompt( + system=( + "你是中文小说检测器指纹清理编辑。只做表达层改写,不能改剧情、人物、道具、地点、" + "时间线、伏笔和结尾钩子。目标是删除外部AI检测器常抓的重复句法。只输出正文。" + ), + user=( + f"【本章大纲】\n{outline.strip()}\n\n" + "【硬性指标】\n" + f"- 全文“不是”不超过 {target_not_count} 次。\n" + "- “像某种”尽量为 0 次。\n" + "- “某种”不超过 4 次。\n" + "- 普通“像……”比喻不超过每千字 3 处;删掉可有可无的比喻,改成直接动作或物理后果。\n" + "- 同一场景母题词不要过密复现;雨、水、冷、铁锈味、潮湿等词重复过多时,改成光线、脚步、设备、纸张、手部动作或对白反应。\n" + "- 不用“不是X,是Y”做连续纠偏;改成直接描写、动作后果、对白停顿或角色误判。\n" + "- 保留原文已出现的关键名词、线索、坐标、道具和因果顺序。\n" + "- 不要写修改说明。\n\n" + "【待清理正文】\n" + f"{draft}\n\n" + "请输出清理后的小说正文:" + ), + ) + + @staticmethod + def _build_human_residue_prompt(*, draft: str, outline: str, terms: List[str]) -> Prompt: + term_text = "、".join(terms[:10]) + return Prompt( + system=( + "你是中文小说人工余量编辑。你的目标不是把文本改粗糙,而是削弱模型文常见的" + "单一母题过密、意象过度统一、每段都精准服务悬念的机器感。只输出正文。" + ), + user=( + f"【本章大纲】\n{outline.strip()}\n\n" + f"【需要降噪的高频母题词】\n{term_text}\n\n" + "【改写要求】\n" + "- 保留剧情事实、人物关系、关键道具、坐标、危机和结尾钩子。\n" + "- 不要把高频词全部删除;把一部分重复名词改成动作后果、场景物、人物误判或短对白。\n" + "- 如果高频词是“像、雨、水、冷、潮、湿、铁锈味”等通用词,优先删掉可有可无的比喻和重复氛围句;保留必要物证信息。\n" + "- 加入少量合理的现场摩擦:脚滑、光线失真、衣料湿冷、设备误报、门锁卡顿、旁人一句不合时宜的话。\n" + "- 允许信息释放不那么整齐:有些细节先被误读,有些句子停在动作上,不每段都收成同一组意象。\n" + "- 不新增核心设定,不解释修改过程。\n\n" + "【原文】\n" + f"{draft}\n\n" + "请输出降噪后的小说正文:" + ), + ) + + @staticmethod + def _build_structural_audit_prompt(*, draft: str, outline: str) -> Prompt: + return Prompt( + system=( + "你是中文商业小说结构审稿编辑。只做删改和重排,不改变剧情事实、人物、道具、地点、" + "时间线、伏笔和结尾钩子。目标是把说明流、设定解释流、很快达成共识的摘要流," + "改成读者能跟着看的行动链、证据链和对白试探。只输出正文。" + ), + user=( + f"【本章大纲】\n{outline.strip()}\n\n" + "【结构删改要求】\n" + 
"- 删掉或打散连续解释、背景讲述、规则说明,把必要信息落到角色接触证据、试错、误判和短对白里。\n" + "- 每 800 字内至少出现一次能改变判断的动作或物证变化。\n" + "- 不用“一番交谈后 / 很快达成共识 / 听完后明白了”跳过过程。\n" + "- 对话不要负责完整解释,只负责试探、核对、遮掩、反问或露出破绽。\n" + "- 保留原文关键信息,但允许换出现顺序,让信息更像现场逐步被发现。\n" + "- 不输出审稿说明。\n\n" + "【待删改正文】\n" + f"{draft}\n\n" + "请输出结构删改后的小说正文:" + ), + ) + + @staticmethod + def _build_style_bible_rewrite_prompt(*, draft: str, outline: str, style_overlay: str) -> Prompt: + variables = { + "style_overlay": style_overlay, + "must_keep": f"本章大纲:{outline.strip()};保留剧情事实、关键线索、道具状态和结尾钩子。", + "draft": draft, + } + from infrastructure.ai.prompt_manager import get_prompt_manager + + manager = get_prompt_manager() + manager.ensure_seeded() + rendered = manager.render("style-bible-imitation-pass", variables) + if rendered and (rendered.get("system") or "").strip() and (rendered.get("user") or "").strip(): + return Prompt( + system=rendered["system"].strip(), + user=rendered["user"].strip(), + ) + return Prompt( + system=( + "你是中文小说文风贴合编辑。学习风格约束中的节奏、细节选择、句式倾向和叙述距离," + "但不能复刻样本文字。保留剧情事实和关键线索,只输出改写后的正文。" + ), + user=( + f"【风格约束】\n{style_overlay.strip()}\n\n" + f"【必须保留】\n{variables['must_keep']}\n\n" + f"【原文】\n{draft}\n\n" + "请按风格约束轻度贴合,只输出正文:" + ), + ) + + @staticmethod + def _build_strict_motif_cap_prompt(*, draft: str, outline: str, terms: List[str]) -> Prompt: + caps = [] + for term in terms[:10]: + current = draft.count(term) + target = min(7, max(2, current // 3)) + caps.append(f"- “{term}”:当前 {current} 次,改后不超过 {target} 次") + return Prompt( + system=( + "你是中文小说重复母题压词编辑。你的任务是保留剧情事实,但显著减少同一批关键词的" + "机械复现,让文本像人工写作时会自然换说法、略过、误读和留白。只输出正文。" + ), + user=( + f"【本章大纲】\n{outline.strip()}\n\n" + "【硬性词频上限】\n" + + "\n".join(caps) + + "\n\n【替换方式】\n" + "- 第一次出现可保留关键词,后续尽量改成代词、动作、声音、设备读数、角色反应或干脆省略。\n" + "- 数字和坐标只在关键处出现;重复处改成角色动作、核对过程或具体载体,不要用“那个数”“旧值”等生硬占位。\n" + "- 生理/异常名词不要每段重复,改成现场后果:地面变滑、灯闪、门锁卡住、衣领发潮、设备误报。\n" + "- 加入少量生活摩擦,但不要新增核心人物、核心设定或改变结尾危机。\n" + "- 不要输出修改说明。\n\n" + "【原文】\n" + f"{draft}\n\n" + "请输出压词后的小说正文:" + ), + ) + + @staticmethod + def 
_build_ai_flavor_rewrite_prompt(*, draft: str, outline: str) -> Prompt: + variables = { + "draft": draft, + "must_keep": f"本章大纲:{outline.strip()}", + "rewrite_goal": "降低AI味,保留剧情事实,增强阅读沉浸", + "taboo_phrases": ( + "空气凝固、时间静止、心中五味杂陈、某种说不清的东西、" + "命运齿轮、一切才刚刚开始、再也回不去了、一番交谈后、很快达成共识、" + "不是X,是Y、像某种、连续排比式否定句;全文“不是”不超过4次," + "“像某种”尽量为0,“某种”不超过4次" + ), + } + try: + from infrastructure.ai.prompt_manager import get_prompt_manager + + manager = get_prompt_manager() + manager.ensure_seeded() + rendered = manager.render("rewrite-ai-flavor-naturalizer", variables) + if rendered and (rendered.get("system") or "").strip() and (rendered.get("user") or "").strip(): + return Prompt( + system=rendered["system"].strip(), + user=rendered["user"].strip(), + ) + except Exception as e: + logger.warning("AI味改写提示词节点不可用,回退内置提示词: %s", e) + + system = ( + "你是中文商业小说自然化改稿编辑。目标是降低AI味,保留原剧情事实、人物、地点、" + "因果顺序、伏笔和关键信息,不新增剧情,不解释修改过程。\n\n" + "硬要求:\n" + "1. 删除抽象情绪说明、模板总结、万能比喻和说明文腔。\n" + "2. 把情绪落到动作、物件、声音、停顿、视线和身体反应。\n" + "3. 对话更像真人:允许半句、回避、反问、误解和沉默,不要人人把动机说透。\n" + "4. 保留章节长度与节奏,不能压缩成摘要。\n" + "5. 首选上一轮效果较好的路线:调查动作清楚、物证逐步出现、对白试探、临场判断;" + "不要刻意粗糙化,不制造错别字或奇怪口癖。\n" + "6. 少用“不是X,是Y”结构,禁止连续排比式否定。\n" + "7. 
只输出改写后的小说正文。" + ) + user = ( + f"【本章大纲】\n{outline.strip()}\n\n" + "【需要自然化改写的正文】\n" + f"{draft}\n\n" + "请在不改变剧情事实的前提下降低AI味,只输出正文:" + ) + return Prompt(system=system, user=user) + + async def generate_chapter_with_review( + self, + novel_id: str, + chapter_number: int, + outline: str + ) -> Tuple[str, ConsistencyReport]: + """生成章节并返回一致性审查 + + Args: + novel_id: 小说 ID + chapter_number: 章节号 + outline: 章节大纲 + + Returns: + (content, consistency_report) 元组 + """ + result = await self.generate_chapter(novel_id, chapter_number, outline) + return result.content, result.consistency_report + + def _get_storyline_context(self, novel_id: str, chapter_number: int) -> str: + """获取故事线上下文 + + Args: + novel_id: 小说 ID + chapter_number: 章节号 + + Returns: + 故事线上下文字符串 + """ + try: + # 检查 storyline_manager 是否有 repository 属性 + if not hasattr(self.storyline_manager, 'repository'): + return "Storyline context unavailable" + + # 获取所有活跃的故事线 + storylines = self.storyline_manager.repository.get_by_novel_id(NovelId(novel_id)) + active_storylines = [ + s for s in storylines + if s.status.value == "active" + and s.estimated_chapter_start <= chapter_number <= s.estimated_chapter_end + ] + + if not active_storylines: + return "No active storylines for this chapter" + + context_parts = [] + for storyline in active_storylines: + context = self.storyline_manager.get_storyline_context(storyline.id) + context_parts.append(context) + + return "\n\n".join(context_parts) + except Exception as e: + logger.warning(f"Failed to get storyline context: {e}") + return "Storyline context unavailable" + + def _get_plot_tension(self, novel_id: str, chapter_number: int) -> str: + """获取情节张力信息 Args: novel_id: 小说 ID @@ -744,6 +2689,11 @@ def build_chapter_prompt( beat_target_words: Optional[int] = None, voice_anchors: str = "", chapter_draft_so_far: str = "", + style_overlay: str = "", + chapter_strategy: Optional[Dict[str, Any]] = None, + next_chapter_bridge: str = "", + target_word_count: Optional[int] = None, + 
word_tolerance_ratio: Optional[float] = None, ) -> Prompt: """构建与 HTTP 单章 / 流式 / 托管按节拍写作一致的 Prompt(对外 API)。""" return self._build_prompt( @@ -758,7 +2708,106 @@ def build_chapter_prompt( beat_target_words=beat_target_words, voice_anchors=voice_anchors, chapter_draft_so_far=chapter_draft_so_far, + style_overlay=style_overlay, + chapter_strategy=chapter_strategy, + next_chapter_bridge=next_chapter_bridge, + target_word_count=target_word_count, + word_tolerance_ratio=word_tolerance_ratio, + ) + + def _build_style_overlay( + self, + novel_id: str, + style_profile_id: str, + scene_type: str = "", + ) -> str: + if not self.style_prompt_overlay_service or not (style_profile_id or "").strip(): + return "" + try: + overlay = self.style_prompt_overlay_service.build_overlay( + novel_id, + style_profile_id, + scene_type=scene_type, + ) + return overlay.prompt + except Exception as e: + logger.warning("style bible overlay unavailable: %s", e) + return "" + + @staticmethod + def _extract_forbidden_patterns_from_style_overlay(style_overlay: str) -> List[str]: + text = str(style_overlay or "").strip() + if not text: + return [] + lines = [line.strip() for line in text.splitlines()] + in_forbidden = False + patterns: list[str] = [] + for line in lines: + if line.startswith("禁用项:"): + in_forbidden = True + continue + if in_forbidden and line.endswith(":") and not line.startswith("-"): + break + if in_forbidden and line.startswith("-"): + item = line[1:].strip() + if item and item not in patterns: + patterns.append(item) + return patterns[:10] + + async def _apply_forbidden_pattern_gate_if_needed( + self, + *, + content: str, + outline: str, + style_overlay: str, + ) -> str: + if not self._is_style_anchor_rag_enabled(): + return content + draft = (content or "").strip() + if not draft: + return content + patterns = self._extract_forbidden_patterns_from_style_overlay(style_overlay) + if not patterns: + return content + hit_patterns = [p for p in patterns if p and p in draft] + if not 
hit_patterns: + return content + + prompt = Prompt( + system=( + "你是中文小说修文编辑。你只能做局部改写:替换命中禁忌模板句,保持事实、剧情和人物关系不变。" + "只输出修订后的完整正文,不要解释。" + ), + user=( + "请按以下要求修订正文:\n" + "1) 只处理命中禁忌项的句子,不要全篇重写;\n" + "2) 保留剧情事件顺序、人物立场与道具状态;\n" + "3) 用动作、对白、停顿替代模板化总结句;\n" + "4) 禁止使用下列表达:\n" + + "\n".join(f"- {item}" for item in hit_patterns) + + "\n\n" + f"【本章大纲】\n{outline}\n\n" + f"【正文】\n{draft}\n\n" + "输出修订后的正文:" + ), ) + max_tokens = max(1200, min(12000, int(len(draft) * 1.25))) + try: + result = await self.llm_service.generate( + prompt, + GenerationConfig(max_tokens=max_tokens, temperature=0.76), + ) + revised = strip_reasoning_artifacts((result.content or "").strip()) + except Exception as e: + logger.warning("forbidden pattern gate failed: %s", e) + return content + + if not revised: + return content + if len(revised) < max(80, int(len(draft) * 0.55)): + logger.warning("forbidden pattern gate over-compressed, keep draft") + return content + return revised def _build_prompt( self, @@ -774,6 +2823,11 @@ def _build_prompt( beat_target_words: Optional[int] = None, voice_anchors: str = "", chapter_draft_so_far: str = "", + style_overlay: str = "", + chapter_strategy: Optional[Dict[str, Any]] = None, + next_chapter_bridge: str = "", + target_word_count: Optional[int] = None, + word_tolerance_ratio: Optional[float] = None, ) -> Prompt: """构建 LLM 提示词 @@ -788,6 +2842,7 @@ def _build_prompt( beat_target_words: 本段目标字数(分节拍时覆盖「整章 2000-3000 字」说明) voice_anchors: Bible 角色声线/小动作锚点(高优先级 System 提示) chapter_draft_so_far: 同章内当前节拍之前已生成的正文(拼接后传入,避免后续节拍重复) + style_overlay: 写作手法知识库提示词片段 Returns: Prompt 对象 @@ -796,6 +2851,11 @@ def _build_prompt( pt = (plot_tension or "").strip() ss = (style_summary or "").strip() va = (voice_anchors or "").strip() + so = (style_overlay or "").strip() + prop_overlay = self._build_prop_ledger_overlay() + coc_overlay = self._build_coc_canon_overlay() + coc_clue_overlay = self._build_coc_clue_overlay() + coc_cognition_overlay = self._build_coc_cognition_overlay() 
planning_parts: list[str] = [] if sc and sc not in ("Storyline context unavailable",): planning_parts.append(f"【故事线 / 里程碑】\n{sc}") @@ -803,6 +2863,30 @@ def _build_prompt( planning_parts.append(f"【情节节奏 / 期望张力】\n{pt}") if ss: planning_parts.append(f"【风格约束】\n{ss}") + if so: + planning_parts.append(so) + if prop_overlay: + planning_parts.append(prop_overlay) + if coc_overlay: + planning_parts.append(coc_overlay) + if coc_clue_overlay: + planning_parts.append(coc_clue_overlay) + if coc_cognition_overlay: + planning_parts.append(coc_cognition_overlay) + strategy_overlay = self._build_strategy_overlay(chapter_strategy) + if strategy_overlay: + planning_parts.append(strategy_overlay) + if (next_chapter_bridge or "").strip(): + planning_parts.append(next_chapter_bridge.strip()) + chapter_contract = self._build_chapter_contract_overlay(context=context, outline=outline) + if chapter_contract: + planning_parts.append(chapter_contract) + detector_calibration = self._build_detector_calibration_overlay(context=context, outline=outline) + if detector_calibration: + planning_parts.append(detector_calibration) + genre_overlay = self._build_genre_overlay(context=context, outline=outline) + if genre_overlay: + planning_parts.append(genre_overlay) planning_section = "" if planning_parts: planning_section = ( @@ -819,11 +2903,32 @@ def _build_prompt( beat_mode = bool((beat_prompt or "").strip()) prior_in_chapter = format_prior_draft_for_prompt(chapter_draft_so_far) - length_rule = ( - f"7. 本段约 {beat_target_words} 字(本章分多节输出之一,勿写章节标题)" - if beat_target_words - else ("7. 章节长度:3000-4000字" if not beat_mode else "7. 按下方节拍说明控制篇幅,勿写章节标题") - ) + target_range = self._target_word_range(target_word_count, word_tolerance_ratio) + if beat_target_words: + beat_range = self._target_word_range(beat_target_words, word_tolerance_ratio) + if beat_range: + min_words, max_words = beat_range + length_rule = ( + f"7. 
【硬性字数】本段目标 {beat_target_words} 字,允许 {min_words}-{max_words} 字;" + "接近上限时立即收束,不要为了补气氛继续扩写。" + ) + else: + length_rule = f"7. 【硬性字数上限】本段最多 {beat_target_words} 字,超出将被截断,请精炼叙述。" + elif target_range: + min_words, max_words = target_range + target = self._effective_word_target(target_word_count) + length_rule = ( + f"7. 【硬性字数】本章目标 {target} 字,允许 {min_words}-{max_words} 字;" + "少于下限时补关键冲突和人物互动,接近上限时收束,不要继续铺陈。" + ) + else: + default_target = self._effective_word_target(target_word_count) + min_words, max_words = self._target_word_range(default_target, word_tolerance_ratio) + length_rule = ( + f"7. 【硬性字数】本章目标 {default_target} 字,允许 {min_words}-{max_words} 字;" + if not beat_mode + else "7. 按下方节拍说明控制篇幅,勿写章节标题" + ) beat_extra = "" if beat_mode and beat_index is not None and total_beats is not None and total_beats > 0: if prior_in_chapter: @@ -856,9 +2961,54 @@ def _build_prompt( except Exception as e: logger.warning(f"MemoryEngine fact_lock 构建失败: {e}") - # ⚡ 提示词集中管理说明: - # 此模板对应 prompts_defaults.json 中的 id=workflow-chapter-generation - # 如需修改提示词内容,请编辑 JSON 文件而非此代码文件 + prior_draft_block = "" + if beat_mode and prior_in_chapter: + prior_draft_block = f""" + +【本章已生成正文(仅承接;禁止复述、改写或重复已交代的情节与对白;勿写章节标题)】 +{prior_in_chapter} +""" + + beat_section = "" + if beat_mode: + bi = beat_index if beat_index is not None else 0 + tb = total_beats if total_beats is not None else 1 + beat_tail = ( + "本段只写该节拍对应正文,紧接上文已写正文之后继续,衔接自然。" + if prior_in_chapter + else "本段只写该节拍对应正文,与全章其它节拍情节连贯。" + ) + beat_section = f""" + +【节拍 {bi + 1}/{tb}】 +{(beat_prompt or '').strip()} + +{beat_tail}""" + + render_variables = { + "planning_section": planning_section, + "voice_block": voice_block, + "context": context, + "fact_lock": fact_lock, + "length_rule": length_rule, + "beat_extra": beat_extra, + "outline": outline, + "prior_draft": prior_draft_block, + "beat_section": beat_section, + "style_overlay": so, + "next_chapter_bridge": next_chapter_bridge, + "genre_overlay": genre_overlay, + "chapter_contract": 
chapter_contract, + "detector_calibration": detector_calibration, + } + + visible_prompt = self._render_visible_workflow_prompt(render_variables) + if visible_prompt: + return Prompt( + system=visible_prompt["system"], + user=self._ensure_generation_start_suffix(visible_prompt["user"]), + ) + system_message = f"""你是一位专业的网络小说作家。根据以下上下文撰写章节内容。 {planning_section}{voice_block}{context} @@ -886,32 +3036,1297 @@ def _build_prompt( - 推进主线情节,不要原地踏步 - 结尾要有悬念或转折""" - if beat_mode and prior_in_chapter: - user_message += f""" + user_message += prior_draft_block + user_message += beat_section -【本章已生成正文(仅承接;禁止复述、改写或重复已交代的情节与对白;勿写章节标题)】 -{prior_in_chapter} -""" + user_message += "\n\n开始撰写:" - if beat_mode: - bi = beat_index if beat_index is not None else 0 - tb = total_beats if total_beats is not None else 1 - beat_tail = ( - "本段只写该节拍对应正文,紧接上文已写正文之后继续,衔接自然。" - if prior_in_chapter - else "本段只写该节拍对应正文,与全章其它节拍情节连贯。" + return Prompt(system=system_message, user=user_message) + + def _build_direct_writing_prompt( + self, + *, + context: str, + outline: str, + storyline_context: str = "", + plot_tension: str = "", + style_summary: str = "", + voice_anchors: str = "", + chapter_strategy: Optional[Dict[str, Any]] = None, + next_chapter_bridge: str = "", + target_word_count: Optional[int] = None, + word_tolerance_ratio: Optional[float] = None, + ) -> Prompt: + """用于对照测试的直接写作提示词:少流程、少后处理,接近单次人工写作。""" + planning_parts: list[str] = [] + for title, value in ( + ("故事线 / 里程碑", storyline_context), + ("情节节奏 / 期望张力", plot_tension), + ("风格约束", style_summary), + ("角色声线与肢体语言", voice_anchors), + ): + text = value.strip() if isinstance(value, str) else "" + if text and "unavailable" not in text.lower(): + planning_parts.append(f"【{title}】\n{text}") + + fact_lock = "" + if self.memory_engine: + try: + parts = [ + self.memory_engine.build_fact_lock_section( + self._current_novel_id or "", self._current_chapter_number or 0 + ), + self.memory_engine.get_completed_beats_section(self._current_novel_id or ""), 
+ self.memory_engine.get_revealed_clues_section(self._current_novel_id or ""), + ] + fact_lock = "\n\n".join(p for p in parts if p and p.strip()) + except Exception as e: + logger.warning("direct writing fact_lock skipped: %s", e) + + planning_section = "\n\n".join(planning_parts) + genre_overlay = self._build_genre_overlay(context=context, outline=outline) + detector_calibration = self._build_detector_calibration_overlay(context=context, outline=outline) + prop_overlay = self._build_prop_ledger_overlay() + coc_overlay = self._build_coc_canon_overlay() + coc_clue_overlay = self._build_coc_clue_overlay() + coc_cognition_overlay = self._build_coc_cognition_overlay() + strategy_overlay = self._build_strategy_overlay(chapter_strategy) + constraints = "\n\n".join( + part + for part in ( + planning_section, + fact_lock, + detector_calibration, + genre_overlay, + prop_overlay, + coc_overlay, + coc_clue_overlay, + coc_cognition_overlay, + strategy_overlay, + (next_chapter_bridge or "").strip(), ) - user_message += f""" + if part + ) + target_range = self._target_word_range(target_word_count, word_tolerance_ratio) + if target_range: + min_words, max_words = target_range + target = self._effective_word_target(target_word_count) + else: + target = self._effective_word_target(target_word_count) + tolerance = max(120, int(target * 0.05)) + min_words = max(500, target - tolerance) + max_words = target + tolerance + direct_length_rule = ( + f"本章目标 {target} 中文字,允许 {min_words}-{max_words} 字。" + "少于下限时补角色互动或阻力升级,接近上限时立刻收束。" + ) -【节拍 {bi + 1}/{tb}】 -{(beat_prompt or '').strip()} + system_message = f"""你是一个长期连载中文小说作者。现在进入“直接写作模式”:只写正文,不解释写法,不输出大纲,不做总结报告。 -{beat_tail}""" +你要像人在回忆一个具体场景,而不是像模型完成任务。请保持事实一致,但不要把所有信息都讲清楚;让动作、对话、停顿、误判和道具变化自己推进故事。 - user_message += "\n\n开始撰写:" +【直接写作规则】 +1. 开头直接进入一个可见动作、声音、物件变化或人物反应,不先介绍背景。 +2. 每一段只承担一个小动作或一次信息变化,避免段段收束成结论。 +3. 对话要有遮掩、停顿、误解和试探;不要让人物把动机、世界观、推理过程一次说完。 +4. 情绪不用抽象词解释,落到手、眼神、步伐、物件、声音、沉默和临时决定。 +5. 保留一点不整齐:允许短句、半句、插入动作、轻微绕路;不要写成一篇被修得很光滑的稿子。 +6. 
少用万能比喻和统一意象。普通“像……”比喻全章控制在少量,重复场景词要主动换成动作或道具。 +7. {direct_length_rule} + +{constraints} + +【上下文】 +{context}""" + + user_message = f"""请直接写这一章正文: + +{outline} + +只输出小说正文。不要写章节标题,不要列项目,不要解释规则。 + +开始撰写:""" + return Prompt(system=system_message, user=user_message) + + @staticmethod + def _build_direct_light_polish_prompt(*, draft: str, outline: str) -> Prompt: + system_message = """你是中文小说轻修编辑。你的任务不是重写,不是润色成更华丽,而是把一篇已经写好的章节做少量人工化修补。 + +这次的优先级是“保留草稿感”,不是“修顺”。如果一段已经有现场动作、停顿、口语、物件细节或不完整的反应,就不要碰它。 + +硬性边界: +1. 保留 90% 以上原文句子、剧情事实、人物关系、道具状态和信息顺序。 +2. 不新增角色,不新增设定,不改变结尾事件。 +3. 只局部修改检测器敏感位置:抽象总结、心理直说、说明腔、段落过整、每段都收束成结论。 +4. 把“他意识到/他明白/复杂情绪/某种感觉/一切都……”改成可见动作、停顿、物件变化、旁人反应或一句未说完的话。 +5. 不要把句子修得更整齐,不要统一段落长度,不要给每段补结论,不要增加成套排比。 +6. 不要故意写错别字,不要制造奇怪口癖,不要把全文改成同一种短句。 +7. 只输出轻修后的小说正文。""" + + user_message = f"""本章大纲: +{outline} +请轻修下面正文,只做 5%-10% 的局部改动,目标是保留直接写作稿的松弛感,同时减少过度说明和过度工整。 + +不要为了“更通顺”而重写全章;没有明显问题的段落原样保留。 + +正文: +{draft}""" return Prompt(system=system_message, user=user_message) + @staticmethod + def _build_chapter_contract_overlay(*, context: str, outline: str) -> str: + """给模型一个好看优先的章节戏剧任务,避免滑向说明文或检测器导向。""" + outline_text = (outline or "").strip() + lines = [ + "【好看优先:章节戏剧任务】", + "写作前先在心里确定本章戏剧任务,但不要输出计划:", + "- POV 角色此刻想要什么;", + "- 谁或什么阻碍他;", + "- 他手里有什么筹码、误判或不能说出口的动机;", + "- 本章结束时读者获得什么兑现,又被什么新问题勾住。", + ] + if outline_text: + lines.append("本章必须让大纲中的目标落成可见变化,不能只复述设定或解释背景。") + else: + lines.append("当前大纲为空或过短时,请从上下文里抽取一个最自然的短线任务来写:小证据、小阻碍、小试探、小代价,四者至少满足两项。") + lines.extend([ + "", + "【内部场景推进表(只在心里列,不输出)】", + "把本章拆成 2-4 个场景或节拍,每个场景必须有:", + "1. 场景任务:角色要拿到、确认、隐瞒、反制或失去什么;", + "2. 阻力:人、制度、时间、误会、体力、道具、环境或旧关系至少一种;", + "3. 新信息:读者对人物、线索、局势或关系的理解必须发生变化;", + "4. 可见锚点:一个具体地点/物件/动作/记录/身体反应承载这次变化;", + "5. 
出口钩子:场景结束时留下下一步选择、未说完的话、反常细节或代价。", + "", + "解释信息前先让角色碰到证据、阻力或后果;不要让人物站着讲完机制。", + "检测器分数不是写作目标。若“降低AI味”和“章节好看”冲突,优先保证人物欲望、冲突推进和追读钩子。", + "", + "【追读自检】", + "输出前内部检查:开头是否有钩子,中段是否有阻力升级,结尾是否有未完成问题、关系裂口、危险升级或证据反转;没有就补一个具体钩子。", + ]) + return "\n".join(lines) + + @staticmethod + def _build_detector_calibration_overlay(*, context: str, outline: str) -> str: + """从外部检测器真人样本提炼出的小说化事实锚点规则。""" + source = f"{context or ''}\n{outline or ''}" + genre_key = AutoNovelGenerationWorkflow._infer_genre_key(source) + if genre_key in {"suspense", "urban", "cultivation", "comic_adaptation"}: + anchor_examples = { + "suspense": "门禁编号、票据抬头、摄像头角度、检测阈值、日志时间、证物封条、坐标误差", + "urban": "合同条款、会议纪要、转账时间、项目编号、报价差额、审批流程、设备记录", + "cultivation": "丹炉火候、阵纹编号、灵石刻度、伤口变化、功法层级、药液比例、巡山时辰", + "comic_adaptation": "分镜位置、招牌文字、服装材质、镜头遮挡、榜单数据、道具特写、动作顺序", + }[genre_key] + else: + anchor_examples = "时间、地点、材质、编号、票据、记录、流程、误差、阈值、旧物磨损、操作顺序" + + return "\n".join([ + "【检测器校准:事实锚点写法】", + "外部检测器更容易把高密度、可核验、非抒情的事实链判为人工文本;请把这个特征小说化,而不是写成论文。", + f"本章每 700-1000 字至少嵌入 1 个非文学性事实锚点,可选:{anchor_examples}。", + "事实锚点必须由角色看见、摸到、核对、误读或操作出来;不要站出来说明背景。", + "优先写“数据/物件/流程 -> 角色判断 -> 出错或受阻 -> 新动作”的因果链,少写纯氛围和纯情绪。", + "允许少量不那么文学化的句子:记录式短句、半截术语、口头修正、旧标签、错位标点或中英缩写,但不能故意写错别字,也不能破坏可读性。", + ]) + + @staticmethod + def _build_genre_overlay(*, context: str, outline: str) -> str: + """根据现有上下文和本章大纲生成类型化网文写法规则。""" + source = f"{context or ''}\n{outline or ''}" + genre_key = AutoNovelGenerationWorkflow._infer_genre_key(source) + if not genre_key: + return "" + + overlays = { + "suspense": ( + "悬疑/调查", + [ + "开头先给异常或后果,不先解释异常来源。", + "本章至少出现一个线索、一个误判、一个暂时被遮住的事实。", + "信息释放分层:角色看到的、角色误解的、读者可疑的、真相暂不说破的。", + "紧张感来自现场后果、证物变化和人物反应,不靠抽象氛围形容词。", + ], + ), + "urban": ( + "都市爽文", + [ + "开头必须有现实压力:钱、权、身份、资源、名声、家人、安全或机会被夺。", + "主角不能只被动挨打,至少做出一次判断、试探或反制。", + "爽点按“被压制、找破口、小范围兑现、更大对手出现”推进。", + "配角要代表资源、阻碍或利益立场,不要只做解释和捧场。", + ], + ), + "cultivation": ( + "玄幻/仙侠", + [ + "每章至少让修为、资源、敌我差距或规则限制中的一个发生变化。", + "设定只在行动中出现:功法、法器、血脉、禁地规则通过使用、失败或代价展示。", + 
"战斗重点写判断、破绽、代价、环境变化和旁观者反应,不堆招式说明。", + "阶段性兑现后留下更高层级压力,避免一章把问题收干净。", + ], + ), + "historical_romance": ( + "古言/宅斗", + [ + "冲突落在身份、礼法、婚约、家族利益、名声、继承或权力站队上。", + "对话要藏刀,人物不能把真实目的直接说完。", + "细节写规矩和位置:谁坐哪、谁先开口、谁不能接话、谁被迫低头。", + "爽点不是吵赢,而是让局势、名分、证据或人心发生偏移。", + ], + ), + "romance": ( + "情感/关系流", + [ + "每章推进关系温度:靠近、误会、试探、退让、暴露弱点或边界变化。", + "情绪不要解释成“心动/难过/复杂”,要落到动作、回避、停顿、没说完的话。", + "CP 拉扯要有外部事件承载,不要纯聊天。", + "人物魅力来自选择和克制,不来自作者替他夸。", + ], + ), + "comic_adaptation": ( + "漫画转小说", + [ + "优先保留漫画题材里的第一眼冲突:身份反差、画面奇观、关系张力、强设定物件。", + "把视觉冲击转成小说场景:动作、空间、道具、表情、停顿和围观反应。", + "不照搬漫画夸张对白,改成更有潜台词的互动。", + "第一章必须让读者看见核心卖点,而不是只读到设定说明。", + ], + ), + } + title, rules = overlays[genre_key] + return "\n".join(["【类型写法规则】", f"当前类型:{title}", *[f"- {rule}" for rule in rules]]) + + @staticmethod + def _infer_genre_key(text: str) -> str: + source = (text or "").lower() + checks: list[tuple[str, tuple[str, ...]]] = [ + ("comic_adaptation", ("漫画", "分镜", "快看", "腾讯动漫", "视觉冲突")), + ("historical_romance", ("古言", "宅斗", "宫斗", "侯府", "王爷", "嫡女", "庶女", "婚约", "礼法")), + ("cultivation", ("玄幻", "仙侠", "修仙", "宗门", "灵气", "功法", "境界", "法器", "血脉")), + ("suspense", ("悬疑", "推理", "案件", "凶案", "调查", "档案", "线索", "嫌疑", "证物", "异常事件")), + ("urban", ("都市", "逆袭", "职场", "商战", "系统", "神豪", "赘婿", "校花", "夺功", "上司")), + ("romance", ("现言", "甜宠", "豪门", "总裁", "先婚", "替身", "破镜重圆", "双男", "女频", "情感")), + ] + for key, keywords in checks: + if any(keyword in source for keyword in keywords): + return key + return "" + + @staticmethod + def _build_strategy_overlay(chapter_strategy: Optional[Dict[str, Any]]) -> str: + if not isinstance(chapter_strategy, dict): + return "" + contract = chapter_strategy.get("chapter_contract") or {} + dramatic = chapter_strategy.get("dramatic_task") or {} + scenes = chapter_strategy.get("scene_plan") or [] + focus_points = chapter_strategy.get("writing_focus") or [] + lines = ["【本章写作策略(已确认,必须执行)】"] + if isinstance(contract, dict) and contract: + lines.extend([ + "章节合同:", + f"- 本章问题:{str(contract.get('chapter_question') or 
'未说明').strip()}", + f"- 主角想要:{str(contract.get('protagonist_want') or '未说明').strip()}", + f"- 阻力来源:{str(contract.get('opposition') or '未说明').strip()}", + f"- 信息变化:{str(contract.get('required_information_change') or '未说明').strip()}", + f"- 关系变化:{str(contract.get('required_relationship_change') or '未说明').strip()}", + f"- 章末追问:{str(contract.get('ending_question') or '未说明').strip()}", + "展示优先:", + ]) + rules = contract.get("show_dont_tell_rules") if isinstance(contract.get("show_dont_tell_rules"), list) else [] + for rule in rules[:5]: + text = str(rule or "").strip() + if text: + lines.append(f"- {text}") + if dramatic: + lines.extend([ + "戏剧任务:", + f"- 角色想要:{str(dramatic.get('goal') or '未说明').strip()}", + f"- 主要阻碍:{str(dramatic.get('obstacle') or '未说明').strip()}", + f"- 读者期待:{str(dramatic.get('reader_expectation') or '未说明').strip()}", + f"- 章末钩子:{str(dramatic.get('ending_hook') or '未说明').strip()}", + ]) + if scenes: + lines.append("场景推进:") + for index, scene in enumerate(scenes[:4], start=1): + if not isinstance(scene, dict): + continue + title = str(scene.get("label") or scene.get("title") or f"场景 {index}").strip() + task = str(scene.get("task") or "未说明").strip() + resistance = str(scene.get("resistance") or "未说明").strip() + info_shift = str(scene.get("info_shift") or "未说明").strip() + relation_shift = str(scene.get("relationship_shift") or "未说明").strip() + visible_action = str(scene.get("visible_action") or scene.get("anchor") or "未说明").strip() + subtext_dialogue = str(scene.get("subtext_dialogue") or "未说明").strip() + unspoken_emotion = str(scene.get("unspoken_emotion") or "未说明").strip() + clue_change = str(scene.get("object_or_clue_change") or "未说明").strip() + hook = str(scene.get("hook") or "未说明").strip() + lines.append( + f"{index}. 
{title}|任务:{task}|阻力:{resistance}|变化:{info_shift}|关系:{relation_shift}|动作:{visible_action}|潜台词:{subtext_dialogue}|不直说:{unspoken_emotion}|线索/道具:{clue_change}|钩子:{hook}" + ) + if focus_points: + lines.append("执行提醒:") + for item in focus_points[:4]: + text = str(item or "").strip() + if text: + lines.append(f"- {text}") + lines.append("正文必须围绕这份策略推进,不能写成与策略无关的设定说明或平铺叙述。") + return "\n".join(lines) + + def _build_next_chapter_bridge_overlay( + self, + *, + novel_id: str, + chapter_number: int, + target_word_count: Optional[int], + chapter_strategy: Optional[Dict[str, Any]], + ) -> str: + manual_notes = self._extract_manual_next_chapter_notes(chapter_strategy) + auto_enabled = self._should_enable_next_chapter_bridge(target_word_count) + if not manual_notes and not auto_enabled: + return "" + + lines: list[str] = ["【下一章承接设定(长章前摄)】"] + if manual_notes: + lines.append("手动设定:") + for note in manual_notes[:4]: + lines.append(f"- {note}") + + if auto_enabled: + next_seed = self._resolve_next_chapter_seed(novel_id, chapter_number + 1) + if next_seed: + title = next_seed.get("title") or f"第{chapter_number + 1}章" + outline = next_seed.get("outline") or "" + lines.append(f"下一章预设:第{chapter_number + 1}章《{title}》") + if outline: + lines.append(f"- 核心设定:{outline}") + elif not manual_notes: + lines.append(f"下一章预设:第{chapter_number + 1}章尚未有明确大纲,请在本章末尾预留转场钩子。") + + lines.extend( + [ + "执行要求:", + "- 本章后 20% 要埋入 1-2 个可承接锚点(人物决定 / 道具状态 / 风险升级)。", + "- 只埋钩子,不提前完整剧透下一章核心反转。", + "- 锚点必须可见可写(动作、对白、道具、时间点),不能只写抽象预告。", + ] + ) + return "\n".join(lines) + + def _should_enable_next_chapter_bridge(self, target_word_count: Optional[int]) -> bool: + return self._effective_word_target(target_word_count) >= LONG_CHAPTER_NEXT_SETUP_MIN_WORDS + + @staticmethod + def _extract_manual_next_chapter_notes(chapter_strategy: Optional[Dict[str, Any]]) -> List[str]: + if not isinstance(chapter_strategy, dict): + return [] + notes: list[str] = [] + + def _append_text(value: Any) -> None: + if isinstance(value, str): 
+ text = value.strip() + if text and text not in notes: + notes.append(text) + elif isinstance(value, list): + for item in value: + _append_text(item) + elif isinstance(value, dict): + compact = ";".join( + f"{k}: {str(v).strip()}" + for k, v in value.items() + if str(v).strip() + ) + if compact and compact not in notes: + notes.append(compact) + + for key in ("next_chapter_setup", "next_chapter_bridge", "next_chapter_hint", "next_setup", "next_chapter"): + if key in chapter_strategy: + _append_text(chapter_strategy.get(key)) + return notes + + def _resolve_next_chapter_seed(self, novel_id: str, next_chapter_number: int) -> Optional[Dict[str, str]]: + story_node_repo = getattr(self.context_builder, "story_node_repository", None) + if story_node_repo and hasattr(story_node_repo, "get_by_novel_sync"): + try: + nodes = story_node_repo.get_by_novel_sync(novel_id) or [] + for node in nodes: + node_type = getattr(node, "node_type", None) + node_type_value = getattr(node_type, "value", node_type) + if str(node_type_value or "").lower() != "chapter": + continue + number = getattr(node, "number", None) + if number is None or int(number) != int(next_chapter_number): + continue + title = str(getattr(node, "title", "") or "").strip() + outline_raw = ( + getattr(node, "outline", None) + or getattr(node, "description", None) + or getattr(node, "content", None) + or "" + ) + outline = self._compact_prompt_text(str(outline_raw), NEXT_CHAPTER_SETUP_MAX_CHARS) + if title or outline: + return {"title": title, "outline": outline} + break + except Exception as e: + logger.debug("next chapter seed from story nodes skipped: %s", e) + + chapter_repo = getattr(self.context_builder, "chapter_repository", None) + if chapter_repo and hasattr(chapter_repo, "list_by_novel"): + try: + chapters = chapter_repo.list_by_novel(NovelId(novel_id)) or [] + for chapter in chapters: + if int(getattr(chapter, "number", -1)) != int(next_chapter_number): + continue + title = str(getattr(chapter, "title", "") 
or "").strip() + outline_raw = getattr(chapter, "outline", None) or "" + if not outline_raw: + outline_raw = getattr(chapter, "content", None) or "" + outline = self._compact_prompt_text(str(outline_raw), NEXT_CHAPTER_SETUP_MAX_CHARS) + if title or outline: + return {"title": title, "outline": outline} + break + except Exception as e: + logger.debug("next chapter seed from chapters skipped: %s", e) + return None + + @staticmethod + def _compact_prompt_text(text: str, max_chars: int) -> str: + raw = " ".join(str(text or "").strip().split()) + if len(raw) <= max_chars: + return raw + return raw[: max_chars - 1].rstrip() + "…" + + @staticmethod + def _coerce_llm_content_to_text(raw: Any) -> str: + if raw is None: + return "" + if isinstance(raw, str): + return raw + if isinstance(raw, list): + text_parts: list[str] = [] + looks_like_content_parts = False + for item in raw: + if isinstance(item, str): + looks_like_content_parts = True + if item.strip(): + text_parts.append(item.strip()) + continue + if isinstance(item, dict): + item_type = str(item.get("type") or "").lower() + if item_type in {"reasoning", "thinking", "refusal"}: + looks_like_content_parts = True + continue + text_value = item.get("text") + content_value = item.get("content") + if item_type or isinstance(text_value, str) or isinstance(content_value, (str, list)): + looks_like_content_parts = True + if isinstance(text_value, str) and text_value.strip(): + text_parts.append(text_value.strip()) + elif isinstance(content_value, str) and content_value.strip(): + text_parts.append(content_value.strip()) + elif isinstance(content_value, list): + nested = AutoNovelGenerationWorkflow._coerce_llm_content_to_text(content_value) + if nested.strip(): + text_parts.append(nested.strip()) + continue + text_attr = getattr(item, "text", None) + if isinstance(text_attr, str): + looks_like_content_parts = True + if text_attr.strip(): + text_parts.append(text_attr.strip()) + if looks_like_content_parts: + return 
"\n".join(text_parts) + try: + import json + return json.dumps(raw, ensure_ascii=False) + except Exception: + return str(raw) + if isinstance(raw, dict): + try: + import json + return json.dumps(raw, ensure_ascii=False) + except Exception: + return str(raw) + return str(raw) + + def _parse_llm_json_payload(self, raw: Any) -> Tuple[Optional[Dict[str, Any]], List[str]]: + text = self._coerce_llm_content_to_text(raw) + try: + cleaned = strip_json_fences(text if isinstance(text, str) else str(text)) + outer = extract_outer_json_object(cleaned if isinstance(cleaned, str) else str(cleaned)) + repaired = repair_json(outer if isinstance(outer, str) else str(outer)) + parsed = json.loads(repaired) + except json.JSONDecodeError as e: + return None, [f"JSON 解析失败: {e}"] + except Exception as e: + return None, [ + "预处理失败: " + f"{e} (raw={type(raw).__name__}, text={type(locals().get('text')).__name__}, " + f"cleaned={type(locals().get('cleaned')).__name__}, outer={type(locals().get('outer')).__name__})" + ] + + if isinstance(parsed, dict): + return parsed, [] + if isinstance(parsed, list): + for item in parsed: + if isinstance(item, dict): + return item, ["根节点为列表,已自动取首个对象"] + return None, ["根节点为列表,但未包含对象"] + return None, [f"根节点类型不支持: {type(parsed).__name__}"] + + async def generate_chapter_strategy( + self, + novel_id: str, + chapter_number: int, + outline: str, + *, + scene_director: Optional[SceneDirectorAnalysis] = None, + style_profile_id: str = "", + scene_type: str = "", + target_word_count: Optional[int] = None, + word_tolerance_ratio: Optional[float] = None, + ) -> Dict[str, Any]: + bundle = self.prepare_chapter_generation( + novel_id, + chapter_number, + outline, + scene_director=scene_director, + max_tokens=12000, + ) + context = bundle["context"] + style_overlay = self._build_style_overlay(novel_id, style_profile_id, scene_type) + prompt = self._build_strategy_prompt( + context=context, + outline=outline, + storyline_context=bundle["storyline_context"], + 
plot_tension=bundle["plot_tension"], + style_summary=bundle["style_summary"], + style_overlay=style_overlay, + target_word_count=target_word_count, + word_tolerance_ratio=word_tolerance_ratio, + ) + data: Dict[str, Any] = {} + try: + result = await self.llm_service.generate( + prompt, + GenerationConfig(max_tokens=1200, temperature=0.35), + ) + parsed, errs = self._parse_llm_json_payload(result.content) + if parsed: + data = parsed + else: + logger.warning("chapter strategy JSON parse failed: %s", errs) + except Exception as e: + logger.warning("chapter strategy generation failed, fallback will be used: %s", e) + return self._normalize_strategy_payload( + data, + outline=outline, + target_word_count=target_word_count, + word_tolerance_ratio=word_tolerance_ratio, + ) + + async def review_generated_chapter_editorially( + self, + *, + novel_id: str, + chapter_number: int, + outline: str, + content: str, + chapter_strategy: Optional[Dict[str, Any]] = None, + ) -> Dict[str, Any]: + bundle = self.prepare_chapter_generation(novel_id, chapter_number, outline) + prompt = self._build_editorial_review_prompt( + context=bundle["context"], + outline=outline, + content=content, + chapter_strategy=chapter_strategy, + ) + data: Dict[str, Any] = {} + try: + result = await self.llm_service.generate( + prompt, + GenerationConfig(max_tokens=1800, temperature=0.35), + ) + parsed, errs = self._parse_llm_json_payload(result.content) + if parsed: + data = parsed + else: + logger.warning("editorial review JSON parse failed: %s", errs) + except Exception as e: + logger.warning("editorial review generation failed, fallback will be used: %s", e) + return self._normalize_editorial_review_payload(data) + + def _build_strategy_prompt( + self, + *, + context: str, + outline: str, + storyline_context: str = "", + plot_tension: str = "", + style_summary: str = "", + style_overlay: str = "", + target_word_count: Optional[int] = None, + word_tolerance_ratio: Optional[float] = None, + ) -> Prompt: + 
target = self._effective_word_target(target_word_count) + tolerance_ratio = self._resolve_word_tolerance_ratio(word_tolerance_ratio) + tolerance = max(80, int(target * tolerance_ratio)) + min_words = max(500, target - tolerance) + max_words = target + tolerance + system = """你是资深网文主编。你的任务不是直接写正文,而是先给作者一份本章写作策略。 + +请只输出 JSON,不要加解释,不要加 Markdown 代码块。 + +JSON 结构: +{ + "chapter_contract": { + "chapter_question": "本章读者最想知道的问题", + "protagonist_want": "主角最具体想拿到/确认/避免什么", + "opposition": "谁或什么阻碍他", + "reader_expectation": "读者期待看到的具体场面", + "required_information_change": "本章必须交付的信息变化", + "required_relationship_change": "本章必须发生的人物关系变化", + "ending_question": "章末留下的追问", + "show_dont_tell_rules": ["本章禁止直说的情绪/动机/解释,改用动作、停顿、物件、对白表现"] + }, + "dramatic_task": { + "goal": "角色这章最具体想拿到/确认/隐瞒什么", + "obstacle": "谁或什么阻碍他", + "reader_expectation": "读者这一章最期待看到什么兑现", + "ending_hook": "章末要留下什么追读钩子" + }, + "scene_plan": [ + { + "label": "场景标题", + "task": "这个场景的任务", + "resistance": "阻力", + "info_shift": "新信息或局势变化", + "relationship_shift": "人物关系变化,没有就写无明显变化", + "anchor": "一个具体物件/动作/地点锚点", + "visible_action": "必须出现的具体动作", + "subtext_dialogue": "对白表面内容和真实意图", + "unspoken_emotion": "不能直说的情绪", + "object_or_clue_change": "道具或线索状态变化", + "hook": "场景结尾钩子", + "target_words": 800 + } + ], + "writing_focus": ["3-4 条执行提醒"] +} + +硬性要求: +1. scene_plan 只能有 2-4 段。 +2. 每段都必须推动故事,不准只有解释。 +3. 写法优先级:开头钩子、冲突推进、人物选择、结尾追读。 +4. target_words 总和尽量接近目标字数。 +5. 所有字段必须填中文字符串,target_words 为整数。 +6. 展示优先:少解释,多展示;少总结,多动作和细节;少金句,多具体反应。 +7. 不要直接写“复杂情绪”,必须要求正文通过动作、停顿、回避、物件处理来表现。 +8. 
对话不要每句都完整、礼貌、逻辑闭环;允许打断、反问、避重就轻。""" + user = ( + f"目标字数:约 {target} 字(容差 {min_words}-{max_words})\n\n" + f"【故事线】\n{storyline_context or '(无)'}\n\n" + f"【情节张力】\n{plot_tension or '(无)'}\n\n" + f"【风格约束】\n{style_summary or '(无)'}\n\n" + f"{style_overlay}\n\n" + f"【上下文】\n{context}\n\n" + f"【本章大纲】\n{outline}\n\n" + "请生成本章写作策略 JSON:" + ) + return Prompt(system=system, user=user) + + def _build_editorial_review_prompt( + self, + *, + context: str, + outline: str, + content: str, + chapter_strategy: Optional[Dict[str, Any]] = None, + ) -> Prompt: + strategy_overlay = self._build_strategy_overlay(chapter_strategy) + system = """你是网络小说主编,负责章后审稿。重点不是检查 AI 味,而是判断这章是否好看、是否能让人继续追读。 + +只输出 JSON,不要加解释,不要加 Markdown 代码块。 + +JSON 结构: +{ + "summary": "一句话总结这章的阅读效果", + "scores": { + "opening": 0-100, + "conflict": 0-100, + "character": 0-100, + "dialogue": 0-100, + "hook": 0-100, + "pacing": 0-100, + "showing": 0-100 + }, + "strengths": ["2-4 条亮点"], + "problems": ["2-4 条最关键问题"], + "actions": ["2-4 条可执行修改建议"], + "verdict": "保留 / 可优化后使用 / 建议重写" +} + +评分标准: +- opening:开头是否迅速进入具体情境 +- conflict:冲突和阻力是否真实推进 +- character:人物欲望、选择、代价是否成立 +- dialogue:对白是否有潜台词和信息变化 +- hook:章末是否形成追读点 +- pacing:场景切换、轻重缓急是否合适 +- showing:是否少解释、多展示;情绪是否通过动作/细节/潜台词表现;对白是否避免完整礼貌闭环 + +展示优先专项检查: +- 扣解释句过密、总结句替代场景、直接命名情绪。 +- 扣客服式完整对白和段尾金句。 +- 修改动作必须说明如何把解释改成动作或潜台词。""" + user = ( + f"【上下文】\n{context}\n\n" + f"【本章大纲】\n{outline}\n\n" + f"{strategy_overlay}\n\n" + f"【本章正文】\n{content}\n\n" + "请给出主编审稿 JSON:" + ) + return Prompt(system=system, user=user) + + @staticmethod + def _clean_text(value: Any, fallback: str) -> str: + text = str(value or "").strip() + return text or fallback + + @staticmethod + def _clean_text_list(value: Any, fallback: List[str], *, limit: int = 4) -> List[str]: + if isinstance(value, list): + cleaned = [str(item).strip() for item in value if str(item).strip()] + if cleaned: + return cleaned[:limit] + return fallback[:limit] + + @staticmethod + def _normalize_strategy_payload( + data: Dict[str, Any], + *, + 
outline: str, + target_word_count: Optional[int] = None, + word_tolerance_ratio: Optional[float] = None, + ) -> Dict[str, Any]: + dramatic = data.get("dramatic_task") if isinstance(data.get("dramatic_task"), dict) else {} + target = AutoNovelGenerationWorkflow._effective_word_target(target_word_count) + tolerance_ratio = AutoNovelGenerationWorkflow._resolve_word_tolerance_ratio(word_tolerance_ratio) + min_scene_words = max(400, int(target * max(0.18, 0.22 - tolerance_ratio * 0.2))) + default_scene_words = max(600, int(target / 3)) + raw_contract = data.get("chapter_contract") if isinstance(data.get("chapter_contract"), dict) else {} + chapter_contract = { + "chapter_question": AutoNovelGenerationWorkflow._clean_text( + raw_contract.get("chapter_question"), + "本章的关键问题必须在具体行动中被推进。", + ), + "protagonist_want": AutoNovelGenerationWorkflow._clean_text( + raw_contract.get("protagonist_want"), + dramatic.get("goal") or outline[:36] or "主角要确认一条关键线索。", + ), + "opposition": AutoNovelGenerationWorkflow._clean_text( + raw_contract.get("opposition"), + dramatic.get("obstacle") or "有人或流程阻碍主角。", + ), + "reader_expectation": AutoNovelGenerationWorkflow._clean_text( + raw_contract.get("reader_expectation"), + dramatic.get("reader_expectation") or "读者要看到冲突推进,而不是解释背景。", + ), + "required_information_change": AutoNovelGenerationWorkflow._clean_text( + raw_contract.get("required_information_change"), + "至少交付一条会改变判断的新信息。", + ), + "required_relationship_change": AutoNovelGenerationWorkflow._clean_text( + raw_contract.get("required_relationship_change"), + "至少让主要人物的立场或信任关系发生细微变化。", + ), + "ending_question": AutoNovelGenerationWorkflow._clean_text( + raw_contract.get("ending_question"), + dramatic.get("ending_hook") or "章末留下新的追问。", + ), + "show_dont_tell_rules": AutoNovelGenerationWorkflow._clean_text_list( + raw_contract.get("show_dont_tell_rules"), + [ + "不能直接命名复杂情绪,必须写动作、停顿、回避或身体反应。", + "不能用总结句跳过冲突过程,必须让读者看到试探和阻力。", + "对白不能每句都完整礼貌,允许打断、反问、答非所问。", + ], + limit=5, + ), + } + raw_scenes = 
data.get("scene_plan") if isinstance(data.get("scene_plan"), list) else [] + scenes: List[Dict[str, Any]] = [] + for index, item in enumerate(raw_scenes[:4], start=1): + if not isinstance(item, dict): + continue + try: + scene_target = int(item.get("target_words") or default_scene_words) + except (TypeError, ValueError): + scene_target = default_scene_words + scenes.append({ + "label": str(item.get("label") or f"场景 {index}").strip() or f"场景 {index}", + "task": str(item.get("task") or "推进当前矛盾").strip() or "推进当前矛盾", + "resistance": str(item.get("resistance") or "出现具体阻力").strip() or "出现具体阻力", + "info_shift": str(item.get("info_shift") or "读者对局势理解发生变化").strip() or "读者对局势理解发生变化", + "relationship_shift": str(item.get("relationship_shift") or "无明显变化").strip() or "无明显变化", + "anchor": str(item.get("anchor") or "一个具体物件或动作").strip() or "一个具体物件或动作", + "visible_action": AutoNovelGenerationWorkflow._clean_text( + item.get("visible_action"), + str(item.get("anchor") or "用一个具体动作承载情绪和信息。"), + ), + "subtext_dialogue": AutoNovelGenerationWorkflow._clean_text( + item.get("subtext_dialogue"), + "对白表面推进事实,底层保留试探、遮掩或误判。", + ), + "unspoken_emotion": AutoNovelGenerationWorkflow._clean_text( + item.get("unspoken_emotion"), + "不要直接命名情绪,用动作和反应表现。", + ), + "object_or_clue_change": AutoNovelGenerationWorkflow._clean_text( + item.get("object_or_clue_change"), + "本场景至少让一个线索、道具或判断发生变化。", + ), + "hook": str(item.get("hook") or "留下下一步动作或异常细节").strip() or "留下下一步动作或异常细节", + "target_words": max(min_scene_words, min(1800, scene_target)), + }) + if len(scenes) < 2: + fallback_scenes = [ + { + "label": "开场推进", + "task": "尽快把角色送进具体局面", + "resistance": "先给一个小阻力或误判", + "info_shift": "让读者知道本章核心问题", + "relationship_shift": "主要人物态度出现偏差", + "anchor": "现场动作或道具变化", + "visible_action": "角色用一个可见动作进入冲突,而不是先解释心情。", + "subtext_dialogue": "对白表面确认事实,底层互相试探。", + "unspoken_emotion": "紧张、怀疑或不安不能被直接命名。", + "object_or_clue_change": "一个现场线索从背景物变成问题核心。", + "hook": "把问题推向下一场景", + "target_words": default_scene_words, + }, + { + 
"label": "兑现与钩子", + "task": "兑现一部分预期,同时抬高代价", + "resistance": "阻力升级或旧问题反扑", + "info_shift": "补一条改变判断的新信息", + "relationship_shift": "人物关系留下一道裂口", + "anchor": "证据、表情、动作或记录", + "visible_action": "角色必须做出选择或处理证据,留下可见后果。", + "subtext_dialogue": "对白不把动机说透,保留遮掩和反问。", + "unspoken_emotion": "代价和犹豫用停顿、回避或动作表现。", + "object_or_clue_change": "证据、道具或判断在章末改变状态。", + "hook": "章末留追读点", + "target_words": default_scene_words, + }, + ] + scenes.extend(fallback_scenes[len(scenes):]) + focus = data.get("writing_focus") if isinstance(data.get("writing_focus"), list) else [] + normalized_focus = [str(item).strip() for item in focus if str(item).strip()] + if not normalized_focus: + normalized_focus = [ + "开头直接进入具体动作或异常细节,不先解释背景。", + "每个场景都要让人物做选择,不只接受信息。", + "对白里保留试探和遮掩,不把动机一次说完。", + ] + return { + "chapter_contract": chapter_contract, + "dramatic_task": { + "goal": str(dramatic.get("goal") or outline[:24] or "拿到或确认一条关键线索").strip() or "拿到或确认一条关键线索", + "obstacle": str(dramatic.get("obstacle") or "有人、时间或局势阻碍角色").strip() or "有人、时间或局势阻碍角色", + "reader_expectation": str(dramatic.get("reader_expectation") or "看到问题被推进,而不是原地解释").strip() or "看到问题被推进,而不是原地解释", + "ending_hook": str(dramatic.get("ending_hook") or "章末留下新的疑点、代价或关系裂口").strip() or "章末留下新的疑点、代价或关系裂口", + }, + "scene_plan": scenes, + "writing_focus": normalized_focus[:4], + } + + @staticmethod + def _normalize_editorial_review_payload(data: Dict[str, Any]) -> Dict[str, Any]: + scores = data.get("scores") if isinstance(data.get("scores"), dict) else {} + def score_of(key: str) -> int: + try: + value = int(round(float(scores.get(key, 0)))) + except (TypeError, ValueError): + value = 0 + return max(0, min(100, value)) + verdict = str(data.get("verdict") or "可优化后使用").strip() or "可优化后使用" + def text_list(key: str, fallback: List[str]) -> List[str]: + raw = data.get(key) + if isinstance(raw, list): + cleaned = [str(item).strip() for item in raw if str(item).strip()] + if cleaned: + return cleaned[:4] + return fallback + return { + "summary": 
str(data.get("summary") or "本章完成了推进,但仍有可继续压实的空间。").strip() or "本章完成了推进,但仍有可继续压实的空间。", + "scores": { + "opening": score_of("opening"), + "conflict": score_of("conflict"), + "character": score_of("character"), + "dialogue": score_of("dialogue"), + "hook": score_of("hook"), + "pacing": score_of("pacing"), + "showing": score_of("showing"), + }, + "strengths": text_list("strengths", ["至少有一处具体场景成立,能支撑继续加工。"]), + "problems": text_list("problems", ["仍需检查冲突升级和章末钩子是否足够明确。"]), + "actions": text_list("actions", ["优先补强最弱一场戏的阻力和信息变化。"]), + "verdict": verdict, + } + + def _build_prop_ledger_overlay(self) -> str: + if not self.prop_ledger_service or not self._current_novel_id: + return "" + try: + overview = self.prop_ledger_service.get_overview(self._current_novel_id) + except Exception as e: + logger.warning("prop ledger overlay unavailable: %s", e) + return "" + items = overview.get("items") or [] + if not items: + return "" + lines = [ + "【道具账本(必须保持一致)】", + "写到相关道具时,必须遵守当前持有人、位置、状态;未在本章合理交代前,不得凭空改变去向或用途。", + ] + for item in items[:12]: + chapter = item.get("last_seen_chapter") or item.get("first_seen_chapter") or "未登记" + lines.append( + "- " + f"{item.get('name') or '未命名'}" + f"|状态:{item.get('status') or '未记录'}" + f"|持有人:{item.get('current_holder') or '未记录'}" + f"|位置:{item.get('current_location') or '未记录'}" + f"|最近:第{chapter}章" + ) + return "\n".join(lines) + + def _build_coc_canon_overlay(self) -> str: + if not self.coc_canon_service or not self._current_novel_id: + self._current_coc_canon_overlay = "" + self._current_coc_absolute_titles = [] + return "" + try: + overlay = self.coc_canon_service.build_overlay(self._current_novel_id) + except Exception as e: + logger.warning("coc canon overlay unavailable: %s", e) + self._current_coc_canon_overlay = "" + self._current_coc_absolute_titles = [] + return "" + + prompt = "" + if isinstance(overlay, dict): + prompt = str(overlay.get("prompt") or "") + elif isinstance(overlay, str): + prompt = overlay + else: + prompt = 
str(getattr(overlay, "prompt", "") or "") + + self._current_coc_canon_overlay = prompt.strip() + self._current_coc_absolute_titles = self._extract_coc_absolute_titles(overlay, prompt) + return self._current_coc_canon_overlay + + def _build_coc_clue_overlay(self) -> str: + if not self.coc_clue_service or not self._current_novel_id: + self._current_coc_clue_overlay = "" + self._current_coc_author_only_clue_keys = [] + return "" + try: + overlay = self.coc_clue_service.build_overlay(self._current_novel_id) + except Exception as e: + logger.warning("coc clue overlay unavailable: %s", e) + self._current_coc_clue_overlay = "" + self._current_coc_author_only_clue_keys = [] + return "" + + prompt = "" + if isinstance(overlay, dict): + prompt = str(overlay.get("prompt") or "") + elif isinstance(overlay, str): + prompt = overlay + else: + prompt = str(getattr(overlay, "prompt", "") or "") + + self._current_coc_clue_overlay = prompt.strip() + self._current_coc_author_only_clue_keys = self._extract_coc_author_only_clue_keys(overlay, prompt) + return self._current_coc_clue_overlay + + def _build_coc_cognition_overlay(self) -> str: + if not self._current_novel_id: + self._current_coc_cognition_overlay = "" + self._current_coc_author_truth_snippets = [] + return "" + canon_layers: dict[str, Any] = {} + clue_layers: dict[str, Any] = {} + if self.coc_canon_service: + try: + layers = self.coc_canon_service.get_cognition_layers(self._current_novel_id) or {} + canon_layers = layers if isinstance(layers, dict) else {} + except Exception as e: + logger.warning("coc cognition (canon) unavailable: %s", e) + if self.coc_clue_service: + try: + layers = self.coc_clue_service.get_cognition_layers(self._current_novel_id) or {} + clue_layers = layers if isinstance(layers, dict) else {} + except Exception as e: + logger.warning("coc cognition (clue) unavailable: %s", e) + + def _merge_lines(*groups: Any) -> list[str]: + merged: list[str] = [] + for group in groups: + if not isinstance(group, 
(list, tuple)): + continue + for line in group: + text = str(line or "").strip() + if text and text not in merged: + merged.append(text) + return merged + + reader_known = _merge_lines( + canon_layers.get("reader_known") or [], + clue_layers.get("reader_known") or [], + ) + character_known = _merge_lines(clue_layers.get("character_known") or []) + author_truth = _merge_lines( + canon_layers.get("author_truth") or [], + clue_layers.get("author_truth") or [], + ) + self._current_coc_author_truth_snippets = self._extract_coc_author_truth_snippets(canon_layers, author_truth) + + if not (reader_known or character_known or author_truth): + self._current_coc_cognition_overlay = "" + return "" + + lines = [ + "【CoC认知边界(三层)】", + "1) 读者已知:可直接写进正文;", + "2) 角色已知:仅能通过角色视角与行动逐步呈现;", + "3) 作者真相:禁止直接明说,只能以伏笔/误导/侧写方式间接处理。", + ] + if reader_known: + lines.append("【读者已知】") + for line in reader_known[:10]: + lines.append(f"- {line}") + if character_known: + lines.append("【角色已知】") + for line in character_known[:10]: + lines.append(f"- {line}") + if author_truth: + lines.append("【作者真相(禁直出)】") + for line in author_truth[:10]: + lines.append(f"- {line}") + self._current_coc_cognition_overlay = "\n".join(lines) + return self._current_coc_cognition_overlay + + @staticmethod + def _extract_coc_absolute_titles(overlay: Any, prompt: str) -> list[str]: + titles: list[str] = [] + + def _append_title(value: Any) -> None: + text = str(value or "").strip() + if text and text not in titles: + titles.append(text) + + container = None + if isinstance(overlay, dict): + for key in ("entries", "items", "rules", "canon_items", "constraints"): + if isinstance(overlay.get(key), list): + container = overlay.get(key) + break + else: + for key in ("entries", "items", "rules", "canon_items", "constraints"): + value = getattr(overlay, key, None) + if isinstance(value, list): + container = value + break + + if isinstance(container, list): + for item in container: + if isinstance(item, dict): + title = 
item.get("title") or item.get("name") or item.get("label") or item.get("key") + marker = ( + item.get("marker") + or item.get("level") + or item.get("constraint_level") + or item.get("kind") + or item.get("scope") + or item.get("type") + ) + is_absolute = bool(item.get("absolute") is True) + else: + title = ( + getattr(item, "title", None) + or getattr(item, "name", None) + or getattr(item, "label", None) + or getattr(item, "key", None) + ) + marker = ( + getattr(item, "marker", None) + or getattr(item, "level", None) + or getattr(item, "constraint_level", None) + or getattr(item, "kind", None) + or getattr(item, "scope", None) + or getattr(item, "type", None) + ) + is_absolute = bool(getattr(item, "absolute", False) is True) + + marker_text = str(marker or "").strip().lower() + if not is_absolute and marker_text: + is_absolute = ("absolute" in marker_text) or ("绝对" in marker_text) + if is_absolute: + _append_title(title) + + for pattern in ( + r"(?:^|\n)\s*[-*]\s*(?:\[)?(?:absolute|绝对)(?:\])?\s*[::\-]\s*([^\n]+)", + r"(?:^|\n)\s*[-*]\s*([^\n((]+?)\s*[((]\s*(?:absolute|绝对)\s*[))]", + r"(?:^|\n)\s*[-*]\s*(?:\[[^\]]+\]\s*)?([^\n((]+?)\s*[((]\s*锁定\s*[::]\s*(?:absolute|绝对)\s*[))]", + ): + for match in re.findall(pattern, prompt or "", flags=re.IGNORECASE): + _append_title(match.split("|", 1)[0].split("|", 1)[0].strip()) + + return titles + + @staticmethod + def _extract_coc_author_only_clue_keys(overlay: Any, prompt: str) -> list[str]: + keys: list[str] = [] + + def _append_key(value: Any) -> None: + text = str(value or "").strip() + if text and text not in keys: + keys.append(text) + + container = None + if isinstance(overlay, dict): + for name in ("clues", "entries", "items", "rules", "constraints"): + if isinstance(overlay.get(name), list): + container = overlay.get(name) + break + else: + for name in ("clues", "entries", "items", "rules", "constraints"): + value = getattr(overlay, name, None) + if isinstance(value, list): + container = value + break + + if 
isinstance(container, list): + for item in container: + if isinstance(item, dict): + visibility = str(item.get("visibility") or "").strip().lower() + clue_key = item.get("clue_key") or item.get("key") or item.get("title") or item.get("name") + else: + visibility = str(getattr(item, "visibility", "") or "").strip().lower() + clue_key = ( + getattr(item, "clue_key", None) + or getattr(item, "key", None) + or getattr(item, "title", None) + or getattr(item, "name", None) + ) + if visibility == "author_only": + _append_key(clue_key) + + for pattern in ( + r"(?:^|\n)\s*[-*]\s*(?:\[[^\]]+\]\s*)?(?:clue_key|线索键)\s*[::=]\s*([^\s||\n]+)[^\n]*(?:visibility|可见性)\s*[::=]\s*author_only", + r"(?:^|\n)\s*[-*]\s*(?:\[[^\]]*author_only[^\]]*\]\s*)?([^\s||\n]+)[^\n]*(?:author_only)", + ): + for match in re.findall(pattern, prompt or "", flags=re.IGNORECASE): + _append_key(match.strip()) + + return keys + + def _detect_coc_canon_conflicts(self, content: str) -> list[str]: + text = str(content or "") + if not text or not self._current_coc_absolute_titles: + return [] + warnings: list[str] = [] + rewrite_markers = ("并非", "其实是", "原来是") + for title in self._current_coc_absolute_titles: + title_text = str(title or "").strip() + if not title_text or title_text not in text: + continue + pattern = ( + rf"(?:{re.escape(title_text)}[\s\S]{{0,24}}(?:并非|其实是|原来是))" + rf"|(?:(?:并非|其实是|原来是)[\s\S]{{0,24}}{re.escape(title_text)})" + ) + if re.search(pattern, text): + warnings.append( + f"CoC正典疑似冲突:绝对条目「{title_text}」附近出现改写词({ '/'.join(rewrite_markers) }),请复核是否越界。" + ) + return warnings + + def _detect_coc_clue_conflicts(self, content: str) -> list[str]: + text = str(content or "") + if not text or not self._current_coc_author_only_clue_keys: + return [] + warnings: list[str] = [] + for clue_key in self._current_coc_author_only_clue_keys: + key_text = str(clue_key or "").strip() + if key_text and key_text in text: + warnings.append( + f"CoC线索疑似越级:author_only 线索「{key_text}」出现在正文,请复核是否泄露。" + ) + return 
warnings + + @staticmethod + def _extract_coc_author_truth_snippets(canon_layers: dict[str, Any], author_truth_lines: list[str]) -> list[str]: + snippets: list[str] = [] + for raw in canon_layers.get("author_truth_snippets") or []: + text = str(raw or "").strip() + if len(text) >= 8 and text not in snippets: + snippets.append(text[:80]) + for line in author_truth_lines: + text = str(line or "") + if ":" in text: + text = text.split(":", 1)[1] + text = text.strip() + if len(text) >= 12 and text not in snippets: + snippets.append(text[:80]) + return snippets[:50] + + def _detect_coc_author_truth_leaks(self, content: str) -> list[str]: + text = str(content or "") + if not text or not self._current_coc_author_truth_snippets: + return [] + warnings: list[str] = [] + for snippet in self._current_coc_author_truth_snippets: + if snippet and snippet in text: + warnings.append( + f"CoC作者真相疑似直出:正文出现作者层片段「{snippet[:30]}...」,建议改为伏笔或错位信息。" + ) + return warnings + + @staticmethod + def _ensure_generation_start_suffix(user_message: str) -> str: + """给可视配置渲染出的 user prompt 补上统一的生成起笔标记。""" + text = (user_message or "").rstrip() + if text.endswith("开始撰写:") or text.endswith("开始撰写:"): + return text + return f"{text}\n\n开始撰写:" + + @staticmethod + def _render_visible_workflow_prompt(variables: Dict[str, Any]) -> Optional[Dict[str, str]]: + """读取提示词广场中的工作流章节生成配置;不可用时返回 None 走内置兜底。""" + try: + from infrastructure.ai.prompt_manager import get_prompt_manager + + manager = get_prompt_manager() + manager.ensure_seeded() + rendered = manager.render("workflow-chapter-generation", variables) + except Exception as e: + logger.warning("workflow prompt config unavailable, using built-in fallback: %s", e) + return None + + if not rendered: + return None + system = (rendered.get("system") or "").strip() + user = (rendered.get("user") or "").strip() + if not system or not user: + return None + return {"system": system, "user": user} + async def _extract_chapter_state(self, content: str, chapter_number: 
int) -> ChapterState: """从生成的内容中提取章节状态 diff --git a/application/world/services/knowledge_service.py b/application/world/services/knowledge_service.py index 5a850be34..e608a8d77 100644 --- a/application/world/services/knowledge_service.py +++ b/application/world/services/knowledge_service.py @@ -16,13 +16,14 @@ class KnowledgeService: 处理知识图谱的业务逻辑 """ - def __init__(self, knowledge_repository: KnowledgeRepository): + def __init__(self, knowledge_repository: KnowledgeRepository, primary_memory_service: Any = None): """初始化服务 Args: knowledge_repository: 知识仓储 """ self.knowledge_repository = knowledge_repository + self.primary_memory_service = primary_memory_service def get_knowledge(self, novel_id: str) -> StoryKnowledge: """获取知识图谱 @@ -36,6 +37,11 @@ def get_knowledge(self, novel_id: str) -> StoryKnowledge: Raises: EntityNotFoundError: 如果知识图谱不存在 """ + primary_knowledge = self._load_primary_memory(novel_id) + if primary_knowledge is not None: + self._cache_primary_memory(primary_knowledge) + return primary_knowledge + knowledge = self.knowledge_repository.get_by_novel_id(novel_id) if knowledge is None: # 返回空的知识图谱而不是抛出异常,保持向后兼容 @@ -43,6 +49,25 @@ def get_knowledge(self, novel_id: str) -> StoryKnowledge: return StoryKnowledge(novel_id=novel_id) return knowledge + def _load_primary_memory(self, novel_id: str) -> Optional[StoryKnowledge]: + if self.primary_memory_service is None: + return None + try: + knowledge = self.primary_memory_service.load_knowledge(novel_id) + if knowledge and (knowledge.chapters or knowledge.facts or knowledge.premise_lock): + return knowledge + except Exception as exc: + logger.warning("读取 Obsidian 主记忆失败 novel=%s: %s", novel_id, exc) + return None + + def _cache_primary_memory(self, knowledge: StoryKnowledge) -> None: + try: + save = getattr(self.knowledge_repository, "save", None) + if callable(save): + save(knowledge) + except Exception as exc: + logger.warning("同步 Obsidian 主记忆到 PP 缓存失败 novel=%s: %s", knowledge.novel_id, exc) + def 
update_knowledge(self, novel_id: str, data: Dict[str, Any]) -> StoryKnowledge: """更新知识图谱 diff --git a/application/world/services/obsidian_memory_service.py b/application/world/services/obsidian_memory_service.py new file mode 100644 index 000000000..3796e50d4 --- /dev/null +++ b/application/world/services/obsidian_memory_service.py @@ -0,0 +1,424 @@ +"""Obsidian long-term memory bridge for PlotPilot Knowledge. + +PlotPilot still writes through the existing chapter/Knowledge pipeline. This +service exports PP cache into Markdown and can read supported notes back as the +primary long-term memory source. +""" +from __future__ import annotations + +import os +import re +import shutil +import sys +from datetime import datetime +from pathlib import Path +from typing import Any, Dict, List, Optional + +from application.paths import DATA_DIR +from domain.knowledge.chapter_summary import ChapterSummary +from domain.knowledge.knowledge_triple import KnowledgeTriple +from domain.knowledge.story_knowledge import StoryKnowledge + + +OBSIDIAN_VAULT_ENV = "PLOTPILOT_OBSIDIAN_VAULT" + + +def resolve_obsidian_vault_path() -> Path: + raw = os.getenv(OBSIDIAN_VAULT_ENV, "").strip() + if raw: + return Path(raw).expanduser().resolve() + return DATA_DIR / "obsidian-vault" + + +def _safe_segment(value: str) -> str: + text = re.sub(r"[\\/:*?\"<>|#^\[\]]+", "-", str(value or "").strip()) + text = re.sub(r"\s+", "-", text).strip(".-") + return text or "untitled" + + +def _frontmatter(data: Dict[str, Any]) -> str: + lines = ["---"] + for key, value in data.items(): + if isinstance(value, list): + rendered = "[" + ", ".join(str(item) for item in value) + "]" + else: + rendered = str(value).replace("\n", " ") + lines.append(f"{key}: {rendered}") + lines.append("---") + return "\n".join(lines) + + +def _table_cell(value: Any) -> str: + return str(value or "").replace("|", "\\|").replace("\n", " ").strip() + + +def _parse_table_cell(value: str) -> str: + return str(value or "").replace("\\|", 
"|").strip() + + +def _parse_tags(value: str) -> List[str]: + return [item.strip() for item in str(value or "").split(",") if item.strip()] + + +def _parse_chapter_number(value: str) -> Optional[int]: + raw = str(value or "").strip() + match = re.search(r"\d+", raw) + if not match: + return None + try: + return int(match.group(0)) + except ValueError: + return None + + +def _extract_section(markdown: str, title: str) -> str: + pattern = re.compile( + rf"^##\s+{re.escape(title)}\s*$\n(?P.*?)(?=^##\s+|\Z)", + re.MULTILINE | re.DOTALL, + ) + match = pattern.search(markdown) + if not match: + return "" + return match.group("body").strip() + + +def _extract_frontmatter_value(markdown: str, key: str) -> str: + if not markdown.startswith("---"): + return "" + end = markdown.find("\n---", 3) + if end < 0: + return "" + frontmatter = markdown[3:end] + for line in frontmatter.splitlines(): + if line.strip().startswith(f"{key}:"): + return line.split(":", 1)[1].strip() + return "" + + +class ObsidianMemoryService: + """Exports PP cache to Obsidian and reads supported notes back as memory.""" + + def __init__(self, vault_root: Optional[Path], knowledge_service: Any): + self.vault_root = Path(vault_root or resolve_obsidian_vault_path()).expanduser() + self.knowledge_service = knowledge_service + + def sync_chapter(self, novel_id: str, chapter_number: int) -> Dict[str, Any]: + knowledge = self.knowledge_service.get_knowledge(novel_id) + chapter = None + for item in getattr(knowledge, "chapters", []) or []: + if int(getattr(item, "chapter_id", 0) or 0) == int(chapter_number): + chapter = item + break + + if chapter is None: + return { + "synced": False, + "reason": "chapter summary not found", + "vault_path": str(self.vault_root), + } + + novel_dir = self.vault_root / _safe_segment(novel_id) + (novel_dir / "02_Chapters").mkdir(parents=True, exist_ok=True) + (novel_dir / "03_Entities").mkdir(parents=True, exist_ok=True) + (novel_dir / "04_Timelines").mkdir(parents=True, 
exist_ok=True) + + self._write_index(novel_dir, novel_id, knowledge) + self._write_fact_locks(novel_dir, knowledge) + self._write_timeline(novel_dir, knowledge) + self._write_relationship_graph(novel_dir, knowledge) + chapter_path = self._write_chapter_note(novel_dir, novel_id, chapter) + + return { + "synced": True, + "vault_path": str(self.vault_root), + "chapter_note": str(chapter_path), + "fact_count": len(getattr(knowledge, "facts", []) or []), + } + + def load_knowledge(self, novel_id: str) -> Optional[StoryKnowledge]: + """Read Obsidian vault notes back as the long-term memory source.""" + novel_dir = self.vault_root / _safe_segment(novel_id) + if not novel_dir.exists(): + return None + + premise_lock, facts = self._read_fact_locks(novel_dir / "01_Fact_Locks.md") + chapters = self._read_chapter_notes(novel_dir / "02_Chapters") + if not premise_lock and not facts and not chapters: + return None + + return StoryKnowledge( + novel_id=novel_id, + premise_lock=premise_lock, + chapters=chapters, + facts=facts, + ) + + def has_memory(self, novel_id: str) -> bool: + return self.load_knowledge(novel_id) is not None + + def get_relationship_graph_path(self, novel_id: str) -> Path: + return self.vault_root / _safe_segment(novel_id) / "03_Entities" / "Character_Relationships.md" + + def is_vault_configured(self) -> bool: + return bool(os.getenv(OBSIDIAN_VAULT_ENV, "").strip()) + + def is_obsidian_installed(self) -> bool: + if shutil.which("obsidian"): + return True + if sys.platform == "darwin": + candidates = [ + Path("/Applications/Obsidian.app"), + Path.home() / "Applications" / "Obsidian.app", + ] + return any(path.exists() for path in candidates) + if sys.platform.startswith("win"): + candidates = [ + Path(os.getenv("LOCALAPPDATA", "")) / "Obsidian" / "Obsidian.exe", + Path(os.getenv("PROGRAMFILES", "")) / "Obsidian" / "Obsidian.exe", + ] + return any(path.exists() for path in candidates) + return any( + Path(path).exists() + for path in ( + "/usr/bin/obsidian", + 
"/usr/local/bin/obsidian", + "/snap/bin/obsidian", + "/var/lib/flatpak/exports/bin/md.obsidian.Obsidian", + ) + ) + + def _write_index(self, novel_dir: Path, novel_id: str, knowledge: Any) -> None: + chapters = sorted(getattr(knowledge, "chapters", []) or [], key=lambda item: item.chapter_id) + lines = [ + _frontmatter( + { + "type": "plotpilot-long-term-memory-index", + "novel_id": novel_id, + "updated_at": datetime.utcnow().isoformat(), + } + ), + "", + f"# PlotPilot 长期记忆:{novel_id}", + "", + "## 入口", + "- [[01_Fact_Locks|事实锁 / 长期设定]]", + "- [[04_Timelines/Timeline|时间线]]", + "", + "## 章节记忆", + ] + for chapter in chapters: + number = int(getattr(chapter, "chapter_id", 0) or 0) + lines.append(f"- [[02_Chapters/Chapter_{number:04d}|第 {number} 章]]") + self._write_text(novel_dir / "00_Index.md", "\n".join(lines) + "\n") + + def _write_fact_locks(self, novel_dir: Path, knowledge: Any) -> None: + facts = sorted(getattr(knowledge, "facts", []) or [], key=lambda item: item.id) + lines = [ + _frontmatter( + { + "type": "plotpilot-fact-locks", + "updated_at": datetime.utcnow().isoformat(), + "fact_count": len(facts), + } + ), + "", + "# 事实锁 / 长期设定", + "", + "## 全书基调", + str(getattr(knowledge, "premise_lock", "") or "(暂无)"), + "", + "## 知识三元组", + "| 主体 | 关系 | 客体 | 章节 | 备注 | 标签 |", + "| --- | --- | --- | --- | --- | --- |", + ] + for fact in facts: + lines.append( + "| {subject} | {predicate} | {object} | {chapter} | {note} | {tags} |".format( + subject=_table_cell(getattr(fact, "subject", "")), + predicate=_table_cell(getattr(fact, "predicate", "")), + object=_table_cell(getattr(fact, "object", "")), + chapter=_table_cell(getattr(fact, "chapter_id", "") or getattr(fact, "first_appearance", "")), + note=_table_cell(getattr(fact, "note", "") or getattr(fact, "description", "")), + tags=_table_cell(", ".join(getattr(fact, "tags", []) or [])), + ) + ) + self._write_text(novel_dir / "01_Fact_Locks.md", "\n".join(lines) + "\n") + + def _read_fact_locks(self, path: Path) -> 
tuple[str, List[KnowledgeTriple]]: + if not path.exists(): + return "", [] + text = path.read_text(encoding="utf-8") + premise_lock = _extract_section(text, "全书基调").strip() + facts: List[KnowledgeTriple] = [] + for line in text.splitlines(): + if not line.startswith("|"): + continue + if "---" in line or "主体" in line: + continue + cells = [_parse_table_cell(part) for part in line.strip().strip("|").split("|")] + if len(cells) < 3: + continue + subject, predicate, obj = cells[:3] + if not subject or not predicate or not obj: + continue + chapter_id = _parse_chapter_number(cells[3] if len(cells) > 3 else "") + note = cells[4] if len(cells) > 4 else "" + tags = _parse_tags(cells[5] if len(cells) > 5 else "") + facts.append( + KnowledgeTriple( + id=f"obsidian-{len(facts) + 1:04d}-{_safe_segment(subject)}-{_safe_segment(predicate)}-{_safe_segment(obj)}", + subject=subject, + predicate=predicate, + object=obj, + chapter_id=chapter_id, + note=note, + tags=tags, + source_type="obsidian_primary", + ) + ) + return premise_lock, facts + + def _write_timeline(self, novel_dir: Path, knowledge: Any) -> None: + chapters = sorted(getattr(knowledge, "chapters", []) or [], key=lambda item: item.chapter_id) + lines = [ + _frontmatter( + { + "type": "plotpilot-timeline-memory", + "updated_at": datetime.utcnow().isoformat(), + "chapter_count": len(chapters), + } + ), + "", + "# 时间线", + "", + "| 章节 | 关键事件 | 未解问题 |", + "| --- | --- | --- |", + ] + for chapter in chapters: + number = int(getattr(chapter, "chapter_id", 0) or 0) + lines.append( + "| 第 {number} 章 | {events} | {threads} |".format( + number=number, + events=_table_cell(getattr(chapter, "key_events", "") or getattr(chapter, "summary", "")), + threads=_table_cell(getattr(chapter, "open_threads", "")), + ) + ) + self._write_text(novel_dir / "04_Timelines" / "Timeline.md", "\n".join(lines) + "\n") + + def _write_relationship_graph(self, novel_dir: Path, knowledge: Any) -> None: + facts = getattr(knowledge, "facts", []) or [] + 
lines = [ + _frontmatter( + { + "type": "plotpilot-relationship-graph", + "updated_at": datetime.utcnow().isoformat(), + "fact_count": len(facts), + } + ), + "", + "# 角色 / 故事关系图", + "", + "```mermaid", + "graph LR", + ] + edge_count = 0 + for fact in facts: + subject = _safe_segment(getattr(fact, "subject", "")) + obj = _safe_segment(getattr(fact, "object", "")) + predicate = _table_cell(getattr(fact, "predicate", "关联")) + if not subject or not obj: + continue + lines.append( + f' {subject}["{_table_cell(getattr(fact, "subject", ""))}"] -->|"{predicate}"| {obj}["{_table_cell(getattr(fact, "object", ""))}"]' + ) + edge_count += 1 + if edge_count == 0: + lines.append(' Empty["暂无结构化关系"]') + lines.extend(["```", ""]) + self._write_text(novel_dir / "03_Entities" / "Character_Relationships.md", "\n".join(lines)) + + def _write_chapter_note(self, novel_dir: Path, novel_id: str, chapter: Any) -> Path: + chapter_number = int(getattr(chapter, "chapter_id", 0) or 0) + path = novel_dir / "02_Chapters" / f"Chapter_{chapter_number:04d}.md" + beats = getattr(chapter, "beat_sections", []) or [] + micro_beats = getattr(chapter, "micro_beats", []) or [] + lines = [ + _frontmatter( + { + "type": "plotpilot-chapter-memory", + "novel_id": novel_id, + "chapter": chapter_number, + "sync_status": getattr(chapter, "sync_status", "draft") or "draft", + "updated_at": datetime.utcnow().isoformat(), + "source": "PlotPilot Knowledge", + } + ), + "", + f"# 第 {chapter_number} 章长期记忆", + "", + "关联:[[../00_Index|长期记忆索引]] · [[01_Fact_Locks]]", + "", + "## 章末摘要", + str(getattr(chapter, "summary", "") or "(暂无)"), + "", + "## 关键事件", + str(getattr(chapter, "key_events", "") or "(暂无)"), + "", + "## 未解问题 / 伏笔", + str(getattr(chapter, "open_threads", "") or "无"), + "", + "## 连续性说明", + str(getattr(chapter, "consistency_note", "") or "(暂无)"), + "", + "## 节拍", + ] + if beats: + lines.extend(f"- {beat}" for beat in beats) + else: + lines.append("- (暂无)") + + if micro_beats: + lines.extend(["", "## 微观节拍"]) + for 
beat in micro_beats: + if isinstance(beat, dict): + lines.append(f"- {beat.get('description', '')}({beat.get('focus', '')})") + else: + lines.append(f"- {beat}") + + self._write_text(path, "\n".join(lines) + "\n") + return path + + def _read_chapter_notes(self, chapters_dir: Path) -> List[ChapterSummary]: + if not chapters_dir.exists(): + return [] + chapters: List[ChapterSummary] = [] + for path in sorted(chapters_dir.glob("Chapter_*.md")): + text = path.read_text(encoding="utf-8") + chapter_id = _parse_chapter_number(_extract_frontmatter_value(text, "chapter")) + if chapter_id is None: + chapter_id = _parse_chapter_number(path.stem) + if chapter_id is None: + continue + beat_lines = [] + beats_text = _extract_section(text, "节拍") + for line in beats_text.splitlines(): + stripped = line.strip() + if stripped.startswith("- "): + beat_lines.append(stripped[2:].strip()) + chapters.append( + ChapterSummary( + chapter_id=chapter_id, + summary=_extract_section(text, "章末摘要"), + key_events=_extract_section(text, "关键事件"), + open_threads=_extract_section(text, "未解问题 / 伏笔"), + consistency_note=_extract_section(text, "连续性说明"), + beat_sections=beat_lines, + sync_status=_extract_frontmatter_value(text, "sync_status") or "synced", + ) + ) + return chapters + + @staticmethod + def _write_text(path: Path, content: str) -> None: + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text(content, encoding="utf-8") diff --git a/docs/2026-05-02-office-hours-brainstorm-ai-readability-wordcount.md b/docs/2026-05-02-office-hours-brainstorm-ai-readability-wordcount.md new file mode 100644 index 000000000..76f2842bb --- /dev/null +++ b/docs/2026-05-02-office-hours-brainstorm-ai-readability-wordcount.md @@ -0,0 +1,114 @@ +# Design: 小说生成质量与字数控制(Office Hours Brainstorm) + +Generated by /office-hours on 2026-05-02 +Branch: local/novel-pro +Repo: PlotPilot-NovelPro +Status: DRAFT +Mode: Builder + +## Problem Statement +当前项目有两个高频痛点: +1. 章节有明显 AI 感,可读性不稳定,尤其在中后段会出现“解释腔、总结腔、过整齐句法”。 +2. 
每章字数不可控,目标 2500 字时常出现 4000-9000 字或硬截断突兀。 + +## Current State(代码现状) +- 生成主链路在 `application/workflows/auto_novel_generation_workflow.py`。 +- 已有后处理与去 AI 味链路:`_naturalize_ai_flavor_if_needed`、`_apply_human_texture_pass_if_needed`。 +- 已有字数控制链路:`_enforce_chapter_word_target`、`_target_word_range`、流式 `stream_unit_hard_limit`。 +- 已新增“长章节下一章前摄”约束,默认长章触发。 + +## Premises +1. “AI 感重”主要不是单点 prompt 问题,而是“生成流程缺少强结构闭环 + 风格评估闭环”的系统问题。 +2. “字数失控”主要不是单点裁剪阈值问题,而是“预算分配粒度过粗(整章级而非场景级)”。 +3. 只继续堆叠禁词和润色规则,会产生边际递减,甚至与叙事张力冲突。 + +## Brainstorm Rounds(收敛前 8 轮) + +### Round 1:继续强化大 Prompt 禁令 +- 想法:继续追加“禁用词/禁句式/反模板规则”。 +- 结果:可短期改善,但很快被模型补偿性绕过,且文本变僵。 +- 结论:**不能作为主方案,只能是护栏。** + +### Round 2:单模型单次生成 + 章后重写 +- 想法:先写满,再用编辑 pass 统一修。 +- 结果:可读性提升有限,且重写后字数再次漂移。 +- 结论:**后处理应保留,但必须从“主引擎”降级为“微调层”。** + +### Round 3:场景化预算(Scene Budgeting) +- 想法:先生成 3-5 个场景卡,每个场景独立目标字数与容差。 +- 结果:更容易稳定章长,且节奏更可控。 +- 结论:**这是字数可控的第一关键杠杆。** + +### Round 4:写作模型与分析模型强分工 +- 想法:写作用 Kimi/OpenAI 强写作模型,分析/裁判用 DS。 +- 结果:角色、节奏、结构判断更稳定,避免“同模型自我偏见”。 +- 结论:**模型分工必须默认化。** + +### Round 5:把“可读性”做成可度量指标而非主观判断 +- 想法:引入章节质量评分卡(开篇抓力、冲突推进、对白信息密度、动作承压、尾钩强度)。 +- 结果:可以自动筛出“看起来像合格字数但不好看”的稿子。 +- 结论:**没有评分闭环,就没有稳定质量。** + +### Round 6:双阶段输出(Draft + Lock) +- 想法:先输出 Draft,不立即定稿;通过质量阈值后再锁定为 Chapter。 +- 结果:降低“坏稿直接入库”概率,支持自动重试。 +- 结论:**需要稿件状态机,而不是一次性提交。** + +### Round 7:字数控制从“硬截断”升级到“预算仲裁器” +- 想法:每个场景生成后立即结算剩余预算,动态收缩后续场景目标。 +- 结果:可避免最后一段硬切突兀,章节收尾自然很多。 +- 结论:**预算仲裁器是第二关键杠杆。** + +### Round 8:把“下一章前摄”纳入结构目标 +- 想法:章尾 15%-20% 强制埋 1-2 个“可执行钩子”(不是抽象预告)。 +- 结果:跨章可读性上升,连载追读更稳定。 +- 结论:**已接近最终可执行方案。** + +## Approaches Considered + +### Approach A(最小可行): Prompt 强化 + 后处理加严 +- Effort: S +- Risk: Med +- Pros: 改动小,上线快。 +- Cons: 治标不治本,长期漂移仍大。 + +### Approach B(推荐): 双闭环引擎(结构闭环 + 质量闭环) +- Effort: M +- Risk: Low-Med +- Pros: 同时解决 AI 感与字数问题,可持续优化。 +- Cons: 需要新增状态机与评估阈值策略。 + +### Approach C(理想架构): 多代理协作写作总线 +- Effort: L +- Risk: Med-High +- Pros: 最强可扩展性,支持题材插件化。 +- Cons: 复杂度高,短期 ROI 不如 B。 + +## Recommended Approach +选择 **Approach B(双闭环引擎)**。 + +核心机制: +1. 
**结构闭环(先结构后正文)** + - 章节戏剧任务 -> 场景推进表(3-5 场)-> 场景预算分配。 +2. **生成闭环(每场景独立)** + - 每场景单独生成 + 即时预算结算 + 剩余预算动态回灌。 +3. **质量闭环(先评估后锁稿)** + - 主编评分 >= 阈值才锁稿,否则自动进入“精修候选稿”。 +4. **跨章闭环(已接入)** + - 长章自动注入下一章前摄约束,章尾强制留下可执行钩子。 + +## Success Criteria +- 字数命中率: + - `|实际字数 - 目标字数| / 目标字数 <= 8%` 的章节占比 >= 90%。 +- 可读性: + - 主编评分(开头/冲突/人物/对白/追读/节奏)均值 >= 88。 + - “可优化后使用”占比 >= 80%,直接“不建议使用” <= 5%。 +- AI 味检测(仅作参考): + - 疑似 AI 占比中位数较当前基线下降 >= 20%。 + +## Next Steps +1. 新增 `ChapterDraftState`(draft/reviewed/locked)状态机。 +2. 新增 `SceneBudgetArbiter`:场景级预算分配与滚动结算。 +3. 新增 `QualityGate`:低于阈值自动触发精修候选稿,而非直接入库。 +4. 在工作台新增“章长控制诊断”面板,展示:目标、已写、剩余、场景偏差。 + diff --git a/docs/2026-05-02-office-hours-brainstorm-v2-10rounds.md b/docs/2026-05-02-office-hours-brainstorm-v2-10rounds.md new file mode 100644 index 000000000..2c846b32b --- /dev/null +++ b/docs/2026-05-02-office-hours-brainstorm-v2-10rounds.md @@ -0,0 +1,116 @@ +# Design: 小说生成质量与字数控制(二次脑暴,10轮) + +Generated on 2026-05-02 +Branch: local/novel-pro +Scope: 仅方案,不改代码 + +## 问题重述 +1. AI感重,可读性波动大。 +2. 
章字数不可控,常超目标或硬截断突兀。 + +当前系统已具备:节拍生成、去AI味后处理、字数钳制、主编审稿、下一章前摄。 +这次目标是找**新路径**,避免只在现有 prompt 上打补丁。 + +--- + +## 10轮脑暴记录(强制跑满) + +### Round 1:把“写作”拆成两种任务 +- 想法:把“可读性”与“信息推进”分离成两次生成。 +- 结论:会稳定一点,但成本翻倍,且可能风格割裂。 + +### Round 2:先生成“章内证据链”,再生成正文 +- 想法:先产出可核验的“事件-证据-后果”链。 +- 结论:对悬疑/现实题材有效,玄幻/情感题材收益有限。 + +### Round 3:建立“句法温度”控制器 +- 想法:为每章设句法温度(长短句比例、对话密度、比喻密度)。 +- 结论:这是新杠杆,可显著减轻“模型统一腔调”。 + +### Round 4:用“失败草稿”反向训练约束 +- 想法:收集你判定为难看的章,抽取共同失败特征,反向注入禁用规则。 +- 结论:比泛化禁词更有效,且能持续迭代。 + +### Round 5:字数控制改为“预算账户制” +- 想法:每章开一个预算账户,场景每写一段就扣额度。 +- 结论:比最后截断更自然,且能实时预警超支。 + +### Round 6:增加“尾段收束专用模型调用” +- 想法:当预算剩余不足15%时,不再继续常规生成,改走“收束模板”。 +- 结论:能解决突兀结尾,是对现有硬截断的升级。 + +### Round 7:做“章节类型路由” +- 想法:不同章节类型走不同写作协议:调查章/推进章/情感章/战斗章。 +- 结论:可读性提升明显,尤其能减少“每章同一种口感”。 + +### Round 8:引入“读者模拟器”而非只看AI检测器 +- 想法:让评审系统输出“下一段是否想读”的概率信号。 +- 结论:更贴近真实可读性,比“AI味分”更有业务价值。 + +### Round 9:跨章记忆改为“硬事实+软情绪”双轨 +- 想法:硬事实强锁(道具、时间、关系),软情绪弱锁(语气、状态)。 +- 结论:能防止一致性崩坏,又不把文风写死。 + +### Round 10:改成“多候选并行,小样本盲评选优” +- 想法:同大纲并行生成2-3个短候选(每个1200-1800字),先选口感最好的一条再扩写。 +- 结论:这是本轮最有新意的路径,能显著压低AI感,但计算成本增加。 + +--- + +## 新方案池(和上一版不同) + +### 方案A:预算账户制 + 尾段收束专用调用(低风险) +**核心**:字数问题优先,先把“超字数/硬截断”根除。 +- 章预算账户(目标字数 + 容差) +- 实时扣费(段落级) +- 低余额触发“尾段收束器” + +优点:落地快,风险低。 +缺点:只解决字数,对AI感改善有限。 + +### 方案B:章节类型路由 + 句法温度控制(平衡型) +**核心**:可读性优先,同时保住字数。 +- 章节先分类(调查/推进/情感/战斗) +- 每类有专属句法温度配置 +- 生成后按温度做轻量校正 + +优点:可读性改善明显,风格更像“人写”。 +缺点:需要维护类型模板,策略复杂度中等。 + +### 方案C:多候选并行盲评 + 选优扩写(高上限) +**核心**:先选“最好读”的草稿,再扩写到目标字数。 +- 并行2-3个候选短稿 +- 用主编评分+读者模拟器盲评 +- 选优稿扩写并收束 + +优点:AI感下降最明显,质量上限高。 +缺点:成本最高,时延增加。 + +--- + +## 推荐结论 +推荐 **B + A 组合** 分两阶段推进: + +1. 先上 A(1周内可见收益): + 把字数不可控先打掉,减少生成事故。 +2. 再上 B(2-3周): + 用章节类型路由 + 句法温度降低AI腔。 +3. C 作为“高质量模式”开关: + 只在关键章节或付费模式启用。 + +--- + +## 验收指标(新增) +- 字数命中率:目标±8% 内 >= 92% +- 收束自然度:尾段“突兀”人工标记比例 <= 10% +- 文风分布:长短句比例落在类型目标区间 >= 85% +- 追读信号:读者模拟器“愿读下一段”评分均值提升 >= 15% + +--- + +## 下一步(不写代码) +1. 先定义4类章节路由协议(调查/推进/情感/战斗)。 +2. 给每类配置句法温度参数(对话密度、长句率、比喻上限)。 +3. 设计预算账户字段与收束触发阈值。 +4. 
定义“高质量模式”触发条件(是否启用候选并行盲评)。 + diff --git a/docs/2026-05-02-office-hours-brainstorm-v3-10rounds.md b/docs/2026-05-02-office-hours-brainstorm-v3-10rounds.md new file mode 100644 index 000000000..99ed1eb78 --- /dev/null +++ b/docs/2026-05-02-office-hours-brainstorm-v3-10rounds.md @@ -0,0 +1,125 @@ +# Design: 小说生成质量与字数控制(三次脑暴,10轮) + +Generated by /office-hours on 2026-05-02 + +## 本轮目标 + +在已有 v1/v2 方案基础上,强制再跑 10 轮,要求出现“新杠杆”,不是重复“去 AI 味后处理”。 + +当前两大痛点: +1. 成章可读性不稳定,常被判“疑似 AI/AI 特征”。 +2. 章节字数经常偏离目标,且结尾会出现硬收束突兀。 + +--- + +## 10 轮脑暴(本轮新增思路) + +### Round 1:把“写作任务”改成“读者体验任务” +- 想法:每章先产出 6 行“读者体验卡”(本章读者要感到什么、记住什么、想追什么)。 +- 为什么新:从“写给模型”改为“写给读者结果”,减少套路输出。 +- 结论:作为所有生成前置必填卡,强约束高价值。 + +### Round 2:把“整章生成”改成“场景包生成” +- 想法:每章固定 4~6 个场景包,每包独立目标字数、冲突类型、信息增量、结尾钩子。 +- 为什么新:字数控制从“后裁剪”前移到“包级预算”。 +- 结论:字数可控的基础设施,必须做。 + +### Round 3:引入“章内波形”约束 +- 想法:每章定义节奏波形(紧-缓-紧-爆点 / 稳推-反转-追问)。 +- 为什么新:不是让模型“写得生动”,而是给结构节奏模板,直接抑制“平均用力”。 +- 结论:对可读性提升显著,成本低。 + +### Round 4:加入“对白占比区间”硬门控 +- 想法:按类型设对白占比区间(如都市悬疑 28%-42%,群像冒险 35%-50%)。 +- 为什么新:很多“AI 味”来自叙述密度过高、角色互动不足。 +- 结论:作为质量门槛之一,低风险高收益。 + +### Round 5:建立“禁忌句式黑名单” +- 想法:维护可配置黑名单(过度总结句、万能抒情句、模板转场句)。 +- 为什么新:不靠玄学“自然化”,而是定点剔除机械句型。 +- 结论:应放在候选稿筛选阶段,命中阈值直接淘汰。 + +### Round 6:风格锚点检索写作(RAG) +- 想法:从你认可的人写章节中抽取 6~10 条“风格锚句/动作锚句”,每章检索注入。 +- 为什么新:不只是“提示词更像人”,而是“先检索后写作”。 +- 结论:这是通用化最关键的一步,比人工一章章调更可持续。 + +### Round 7:候选并行 + 盲评打分 +- 想法:同一场景包生成 2~3 个候选,盲评后仅保留最优,拼章。 +- 为什么新:把“一次成稿”改为“机器内选优”。 +- 结论:质量上限高,但成本较高,建议仅用于关键章或首卷。 + +### Round 8:长写短分的两段式流程 +- 想法:先写“连续长稿(例如 7000-9000)”,再按场景包切成 2~3 章并做章尾钩子重写。 +- 为什么新:你提出的“先写长再分章”正式流程化。 +- 结论:对上下文连续性很好,但必须配套“分章器 + 章尾钩子器”。 + +### Round 9:结尾收束专用小模型/专用调用 +- 想法:正文结束前最后 250~400 字交给“收束器”单独生成,目标是自然刹车。 +- 为什么新:把“突兀结尾”拆成独立问题,不再靠全文硬截断。 +- 结论:字数与观感双收益,建议优先实现。 + +### Round 10:质量评分改成“追读导向” +- 想法:主编审稿评分权重调整为:开篇钩子、冲突推进、角色动机清晰、对白辨识度、章尾追问。 +- 为什么新:从“文风正确”转向“读者会不会继续点下一章”。 +- 结论:这比 AI 检测分更贴近业务目标,应作为主指标。 + +--- + +## 本轮新增方案(和 v1/v2 不同) + +### 方案 D:风格锚点 RAG + 禁忌句式门控(通用化方案) +- 目标:不再逐章手工调,建立可迁移“低 AI 味”机制。 +- 关键组件: + - 风格锚点库(按题材/视角/叙事人称) + - 检索注入器(每章注入 6~10 条) + 
- 禁忌句式过滤器(阈值淘汰) +- 适用:长期连载,追求稳定。 + +### 方案 E:场景包预算 + 收束器(字数稳定方案) +- 目标:把目标 2500 字控制在 ±8% 内,且结尾自然。 +- 关键组件: + - 场景包预算器(每包预算 + 容差) + - 余量仲裁器(最后一包动态调节) + - 结尾收束器(最后 250~400 字专用) +- 适用:全量章节默认开启。 + +### 方案 F:长稿母本 + 智能分章器(连续性方案) +- 目标:提升剧情连续感,减少章节间割裂。 +- 关键组件: + - 长稿生成器(一次产出完整段落链) + - 分章器(按冲突峰值/信息转折切章) + - 章尾钩子重写器 +- 适用:关键卷、主线推进章。 + +--- + +## 推荐落地顺序(两周可执行版) + +1. **先上 E(字数稳定)** + - 因为当前最影响体验的是 2500 目标经常跑到 5000+。 +2. **再上 D(通用去 AI 味)** + - 把“一次次人工调 prompt”变成系统能力。 +3. **最后试点 F(长写短分)** + - 先在 1 本测试书、1 卷里开灰度。 + +--- + +## 验收指标(本轮版) + +- 字数命中率:目标±8% 内 >= 92% +- 突兀结尾占比:<= 8% +- 主编审稿“追读”均分:>= 88 +- 人工抽检“角色对白可区分”通过率:>= 85% +- 单章人工返工时长中位数:下降 30%+ + +--- + +## 结论 + +这次 10 轮的新增价值是三点: +1. 从“后处理修文”转到“前置结构 + 包级预算 + 结尾专用收束”。 +2. 从“逐章调 prompt”转到“风格锚点 RAG + 禁忌句式门控”的通用机制。 +3. 把你提出的“先写长再分章”变成可落地流程,而不是临时手工操作。 + +如果按一个主方案来选,本轮推荐:**E + D 先落地,F 小范围灰度**。 diff --git a/docs/NOVELPRO_README.md b/docs/NOVELPRO_README.md new file mode 100644 index 000000000..6328e4f90 --- /dev/null +++ b/docs/NOVELPRO_README.md @@ -0,0 +1,541 @@ +# NovelPro 作者工作台增强说明 + +NovelPro 是一组面向长篇小说创作的工作台增强能力,目标是让 PlotPilot 不只负责“生成章节”,还负责把选题、候选稿、连续性、关系变化、战力约束、文风稳定和长期记忆放进同一条可验证链路里。 + +这份说明用于代码审核、部署评估和后续维护。所有功能都以现有 FastAPI + Vue + SQLite 架构为基础实现,不要求新增外部服务;需要真实模型或登录态数据的部分均可降级为空配置、手动输入或公开页面采集。 + +## 设计目标 + +- 降低长篇创作中的断线风险:角色掉线、关系沉默、时间线冲突、大纲偏离、战力跳升和文风漂移。 +- 把“AI 生成正文”前移到“选题判断”和“候选稿决策”,减少一次生成失败后直接污染正文。 +- 保留作者控制权:候选稿、精修稿、外部模型结果、AI 表单建议都以预览和采用动作进入主线。 +- 支持可回读的长期记忆:Obsidian Markdown Vault 可作为主记忆源,SQLite 知识库作为运行缓存。 +- 对市场信号保持可配置边界:公开榜单、API、登录态数据统一走来源配置和凭据表,接口不回传明文凭据。 + +## 功能总览 + +### 1. 选题立项池 + +选题模块提供从市场观察到新书创建的完整闭环: + +- 手动输入选题补充说明。 +- 手动导入市场观察文本。 +- 公开来源采集标题级市场信号。 +- 自动后台采集与来源健康状态。 +- 市场信号去重、趋势摘要、平台权重统计。 +- 漫画信号转译为小说题材机会。 +- 选题生成、深化、评估、对比推荐。 +- 采纳选题为新书,并把立项报告写入新书 premise。 + +#### 选题工作流 + +推荐使用顺序: + +1. 打开首页的选题立项池。 +2. 在“市场观察”中手动导入观察文本,或在“公开来源采集”中选择榜单来源采集样本。 +3. 查看“市场摘要”,确认来源、类型、标签、漫画机会和近窗趋势。 +4. 在生成表单里填写补充说明、类型、关键词、规避模式,并勾选要引用的市场信号。 +5. 生成 3-5 个选题候选。 +6. 对候选执行“深化”和“评估”,生成结构化立项报告。 +7. 多选候选执行“对比推荐”。 +8. 
采纳候选为新书,系统会把选题阶段沉淀的信息写入新书 premise,供后续世界观、人物和章节生成继承。 + +#### 来源配置文件 + +市场信号来源集中定义在: + +```text +application/topic/services/topic_signal_sources.py +``` + +当前默认来源包括: + +| source key | 名称 | 类型 | 默认采集方式 | 榜单维度 | +|---|---|---|---|---| +| `qidian_rank` | 起点-小说榜 | 小说 | `public_page` | 热门榜 / 新书榜 / 快速上榜 | +| `jjwxc_rank` | 晋江-小说榜 | 小说 | `public_page` | 热门榜 / 新书榜 / 快速上榜 | +| `qimao_rank` | 七猫-小说榜 | 小说 | `public_page` | 热门榜 / 新书榜 / 快速上榜 | +| `fanqie_rank` | 番茄-小说榜 | 小说 | `public_page` | 热门榜 / 新书榜 / 快速上榜 | +| `qq_read` | 腾讯-QQ阅读 | 小说 | `api` | 热门榜 / 新书榜 / 快速上榜 | +| `tencent_comic_rank` | 腾讯动漫-漫画榜 | 漫画 | `public_page` | 热门榜 / 新书榜 / 快速上榜 | +| `kuaikan_comic` | 快看漫画-漫画 | 漫画 | `public_page` | 热门榜 / 新书榜 / 快速上榜 | + +新增来源时,维护者只需要在 `MARKET_SIGNAL_SOURCES` 中增加一个 `TopicMarketSignalSourceDTO`: + +```python +"example_rank": TopicMarketSignalSourceDTO( + key="example_rank", + name="示例平台-小说榜", + url="https://example.com/rank", + category="novel", + source_type="public_page", + requires_auth=False, + rank_urls={ + "热门榜": "https://example.com/rank/hot", + "新书榜": "https://example.com/rank/new", + "快速上榜": "https://example.com/rank/rise", + }, +) +``` + +字段说明: + +- `key`:来源唯一标识,会用于接口、数据库凭据、健康状态和平台权重。 +- `name`:前端展示名称。 +- `url`:默认采集入口。 +- `category`:`novel` 或 `comic`;漫画来源会参与“漫画转题机会”分析。 +- `source_type`:`public_page`、`api` 或 `authenticated_source`。 +- `requires_auth`:是否要求配置凭据后才允许采集。 +- `rank_urls`:按榜单维度配置多个采集入口;为空时使用 `url`。 + +平台默认权重定义在同一文件的 `DEFAULT_MARKET_SIGNAL_SOURCE_WEIGHTS`。权重会影响市场摘要、选题评估和候选对比推荐。 + +#### 采集器配置 + +采集器入口在: + +```text +application/topic/services/topic_signal_collectors.py +``` + +内置三类 collector: + +- `public_page`:抓取 HTML 页面,使用平台适配解析器或通用标题解析。 +- `api`:请求 JSON API,优先解析 `books/items/list/records` 等常见榜单结构。 +- `authenticated_source`:需要登录态或自定义 headers 的来源入口。 + +如果新增平台页面结构比较稳定,建议增加平台专用解析函数,并在 `PublicPageMarketSignalCollector` 中按 `source.key` 分发。这样可以提取更准确的标题、类型、标签、简介、排名和热度。 + +#### 凭据和 Endpoint 配置 + +前端配置入口: + +```text +TopicIdeaPanel.vue -> 外部 API / 登录态 +``` + +后端接口: + +```http +GET 
/api/v1/topics/signals/source-credentials
+PATCH /api/v1/topics/signals/sources/{source_key}/credentials
+```
+
+可配置字段:
+
+```json
+{
+  "api_key": "optional-api-key",
+  "cookie": "optional-cookie",
+  "endpoint_url": "https://example.com/custom/rank-api",
+  "headers": {
+    "User-Agent": "Mozilla/5.0",
+    "X-Custom-Header": "value"
+  }
+}
+```
+
+保存位置:
+
+```text
+topic_market_signal_credentials
+```
+
+安全边界:
+
+- API 只返回 `api_key_configured`、`cookie_configured`、`endpoint_configured` 和 `header_keys`。
+- 明文 API Key / Cookie 只在服务端内部读取,不通过查询接口返回。
+- 如果配置了 `endpoint_url`,采集时会优先使用该 Endpoint 覆盖来源默认 URL。
+- 如果配置了 `api_key` 且没有显式 `Authorization` header,采集器会自动补 `Authorization: Bearer <api_key>`。
+- 如果配置了 `cookie` 且没有显式 `Cookie` header,采集器会自动补 `Cookie`。
+
+#### 手动采集配置
+
+前端配置入口:
+
+```text
+TopicIdeaPanel.vue -> 公开来源采集 -> 立即采集
+```
+
+后端接口:
+
+```http
+POST /api/v1/topics/signals/collect
+GET /api/v1/topics/signals/automation
+PATCH /api/v1/topics/signals/automation
+GET /api/v1/topics/signals/source-health
+```
+
+手动采集请求:
+
+```json
+{
+  "source_keys": ["qidian_rank", "fanqie_rank", "qq_read"],
+  "limit_per_source": 8
+}
+```
+
+保留的采集设置字段:
+
+```json
+{
+  "enabled": false,
+  "interval_minutes": 180,
+  "limit_per_source": 8,
+  "lookback_days": 30,
+  "selected_source_keys": ["qidian_rank", "fanqie_rank", "qq_read"],
+  "source_weights": {
+    "qidian_rank": 1.0,
+    "jjwxc_rank": 1.1,
+    "fanqie_rank": 1.05
+  }
+}
+```
+
+保存位置:
+
+```text
+topic_market_signal_settings
+topic_market_signal_source_health
+```
+
+当前本地应用默认不在 API 进程启动时创建市场信号定时采集线程,只在用户点击“立即采集”或调用 `/api/v1/topics/signals/collect` 后执行。独立脚本仍保留给需要一次性命令行采集或未来自行接入进程管理器的场景:
+
+```bash
+python scripts/start_topic_signal_collector.py --once --force
+python scripts/start_topic_signal_collector.py --poll-interval 60
+```
+
+相关环境变量:
+
+| 变量 | 默认值 | 说明 |
+|---|---:|---|
+| `TOPIC_SIGNAL_POLL_INTERVAL_SECONDS` | `60` | 独立守护进程检查配置是否到期的轮询间隔,单位秒 |
+| `LOG_LEVEL` | `INFO` | 独立守护进程日志级别 |
+| `LOG_FILE` | `logs/aitext.log` | 独立守护进程日志文件 |
+| 
`DISABLE_SSL_VERIFY` | `false` | 调试网络证书问题时可临时关闭 SSL 校验,不建议生产使用 | + +当前默认不建议配置 systemd、cron 或其他定时任务;需要采集时优先使用前端“立即采集”按钮。 + +#### 数据库表 + +选题模块新增或使用以下 SQLite 表: + +| 表 | 用途 | +|---|---| +| `topic_ideas` | 选题候选、立项报告、评估结果、采纳状态 | +| `topic_market_signals` | 手动导入和采集得到的市场信号 | +| `topic_market_signal_settings` | 采集偏好、单源条数、趋势窗口、平台权重 | +| `topic_market_signal_credentials` | 来源 API Key、Cookie、Endpoint 和自定义 headers | +| `topic_market_signal_source_health` | 每个来源最近采集状态、条数和错误 | + +这些表和必要列由 `infrastructure/persistence/database/connection.py` 在启动时幂等创建或补齐,旧库无需手动迁移。 + +#### API 清单 + +主要接口: + +```http +POST /api/v1/topics/generate +GET /api/v1/topics/ +GET /api/v1/topics/{topic_id} +POST /api/v1/topics/{topic_id}/deepen +POST /api/v1/topics/{topic_id}/evaluate +POST /api/v1/topics/compare +POST /api/v1/topics/{topic_id}/adopt + +POST /api/v1/topics/signals/import +POST /api/v1/topics/signals/collect +GET /api/v1/topics/signals/summary +GET /api/v1/topics/signals/sources +POST /api/v1/topics/signals/sources/test +GET /api/v1/topics/signals/source-health + +GET /api/v1/topics/signals/automation +PATCH /api/v1/topics/signals/automation +GET /api/v1/topics/signals/source-credentials +PATCH /api/v1/topics/signals/sources/{source_key}/credentials +``` + +#### 验证选题功能 + +推荐后端测试: + +```bash +pytest \ + tests/unit/domain/topic/test_topic_idea.py \ + tests/unit/infrastructure/database/test_sqlite_topic_idea_repository.py \ + tests/unit/application/services/test_topic_idea_service.py \ + tests/unit/application/services/test_topic_signal_automation_service.py \ + tests/unit/application/services/test_topic_signal_collectors.py \ + tests/unit/interfaces/api/test_topic_ideas.py \ + tests/unit/scripts/test_start_topic_signal_collector.py \ + -q +``` + +推荐手工验收: + +1. `GET /api/v1/topics/signals/sources` 返回 7 个默认来源。 +2. `POST /api/v1/topics/signals/sources/test` 能返回每源样例标题或明确错误。 +3. `POST /api/v1/topics/signals/import` 可导入手动观察。 +4. `POST /api/v1/topics/signals/collect` 可采集至少一个来源并入库。 +5. 
`GET /api/v1/topics/signals/summary` 返回来源、类型、标签、日统计和漫画转题机会。 +6. `POST /api/v1/topics/generate` 能引用市场信号生成候选。 +7. `deepen / evaluate / compare / adopt` 全链路可用,采纳后创建新书。 + +相关后端模块: + +- `domain/topic/` +- `application/topic/` +- `interfaces/api/v1/topic/` +- `infrastructure/persistence/database/sqlite_topic_idea_repository.py` + +相关前端模块: + +- `frontend/src/components/topic/TopicIdeaPanel.vue` +- `frontend/src/api/topic.ts` + +### 2. 候选稿与精修闭环 + +候选稿模块让章节生成进入“候选 -> 对比 -> 审稿 -> 采用/拒绝”的流程,而不是直接覆盖正文: + +- 章节候选稿保存。 +- 分支列表与差异对比。 +- 监督审稿与拒绝理由。 +- 候选稿状态流转。 +- 外部模型生成结果台账。 +- 精细改稿任务入口。 + +相关后端模块: + +- `application/core/services/chapter_candidate_draft_service.py` +- `infrastructure/persistence/database/sqlite_chapter_candidate_draft_repository.py` +- `interfaces/api/v1/core/chapter_candidate_drafts.py` + +相关前端模块: + +- `frontend/src/components/workbench/CandidateRefinePanel.vue` +- `frontend/src/components/workbench/CandidateDraftBranchSwitcher.vue` +- `frontend/src/stores/candidateDraftBranchStore.ts` + +### 3. 连续性巡检 + +连续性巡检把章节正文、知识库、关系事件和大纲节点合并成结构化监控: + +- 角色掉线提醒。 +- 关系线沉默提醒。 +- 关系变化事件记录。 +- 时间线事件与冲突检查。 +- 大纲覆盖状态。 +- 文风漂移信号接入。 + +相关后端模块: + +- `application/analyst/services/continuity_overview_service.py` +- `interfaces/api/v1/analyst/continuity.py` + +相关前端模块: + +- `frontend/src/components/workbench/ContinuityPanel.vue` +- `frontend/src/api/continuity.ts` + +### 4. 战力系统 + +战力系统用于约束玄幻、异能、竞技等类型中的能力跃迁: + +- 战力规则维护。 +- 角色战力档案。 +- 战力变化事件。 +- 异常跃迁提醒。 +- 总览面板。 + +相关后端模块: + +- `application/analyst/services/power_system_service.py` +- `infrastructure/persistence/database/sqlite_power_system_repository.py` +- `interfaces/api/v1/analyst/power_system.py` + +相关前端模块: + +- `frontend/src/components/workbench/PowerSystemPanel.vue` +- `frontend/src/api/powerSystem.ts` + +### 5. 
Obsidian 长期记忆

+Obsidian 集成把 PlotPilot 的章后知识沉淀导出为 Markdown Vault,并允许系统优先从 Vault 回读长期记忆:
+
+- 事实锁。
+- 分章摘要。
+- 角色/故事关系图。
+- 时间线。
+- Vault 路径配置。
+- 当前章节手动同步。
+- NovelPro 监控中心读取主记忆状态。
+
+该能力不要求安装 Obsidian 桌面应用。只要配置了 Vault 路径,就可以作为普通 Markdown 目录使用。
+
+相关后端模块:
+
+- `application/world/services/obsidian_memory_service.py`
+- `interfaces/api/v1/analyst/novelpro_monitor.py`
+
+相关文档:
+
+- `docs/novelpro-obsidian-long-term-memory.md`
+
+### 6. AI 味抑制与提示词广场
+
+章节生成提示词增加了低 AI 味约束,并提供可编辑的提示词节点:
+
+- `workflow-chapter-generation`
+- `anti-ai-style-rules`
+- `review-ai-flavor-audit`
+- `rewrite-ai-flavor-naturalizer`
+
+默认策略强调具体动作、对白潜台词、冲突慢写、信息增量和禁止空泛总结。运行时仍允许用户在提示词广场中编辑并保留版本历史。
+
+相关模块:
+
+- `application/audit/services/cliche_scanner.py`
+- `application/workflows/auto_novel_generation_workflow.py`
+- `infrastructure/ai/prompts/prompts_defaults.json`
+- `frontend/src/components/workbench/promptPlaza/PromptDetailPanel.vue`
+
+### 7. NovelPro 监控中心
+
+监控中心把多个系统的状态收束到一个右侧面板:
+
+- Obsidian 主记忆状态。
+- 知识关系图统计。
+- 连续性巡检摘要。
+- 战力风险摘要。
+- 红黄灯健康分。
+- 自动提醒与操作建议。
+
+时间线提醒做了分级处理:轻度可疑冲突会显示为 warning,只有多个冲突或缺少当前章节时间锚点时才升级为 error。
+
+相关模块:
+
+- `application/analyst/services/novelpro_monitor_service.py`
+- `frontend/src/components/workbench/NovelProMonitorPanel.vue`
+
+## API 概览
+
+新增或扩展的主要 API:
+
+- `GET/POST /api/v1/topics/...`
+- `GET /api/v1/topics/signals/summary`
+- `GET /api/v1/topics/signals/source-credentials`
+- `PATCH /api/v1/topics/signals/sources/{source_key}/credentials`
+- `GET/PATCH /api/v1/topics/signals/automation`
+- `GET /api/v1/topics/signals/source-health`
+- `POST /api/v1/topics/signals/sources/test`
+- `GET/POST /api/v1/novels/{novel_id}/candidate-drafts`
+- `GET /api/v1/novels/{novel_id}/continuity/overview`
+- `GET /api/v1/novels/{novel_id}/power-system/overview`
+- `GET /api/v1/novels/{novel_id}/novelpro/monitor`
+- `POST /api/v1/novels/{novel_id}/novelpro/obsidian/sync`
+- `POST /api/v1/novels/{novel_id}/novelpro/suggestions/form`
+
+## 数据库变更
+
+新增表和字段均通过启动时幂等迁移创建,面向已有 SQLite 数据库保持兼容。
+
+主要新增数据区域:
+
+- 选题候选与市场信号。
+- 
市场信号手动采集设置。 +- 市场信号来源健康状态。 +- 来源凭据状态。 +- 候选稿与候选稿分支。 +- 战力规则、角色档案和事件。 +- LLM 控制台配置补充字段。 +- 文风指纹持久化修复。 + +凭据表会保存 API Key / Cookie 以供本地采集使用,但对外查询接口只返回是否已配置,不返回明文。 + +## 降级与安全边界 + +- 未配置真实 LLM 时,系统沿用现有 MockProvider 或空配置提示,不阻断页面打开。 +- 未配置来源凭据时,市场采集仍可使用公开页面来源或手动输入。 +- 外部 API 和登录态来源使用统一 collector 边界,后续可以替换为官方 API。 +- Obsidian 未安装时仍可写 Markdown Vault。 +- `generate-chapter-stream` 语义仍是流式生成正文和章后知识回写,不直接创建章节;前端需要把生成结果保存为章节。 +- 本 PR 不包含任何用户 Cookie、API Key、数据库文件、宝塔部署地址或个人路径。 + +## 本地启动 + +后端: + +```bash +python -m venv .venv +source .venv/bin/activate +pip install -r requirements.txt +uvicorn interfaces.main:app --host 127.0.0.1 --port 8005 --reload +``` + +前端: + +```bash +cd frontend +npm install +npm run dev +``` + +可选环境变量: + +```bash +PLOTPILOT_OBSIDIAN_VAULT=/path/to/obsidian-vault +DISABLE_TOPIC_SIGNAL_AUTOMATION=1 +ANTHROPIC_API_KEY=... +ARK_API_KEY=... +OPENAI_API_KEY=... +``` + +## 验证建议 + +后端聚焦测试: + +```bash +pytest \ + tests/unit/application/services/test_topic_idea_service.py \ + tests/unit/application/services/test_topic_signal_collectors.py \ + tests/unit/application/services/test_chapter_candidate_draft_service.py \ + tests/unit/application/services/test_continuity_overview_service.py \ + tests/unit/application/services/test_power_system_service.py \ + tests/unit/application/services/test_novelpro_monitor_service.py \ + tests/integration/interfaces/api/v1/test_chapter_candidate_drafts_api.py \ + tests/integration/interfaces/api/v1/test_continuity_api.py \ + tests/integration/interfaces/api/v1/test_power_system_api.py \ + -q +``` + +前端构建: + +```bash +cd frontend +npm ci +npm run build +``` + +当前 fork 分支已通过 GitHub Actions: + +- Backend CI +- Frontend CI + +## 审核重点 + +这组变更规模较大,建议按模块拆分审核: + +1. 先审数据库迁移和仓储兼容性。 +2. 再审选题市场信号链路和凭据脱敏边界。 +3. 再审候选稿闭环是否符合原项目产品方向。 +4. 再审连续性、战力、Obsidian、监控中心是否需要全部进入主线,或拆成可选增强模块。 +5. 
最后审前端入口和工作台信息密度。 + +如果维护者倾向小 PR,建议从这个分支中拆出以下独立 PR: + +- 选题立项池与市场信号。 +- 候选稿与精修闭环。 +- 连续性巡检与 NovelPro 监控中心。 +- Obsidian 长期记忆。 +- AI 味抑制提示词与俗套扫描增强。 diff --git a/docs/novelpro-candidate-draft-optimization.md b/docs/novelpro-candidate-draft-optimization.md new file mode 100644 index 000000000..02f4ba118 --- /dev/null +++ b/docs/novelpro-candidate-draft-optimization.md @@ -0,0 +1,27 @@ +# NovelPro 候选稿与外部模型优化 + +## 目标 + +让 Kimi 等外部模型参与写作时,仍然保留 PlotPilot 的候选稿、快照、采纳和章后记忆更新闭环。 + +## 本次能力 + +- 段落级 diff:候选稿预览会按段落显示新增、删除、改写和相似度。 +- 部分采纳:可勾选候选段落,生成新的“部分采纳候选稿”,再手动采纳为主稿。 +- 外部模型台账:复制提示词、导入回稿、采纳外部稿时,会在浏览器本地记录任务状态。 +- 记忆影响预览:采纳前会提示可能影响正文事实、角色关系、大纲节点、出场记录或外部模型回稿风险。 +- Autopilot 拆包:工作台的托管写作面板已改为异步 chunk,继续降低主包体积。 + +## 使用建议 + +1. 从连续性提醒、精细改稿或外部模型入口创建候选任务。 +2. 复制提示词给 Kimi,台账会记录该提示。 +3. 将 Kimi 回稿导入外部模型稿,系统会记录回稿与候选稿 ID。 +4. 在候选稿预览里看段落级 diff。 +5. 如只想采纳部分段落,先保存为“部分采纳候选稿”,再采纳这版。 +6. 采纳后仍走主稿、快照和章后记忆更新链路。 + +## 边界 + +- 外部模型台账目前保存在浏览器本地,适合个人写作流;如果需要跨设备同步,后续再升级为后端表。 +- 部分采纳不会直接改主稿,而是生成新的候选稿,避免绕开既有回滚和记忆更新机制。 diff --git a/docs/novelpro-continuity-structured-tracking.md b/docs/novelpro-continuity-structured-tracking.md new file mode 100644 index 000000000..5fe4520ba --- /dev/null +++ b/docs/novelpro-continuity-structured-tracking.md @@ -0,0 +1,25 @@ +# NovelPro 连续性结构化追踪 + +## 目标 + +把原本偏启发式的关系变化、大纲偏离巡检,升级为可人工确认、可持续追踪的结构化记录,减少长篇后期误判和漏判。 + +## 能力范围 + +- 关系事件:记录某一章中两名角色的关系推进、破裂、修复、隐瞒、结盟等事件。 +- 大纲节点状态:记录章节大纲节点的完成、变更、缺失、阻塞或待确认状态。 +- 连续性总览会优先展示结构化记录;没有记录时仍回退到现有启发式巡检。 + +## 使用流程 + +1. 在工作台右栏打开 `连续性巡检`。 +2. 若本章出现关键关系变化,在 `记录关系事件` 填写角色、关系、变化类型、描述和证据句。 +3. 若本章大纲有改动,在 `更新大纲节点状态` 标记对应节点为已完成、已变更、缺失或阻塞。 +4. 保存后刷新面板,关系和大纲区域会显示 `结构化记录` 标签。 +5. 
如果结构化记录提示风险,再通过候选改稿、精细改稿或外部稿导入链路处理。
+
+## 设计边界
+
+- 本功能只提供追踪和巡检,不直接改写主稿。
+- 不替换 Bible 静态关系;关系事件是随章节推进的动态变化记录。
+- 不强制阻断写作;是否修稿仍由作者通过候选稿链路决定。
diff --git a/docs/novelpro-model-role-workflow.md b/docs/novelpro-model-role-workflow.md
new file mode 100644
index 000000000..5aabae816
--- /dev/null
+++ b/docs/novelpro-model-role-workflow.md
@@ -0,0 +1,25 @@
+# NovelPro 模型分工工作流
+
+## 目标
+
+让外部写作模型可配置,不限定 Kimi;同时保留一个审稿/记忆模型负责约束、检查和采纳前判断。
+
+## 推荐分工
+
+- 写作模型:负责生成正文、改写段落、扩写场景。
+- 审稿/记忆模型:负责连续性、战力、事实、伏笔、候选稿 diff 和采纳前记忆影响检查。
+- PlotPilot:仍是唯一事实源;只有候选稿被采纳后,才写入主稿、快照和章后记忆。
+
+## 使用方式
+
+1. 在工作台右栏打开 `模型分工`。
+2. 选择默认写作模型和审稿/记忆模型。
+3. 如果需要 Gemini、豆包、通义等模型,可添加自定义模型。
+4. 复制外部模型提示词时,提示词会自动写入当前模型分工。
+5. 导入回稿时,候选稿 metadata 和外部模型台账会记录使用的写作模型。
+
+## 边界
+
+- 当前是 copy/paste 工作流,不直接调用外部模型 API。
+- 外部模型台账仍保存在浏览器本地。
+- 后续如果接 API,应继续保持“写作模型只产正文,审稿/记忆模型只做约束和检查”的边界。
diff --git a/docs/novelpro-obsidian-long-term-memory.md b/docs/novelpro-obsidian-long-term-memory.md
new file mode 100644
index 000000000..e473c8e63
--- /dev/null
+++ b/docs/novelpro-obsidian-long-term-memory.md
@@ -0,0 +1,92 @@
+# NovelPro Obsidian 长期记忆
+
+## 定位
+
+Obsidian 是 NovelPro 的长期主记忆层。PlotPilot 写作、生成和采纳仍然走原有章节与 Knowledge 管线;章后管线会把 PP 缓存自动导出到 Obsidian,后续读取 Knowledge 时会优先回读 Obsidian,再同步回 PP SQLite 缓存。
+
+这样只有一条写作链路:作者仍然在 PP 内写作,AI 仍使用 PP 当前激活配置;Obsidian 负责长期记忆、人工复盘、双链整理和关系图展示。
+
+## 自动同步时机
+
+- 章节正文保存后,`ChapterAftermathPipeline` 会先执行现有叙事同步、向量、三元组、伏笔、文风和 KG 推断。
+- 候选稿采纳仍复用章节保存后的同一管线,因此也会自动同步 Obsidian。
+- 后台 `EXTRACT_BUNDLE` 任务完成叙事同步后,也会尝试同步 Obsidian。
+- Obsidian 同步失败只记录 warning,不阻断正文保存和候选稿采纳。
+- Obsidian 导出会读取 PP SQLite 缓存,不会在写后导出时反向读取旧 Markdown,避免刚保存的章节记忆被旧 Obsidian 内容遮挡。
+
+## Vault 路径
+
+默认导出到:
+
+```text
+data/obsidian-vault/<novel_id>/
+```
+
+如需接入已有 Obsidian vault,可设置环境变量:
+
+```bash
+export PLOTPILOT_OBSIDIAN_VAULT="/你的/Obsidian/Vault/路径"
+```
+
+也可以写入项目根目录 `.env`:
+
+```bash
+PLOTPILOT_OBSIDIAN_VAULT=/你的/Obsidian/Vault/路径
+```
+
+修改后需要重启后端。若不配置,PP 会使用默认目录 `data/obsidian-vault`。
+
+## 目录结构
+
+每本书会生成独立目录:
+
+```text
+<novel_id>/
+  00_Index.md
+ 
01_Fact_Locks.md + 02_Chapters/ + Chapter_0001.md + Chapter_0002.md + 03_Entities/ + Character_Relationships.md + 04_Timelines/ + Timeline.md +``` + +## 数据来源 + +- `00_Index.md`:从当前 Knowledge 章节摘要生成入口索引。 +- `01_Fact_Locks.md`:从 `premise_lock` 和知识三元组生成长期事实锁。 +- `02_Chapters/Chapter_XXXX.md`:从分章摘要、关键事件、未解问题、连续性说明和节拍生成章节记忆。 +- `03_Entities/Character_Relationships.md`:从知识三元组生成 Mermaid 关系图,展示角色关系、故事关系和势力关联。 +- `04_Timelines/Timeline.md`:从分章关键事件和未解问题生成章节时间线。 + +## 使用原则 + +- 在 PP 里写作、生成、采纳和维护章节正文,避免复制粘贴到外部模型形成双线。 +- 在 Obsidian 里阅读、复盘、补充长期记忆和人工链接;`01_Fact_Locks.md` 与章节笔记可作为回读来源。 +- 若人工编辑 Obsidian,建议保持现有 Markdown 表格和章节模板结构,否则 PP 回读可能无法识别。 + +## 自动监控 + +右侧 `NovelPro 测试区 -> 监控中心` 会聚合: + +- Obsidian 主记忆是否可回读。 +- 当前 Vault 路径、是否使用自定义路径,以及本机是否检测到 Obsidian 应用。 +- 长期事实、章节摘要和关系图数量。 +- 连续性巡检里的角色掉线、关系沉默、文风漂移、时间线冲突和大纲偏离。 +- 战力系统里的跳级过快、无代价越级和高战力缺限制等提醒。 + +监控中心也提供“同步当前章”按钮。它不会直接生成正文,只会把当前章节已经完成的章后记忆导出到 Obsidian;如果当前章还没有保存或章后管线还没形成摘要,会提示暂时无法同步。 + +## AI 填表建议 + +NovelPro 的手动表单已接入 `AI 生成建议`: + +- 口吻锁定:生成角色心理状态、口头禅、小动作和 OOC 禁忌。 +- 对话沙盒:生成角色语气锚点和试写场景。 +- 连续性巡检:生成关系事件和大纲节点状态。 +- 战力系统:生成战力规则、角色战力档案、战斗/升级事件。 +- 精细改稿:生成改稿目标、重点片段和作者要求。 + +这些建议只会填入表单,不会自动保存或自动采纳;作者仍保留最后确认权。 diff --git a/docs/novelpro-power-system.md b/docs/novelpro-power-system.md new file mode 100644 index 000000000..835923ee5 --- /dev/null +++ b/docs/novelpro-power-system.md @@ -0,0 +1,28 @@ +# NovelPro 战力系统规范 + +## 目标 + +用于系统文、游戏文、玄幻升级流等强规则题材,固定等级、能力、代价和战斗结果,避免后期出现战力崩坏。 + +## 核心原则 + +- 战力来自明确资源:等级、属性、技能熟练度、装备、血脉、职业或系统任务奖励。 +- 升级必须有代价或条件:经验、冷却、材料、任务、风险、消耗、负面状态至少占一项。 +- 越级胜利必须解释机制:克制、环境、情报差、一次性底牌、队友配合或对方限制。 +- 数值只服务剧情,不得随章临时发明新规则;新系统必须先登记规则再生效。 +- Boss 与副本要有门槛、奖励和失败代价,避免无损刷级导致战力通胀。 + +## 使用流程 + +1. 在工作台右栏打开 `战力系统`。 +2. 先保存作品的境界/等级表、核心规则、禁忌规则和升级节奏。 +3. 给主要角色建立战力档案,尤其要写清能力和限制。 +4. 每次大战、升级、副本、获得技能或受伤后,记录一条战力事件。 +5. 面板会提示高战力无弱点、单章跳升过快、疑似无代价越级等风险。 +6. 
写系统文或游戏文前,可复制“战力约束提示词”给外部模型或本地生成链路使用。 + +## 采纳规则 + +- 战力系统只记录规则和告警,不直接改写主稿。 +- 如果战力风险需要修稿,应通过候选稿/精修/外部稿导入链路处理。 +- 候选稿采纳后仍走主稿、快照和章后记忆更新流程。 diff --git a/docs/superpowers/plans/2026-04-30-style-bible-implementation-plan.md b/docs/superpowers/plans/2026-04-30-style-bible-implementation-plan.md new file mode 100644 index 000000000..5f975c701 --- /dev/null +++ b/docs/superpowers/plans/2026-04-30-style-bible-implementation-plan.md @@ -0,0 +1,888 @@ +# Style Bible Implementation Plan + +> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. + +**Goal:** Build a medium-scope writing-technique knowledge base that learns style, pacing, and craft rules from user-provided novel samples, saves them as editable style profiles, and injects selected profiles into chapter generation. + +**Architecture:** Add a new `style_bible` bounded context that follows the existing DDD layout: `domain/style_bible`, `application/style_bible`, SQLite repository, FastAPI routes, and a Vue workbench panel. The system stores raw samples and chunks, computes deterministic metrics, optionally asks the active LLM to extract technique cards, then composes a compact prompt overlay for chapter generation. + +**Tech Stack:** Python 3, FastAPI, SQLite, Vue 3 + TypeScript + Naive UI, existing LLM control/provider layer, existing prompt plaza and chapter workflow. + +--- + +## Scope + +### Included In MVP + +- Paste text samples into a Style Bible panel. +- Store sample metadata: title, source type, genre, scene type, POV, permission to use in generation. +- Split samples into chapters, scenes, and paragraphs using deterministic rules. +- Compute style metrics: sentence length, paragraph length, dialogue ratio, action/psychology/environment ratio, hook positions, pacing markers, AI-cliche hits. 
+- Generate an editable style profile with technique cards, rhythm rules, and forbidden patterns. +- Select one style profile during chapter generation. +- Inject a compact `style-bible-chapter-overlay` prompt block. +- Score generated chapters against the selected profile at a basic metrics level. + +### Explicitly Not Included In MVP + +- Model fine-tuning or LoRA training. +- Copyright enforcement beyond local metadata and permission flags. +- Full-text sample retrieval into every generation request. +- Multi-profile blending. +- Automatic ingestion from external novel websites. + +--- + +## File Map + +### Domain + +- Create `domain/style_bible/__init__.py` +- Create `domain/style_bible/entities.py` + - `StyleSample` + - `StyleSampleChunk` + - `StyleProfile` + - `StyleTechniqueCard` + - `StyleRule` +- Create `domain/style_bible/repositories.py` + - `StyleBibleRepository` + +### Application + +- Create `application/style_bible/__init__.py` +- Create `application/style_bible/dtos.py` +- Create `application/style_bible/services/text_splitter.py` +- Create `application/style_bible/services/style_metric_analyzer.py` +- Create `application/style_bible/services/style_profile_service.py` +- Create `application/style_bible/services/style_prompt_overlay_service.py` + +### Infrastructure + +- Create `infrastructure/persistence/database/sqlite_style_bible_repository.py` +- Modify `infrastructure/persistence/database/connection.py` +- Modify `infrastructure/persistence/database/schema.sql` +- Modify `infrastructure/ai/prompts/prompts_defaults.json` + +### Interfaces + +- Create `interfaces/api/v1/style_bible.py` +- Modify `interfaces/api/dependencies.py` +- Modify `interfaces/main.py` +- Modify `interfaces/api/v1/engine/generation.py` + +### Frontend + +- Create `frontend/src/api/styleBible.ts` +- Create `frontend/src/components/workbench/StyleBiblePanel.vue` +- Modify `frontend/src/components/workbench/SettingsPanel.vue` +- Modify 
`frontend/src/components/workbench/WorkArea.vue` if a top-level entry is needed. +- Modify the quick-generation modal or chapter generation request model to pass `style_profile_id`. + +### Tests + +- Create `tests/unit/domain/style_bible/test_style_bible_entities.py` +- Create `tests/unit/application/services/test_style_text_splitter.py` +- Create `tests/unit/application/services/test_style_metric_analyzer.py` +- Create `tests/unit/application/services/test_style_profile_service.py` +- Create `tests/unit/application/services/test_style_prompt_overlay_service.py` +- Create `tests/unit/infrastructure/database/test_sqlite_style_bible_repository.py` +- Create `tests/unit/interfaces/api/test_style_bible_api.py` +- Update `tests/unit/application/workflows/test_auto_novel_generation_workflow.py` + +--- + +## Data Model + +### `style_samples` + +Purpose: original user-provided sample metadata and raw content. + +Columns: + +- `id TEXT PRIMARY KEY` +- `novel_id TEXT DEFAULT ''` +- `profile_id TEXT DEFAULT ''` +- `title TEXT NOT NULL` +- `source_type TEXT DEFAULT 'reference'` +- `genre TEXT DEFAULT ''` +- `scene_type TEXT DEFAULT ''` +- `pov TEXT DEFAULT ''` +- `allowed_for_generation INTEGER DEFAULT 0` +- `content TEXT NOT NULL` +- `content_hash TEXT NOT NULL` +- `char_count INTEGER DEFAULT 0` +- `created_at TEXT DEFAULT ''` +- `updated_at TEXT DEFAULT ''` + +Index: + +- `idx_style_samples_novel ON style_samples(novel_id, created_at)` +- `idx_style_samples_profile ON style_samples(profile_id)` +- unique soft guard in repository by `content_hash + novel_id` + +### `style_sample_chunks` + +Purpose: deterministic chunks for analysis and later retrieval. 
+ +Columns: + +- `id TEXT PRIMARY KEY` +- `sample_id TEXT NOT NULL` +- `chunk_type TEXT NOT NULL` +- `chapter_number INTEGER DEFAULT 0` +- `sequence INTEGER NOT NULL` +- `title TEXT DEFAULT ''` +- `content TEXT NOT NULL` +- `char_count INTEGER DEFAULT 0` +- `metrics_json TEXT DEFAULT '{}'` +- `created_at TEXT DEFAULT ''` + +Chunk types: + +- `chapter` +- `scene` +- `paragraph` + +### `style_profiles` + +Purpose: editable style package used during generation. + +Columns: + +- `id TEXT PRIMARY KEY` +- `novel_id TEXT DEFAULT ''` +- `name TEXT NOT NULL` +- `description TEXT DEFAULT ''` +- `status TEXT DEFAULT 'active'` +- `profile_json TEXT DEFAULT '{}'` +- `metrics_json TEXT DEFAULT '{}'` +- `rules_json TEXT DEFAULT '[]'` +- `forbidden_patterns_json TEXT DEFAULT '[]'` +- `version INTEGER DEFAULT 1` +- `created_at TEXT DEFAULT ''` +- `updated_at TEXT DEFAULT ''` + +### `style_technique_cards` + +Purpose: actionable craft cards extracted from samples. + +Columns: + +- `id TEXT PRIMARY KEY` +- `profile_id TEXT NOT NULL` +- `title TEXT NOT NULL` +- `category TEXT DEFAULT ''` +- `scene_type TEXT DEFAULT ''` +- `rule_text TEXT NOT NULL` +- `example_summary TEXT DEFAULT ''` +- `prompt_instruction TEXT NOT NULL` +- `enabled INTEGER DEFAULT 1` +- `weight REAL DEFAULT 1.0` +- `created_at TEXT DEFAULT ''` +- `updated_at TEXT DEFAULT ''` + +Categories: + +- `pacing` +- `dialogue` +- `emotion` +- `conflict` +- `hook` +- `description` +- `anti_ai` + +--- + +## Prompt Overlay Contract + +Generated prompt block should be compact and deterministic: + +```text +【写作手法库】 +使用风格包:{profile_name} + +节奏约束: +- 平均句长:{avg_sentence_length} 字附近,关键动作可短句单独成段 +- 段落:每段 {paragraph_min}-{paragraph_max} 字为主,避免连续长段解释 +- 场景推进:每 {beat_interval_chars} 字出现一次信息、关系或目标变化 + +技法卡: +- {card_1.prompt_instruction} +- {card_2.prompt_instruction} +- {card_3.prompt_instruction} + +禁用项: +- {forbidden_pattern_1} +- {forbidden_pattern_2} + +执行要求: +- 只学习写法和节奏,不复刻样本文字、角色、设定或专有表达。 +- 本章必须服从当前小说 Bible、章节大纲和连续性约束。 +``` + 
+Initial card limit: max 6 enabled cards, sorted by scene type match, weight, and recency. + +--- + +## Task 1: Domain Entities And Repository Contract + +**Files:** + +- Create `domain/style_bible/__init__.py` +- Create `domain/style_bible/entities.py` +- Create `domain/style_bible/repositories.py` +- Test `tests/unit/domain/style_bible/test_style_bible_entities.py` + +- [ ] **Step 1: Write failing entity tests** + +Create tests for: + +- sample rejects empty content +- sample computes `char_count` +- profile starts as version 1 active +- technique card can be disabled without deleting it + +Run: + +```bash +uv run --with-requirements requirements.txt --with pytest python -m pytest tests/unit/domain/style_bible/test_style_bible_entities.py -q +``` + +Expected: fails because modules do not exist. + +- [ ] **Step 2: Implement minimal domain entities** + +Use dataclasses, matching the project’s lightweight domain style. Do not introduce ORM models. + +Required entity fields: + +- `StyleSample(id, title, content, source_type, genre, scene_type, pov, allowed_for_generation, novel_id, profile_id, content_hash, char_count, created_at, updated_at)` +- `StyleSampleChunk(id, sample_id, chunk_type, chapter_number, sequence, title, content, char_count, metrics, created_at)` +- `StyleProfile(id, name, description, status, novel_id, profile, metrics, rules, forbidden_patterns, version, created_at, updated_at)` +- `StyleTechniqueCard(id, profile_id, title, category, scene_type, rule_text, example_summary, prompt_instruction, enabled, weight, created_at, updated_at)` + +- [ ] **Step 3: Define repository protocol** + +Required methods: + +- `save_sample(sample, chunks)` +- `list_samples(novel_id=None, profile_id=None)` +- `get_sample(sample_id)` +- `save_profile(profile)` +- `list_profiles(novel_id=None, status=None)` +- `get_profile(profile_id)` +- `save_technique_cards(profile_id, cards)` +- `list_technique_cards(profile_id, enabled=None)` +- `update_technique_card(card)` + +- [ 
] **Step 4: Run tests** + +Expected: entity tests pass. + +- [ ] **Step 5: Commit** + +```bash +git add domain/style_bible tests/unit/domain/style_bible +git commit -m "feat: add style bible domain model" +``` + +--- + +## Task 2: SQLite Persistence And Migrations + +**Files:** + +- Create `infrastructure/persistence/database/sqlite_style_bible_repository.py` +- Modify `infrastructure/persistence/database/connection.py` +- Modify `infrastructure/persistence/database/schema.sql` +- Test `tests/unit/infrastructure/database/test_sqlite_style_bible_repository.py` + +- [ ] **Step 1: Write repository tests** + +Cover: + +- saving a sample with chunks +- duplicate `content_hash + novel_id` does not create duplicate samples +- saving and listing profiles +- saving, disabling, and listing technique cards +- old empty database creates tables on startup + +Run: + +```bash +uv run --with-requirements requirements.txt --with pytest python -m pytest tests/unit/infrastructure/database/test_sqlite_style_bible_repository.py -q +``` + +Expected: fail because repository does not exist. + +- [ ] **Step 2: Add schema** + +Add `CREATE TABLE IF NOT EXISTS` blocks for: + +- `style_samples` +- `style_sample_chunks` +- `style_profiles` +- `style_technique_cards` + +Add indexes listed in Data Model. + +- [ ] **Step 3: Add startup migration guards** + +In `connection.py`, add `_ensure_style_bible_tables(conn)` after existing topic and NovelPro table guards. It must be idempotent for old databases. + +- [ ] **Step 4: Implement SQLite repository** + +Use explicit JSON serialization for `metrics`, `profile`, `rules`, `forbidden_patterns`. + +Repository must call `conn.commit()` after writes. + +- [ ] **Step 5: Run repository tests** + +Expected: pass. 
+ +- [ ] **Step 6: Commit** + +```bash +git add infrastructure/persistence/database tests/unit/infrastructure/database/test_sqlite_style_bible_repository.py +git commit -m "feat: persist style bible samples and profiles" +``` + +--- + +## Task 3: Deterministic Text Splitting And Metrics + +**Files:** + +- Create `application/style_bible/services/text_splitter.py` +- Create `application/style_bible/services/style_metric_analyzer.py` +- Test `tests/unit/application/services/test_style_text_splitter.py` +- Test `tests/unit/application/services/test_style_metric_analyzer.py` + +- [ ] **Step 1: Write splitter tests** + +Cover: + +- recognizes headings like `第十二章 标题` +- falls back to one chapter when no heading exists +- splits paragraphs by blank lines +- preserves original order and sequence + +- [ ] **Step 2: Implement splitter** + +Rules: + +- Chapter heading regex: `第[一二三四五六七八九十百千万0-9]+章` +- Paragraph split: one or more blank lines first, then long paragraph fallback. +- Scene split MVP: paragraph groups of 1200-2500 Chinese chars, not semantic LLM split. + +- [ ] **Step 3: Write metric tests** + +Use a short Chinese fixture with narration, dialogue, action, psychology, and environment text. + +Expected metrics: + +- `avg_sentence_length > 0` +- `dialogue_ratio > 0` +- `paragraph_count` +- `cliche_hit_count` +- ratio keys exist: `action_ratio`, `psychology_ratio`, `environment_ratio` + +- [ ] **Step 4: Implement metrics** + +Use deterministic heuristics: + +- Dialogue: Chinese quote pairs `“...”`, `「...」`, or lines containing `说/问/答/喊/低声`. +- Psychology: `想/觉得/意识到/心里/脑海/明白/害怕/犹豫`. +- Action: verbs such as `走/推/拉/抬/转/握/冲/停/看/拿`. +- Environment: `雨/风/灯/门/窗/街/夜/屋/楼/光/影`. +- AI cliches: reuse `ClicheScanner`. 
+ +- [ ] **Step 5: Run tests** + +```bash +uv run --with-requirements requirements.txt --with pytest python -m pytest \ + tests/unit/application/services/test_style_text_splitter.py \ + tests/unit/application/services/test_style_metric_analyzer.py \ + -q +``` + +- [ ] **Step 6: Commit** + +```bash +git add application/style_bible tests/unit/application/services/test_style_text_splitter.py tests/unit/application/services/test_style_metric_analyzer.py +git commit -m "feat: analyze style bible text samples" +``` + +--- + +## Task 4: Profile Service And Technique Card Extraction + +**Files:** + +- Create `application/style_bible/dtos.py` +- Create `application/style_bible/services/style_profile_service.py` +- Test `tests/unit/application/services/test_style_profile_service.py` + +- [ ] **Step 1: Write service tests** + +Cover: + +- `import_sample` saves sample, chunks, metrics, and profile when requested. +- `generate_profile_from_samples` produces a profile with deterministic fallback cards when LLM is unavailable. +- LLM JSON fields are normalized when arrays/objects appear where strings are expected. +- disabled sample with `allowed_for_generation=False` can be analyzed but does not get selected by overlay service. 
+ +- [ ] **Step 2: Define DTOs** + +Create: + +- `StyleSampleImportRequestDTO` +- `StyleSampleDTO` +- `StyleChunkDTO` +- `StyleProfileDTO` +- `StyleTechniqueCardDTO` +- `StyleProfileGenerateRequestDTO` +- `StyleProfileMatchReportDTO` + +- [ ] **Step 3: Implement service** + +The service coordinates: + +- hash content +- split text +- compute metrics per chunk and aggregate metrics +- save sample/chunks +- create or update profile +- generate fallback technique cards + +Fallback cards must include: + +- pacing card from paragraph/sentence metrics +- dialogue card from dialogue ratio +- anti-AI card from cliche hits +- hook card from chapter ending pattern + +- [ ] **Step 4: Add optional LLM card extraction** + +Use existing LLM provider dependency if available. Prompt must ask for JSON with: + +```json +{ + "profile_summary": "string", + "rhythm_rules": ["string"], + "forbidden_patterns": ["string"], + "technique_cards": [ + { + "title": "string", + "category": "pacing", + "scene_type": "dialogue", + "rule_text": "string", + "example_summary": "string", + "prompt_instruction": "string" + } + ] +} +``` + +If parsing fails, keep deterministic fallback. 
+ +- [ ] **Step 5: Run tests** + +```bash +uv run --with-requirements requirements.txt --with pytest python -m pytest tests/unit/application/services/test_style_profile_service.py -q +``` + +- [ ] **Step 6: Commit** + +```bash +git add application/style_bible tests/unit/application/services/test_style_profile_service.py +git commit -m "feat: generate style bible profiles" +``` + +--- + +## Task 5: Prompt Overlay And Chapter Generation Integration + +**Files:** + +- Create `application/style_bible/services/style_prompt_overlay_service.py` +- Modify `application/workflows/auto_novel_generation_workflow.py` +- Modify `infrastructure/ai/prompts/prompts_defaults.json` +- Modify `interfaces/api/v1/engine/generation.py` +- Test `tests/unit/application/services/test_style_prompt_overlay_service.py` +- Update `tests/unit/application/workflows/test_auto_novel_generation_workflow.py` + +- [ ] **Step 1: Write overlay tests** + +Cover: + +- no selected profile returns empty overlay +- selected profile builds compact block +- disabled cards are excluded +- scene type matching ranks cards first +- overlay includes “do not copy sample text” safety instruction + +- [ ] **Step 2: Implement overlay service** + +Inputs: + +- `novel_id` +- `style_profile_id` +- `scene_type` +- optional `max_cards=6` + +Output: + +- plain string prompt block +- profile metadata for UI/debug + +- [ ] **Step 3: Add prompt plaza node** + +Add `style-bible-chapter-overlay` to `prompts_defaults.json` with variables: + +- `{style_overlay}` required +- `{scene_type}` optional + +This node is not a standalone generator. It is a reusable prompt fragment. + +- [ ] **Step 4: Wire chapter generation** + +Extend request DTOs to accept: + +- `style_profile_id?: string` +- `scene_type?: string` + +Inject overlay into `context` or a dedicated runtime variable before prompt build. 
+ +- [ ] **Step 5: Run tests** + +```bash +uv run --with-requirements requirements.txt --with pytest python -m pytest \ + tests/unit/application/services/test_style_prompt_overlay_service.py \ + tests/unit/application/workflows/test_auto_novel_generation_workflow.py::TestBuildPrompt \ + -q +python3 -m json.tool infrastructure/ai/prompts/prompts_defaults.json >/dev/null +``` + +- [ ] **Step 6: Commit** + +```bash +git add application/style_bible application/workflows/auto_novel_generation_workflow.py infrastructure/ai/prompts/prompts_defaults.json interfaces/api/v1/engine/generation.py tests/unit/application/services/test_style_prompt_overlay_service.py tests/unit/application/workflows/test_auto_novel_generation_workflow.py +git commit -m "feat: inject style bible into chapter generation" +``` + +--- + +## Task 6: Style Bible API + +**Files:** + +- Create `interfaces/api/v1/style_bible.py` +- Modify `interfaces/api/dependencies.py` +- Modify `interfaces/main.py` +- Test `tests/unit/interfaces/api/test_style_bible_api.py` + +- [ ] **Step 1: Write API tests** + +Cover: + +- import sample +- list samples +- create/generate profile +- list profiles +- get profile detail with cards +- update technique card enabled/rule text +- build overlay preview + +- [ ] **Step 2: Add dependency constructors** + +Add: + +- `get_style_bible_repository` +- `get_style_profile_service` +- `get_style_prompt_overlay_service` + +Reuse: + +- active LLM provider service if already available +- `ClicheScanner` + +- [ ] **Step 3: Add routes** + +Routes: + +```http +POST /api/v1/style-bible/samples +GET /api/v1/style-bible/samples +GET /api/v1/style-bible/samples/{sample_id} +POST /api/v1/style-bible/profiles +GET /api/v1/style-bible/profiles +GET /api/v1/style-bible/profiles/{profile_id} +PATCH /api/v1/style-bible/profiles/{profile_id} +PATCH /api/v1/style-bible/cards/{card_id} +POST /api/v1/style-bible/overlay/preview +``` + +- [ ] **Step 4: Register router** + +In `interfaces/main.py`, include 
the router under `/api/v1`. + +- [ ] **Step 5: Run tests** + +```bash +uv run --with-requirements requirements.txt --with pytest python -m pytest tests/unit/interfaces/api/test_style_bible_api.py -q +``` + +- [ ] **Step 6: Commit** + +```bash +git add interfaces/api/v1/style_bible.py interfaces/api/dependencies.py interfaces/main.py tests/unit/interfaces/api/test_style_bible_api.py +git commit -m "feat: expose style bible api" +``` + +--- + +## Task 7: Frontend Panel + +**Files:** + +- Create `frontend/src/api/styleBible.ts` +- Create `frontend/src/components/workbench/StyleBiblePanel.vue` +- Modify `frontend/src/components/workbench/SettingsPanel.vue` +- Modify chapter generation controls to select `style_profile_id` + +- [ ] **Step 1: Add API client** + +Types: + +- `StyleSample` +- `StyleProfile` +- `StyleTechniqueCard` +- `StyleOverlayPreview` + +Methods: + +- `importSample` +- `listSamples` +- `listProfiles` +- `getProfile` +- `createProfile` +- `updateCard` +- `previewOverlay` + +- [ ] **Step 2: Add panel layout** + +Panel tabs: + +- `样本` +- `画像` +- `技法卡` +- `注入预览` + +Rules: + +- No nested cards. +- Dense operational UI, not a landing page. +- Long text areas must have fixed min/max height. + +- [ ] **Step 3: Add sample import form** + +Fields: + +- title required +- source type +- genre +- scene type +- POV +- allowed for generation checkbox +- content textarea + +- [ ] **Step 4: Add profile/cards UI** + +Allow: + +- list profiles +- inspect metrics +- edit card instruction +- enable/disable cards +- preview overlay + +- [ ] **Step 5: Wire generation selection** + +In the quick generation area, add: + +- style profile select +- scene type select/input + +Pass values into generation request. + +- [ ] **Step 6: Build frontend** + +```bash +cd frontend +npm run build +``` + +Expected: build passes. 
+ +- [ ] **Step 7: Commit** + +```bash +git add frontend/src/api/styleBible.ts frontend/src/components/workbench/StyleBiblePanel.vue frontend/src/components/workbench/SettingsPanel.vue frontend/src +git commit -m "feat: add style bible workbench panel" +``` + +--- + +## Task 8: Match Report And Feedback Loop + +**Files:** + +- Extend `application/style_bible/services/style_metric_analyzer.py` +- Extend `application/style_bible/services/style_profile_service.py` +- Extend `interfaces/api/v1/style_bible.py` +- Extend `frontend/src/components/workbench/StyleBiblePanel.vue` +- Test `tests/unit/application/services/test_style_profile_service.py` + +- [ ] **Step 1: Write match report tests** + +Given generated text and a profile, assert report includes: + +- overall score +- sentence length status +- paragraph rhythm status +- dialogue ratio status +- cliche hit status +- actionable suggestions + +- [ ] **Step 2: Implement report** + +Scoring MVP: + +- sentence length within 20% -> full score for that dimension +- paragraph average within 25% -> full score +- dialogue ratio within 0.1 absolute -> full score +- cliche hits below threshold -> full score + +- [ ] **Step 3: Add API** + +```http +POST /api/v1/style-bible/profiles/{profile_id}/match +``` + +Body: + +```json +{ + "content": "generated chapter text", + "scene_type": "dialogue" +} +``` + +- [ ] **Step 4: Add UI** + +Add match report action in `注入预览` tab. 
+ +- [ ] **Step 5: Run tests and build** + +```bash +uv run --with-requirements requirements.txt --with pytest python -m pytest tests/unit/application/services/test_style_profile_service.py tests/unit/interfaces/api/test_style_bible_api.py -q +cd frontend && npm run build +``` + +- [ ] **Step 6: Commit** + +```bash +git add application/style_bible interfaces/api/v1/style_bible.py frontend/src/components/workbench/StyleBiblePanel.vue tests/unit/application/services/test_style_profile_service.py tests/unit/interfaces/api/test_style_bible_api.py +git commit -m "feat: score chapters against style bible profiles" +``` + +--- + +## Task 9: Documentation And Verification + +**Files:** + +- Modify `docs/NOVELPRO_README.md` +- Add `docs/style-bible-guide.md` +- Update this plan with final verification notes if behavior differs. + +- [ ] **Step 1: Add user guide** + +Guide must cover: + +- What sample text is used for. +- Recommended sample size: 3-10 chapters for stable rhythm, 1 chapter for quick tests. +- Why generated prompt overlay does not copy original text. +- How to create a profile. +- How to edit cards. +- How to select profile in chapter generation. +- How to interpret match report. 
+ +- [ ] **Step 2: Add developer notes** + +Include: + +- schema tables +- API list +- prompt overlay contract +- test commands + +- [ ] **Step 3: Run full focused verification** + +```bash +python3 -m compileall -q domain/style_bible application/style_bible infrastructure/persistence/database/sqlite_style_bible_repository.py interfaces/api/v1/style_bible.py +uv run --with-requirements requirements.txt --with pytest python -m pytest \ + tests/unit/domain/style_bible \ + tests/unit/application/services/test_style_text_splitter.py \ + tests/unit/application/services/test_style_metric_analyzer.py \ + tests/unit/application/services/test_style_profile_service.py \ + tests/unit/application/services/test_style_prompt_overlay_service.py \ + tests/unit/infrastructure/database/test_sqlite_style_bible_repository.py \ + tests/unit/interfaces/api/test_style_bible_api.py \ + tests/unit/application/workflows/test_auto_novel_generation_workflow.py::TestBuildPrompt \ + -q +cd frontend && npm run build +``` + +- [ ] **Step 4: Commit** + +```bash +git add docs/NOVELPRO_README.md docs/style-bible-guide.md docs/superpowers/plans/2026-04-30-style-bible-implementation-plan.md +git commit -m "docs: document style bible workflow" +``` + +--- + +## Rollout Order + +1. Local backend tests only. +2. Local frontend build. +3. Local API smoke test with one pasted chapter. +4. Deploy to Baota. +5. Run online smoke test: + - create sample + - generate profile + - preview overlay + - generate chapter with profile + - run match report +6. Update memory with known issues and verification status. + +--- + +## Risk Controls + +- Do not put original sample text into every generation context. +- Prompt overlay must include “do not copy sample text, characters, settings, or proper nouns.” +- Keep raw sample storage local SQLite only. +- Do not expose raw samples through public unauthenticated endpoints beyond existing app access model. 
+- Keep LLM extraction optional; deterministic fallback must work without API key. +- Keep generation request backward compatible when `style_profile_id` is empty. + +--- + +## Self-Review Notes + +- Spec coverage: sample ingestion, analysis, profile/card storage, prompt injection, frontend, and match report are each mapped to implementation tasks. +- Placeholder scan: no intentionally deferred MVP requirements remain; non-MVP items are explicitly excluded. +- Type consistency: file names and service names use `style_bible` across domain, application, API, and frontend API client. diff --git a/docs/superpowers/plans/2026-05-02-story-studio-pipeline-phase1.md b/docs/superpowers/plans/2026-05-02-story-studio-pipeline-phase1.md new file mode 100644 index 000000000..04cd198f0 --- /dev/null +++ b/docs/superpowers/plans/2026-05-02-story-studio-pipeline-phase1.md @@ -0,0 +1,1009 @@ +# Story Studio Pipeline Phase 1 Implementation Plan + +> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. + +**Goal:** Build the phase-one Story Studio Pipeline: chapter contract, scene-package show-don't-tell fields, prompt injection, scene-budget execution, and editorial review `showing` score. + +**Architecture:** Extend the existing `AutoNovelGenerationWorkflow` and generation API contracts without adding new database tables. Keep the normal generation button intact: strategy preview returns richer data, generation consumes it through existing `chapter_strategy`, and editorial review adds one score dimension while remaining backward compatible. + +**Tech Stack:** Python 3.12, FastAPI, Pydantic, pytest, Vue 3 + TypeScript + Naive UI, existing PP workflow and LLM routing. + +--- + +## Scope + +Included: + +- `chapter_contract` in strategy preview. 
+- Scene package fields: `visible_action`, `subtext_dialogue`, `unspoken_emotion`, `object_or_clue_change`. +- A reusable "展示优先写作协议" prompt block. +- Strategy overlay and scene-budget overlay include show-don't-tell constraints. +- Editorial review adds `showing` score. +- Frontend types and review score display support the new fields. + +Not included: + +- Multi-candidate generation. +- Candidate selector. +- Local reroll. +- Long-draft splitting improvements. +- New persistent tables. + +## File Map + +Modify: + +- `application/workflows/auto_novel_generation_workflow.py` + - Add show-don't-tell protocol helper. + - Expand strategy prompt JSON contract. + - Normalize `chapter_contract` and scene package fields. + - Inject richer strategy/scene overlays into generation prompts. + - Add editorial review `showing` score. +- `interfaces/api/v1/engine/generation.py` + - Add Pydantic response models for chapter contract and scene package fields. + - Add `showing` to editorial review scores. +- `frontend/src/api/workflow.ts` + - Add TypeScript fields. +- `frontend/src/components/workbench/WorkArea.vue` + - Display new `showing` score automatically through existing score grid. + - Update score label helper. +- `tests/unit/application/workflows/test_auto_novel_generation_workflow.py` + - Unit tests for normalization, overlays, and editorial score. +- `tests/integration/interfaces/api/v1/test_generation_api.py` + - API contract tests for strategy preview and editorial review. 
+ +Optional docs update if implementation changes user-visible behavior: + +- `docs/NOVELPRO_README.md` + +--- + +## Task 1: Backend strategy payload supports chapter contract + +**Files:** + +- Modify: `tests/unit/application/workflows/test_auto_novel_generation_workflow.py` +- Modify: `application/workflows/auto_novel_generation_workflow.py` + +- [ ] **Step 1: Add failing unit test for `chapter_contract` normalization** + +Add this test near the existing strategy tests: + +```python +def test_normalize_strategy_payload_includes_chapter_contract(self, workflow): + payload = workflow._normalize_strategy_payload( + { + "chapter_contract": { + "chapter_question": "灰卡为什么还能刷开门禁?", + "protagonist_want": "白雨翔要确认 774 写卡器是否被截留。", + "opposition": "许照只交出部分证据。", + "reader_expectation": "看到两个人从对抗到有限合作。", + "required_information_change": "签收记录从嫌疑证据变成伪造证据。", + "required_relationship_change": "白雨翔和许照互相保留但开始交换证据。", + "ending_question": "操盘者是否借用了内部审计流程?", + "show_dont_tell_rules": [ + "不能写白雨翔感到怀疑,只能写他追问和扣住证物。", + "对白不能每句完整回答,允许反问和避重就轻。", + ], + }, + "scene_plan": [ + { + "label": "核对签收单", + "task": "逼出 774 的异常入库记录", + "resistance": "许照不给完整文件", + "info_shift": "扫描件的模糊签名变成疑点", + "relationship_shift": "两人从互相试探进入有限合作", + "anchor": "证物袋和灰卡划痕", + "hook": "签名不属于当前习惯", + "target_words": 900, + "visible_action": "白雨翔把证物袋封口按住,不让许照立刻收走。", + "subtext_dialogue": "表面问流程,实际确认许照掌握多少证据。", + "unspoken_emotion": "怀疑和防备不能直说。", + "object_or_clue_change": "灰卡从拾获物变成伪造链条证据。", + } + ], + "writing_focus": ["少解释,多用动作和证物推进。"], + }, + outline="白雨翔追查灰卡。", + target_word_count=2500, + word_tolerance_ratio=0.05, + ) + + contract = payload["chapter_contract"] + assert contract["chapter_question"] == "灰卡为什么还能刷开门禁?" 
+ assert "扣住证物" in contract["show_dont_tell_rules"][0] + + scene = payload["scene_plan"][0] + assert scene["visible_action"].startswith("白雨翔把证物袋") + assert scene["subtext_dialogue"].startswith("表面问流程") + assert scene["unspoken_emotion"] == "怀疑和防备不能直说。" + assert scene["object_or_clue_change"].startswith("灰卡从拾获物") +``` + +- [ ] **Step 2: Run the test and verify it fails** + +Run: + +```bash +uv run pytest -q tests/unit/application/workflows/test_auto_novel_generation_workflow.py -k "chapter_contract" +``` + +Expected: fails with missing `chapter_contract` or missing scene fields. + +- [ ] **Step 3: Implement strategy normalization** + +In `application/workflows/auto_novel_generation_workflow.py`, add helper methods near `_normalize_strategy_payload`: + +```python + @staticmethod + def _clean_text(value: Any, fallback: str) -> str: + text = str(value or "").strip() + return text or fallback + + @staticmethod + def _clean_text_list(value: Any, fallback: List[str], *, limit: int = 4) -> List[str]: + if isinstance(value, list): + cleaned = [str(item).strip() for item in value if str(item).strip()] + if cleaned: + return cleaned[:limit] + return fallback[:limit] +``` + +Then in `_normalize_strategy_payload`, before `return`, build: + +```python + raw_contract = data.get("chapter_contract") if isinstance(data.get("chapter_contract"), dict) else {} + chapter_contract = { + "chapter_question": AutoNovelGenerationWorkflow._clean_text( + raw_contract.get("chapter_question"), + "本章的关键问题必须在具体行动中被推进。", + ), + "protagonist_want": AutoNovelGenerationWorkflow._clean_text( + raw_contract.get("protagonist_want"), + dramatic.get("goal") or outline[:36] or "主角要确认一条关键线索。", + ), + "opposition": AutoNovelGenerationWorkflow._clean_text( + raw_contract.get("opposition"), + dramatic.get("obstacle") or "有人或流程阻碍主角。", + ), + "reader_expectation": AutoNovelGenerationWorkflow._clean_text( + raw_contract.get("reader_expectation"), + dramatic.get("reader_expectation") or "读者要看到冲突推进,而不是解释背景。", + ), + 
"required_information_change": AutoNovelGenerationWorkflow._clean_text( + raw_contract.get("required_information_change"), + "至少交付一条会改变判断的新信息。", + ), + "required_relationship_change": AutoNovelGenerationWorkflow._clean_text( + raw_contract.get("required_relationship_change"), + "至少让主要人物的立场或信任关系发生细微变化。", + ), + "ending_question": AutoNovelGenerationWorkflow._clean_text( + raw_contract.get("ending_question"), + dramatic.get("ending_hook") or "章末留下新的追问。", + ), + "show_dont_tell_rules": AutoNovelGenerationWorkflow._clean_text_list( + raw_contract.get("show_dont_tell_rules"), + [ + "不能直接命名复杂情绪,必须写动作、停顿、回避或身体反应。", + "不能用总结句跳过冲突过程,必须让读者看到试探和阻力。", + "对白不能每句都完整礼貌,允许打断、反问、答非所问。", + ], + limit=5, + ), + } +``` + +In each normalized scene dict, add: + +```python + "visible_action": AutoNovelGenerationWorkflow._clean_text( + item.get("visible_action"), + str(item.get("anchor") or "用一个具体动作承载情绪和信息。"), + ), + "subtext_dialogue": AutoNovelGenerationWorkflow._clean_text( + item.get("subtext_dialogue"), + "对白表面推进事实,底层保留试探、遮掩或误判。", + ), + "unspoken_emotion": AutoNovelGenerationWorkflow._clean_text( + item.get("unspoken_emotion"), + "不要直接命名情绪,用动作和反应表现。", + ), + "object_or_clue_change": AutoNovelGenerationWorkflow._clean_text( + item.get("object_or_clue_change"), + "本场景至少让一个线索、道具或判断发生变化。", + ), +``` + +In fallback scenes, add the same four keys with concrete fallback strings. + +Finally include `chapter_contract` in the returned dict: + +```python + "chapter_contract": chapter_contract, +``` + +- [ ] **Step 4: Run the test and verify it passes** + +Run: + +```bash +uv run pytest -q tests/unit/application/workflows/test_auto_novel_generation_workflow.py -k "chapter_contract" +``` + +Expected: selected test passes. 
+ +- [ ] **Step 5: Commit** + +```bash +git add application/workflows/auto_novel_generation_workflow.py tests/unit/application/workflows/test_auto_novel_generation_workflow.py +git commit -m "feat: add chapter contract strategy payload" +``` + +--- + +## Task 2: Strategy prompt requests show-don't-tell fields + +**Files:** + +- Modify: `tests/unit/application/workflows/test_auto_novel_generation_workflow.py` +- Modify: `application/workflows/auto_novel_generation_workflow.py` + +- [ ] **Step 1: Add failing unit test for strategy prompt schema** + +Add: + +```python +def test_build_strategy_prompt_requests_show_dont_tell_contract(self, workflow): + prompt = workflow._build_strategy_prompt( + context="CTX", + outline="白雨翔追查灰卡。", + target_word_count=2500, + word_tolerance_ratio=0.05, + ) + + assert "chapter_contract" in prompt.system + assert "show_dont_tell_rules" in prompt.system + assert "visible_action" in prompt.system + assert "subtext_dialogue" in prompt.system + assert "unspoken_emotion" in prompt.system + assert "object_or_clue_change" in prompt.system + assert "少解释,多展示" in prompt.system +``` + +- [ ] **Step 2: Run the test and verify it fails** + +Run: + +```bash +uv run pytest -q tests/unit/application/workflows/test_auto_novel_generation_workflow.py -k "strategy_prompt_requests" +``` + +Expected: fails because prompt schema does not include the new fields. 
+ +- [ ] **Step 3: Update `_build_strategy_prompt`** + +Replace the JSON structure section inside `_build_strategy_prompt` with a schema containing: + +```text +{ + "chapter_contract": { + "chapter_question": "本章读者最想知道的问题", + "protagonist_want": "主角最具体想拿到/确认/避免什么", + "opposition": "谁或什么阻碍他", + "reader_expectation": "读者期待看到的具体场面", + "required_information_change": "本章必须交付的信息变化", + "required_relationship_change": "本章必须发生的人物关系变化", + "ending_question": "章末留下的追问", + "show_dont_tell_rules": ["本章禁止直说的情绪/动机/解释,改用动作、停顿、物件、对白表现"] + }, + "dramatic_task": { + "goal": "角色这章最具体想拿到/确认/隐瞒什么", + "obstacle": "谁或什么阻碍他", + "reader_expectation": "读者这一章最期待看到什么兑现", + "ending_hook": "章末要留下什么追读钩子" + }, + "scene_plan": [ + { + "label": "场景标题", + "task": "这个场景的任务", + "resistance": "阻力", + "info_shift": "新信息或局势变化", + "relationship_shift": "人物关系变化,没有就写无明显变化", + "anchor": "一个具体物件/动作/地点锚点", + "visible_action": "必须出现的具体动作", + "subtext_dialogue": "对白表面内容和真实意图", + "unspoken_emotion": "不能直说的情绪", + "object_or_clue_change": "道具或线索状态变化", + "hook": "场景结尾钩子", + "target_words": 800 + } + ], + "writing_focus": ["3-4 条执行提醒"] +} +``` + +Add to hard requirements: + +```text +6. 展示优先:少解释,多展示;少总结,多动作和细节;少金句,多具体反应。 +7. 不要直接写“复杂情绪”,必须要求正文通过动作、停顿、回避、物件处理来表现。 +8. 对话不要每句都完整、礼貌、逻辑闭环;允许打断、反问、避重就轻。 +``` + +- [ ] **Step 4: Run the test and verify it passes** + +Run: + +```bash +uv run pytest -q tests/unit/application/workflows/test_auto_novel_generation_workflow.py -k "strategy_prompt_requests" +``` + +Expected: PASS. 
+ +- [ ] **Step 5: Commit** + +```bash +git add application/workflows/auto_novel_generation_workflow.py tests/unit/application/workflows/test_auto_novel_generation_workflow.py +git commit -m "feat: request showing-first strategy fields" +``` + +--- + +## Task 3: Generation overlays enforce showing-first execution + +**Files:** + +- Modify: `tests/unit/application/workflows/test_auto_novel_generation_workflow.py` +- Modify: `application/workflows/auto_novel_generation_workflow.py` + +- [ ] **Step 1: Add failing overlay tests** + +Add: + +```python +def test_build_strategy_overlay_includes_show_dont_tell_contract(self, workflow): + overlay = workflow._build_strategy_overlay( + { + "chapter_contract": { + "chapter_question": "灰卡是谁写入的?", + "protagonist_want": "白雨翔要确认写卡器来源。", + "opposition": "许照只给半份证据。", + "reader_expectation": "看到两人互相试探。", + "required_information_change": "伪造签名暴露。", + "required_relationship_change": "形成有限合作。", + "ending_question": "谁借用了审计流程?", + "show_dont_tell_rules": ["不能写他感到怀疑,只能写他扣住证物。"], + }, + "dramatic_task": { + "goal": "确认写卡器来源", + "obstacle": "许照保留证据", + "reader_expectation": "看到试探", + "ending_hook": "审计流程异常", + }, + "scene_plan": [], + "writing_focus": [], + } + ) + + assert "章节合同" in overlay + assert "灰卡是谁写入的" in overlay + assert "展示优先" in overlay + assert "扣住证物" in overlay + + +def test_build_scene_budget_overlay_includes_showing_fields(self, workflow): + overlay = workflow._build_scene_budget_overlay( + { + "label": "核对签收单", + "task": "确认签名真伪", + "resistance": "许照不交原件", + "info_shift": "签名疑点出现", + "relationship_shift": "有限合作", + "anchor": "证物袋", + "visible_action": "白雨翔按住证物袋封口。", + "subtext_dialogue": "表面问流程,实际逼许照露底。", + "unspoken_emotion": "怀疑不能直说。", + "object_or_clue_change": "灰卡变成伪造链条证据。", + "hook": "审计流程异常", + "target_words": 800, + "min_words": 720, + "max_words": 880, + } + ) + + assert "白雨翔按住证物袋封口" in overlay + assert "表面问流程" in overlay + assert "怀疑不能直说" in overlay + assert "灰卡变成伪造链条证据" in overlay +``` + +- [ ] **Step 2: Run tests 
and verify failure** + +Run: + +```bash +uv run pytest -q tests/unit/application/workflows/test_auto_novel_generation_workflow.py -k "show_dont_tell_contract or showing_fields" +``` + +Expected: fails before overlay implementation. + +- [ ] **Step 3: Update `_resolve_scene_budget_plan` and `_build_scene_budget_overlay`** + +In `_resolve_scene_budget_plan`, copy these keys into each normalized scene: + +```python + visible_action = str(item.get("visible_action") or item.get("anchor") or "用具体动作推进").strip() or "用具体动作推进" + subtext_dialogue = str(item.get("subtext_dialogue") or "对白必须有试探、遮掩或信息差").strip() or "对白必须有试探、遮掩或信息差" + unspoken_emotion = str(item.get("unspoken_emotion") or "情绪不能直说").strip() or "情绪不能直说" + object_or_clue_change = str(item.get("object_or_clue_change") or "线索或道具状态必须变化").strip() or "线索或道具状态必须变化" +``` + +Add to the scene dict: + +```python + "visible_action": visible_action, + "subtext_dialogue": subtext_dialogue, + "unspoken_emotion": unspoken_emotion, + "object_or_clue_change": object_or_clue_change, +``` + +In `_build_scene_budget_overlay`, read these keys and append lines: + +```python + visible_action = str(scene_hint.get("visible_action") or "用具体动作推进").strip() or "用具体动作推进" + subtext_dialogue = str(scene_hint.get("subtext_dialogue") or "对白保留潜台词").strip() or "对白保留潜台词" + unspoken_emotion = str(scene_hint.get("unspoken_emotion") or "情绪不能直说").strip() or "情绪不能直说" + object_or_clue_change = str(scene_hint.get("object_or_clue_change") or "线索或道具状态变化").strip() or "线索或道具状态变化" +``` + +Add lines in the returned block: + +```python + f"- 可见动作:{visible_action}\n" + f"- 潜台词对白:{subtext_dialogue}\n" + f"- 未说出口的情绪:{unspoken_emotion}\n" + f"- 道具/线索变化:{object_or_clue_change}\n" +``` + +- [ ] **Step 4: Update `_build_strategy_overlay`** + +At the start of `_build_strategy_overlay`, read: + +```python + contract = chapter_strategy.get("chapter_contract") or {} +``` + +After `lines = ["【本章写作策略(已确认,必须执行)】"]`, add: + +```python + if isinstance(contract, dict) and contract: 
+ lines.extend([ + "章节合同:", + f"- 本章问题:{str(contract.get('chapter_question') or '未说明').strip()}", + f"- 主角想要:{str(contract.get('protagonist_want') or '未说明').strip()}", + f"- 阻力来源:{str(contract.get('opposition') or '未说明').strip()}", + f"- 信息变化:{str(contract.get('required_information_change') or '未说明').strip()}", + f"- 关系变化:{str(contract.get('required_relationship_change') or '未说明').strip()}", + f"- 章末追问:{str(contract.get('ending_question') or '未说明').strip()}", + "展示优先:", + ]) + rules = contract.get("show_dont_tell_rules") if isinstance(contract.get("show_dont_tell_rules"), list) else [] + for rule in rules[:5]: + text = str(rule or "").strip() + if text: + lines.append(f"- {text}") +``` + +In the scene line, include `visible_action`, `subtext_dialogue`, and `unspoken_emotion`: + +```python + visible_action = str(scene.get("visible_action") or scene.get("anchor") or "未说明").strip() + subtext_dialogue = str(scene.get("subtext_dialogue") or "未说明").strip() + unspoken_emotion = str(scene.get("unspoken_emotion") or "未说明").strip() + clue_change = str(scene.get("object_or_clue_change") or "未说明").strip() + lines.append( + f"{index}. {title}|任务:{task}|阻力:{resistance}|变化:{info_shift}|关系:{relation_shift}|动作:{visible_action}|潜台词:{subtext_dialogue}|不直说:{unspoken_emotion}|线索/道具:{clue_change}|钩子:{hook}" + ) +``` + +- [ ] **Step 5: Run tests and verify they pass** + +Run: + +```bash +uv run pytest -q tests/unit/application/workflows/test_auto_novel_generation_workflow.py -k "show_dont_tell_contract or showing_fields" +``` + +Expected: PASS. 
+ +- [ ] **Step 6: Commit** + +```bash +git add application/workflows/auto_novel_generation_workflow.py tests/unit/application/workflows/test_auto_novel_generation_workflow.py +git commit -m "feat: inject showing-first scene overlays" +``` + +--- + +## Task 4: Editorial review returns `showing` score + +**Files:** + +- Modify: `tests/unit/application/workflows/test_auto_novel_generation_workflow.py` +- Modify: `application/workflows/auto_novel_generation_workflow.py` +- Modify: `interfaces/api/v1/engine/generation.py` +- Modify: `frontend/src/api/workflow.ts` +- Modify: `frontend/src/components/workbench/WorkArea.vue` + +- [ ] **Step 1: Add failing workflow test** + +Add: + +```python +def test_normalize_editorial_review_payload_includes_showing_score(self, workflow): + payload = workflow._normalize_editorial_review_payload( + { + "summary": "对白有张力,但解释略多。", + "scores": { + "opening": 88, + "conflict": 90, + "character": 86, + "dialogue": 84, + "hook": 92, + "pacing": 87, + "showing": 79, + }, + "strengths": ["证物动作具体。"], + "problems": ["部分情绪仍被直接命名。"], + "actions": ["把解释改成动作。"], + "verdict": "可优化后使用", + } + ) + + assert payload["scores"]["showing"] == 79 +``` + +- [ ] **Step 2: Run test and verify failure** + +Run: + +```bash +uv run pytest -q tests/unit/application/workflows/test_auto_novel_generation_workflow.py -k "showing_score" +``` + +Expected: fails because `showing` is not returned. 
+ +- [ ] **Step 3: Update editorial prompt and normalizer** + +In `_build_editorial_review_prompt`, update JSON schema scores: + +```text + "showing": 0-100 +``` + +Add scoring rule: + +```text +- showing:是否少解释、多展示;情绪是否通过动作/细节/潜台词表现;对白是否避免完整礼貌闭环 +``` + +Add review instruction: + +```text +展示优先专项检查: +- 扣解释句过密、总结句替代场景、直接命名情绪。 +- 扣客服式完整对白和段尾金句。 +- 修改动作必须说明如何把解释改成动作或潜台词。 +``` + +In `_normalize_editorial_review_payload`, add: + +```python + "showing": score_of("showing"), +``` + +- [ ] **Step 4: Update API response model** + +In `interfaces/api/v1/engine/generation.py`, add to `ChapterEditorialReviewScoresResponse`: + +```python + showing: int = Field(0, description="展示优先:少解释、多动作细节、潜台词对白") +``` + +If current class does not use `Field`, add it consistently with existing import already present. + +- [ ] **Step 5: Update frontend types and score label** + +In `frontend/src/api/workflow.ts`, add: + +```ts + showing: number +``` + +to `ChapterEditorialReviewScoresDTO`. + +In `frontend/src/components/workbench/WorkArea.vue`, find `editorialScoreLabel`. Add mapping: + +```ts + showing: '展示' +``` + +If the helper uses a switch, add: + +```ts +case 'showing': + return '展示' +``` + +- [ ] **Step 6: Run targeted backend tests** + +Run: + +```bash +uv run pytest -q tests/unit/application/workflows/test_auto_novel_generation_workflow.py -k "showing_score" +``` + +Expected: PASS. + +- [ ] **Step 7: Run frontend type/build check** + +Run: + +```bash +cd frontend && npm run build +``` + +Expected: build exits 0. 
+ +- [ ] **Step 8: Commit** + +```bash +git add application/workflows/auto_novel_generation_workflow.py interfaces/api/v1/engine/generation.py frontend/src/api/workflow.ts frontend/src/components/workbench/WorkArea.vue tests/unit/application/workflows/test_auto_novel_generation_workflow.py +git commit -m "feat: add showing score to editorial review" +``` + +--- + +## Task 5: API contract exposes richer strategy preview + +**Files:** + +- Modify: `tests/integration/interfaces/api/v1/test_generation_api.py` +- Modify: `interfaces/api/v1/engine/generation.py` +- Modify: `frontend/src/api/workflow.ts` + +- [ ] **Step 1: Add integration test for response model compatibility** + +In `tests/integration/interfaces/api/v1/test_generation_api.py`, add a test near strategy-preview tests: + +```python +def test_strategy_preview_returns_chapter_contract_and_showing_scene_fields(self, client, mock_workflow): + async def strategy_with_showing_fields(*args, **kwargs): + return { + "chapter_contract": { + "chapter_question": "灰卡为什么能刷开门禁?", + "protagonist_want": "白雨翔要确认写卡器来源。", + "opposition": "许照只给半份证据。", + "reader_expectation": "看到两人互相试探。", + "required_information_change": "签收记录暴露伪造痕迹。", + "required_relationship_change": "两人形成有限合作。", + "ending_question": "谁借用了审计流程?", + "show_dont_tell_rules": ["不能直写怀疑,只写扣住证物。"], + }, + "dramatic_task": { + "goal": "确认写卡器来源", + "obstacle": "许照保留证据", + "reader_expectation": "看到试探", + "ending_hook": "审计流程异常", + }, + "scene_plan": [ + { + "label": "核对签收单", + "task": "确认签名真伪", + "resistance": "许照不交原件", + "info_shift": "签名疑点出现", + "relationship_shift": "有限合作", + "anchor": "证物袋", + "visible_action": "白雨翔按住证物袋封口。", + "subtext_dialogue": "表面问流程,实际逼许照露底。", + "unspoken_emotion": "怀疑不能直说。", + "object_or_clue_change": "灰卡变成伪造链条证据。", + "hook": "审计流程异常", + "target_words": 800, + } + ], + "writing_focus": ["少解释,多展示。"], + } + + mock_workflow.generate_chapter_strategy = strategy_with_showing_fields + response = client.post( + 
"/api/v1/novels/novel-1/chapters/2/strategy-preview", + json={"outline": "白雨翔追查灰卡。"}, + ) + + assert response.status_code == 200 + data = response.json() + assert data["chapter_contract"]["chapter_question"].startswith("灰卡") + assert data["scene_plan"][0]["visible_action"].startswith("白雨翔") +``` + +- [ ] **Step 2: Run test and verify failure** + +Run: + +```bash +uv run pytest -q tests/integration/interfaces/api/v1/test_generation_api.py -k "strategy_preview_returns_chapter_contract" +``` + +Expected: fails until response models include new fields. + +- [ ] **Step 3: Add Pydantic response models** + +In `interfaces/api/v1/engine/generation.py`, add: + +```python +class ChapterContractResponse(BaseModel): + chapter_question: str + protagonist_want: str + opposition: str + reader_expectation: str + required_information_change: str + required_relationship_change: str + ending_question: str + show_dont_tell_rules: List[str] +``` + +Extend `ChapterStrategySceneResponse`: + +```python + visible_action: str = "" + subtext_dialogue: str = "" + unspoken_emotion: str = "" + object_or_clue_change: str = "" +``` + +Extend `ChapterStrategyPreviewResponse`: + +```python + chapter_contract: ChapterContractResponse +``` + +- [ ] **Step 4: Update frontend DTOs** + +In `frontend/src/api/workflow.ts`, add: + +```ts +export interface ChapterContractDTO { + chapter_question: string + protagonist_want: string + opposition: string + reader_expectation: string + required_information_change: string + required_relationship_change: string + ending_question: string + show_dont_tell_rules: string[] +} +``` + +Extend `ChapterStrategySceneDTO`: + +```ts + visible_action?: string + subtext_dialogue?: string + unspoken_emotion?: string + object_or_clue_change?: string +``` + +Extend `ChapterStrategyPreviewDTO`: + +```ts + chapter_contract: ChapterContractDTO +``` + +- [ ] **Step 5: Run integration test** + +Run: + +```bash +uv run pytest -q 
tests/integration/interfaces/api/v1/test_generation_api.py -k "strategy_preview_returns_chapter_contract" +``` + +Expected: PASS. + +- [ ] **Step 6: Run frontend build** + +Run: + +```bash +cd frontend && npm run build +``` + +Expected: build exits 0. + +- [ ] **Step 7: Commit** + +```bash +git add interfaces/api/v1/engine/generation.py frontend/src/api/workflow.ts tests/integration/interfaces/api/v1/test_generation_api.py +git commit -m "feat: expose story studio strategy contract" +``` + +--- + +## Task 6: Workbench displays chapter contract and showing score + +**Files:** + +- Modify: `frontend/src/components/workbench/WorkArea.vue` + +- [ ] **Step 1: Add contract preview in strategy panel** + +Find where `chapterStrategy` is displayed in the generation modal. Add this block above scene-plan display: + +```vue + + + 本章问题:{{ chapterStrategy.chapter_contract.chapter_question }} + 主角想要:{{ chapterStrategy.chapter_contract.protagonist_want }} + 阻力来源:{{ chapterStrategy.chapter_contract.opposition }} + 信息变化:{{ chapterStrategy.chapter_contract.required_information_change }} + 章末追问:{{ chapterStrategy.chapter_contract.ending_question }} + + 展示优先 + + - {{ rule }} + + + + +``` + +- [ ] **Step 2: Extend scene display** + +Where scene plan items are rendered, add these labels if present: + +```vue +动作:{{ scene.visible_action }} +潜台词:{{ scene.subtext_dialogue }} +不直说:{{ scene.unspoken_emotion }} +线索/道具:{{ scene.object_or_clue_change }} +``` + +- [ ] **Step 3: Add score label fallback** + +If `editorialScoreLabel` uses an object map, add: + +```ts +showing: '展示', +``` + +If it uses a switch, add: + +```ts +case 'showing': + return '展示' +``` + +- [ ] **Step 4: Build frontend** + +Run: + +```bash +cd frontend && npm run build +``` + +Expected: build exits 0. 
+ +- [ ] **Step 5: Commit** + +```bash +git add frontend/src/components/workbench/WorkArea.vue +git commit -m "feat: show story studio contract in workbench" +``` + +--- + +## Task 7: Focused regression and smoke test + +**Files:** + +- No source changes unless failures identify a narrow bug. + +- [ ] **Step 1: Run focused backend tests** + +Run: + +```bash +uv run pytest -q tests/unit/application/workflows/test_auto_novel_generation_workflow.py -k "chapter_contract or strategy_prompt_requests or show_dont_tell_contract or showing_fields or showing_score" +``` + +Expected: all selected tests pass. + +- [ ] **Step 2: Run focused API tests** + +Run: + +```bash +uv run pytest -q tests/integration/interfaces/api/v1/test_generation_api.py -k "strategy_preview_returns_chapter_contract or editorial" +``` + +Expected: selected tests pass. + +- [ ] **Step 3: Run frontend build** + +Run: + +```bash +cd frontend && npm run build +``` + +Expected: build exits 0. + +- [ ] **Step 4: Restart local backend** + +Run: + +```bash +pkill -f "uvicorn interfaces.main:app" || true +nohup .venv/bin/python -m uvicorn interfaces.main:app --host 127.0.0.1 --port 39101 > /tmp/pp39101.log 2>&1 & +sleep 3 +curl -sS -o /dev/null -w '%{http_code}\n' http://127.0.0.1:39101/openapi.json +``` + +Expected: prints `200`. + +- [ ] **Step 5: Smoke strategy preview API** + +Run: + +```bash +curl -sS -X POST http://127.0.0.1:39101/api/v1/novels/test-novel-web-writing-157db1b8/chapters/2/strategy-preview \ + -H 'Content-Type: application/json' \ + -d '{"outline":"白雨翔追查灰卡,许照拿出矛盾证据。","target_word_count":900,"word_tolerance_percent":8}' \ + | python -m json.tool | sed -n '1,120p' +``` + +Expected: response contains `chapter_contract`, `show_dont_tell_rules`, and scene fields such as `visible_action`. + +- [ ] **Step 6: Commit test/documentation adjustment if needed** + +If smoke test reveals no code change, skip commit. 
If a narrow bug was fixed: + +```bash +git add -A +git commit -m "fix: stabilize story studio phase one smoke" +``` + +--- + +## Self-Review + +Spec coverage: + +- Chapter contract: Task 1, Task 2, Task 5, Task 6. +- Scene package show fields: Task 1, Task 2, Task 3, Task 5, Task 6. +- Prompt injection: Task 2 and Task 3. +- Scene budget execution: Task 3 extends existing budget overlays. +- Editorial `showing` score: Task 4 and Task 6. +- No new database tables: preserved by file map. +- Candidate competition and reroll: intentionally excluded from phase one. + +Placeholder scan: + +- No reserved empty-slot wording is used in actionable steps. +- Every code-changing task includes concrete snippets and exact commands. + +Type consistency: + +- Backend uses `chapter_contract`, `show_dont_tell_rules`, `visible_action`, `subtext_dialogue`, `unspoken_emotion`, `object_or_clue_change`, and `showing`. +- Frontend DTOs use the same names. diff --git a/docs/superpowers/specs/2026-05-02-story-studio-pipeline-design.md b/docs/superpowers/specs/2026-05-02-story-studio-pipeline-design.md new file mode 100644 index 000000000..d7045e218 --- /dev/null +++ b/docs/superpowers/specs/2026-05-02-story-studio-pipeline-design.md @@ -0,0 +1,355 @@ +# Story Studio Pipeline 设计:展示优先的小说生成流水线 + +## 背景 + +当前 PP 的小说生成已经具备章节策略、场景预算、风格手法库、主编审稿、长稿母本和字数收束器,但真实链路验证显示:单纯“去 AI 味提示词”只能带来有限改善。更稳定的方向不是要求模型“写得像人”,而是把“好看”拆成可执行、可检查、可回炉的生产流程。 + +本设计目标是把章节生成从“一口气写一章”升级为“章节制片流水线”:先定章节合同,再拆场景包,再生成候选、筛选、装配、审稿和局部回炉。 + +## 核心原则:展示优先写作协议 + +展示优先协议是本方案的硬约束,不是附加提示词。 + +- 少解释,多展示。 +- 少总结,多动作和细节。 +- 少金句,多具体反应。 +- 不直接写“复杂情绪”,改写人物做了什么、停顿了什么、避开了什么。 +- 对话不要每句都完整、礼貌、逻辑闭环。 +- 人物不能都像同一个聪明客服,必须有各自的信息盲区、说话习惯和不合作方式。 + +该协议会进入三个位置: + +1. 章节合同:明确本章哪些情绪、信息和动机不能直说。 +2. 场景包:给每个场景分配可见动作、潜台词对白、未说出口的情绪。 +3. 主编审稿与回炉:对解释、总结、情绪命名、客服式对白做扣分和局部修正。 + +## 目标 + +1. 提升章节可读性:冲突更清楚,人物更有差异,读者更愿意追下一章。 +2. 控制字数:目标字数默认控制在 ±8%。 +3. 避免硬截断:结尾必须以自然钩子收束。 +4. 减少 AI 味的可感知来源:解释句、总结句、抽象情绪、礼貌闭环对白、段尾升华句。 +5. 
保留现有 PP 能力,不重做架构,不破坏常规生成按钮。 + +## 非目标 + +- 不继续以 AI 检测器分数作为主目标。 +- 不把每章都交给昂贵模型多轮整章重写。 +- 不让局部问题触发整章重写。 +- 不引入全新的小说数据库结构,优先复用现有章节策略、风格库、候选稿、主编审稿和工作流。 + +## 总体流程 + +```mermaid +flowchart TD + A["章节输入:大纲 / Bible / 线索 / 道具 / 风格档案"] --> B["章节合同"] + B --> C["场景包规划"] + C --> D["场景候选生成"] + D --> E["候选选择器"] + E --> F["章节装配器"] + F --> G["字数仲裁器 + 章尾收束器"] + G --> H["主编审稿"] + H --> I{"达标?"} + I -->|"是"| J["输出正文 / 候选稿"] + I -->|"否"| K["局部回炉最差场景"] + K --> F +``` + +## 组件设计 + +### 1. Chapter Contract:章节合同 + +章节合同是正文生成前的硬约束对象。它不写正文,只回答本章要完成什么。 + +字段: + +- `chapter_question`:本章读者想知道的问题。 +- `protagonist_want`:主角当前想拿到什么、确认什么或避免什么。 +- `opposition`:谁或什么阻碍主角。 +- `reader_expectation`:读者期待看到的具体场面。 +- `required_information_change`:本章必须新增或改变的信息。 +- `required_relationship_change`:本章必须发生的人物关系变化。 +- `ending_question`:章末留下的追问。 +- `show_dont_tell_rules`:本章禁止直说的情绪、动机和解释。 + +示例: + +```json +{ + "chapter_question": "灰卡的写入设备是否还在系统内部?", + "protagonist_want": "白雨翔想证明 774 写卡器未被正常销毁。", + "opposition": "许照保留关键证据,督导科流程也在误导他。", + "required_information_change": "签收记录从嫌疑证据变成伪造证据。", + "required_relationship_change": "白雨翔和许照从对抗变成有限合作。", + "ending_question": "操盘者是否借用了内部审计流程?", + "show_dont_tell_rules": [ + "不能写白雨翔感到怀疑,只能写他追问、停顿、扣住证物不交。", + "不能解释许照开始信任白雨翔,只能写他让出半步、交出一份次级证据。", + "对白不能每句完整回答,允许打断、反问、避重就轻。" + ] +} +``` + +### 2. Scene Package:场景包 + +每章拆成 3-5 个场景包。场景包是字数和可读性的基本单位。 + +字段: + +- `label`:场景名。 +- `target_words`:本场景预算。 +- `goal`:角色在此场景要达成什么。 +- `resistance`:阻力。 +- `information_shift`:信息如何变化。 +- `relationship_shift`:人物关系如何变化。 +- `visible_action`:必须出现的具体动作。 +- `subtext_dialogue`:对白表面内容和真实意图。 +- `unspoken_emotion`:不能直说的情绪。 +- `object_or_clue_change`:道具或线索状态变化。 +- `scene_hook`:场景末的小钩子。 + +场景包规划必须让 `target_words` 总和接近章节目标字数。最后一个场景包由字数仲裁器动态调整预算,避免最后硬裁。 + +### 3. 
Candidate Generator:场景候选生成 + +每个场景包默认生成 1 个候选。关键章或用户开启“质量优先”时生成 2 个候选。 + +候选生成只负责写当前场景,不负责整章。输入包括: + +- 章节合同 +- 当前场景包 +- 已选前文尾段 +- Bible / CoC 正典 / 线索 / 道具账本 +- 风格档案与展示优先协议 + +候选生成的硬要求: + +- 场景必须发生具体行动。 +- 对话必须存在潜台词或信息不对称。 +- 不允许用总结句跳过冲突过程。 +- 不允许把未说出口的情绪命名出来。 +- 不允许段尾金句式升华。 + +### 4. Candidate Selector:候选选择器 + +候选选择器只做判断,不重写正文。可使用分析模型,默认 DS。 + +评分维度: + +- 冲突是否可见。 +- 信息是否发生变化。 +- 人物关系是否发生变化。 +- 对白是否有潜台词。 +- 动作是否承载情绪。 +- 是否出现解释句、总结句、抽象情绪、客服式对白。 +- 是否符合场景字数预算。 + +输出: + +- `selected_candidate_id` +- `score` +- `reasons` +- `rewrite_flags` + +### 5. Chapter Assembler:章节装配器 + +章节装配器把选中的场景候选拼为一章,并负责过渡。 + +装配规则: + +- 不重写已选正文的大段内容。 +- 只补必要的场景过渡句。 +- 检查人物、线索、道具、认知边界是否连续。 +- 记录每个场景的最终字数和预算偏差。 + +如果拼接后超预算,优先压缩解释句、重复环境和段尾总结,不压缩冲突动作与关键对白。 + +### 6. Word Budget Arbiter:字数仲裁器 + +字数仲裁器不在最后硬截断,而是在三个阶段介入: + +1. 场景包规划阶段:分配预算。 +2. 场景候选阶段:控制单场景输出。 +3. 章节装配阶段:动态调整最后一个场景和结尾收束长度。 + +规则: + +- 默认章节容差 ±8%。 +- 低于下限:优先补动作链、反应、阻力升级、信息确认过程。 +- 高于上限:优先删解释、总结、重复环境、抽象情绪。 +- 接近上限:只允许自然收束,不新增设定。 + +### 7. Ending Closer:章尾收束器 + +章尾收束器只处理最后 200-400 字。 + +目标: + +- 保留追读钩子。 +- 不新增大设定。 +- 不用“他知道事情才刚刚开始”这类模板句。 +- 让最后一句来自一个具体动作、物件、声音、短信、脚步、门、证据变化或对话断点。 + +### 8. Editorial Review:主编审稿 + +审稿继续使用现有维度,但新增展示优先检查。 + +评分: + +- 开篇 +- 冲突 +- 人物 +- 对白 +- 追读 +- 节奏 +- 展示优先 + +展示优先扣分项: + +- 解释句过密。 +- 总结句替代场景。 +- 直接命名情绪。 +- 对白完整礼貌且闭环。 +- 人物声音趋同。 +- 段尾升华或金句。 + +### 9. 
Local Reroll:局部回炉 + +如果审稿不达标,不整章重写,只回炉最差的 1-2 个场景包。 + +回炉输入: + +- 原场景包 +- 原候选正文 +- 审稿问题 +- 章节合同 +- 前后场景边界 + +回炉动作: + +- 把解释改成动作。 +- 把情绪词改成身体反应。 +- 把完整对白打碎。 +- 增加不合作、停顿、误解、抢话、答非所问。 +- 保持剧情事实、线索状态和道具状态不变。 + +## 与现有系统的集成 + +### 后端 + +优先扩展 `AutoNovelGenerationWorkflow`,不新建并行大工作流。 + +现有能力复用: + +- `generate_chapter_strategy`:升级为章节合同 + 场景包规划。 +- `_resolve_scene_budget_plan`:继续做场景预算基础。 +- `_build_prompt`:注入展示优先协议和场景包。 +- `_enforce_chapter_word_target`:保留为兜底,但减少硬截断依赖。 +- `review_generated_chapter_editorially`:增加展示优先评分。 +- 候选稿 API:承载场景候选和局部回炉结果。 + +### 前端 + +默认生成按钮保留。新增设置尽量少: + +- 默认模式:章节制片流水线。 +- 质量优先:每场景 2 候选,成本更高。 +- 长稿母本:继续灰度开关。 + +工作台展示: + +- 章节合同预览。 +- 场景包列表。 +- 每个场景的预算/实际字数。 +- 审稿结果和局部回炉按钮。 + +### 模型路由 + +- 写作正文:跟随后台当前写作模型配置。 +- 合同、场景包、候选选择、审稿:默认分析模型 DS。 +- 质量优先候选生成:仍使用写作模型,但只在用户开启时增加调用次数。 + +## 错误处理 + +- 章节合同 JSON 解析失败:回退为保守合同模板,并记录 warning。 +- 场景包总预算偏差超过 15%:重新归一化预算,不重新调用模型。 +- 单场景候选为空:重试一次;仍失败则回退为单候选生成。 +- 候选选择器失败:选择字数最接近且禁忌项最少的候选。 +- 装配后超预算:运行字数仲裁器;仍超出时提示用户生成候选稿,不自动覆盖正文。 +- CoC 正典或认知边界阻断:保留现有硬阻断,不进入正文生成。 + +## 测试方案 + +### 单元测试 + +- 章节合同包含展示优先规则。 +- 场景包预算总和接近目标字数。 +- 候选选择器能扣除解释句、总结句和情绪命名。 +- 局部回炉不会改变线索和道具状态。 +- 章尾收束器不会新增设定。 + +### 集成测试 + +- `generate-chapter-stream` 默认可返回章节合同、场景包和 done。 +- 质量优先模式会生成多候选并选择一版。 +- 长稿母本模式继续返回 `long_draft_mode` 和拆章数。 +- 主编审稿返回 `展示优先` 分数。 + +### 人工抽检 + +使用同一大纲生成 3 组: + +- 旧默认链路。 +- 章节制片流水线。 +- 质量优先流水线。 + +对比指标: + +- 字数命中:目标 ±8%。 +- 追读分:>= 90。 +- 主编总均分:>= 88。 +- 展示优先分:>= 85。 +- 人工读感:对白是否像不同人、是否少解释多动作。 + +## 分阶段落地 + +### 阶段一:最小可用流水线 + +- 章节合同。 +- 场景包规划。 +- 展示优先协议注入。 +- 场景预算 + 章尾收束。 +- 主编审稿增加展示优先评分。 + +### 阶段二:候选选择 + +- 每场景 2 候选。 +- 候选选择器。 +- 质量优先开关。 +- 候选评分记录。 + +### 阶段三:局部回炉 + +- 根据主编审稿定位最差场景。 +- 只回炉局部。 +- 装配后重新审稿。 + +### 阶段四:长稿母本增强 + +- 长稿按场景峰值拆章。 +- 每章重写结尾钩子。 +- 保留下一章前摄设定。 + +## 验收标准 + +- 目标字数 ±8% 命中率 >= 90%。 +- 主编审稿平均分 >= 88。 +- 追读分 >= 90。 +- 展示优先分 >= 85。 +- 章尾突兀人工判定 <= 10%。 +- 人工抽检中“人物像同一个声音”的比例明显下降。 + +## 结论 + +本方案不再把问题定义为“降低 AI 味”,而是定义为“让正文通过可见动作、具体细节、潜台词对白和人物差异来推进”。它保留现有 PP 能力,并把章节策略、场景预算、风格库、主编审稿和候选稿系统串成一条可验证的生成流水线。 + diff --git 
a/domain/novel/entities/novel.py b/domain/novel/entities/novel.py index 6c0187709..5715e5fd6 100644 --- a/domain/novel/entities/novel.py +++ b/domain/novel/entities/novel.py @@ -60,6 +60,8 @@ def __init__( last_audit_issues: Optional[List[Dict[str, str]]] = None, # 目标字数控制 target_words_per_chapter: int = 2500, + # 审计进度指示 + audit_progress: Optional[str] = None, ): super().__init__(id.value) self.novel_id = id @@ -98,6 +100,8 @@ def __init__( self.last_audit_issues = last_audit_issues or [] # 目标字数控制 self.target_words_per_chapter = target_words_per_chapter + # 审计进度指示 + self.audit_progress = audit_progress def add_chapter(self, chapter: Chapter) -> None: """添加章节(必须连续)""" diff --git a/domain/style_bible/__init__.py b/domain/style_bible/__init__.py new file mode 100644 index 000000000..53180b7ae --- /dev/null +++ b/domain/style_bible/__init__.py @@ -0,0 +1,2 @@ +"""Style Bible domain package.""" + diff --git a/domain/style_bible/entities.py b/domain/style_bible/entities.py new file mode 100644 index 000000000..26c478235 --- /dev/null +++ b/domain/style_bible/entities.py @@ -0,0 +1,221 @@ +"""写作手法知识库领域模型。""" +from __future__ import annotations + +import hashlib +from dataclasses import dataclass, field +from datetime import datetime, timezone +from typing import Any, Iterable, Optional +from uuid import uuid4 + + +def _now() -> datetime: + return datetime.now(timezone.utc) + + +def _clean_text(value: Optional[str]) -> str: + return (value or "").strip() + + +def _clean_dict(value: Any) -> dict[str, Any]: + return dict(value) if isinstance(value, dict) else {} + + +def _clean_list(values: Optional[Iterable[Any] | str]) -> list[Any]: + if not values: + return [] + if isinstance(values, str): + text = _clean_text(values) + return [text] if text else [] + result: list[Any] = [] + for value in values: + if isinstance(value, str): + item = _clean_text(value) + else: + item = value + if item and item not in result: + result.append(item) + return result + + +def 
_content_hash(content: str) -> str: + return hashlib.sha256(content.encode("utf-8")).hexdigest() + + +@dataclass +class StyleSample: + """用户提供的参考样本文本。""" + + title: str + content: str + source_type: str = "reference" + genre: str = "" + scene_type: str = "" + pov: str = "" + allowed_for_generation: bool = False + novel_id: str = "" + profile_id: str = "" + content_hash: str = "" + char_count: int = 0 + id: str = field(default_factory=lambda: f"style-sample-{uuid4().hex}") + created_at: datetime = field(default_factory=_now) + updated_at: datetime = field(default_factory=_now) + + def __post_init__(self) -> None: + self.id = _clean_text(self.id) or f"style-sample-{uuid4().hex}" + self.title = _clean_text(self.title) + self.content = _clean_text(self.content) + if not self.title: + raise ValueError("Style sample title cannot be empty") + if not self.content: + raise ValueError("Style sample content cannot be empty") + self.source_type = _clean_text(self.source_type) or "reference" + self.genre = _clean_text(self.genre) + self.scene_type = _clean_text(self.scene_type) + self.pov = _clean_text(self.pov) + self.novel_id = _clean_text(self.novel_id) + self.profile_id = _clean_text(self.profile_id) + self.allowed_for_generation = bool(self.allowed_for_generation) + self.char_count = len(self.content) + self.content_hash = _clean_text(self.content_hash) or _content_hash(self.content) + + +@dataclass +class StyleSampleChunk: + """样本文本切片。""" + + sample_id: str + chunk_type: str + sequence: int + content: str + chapter_number: int = 0 + title: str = "" + char_count: int = 0 + metrics: dict[str, Any] = field(default_factory=dict) + id: str = field(default_factory=lambda: f"style-chunk-{uuid4().hex}") + created_at: datetime = field(default_factory=_now) + + def __post_init__(self) -> None: + self.id = _clean_text(self.id) or f"style-chunk-{uuid4().hex}" + self.sample_id = _clean_text(self.sample_id) + self.chunk_type = _clean_text(self.chunk_type) + self.title = 
_clean_text(self.title) + self.content = _clean_text(self.content) + if not self.sample_id: + raise ValueError("Style sample chunk sample_id cannot be empty") + if self.chunk_type not in {"chapter", "scene", "paragraph"}: + raise ValueError(f"Invalid style sample chunk type: {self.chunk_type}") + if not self.content: + raise ValueError("Style sample chunk content cannot be empty") + self.sequence = max(0, int(self.sequence or 0)) + self.chapter_number = max(0, int(self.chapter_number or 0)) + self.char_count = len(self.content) + self.metrics = _clean_dict(self.metrics) + + +@dataclass +class StyleRule: + """可复用写作规则。""" + + title: str + instruction: str + category: str = "" + weight: float = 1.0 + + def __post_init__(self) -> None: + self.title = _clean_text(self.title) + self.instruction = _clean_text(self.instruction) + self.category = _clean_text(self.category) + if not self.title: + raise ValueError("Style rule title cannot be empty") + if not self.instruction: + raise ValueError("Style rule instruction cannot be empty") + try: + self.weight = float(self.weight) + except (TypeError, ValueError): + self.weight = 1.0 + + +@dataclass +class StyleProfile: + """可被章节生成引用的写作风格包。""" + + name: str + description: str = "" + status: str = "active" + novel_id: str = "" + profile: dict[str, Any] = field(default_factory=dict) + metrics: dict[str, Any] = field(default_factory=dict) + rules: list[Any] = field(default_factory=list) + forbidden_patterns: list[str] = field(default_factory=list) + version: int = 1 + id: str = field(default_factory=lambda: f"style-profile-{uuid4().hex}") + created_at: datetime = field(default_factory=_now) + updated_at: datetime = field(default_factory=_now) + + def __post_init__(self) -> None: + self.id = _clean_text(self.id) or f"style-profile-{uuid4().hex}" + self.name = _clean_text(self.name) + if not self.name: + raise ValueError("Style profile name cannot be empty") + self.description = _clean_text(self.description) + self.status = 
_clean_text(self.status) or "active" + if self.status not in {"active", "archived"}: + raise ValueError(f"Invalid style profile status: {self.status}") + self.novel_id = _clean_text(self.novel_id) + self.profile = _clean_dict(self.profile) + self.metrics = _clean_dict(self.metrics) + self.rules = _clean_list(self.rules) + self.forbidden_patterns = [str(v) for v in _clean_list(self.forbidden_patterns)] + self.version = max(1, int(self.version or 1)) + + +@dataclass +class StyleTechniqueCard: + """从样本中抽取的可执行写作技法卡。""" + + profile_id: str + title: str + rule_text: str + prompt_instruction: str + category: str = "" + scene_type: str = "" + example_summary: str = "" + enabled: bool = True + weight: float = 1.0 + id: str = field(default_factory=lambda: f"style-card-{uuid4().hex}") + created_at: datetime = field(default_factory=_now) + updated_at: datetime = field(default_factory=_now) + + def __post_init__(self) -> None: + self.id = _clean_text(self.id) or f"style-card-{uuid4().hex}" + self.profile_id = _clean_text(self.profile_id) + self.title = _clean_text(self.title) + self.category = _clean_text(self.category) + self.scene_type = _clean_text(self.scene_type) + self.rule_text = _clean_text(self.rule_text) + self.example_summary = _clean_text(self.example_summary) + self.prompt_instruction = _clean_text(self.prompt_instruction) + if not self.profile_id: + raise ValueError("Style technique card profile_id cannot be empty") + if not self.title: + raise ValueError("Style technique card title cannot be empty") + if not self.rule_text: + raise ValueError("Style technique card rule_text cannot be empty") + if not self.prompt_instruction: + raise ValueError("Style technique card prompt_instruction cannot be empty") + self.enabled = bool(self.enabled) + try: + self.weight = float(self.weight) + except (TypeError, ValueError): + self.weight = 1.0 + + def disable(self) -> None: + """禁用卡片但保留历史。""" + self.enabled = False + self.updated_at = _now() + + def enable(self) -> None: + 
"""重新启用卡片。""" + self.enabled = True + self.updated_at = _now() + diff --git a/domain/style_bible/repositories.py b/domain/style_bible/repositories.py new file mode 100644 index 000000000..db0a8aa08 --- /dev/null +++ b/domain/style_bible/repositories.py @@ -0,0 +1,76 @@ +"""写作手法知识库仓储接口。""" +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import Optional + +from domain.style_bible.entities import ( + StyleProfile, + StyleSample, + StyleSampleChunk, + StyleTechniqueCard, +) + + +class StyleBibleRepository(ABC): + """写作手法知识库仓储。""" + + @abstractmethod + def save_sample( + self, + sample: StyleSample, + chunks: list[StyleSampleChunk], + ) -> StyleSample: + pass + + @abstractmethod + def list_samples( + self, + novel_id: Optional[str] = None, + profile_id: Optional[str] = None, + ) -> list[StyleSample]: + pass + + @abstractmethod + def get_sample(self, sample_id: str) -> Optional[StyleSample]: + pass + + @abstractmethod + def save_profile(self, profile: StyleProfile) -> StyleProfile: + pass + + @abstractmethod + def list_profiles( + self, + novel_id: Optional[str] = None, + status: Optional[str] = None, + ) -> list[StyleProfile]: + pass + + @abstractmethod + def get_profile(self, profile_id: str) -> Optional[StyleProfile]: + pass + + @abstractmethod + def save_technique_cards( + self, + profile_id: str, + cards: list[StyleTechniqueCard], + ) -> list[StyleTechniqueCard]: + pass + + @abstractmethod + def list_technique_cards( + self, + profile_id: str, + enabled: Optional[bool] = None, + ) -> list[StyleTechniqueCard]: + pass + + @abstractmethod + def get_technique_card(self, card_id: str) -> Optional[StyleTechniqueCard]: + pass + + @abstractmethod + def update_technique_card(self, card: StyleTechniqueCard) -> StyleTechniqueCard: + pass diff --git a/domain/topic/__init__.py b/domain/topic/__init__.py new file mode 100644 index 000000000..3bd4c1893 --- /dev/null +++ b/domain/topic/__init__.py @@ -0,0 +1,6 @@ +"""选题立项领域模块。""" + +from 
domain.topic.entities import TopicIdea, TopicIdeaStatus +from domain.topic.repositories import TopicIdeaRepository + +__all__ = ["TopicIdea", "TopicIdeaStatus", "TopicIdeaRepository"] diff --git a/domain/topic/entities.py b/domain/topic/entities.py new file mode 100644 index 000000000..36632f627 --- /dev/null +++ b/domain/topic/entities.py @@ -0,0 +1,122 @@ +"""选题立项领域模型。""" +from __future__ import annotations + +from dataclasses import dataclass, field +from datetime import datetime, timezone +from enum import Enum +from typing import Any, Iterable, Optional +from uuid import uuid4 + + +class TopicIdeaStatus(str, Enum): + """选题候选状态。""" + + DRAFT = "draft" + ADOPTED = "adopted" + ARCHIVED = "archived" + + +def _now() -> datetime: + return datetime.now(timezone.utc) + + +def _clean_text(value: Optional[str]) -> str: + return (value or "").strip() + + +def _clean_list(values: Optional[Iterable[str] | str]) -> list[str]: + if not values: + return [] + if isinstance(values, str): + text = _clean_text(values) + return [text] if text else [] + result: list[str] = [] + for value in values: + text = _clean_text(str(value)) + if text and text not in result: + result.append(text) + return result + + +def _clean_dict(value: Any) -> dict[str, Any]: + return dict(value) if isinstance(value, dict) else {} + + +def _normalize_status(status: TopicIdeaStatus | str) -> TopicIdeaStatus: + if isinstance(status, TopicIdeaStatus): + return status + try: + return TopicIdeaStatus(str(status).strip()) + except ValueError as exc: + raise ValueError(f"Invalid topic idea status: {status}") from exc + + +@dataclass +class TopicIdea: + """选题立项池候选。""" + + title: str + genre: str = "" + world_preset: str = "" + length_tier: str = "" + logline: str = "" + premise: str = "" + protagonist_hook: str = "" + core_conflict: str = "" + opening_hook: str = "" + selling_points: list[str] = field(default_factory=list) + long_term_potential: str = "" + risk_notes: list[str] = field(default_factory=list) + 
market_tags: list[str] = field(default_factory=list) + score: int = 0 + adopted_novel_id: Optional[str] = None + source_brief: dict[str, Any] = field(default_factory=dict) + development_notes: dict[str, Any] = field(default_factory=dict) + evaluation: dict[str, Any] = field(default_factory=dict) + status: TopicIdeaStatus | str = TopicIdeaStatus.DRAFT + id: str = field(default_factory=lambda: f"topic-{uuid4().hex}") + created_at: datetime = field(default_factory=_now) + updated_at: datetime = field(default_factory=_now) + + def __post_init__(self) -> None: + self.id = _clean_text(self.id) or f"topic-{uuid4().hex}" + self.title = _clean_text(self.title) + if not self.title: + raise ValueError("Topic idea title cannot be empty") + + self.genre = _clean_text(self.genre) + self.world_preset = _clean_text(self.world_preset) + self.length_tier = _clean_text(self.length_tier) + self.logline = _clean_text(self.logline) + self.premise = _clean_text(self.premise) + self.protagonist_hook = _clean_text(self.protagonist_hook) + self.core_conflict = _clean_text(self.core_conflict) + self.opening_hook = _clean_text(self.opening_hook) + self.selling_points = _clean_list(self.selling_points) + self.long_term_potential = _clean_text(self.long_term_potential) + self.risk_notes = _clean_list(self.risk_notes) + self.market_tags = _clean_list(self.market_tags) + self.source_brief = _clean_dict(self.source_brief) + self.development_notes = _clean_dict(self.development_notes) + self.evaluation = _clean_dict(self.evaluation) + self.status = _normalize_status(self.status) + self.adopted_novel_id = _clean_text(self.adopted_novel_id) or None + + try: + self.score = int(round(float(self.score))) + except (TypeError, ValueError): + self.score = 0 + self.score = max(0, min(100, self.score)) + + def update_status( + self, + status: TopicIdeaStatus | str, + adopted_novel_id: Optional[str] = None, + ) -> None: + """更新状态并刷新更新时间。""" + self.status = _normalize_status(status) + if adopted_novel_id is 
not None: + self.adopted_novel_id = _clean_text(adopted_novel_id) or None + if self.status != TopicIdeaStatus.ADOPTED: + self.adopted_novel_id = None + self.updated_at = _now() diff --git a/domain/topic/repositories.py b/domain/topic/repositories.py new file mode 100644 index 000000000..3c9a936d5 --- /dev/null +++ b/domain/topic/repositories.py @@ -0,0 +1,36 @@ +"""选题立项仓储接口。""" +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import Optional + +from domain.topic.entities import TopicIdea, TopicIdeaStatus + + +class TopicIdeaRepository(ABC): + """选题候选仓储。""" + + @abstractmethod + def save(self, idea: TopicIdea) -> None: + pass + + @abstractmethod + def get_by_id(self, idea_id: str) -> Optional[TopicIdea]: + pass + + @abstractmethod + def list(self, status: TopicIdeaStatus | str | None = None) -> list[TopicIdea]: + pass + + @abstractmethod + def update_status( + self, + idea_id: str, + status: TopicIdeaStatus | str, + adopted_novel_id: Optional[str] = None, + ) -> Optional[TopicIdea]: + pass + + @abstractmethod + def update(self, idea: TopicIdea) -> TopicIdea: + pass diff --git a/frontend/src-tauri/bin/backend-sidecar.bat b/frontend/src-tauri/bin/backend-sidecar.bat new file mode 100644 index 000000000..075f501fa --- /dev/null +++ b/frontend/src-tauri/bin/backend-sidecar.bat @@ -0,0 +1,29 @@ +@echo off +:: PlotPilot Backend Sidecar +:: 由 Tauri 自动调用,不要手动运行 +:: +:: 用法: backend-sidecar.bat +:: Tauri 会传入动态分配的端口号 + +set PORT=%1 +if "%PORT%"=="" set PORT=8005 + +:: 查找 Python(优先内嵌 > venv > 系统) +set "PYTHON_EXE=" + +if exist "%~dp0..\..\tools\python_embed\python.exe" ( + set "PYTHON_EXE=%~dp0..\..\tools\python_embed\python.exe" +) else if exist "%~dp0..\.venv\Scripts\python.exe" ( + set "PYTHON_EXE=%~dp0..\.venv\Scripts\python.exe" +) else ( + where python >nul 2>&1 && set "PYTHON_EXE=python" +) + +if "%PYTHON_EXE%"=="" ( + echo [ERROR] Python not found + exit /b 1 +) + +:: 启动 uvicorn +cd /d "%~dp0..\.." 
+"%PYTHON_EXE%" -m uvicorn interfaces.main:app --host 127.0.0.1 --port %PORT% --log-level info diff --git a/frontend/src-tauri/build.rs b/frontend/src-tauri/build.rs new file mode 100644 index 000000000..d860e1e6a --- /dev/null +++ b/frontend/src-tauri/build.rs @@ -0,0 +1,3 @@ +fn main() { + tauri_build::build() +} diff --git a/frontend/src-tauri/capabilities/default.json b/frontend/src-tauri/capabilities/default.json new file mode 100644 index 000000000..899da7e6d --- /dev/null +++ b/frontend/src-tauri/capabilities/default.json @@ -0,0 +1 @@ +{"identifier": "default", "description": "Default capability for PlotPilot", "windows": ["main"], "permissions": ["core:default", "shell:allow-spawn", "shell:allow-execute", "shell:allow-kill", "shell:allow-open", "shell:allow-stdin-write", "core:window:allow-set-title", "core:window:allow-close", "core:window:allow-minimize", "core:window:allow-maximize", "core:window:allow-start-dragging", "core:window:allow-is-maximized", "core:window:allow-is-visible", "core:window:allow-inner-position", "core:window:allow-inner-size", "core:window:allow-outer-position", "core:window:allow-outer-size", "core:window:allow-is-fullscreen", "core:window:allow-set-fullscreen", "core:window:allow-set-focus", "core:webview:allow-print"]} \ No newline at end of file diff --git a/frontend/src-tauri/gen/schemas/acl-manifests.json b/frontend/src-tauri/gen/schemas/acl-manifests.json new file mode 100644 index 000000000..86cdb1f5f --- /dev/null +++ b/frontend/src-tauri/gen/schemas/acl-manifests.json @@ -0,0 +1 @@ +{"core":{"default_permission":{"identifier":"default","description":"Default core plugins 
set.","permissions":["core:path:default","core:event:default","core:window:default","core:webview:default","core:app:default","core:image:default","core:resources:default","core:menu:default","core:tray:default"]},"permissions":{},"permission_sets":{},"global_scope_schema":null},"core:app":{"default_permission":{"identifier":"default","description":"Default permissions for the plugin.","permissions":["allow-version","allow-name","allow-tauri-version","allow-identifier","allow-bundle-type","allow-register-listener","allow-remove-listener"]},"permissions":{"allow-app-hide":{"identifier":"allow-app-hide","description":"Enables the app_hide command without any pre-configured scope.","commands":{"allow":["app_hide"],"deny":[]}},"allow-app-show":{"identifier":"allow-app-show","description":"Enables the app_show command without any pre-configured scope.","commands":{"allow":["app_show"],"deny":[]}},"allow-bundle-type":{"identifier":"allow-bundle-type","description":"Enables the bundle_type command without any pre-configured scope.","commands":{"allow":["bundle_type"],"deny":[]}},"allow-default-window-icon":{"identifier":"allow-default-window-icon","description":"Enables the default_window_icon command without any pre-configured scope.","commands":{"allow":["default_window_icon"],"deny":[]}},"allow-fetch-data-store-identifiers":{"identifier":"allow-fetch-data-store-identifiers","description":"Enables the fetch_data_store_identifiers command without any pre-configured scope.","commands":{"allow":["fetch_data_store_identifiers"],"deny":[]}},"allow-identifier":{"identifier":"allow-identifier","description":"Enables the identifier command without any pre-configured scope.","commands":{"allow":["identifier"],"deny":[]}},"allow-name":{"identifier":"allow-name","description":"Enables the name command without any pre-configured scope.","commands":{"allow":["name"],"deny":[]}},"allow-register-listener":{"identifier":"allow-register-listener","description":"Enables the 
register_listener command without any pre-configured scope.","commands":{"allow":["register_listener"],"deny":[]}},"allow-remove-data-store":{"identifier":"allow-remove-data-store","description":"Enables the remove_data_store command without any pre-configured scope.","commands":{"allow":["remove_data_store"],"deny":[]}},"allow-remove-listener":{"identifier":"allow-remove-listener","description":"Enables the remove_listener command without any pre-configured scope.","commands":{"allow":["remove_listener"],"deny":[]}},"allow-set-app-theme":{"identifier":"allow-set-app-theme","description":"Enables the set_app_theme command without any pre-configured scope.","commands":{"allow":["set_app_theme"],"deny":[]}},"allow-set-dock-visibility":{"identifier":"allow-set-dock-visibility","description":"Enables the set_dock_visibility command without any pre-configured scope.","commands":{"allow":["set_dock_visibility"],"deny":[]}},"allow-tauri-version":{"identifier":"allow-tauri-version","description":"Enables the tauri_version command without any pre-configured scope.","commands":{"allow":["tauri_version"],"deny":[]}},"allow-version":{"identifier":"allow-version","description":"Enables the version command without any pre-configured scope.","commands":{"allow":["version"],"deny":[]}},"deny-app-hide":{"identifier":"deny-app-hide","description":"Denies the app_hide command without any pre-configured scope.","commands":{"allow":[],"deny":["app_hide"]}},"deny-app-show":{"identifier":"deny-app-show","description":"Denies the app_show command without any pre-configured scope.","commands":{"allow":[],"deny":["app_show"]}},"deny-bundle-type":{"identifier":"deny-bundle-type","description":"Denies the bundle_type command without any pre-configured scope.","commands":{"allow":[],"deny":["bundle_type"]}},"deny-default-window-icon":{"identifier":"deny-default-window-icon","description":"Denies the default_window_icon command without any pre-configured 
scope.","commands":{"allow":[],"deny":["default_window_icon"]}},"deny-fetch-data-store-identifiers":{"identifier":"deny-fetch-data-store-identifiers","description":"Denies the fetch_data_store_identifiers command without any pre-configured scope.","commands":{"allow":[],"deny":["fetch_data_store_identifiers"]}},"deny-identifier":{"identifier":"deny-identifier","description":"Denies the identifier command without any pre-configured scope.","commands":{"allow":[],"deny":["identifier"]}},"deny-name":{"identifier":"deny-name","description":"Denies the name command without any pre-configured scope.","commands":{"allow":[],"deny":["name"]}},"deny-register-listener":{"identifier":"deny-register-listener","description":"Denies the register_listener command without any pre-configured scope.","commands":{"allow":[],"deny":["register_listener"]}},"deny-remove-data-store":{"identifier":"deny-remove-data-store","description":"Denies the remove_data_store command without any pre-configured scope.","commands":{"allow":[],"deny":["remove_data_store"]}},"deny-remove-listener":{"identifier":"deny-remove-listener","description":"Denies the remove_listener command without any pre-configured scope.","commands":{"allow":[],"deny":["remove_listener"]}},"deny-set-app-theme":{"identifier":"deny-set-app-theme","description":"Denies the set_app_theme command without any pre-configured scope.","commands":{"allow":[],"deny":["set_app_theme"]}},"deny-set-dock-visibility":{"identifier":"deny-set-dock-visibility","description":"Denies the set_dock_visibility command without any pre-configured scope.","commands":{"allow":[],"deny":["set_dock_visibility"]}},"deny-tauri-version":{"identifier":"deny-tauri-version","description":"Denies the tauri_version command without any pre-configured scope.","commands":{"allow":[],"deny":["tauri_version"]}},"deny-version":{"identifier":"deny-version","description":"Denies the version command without any pre-configured 
scope.","commands":{"allow":[],"deny":["version"]}}},"permission_sets":{},"global_scope_schema":null},"core:event":{"default_permission":{"identifier":"default","description":"Default permissions for the plugin, which enables all commands.","permissions":["allow-listen","allow-unlisten","allow-emit","allow-emit-to"]},"permissions":{"allow-emit":{"identifier":"allow-emit","description":"Enables the emit command without any pre-configured scope.","commands":{"allow":["emit"],"deny":[]}},"allow-emit-to":{"identifier":"allow-emit-to","description":"Enables the emit_to command without any pre-configured scope.","commands":{"allow":["emit_to"],"deny":[]}},"allow-listen":{"identifier":"allow-listen","description":"Enables the listen command without any pre-configured scope.","commands":{"allow":["listen"],"deny":[]}},"allow-unlisten":{"identifier":"allow-unlisten","description":"Enables the unlisten command without any pre-configured scope.","commands":{"allow":["unlisten"],"deny":[]}},"deny-emit":{"identifier":"deny-emit","description":"Denies the emit command without any pre-configured scope.","commands":{"allow":[],"deny":["emit"]}},"deny-emit-to":{"identifier":"deny-emit-to","description":"Denies the emit_to command without any pre-configured scope.","commands":{"allow":[],"deny":["emit_to"]}},"deny-listen":{"identifier":"deny-listen","description":"Denies the listen command without any pre-configured scope.","commands":{"allow":[],"deny":["listen"]}},"deny-unlisten":{"identifier":"deny-unlisten","description":"Denies the unlisten command without any pre-configured scope.","commands":{"allow":[],"deny":["unlisten"]}}},"permission_sets":{},"global_scope_schema":null},"core:image":{"default_permission":{"identifier":"default","description":"Default permissions for the plugin, which enables all 
commands.","permissions":["allow-new","allow-from-bytes","allow-from-path","allow-rgba","allow-size"]},"permissions":{"allow-from-bytes":{"identifier":"allow-from-bytes","description":"Enables the from_bytes command without any pre-configured scope.","commands":{"allow":["from_bytes"],"deny":[]}},"allow-from-path":{"identifier":"allow-from-path","description":"Enables the from_path command without any pre-configured scope.","commands":{"allow":["from_path"],"deny":[]}},"allow-new":{"identifier":"allow-new","description":"Enables the new command without any pre-configured scope.","commands":{"allow":["new"],"deny":[]}},"allow-rgba":{"identifier":"allow-rgba","description":"Enables the rgba command without any pre-configured scope.","commands":{"allow":["rgba"],"deny":[]}},"allow-size":{"identifier":"allow-size","description":"Enables the size command without any pre-configured scope.","commands":{"allow":["size"],"deny":[]}},"deny-from-bytes":{"identifier":"deny-from-bytes","description":"Denies the from_bytes command without any pre-configured scope.","commands":{"allow":[],"deny":["from_bytes"]}},"deny-from-path":{"identifier":"deny-from-path","description":"Denies the from_path command without any pre-configured scope.","commands":{"allow":[],"deny":["from_path"]}},"deny-new":{"identifier":"deny-new","description":"Denies the new command without any pre-configured scope.","commands":{"allow":[],"deny":["new"]}},"deny-rgba":{"identifier":"deny-rgba","description":"Denies the rgba command without any pre-configured scope.","commands":{"allow":[],"deny":["rgba"]}},"deny-size":{"identifier":"deny-size","description":"Denies the size command without any pre-configured scope.","commands":{"allow":[],"deny":["size"]}}},"permission_sets":{},"global_scope_schema":null},"core:menu":{"default_permission":{"identifier":"default","description":"Default permissions for the plugin, which enables all 
commands.","permissions":["allow-new","allow-append","allow-prepend","allow-insert","allow-remove","allow-remove-at","allow-items","allow-get","allow-popup","allow-create-default","allow-set-as-app-menu","allow-set-as-window-menu","allow-text","allow-set-text","allow-is-enabled","allow-set-enabled","allow-set-accelerator","allow-set-as-windows-menu-for-nsapp","allow-set-as-help-menu-for-nsapp","allow-is-checked","allow-set-checked","allow-set-icon"]},"permissions":{"allow-append":{"identifier":"allow-append","description":"Enables the append command without any pre-configured scope.","commands":{"allow":["append"],"deny":[]}},"allow-create-default":{"identifier":"allow-create-default","description":"Enables the create_default command without any pre-configured scope.","commands":{"allow":["create_default"],"deny":[]}},"allow-get":{"identifier":"allow-get","description":"Enables the get command without any pre-configured scope.","commands":{"allow":["get"],"deny":[]}},"allow-insert":{"identifier":"allow-insert","description":"Enables the insert command without any pre-configured scope.","commands":{"allow":["insert"],"deny":[]}},"allow-is-checked":{"identifier":"allow-is-checked","description":"Enables the is_checked command without any pre-configured scope.","commands":{"allow":["is_checked"],"deny":[]}},"allow-is-enabled":{"identifier":"allow-is-enabled","description":"Enables the is_enabled command without any pre-configured scope.","commands":{"allow":["is_enabled"],"deny":[]}},"allow-items":{"identifier":"allow-items","description":"Enables the items command without any pre-configured scope.","commands":{"allow":["items"],"deny":[]}},"allow-new":{"identifier":"allow-new","description":"Enables the new command without any pre-configured scope.","commands":{"allow":["new"],"deny":[]}},"allow-popup":{"identifier":"allow-popup","description":"Enables the popup command without any pre-configured 
scope.","commands":{"allow":["popup"],"deny":[]}},"allow-prepend":{"identifier":"allow-prepend","description":"Enables the prepend command without any pre-configured scope.","commands":{"allow":["prepend"],"deny":[]}},"allow-remove":{"identifier":"allow-remove","description":"Enables the remove command without any pre-configured scope.","commands":{"allow":["remove"],"deny":[]}},"allow-remove-at":{"identifier":"allow-remove-at","description":"Enables the remove_at command without any pre-configured scope.","commands":{"allow":["remove_at"],"deny":[]}},"allow-set-accelerator":{"identifier":"allow-set-accelerator","description":"Enables the set_accelerator command without any pre-configured scope.","commands":{"allow":["set_accelerator"],"deny":[]}},"allow-set-as-app-menu":{"identifier":"allow-set-as-app-menu","description":"Enables the set_as_app_menu command without any pre-configured scope.","commands":{"allow":["set_as_app_menu"],"deny":[]}},"allow-set-as-help-menu-for-nsapp":{"identifier":"allow-set-as-help-menu-for-nsapp","description":"Enables the set_as_help_menu_for_nsapp command without any pre-configured scope.","commands":{"allow":["set_as_help_menu_for_nsapp"],"deny":[]}},"allow-set-as-window-menu":{"identifier":"allow-set-as-window-menu","description":"Enables the set_as_window_menu command without any pre-configured scope.","commands":{"allow":["set_as_window_menu"],"deny":[]}},"allow-set-as-windows-menu-for-nsapp":{"identifier":"allow-set-as-windows-menu-for-nsapp","description":"Enables the set_as_windows_menu_for_nsapp command without any pre-configured scope.","commands":{"allow":["set_as_windows_menu_for_nsapp"],"deny":[]}},"allow-set-checked":{"identifier":"allow-set-checked","description":"Enables the set_checked command without any pre-configured scope.","commands":{"allow":["set_checked"],"deny":[]}},"allow-set-enabled":{"identifier":"allow-set-enabled","description":"Enables the set_enabled command without any pre-configured 
scope.","commands":{"allow":["set_enabled"],"deny":[]}},"allow-set-icon":{"identifier":"allow-set-icon","description":"Enables the set_icon command without any pre-configured scope.","commands":{"allow":["set_icon"],"deny":[]}},"allow-set-text":{"identifier":"allow-set-text","description":"Enables the set_text command without any pre-configured scope.","commands":{"allow":["set_text"],"deny":[]}},"allow-text":{"identifier":"allow-text","description":"Enables the text command without any pre-configured scope.","commands":{"allow":["text"],"deny":[]}},"deny-append":{"identifier":"deny-append","description":"Denies the append command without any pre-configured scope.","commands":{"allow":[],"deny":["append"]}},"deny-create-default":{"identifier":"deny-create-default","description":"Denies the create_default command without any pre-configured scope.","commands":{"allow":[],"deny":["create_default"]}},"deny-get":{"identifier":"deny-get","description":"Denies the get command without any pre-configured scope.","commands":{"allow":[],"deny":["get"]}},"deny-insert":{"identifier":"deny-insert","description":"Denies the insert command without any pre-configured scope.","commands":{"allow":[],"deny":["insert"]}},"deny-is-checked":{"identifier":"deny-is-checked","description":"Denies the is_checked command without any pre-configured scope.","commands":{"allow":[],"deny":["is_checked"]}},"deny-is-enabled":{"identifier":"deny-is-enabled","description":"Denies the is_enabled command without any pre-configured scope.","commands":{"allow":[],"deny":["is_enabled"]}},"deny-items":{"identifier":"deny-items","description":"Denies the items command without any pre-configured scope.","commands":{"allow":[],"deny":["items"]}},"deny-new":{"identifier":"deny-new","description":"Denies the new command without any pre-configured scope.","commands":{"allow":[],"deny":["new"]}},"deny-popup":{"identifier":"deny-popup","description":"Denies the popup command without any pre-configured 
scope.","commands":{"allow":[],"deny":["popup"]}},"deny-prepend":{"identifier":"deny-prepend","description":"Denies the prepend command without any pre-configured scope.","commands":{"allow":[],"deny":["prepend"]}},"deny-remove":{"identifier":"deny-remove","description":"Denies the remove command without any pre-configured scope.","commands":{"allow":[],"deny":["remove"]}},"deny-remove-at":{"identifier":"deny-remove-at","description":"Denies the remove_at command without any pre-configured scope.","commands":{"allow":[],"deny":["remove_at"]}},"deny-set-accelerator":{"identifier":"deny-set-accelerator","description":"Denies the set_accelerator command without any pre-configured scope.","commands":{"allow":[],"deny":["set_accelerator"]}},"deny-set-as-app-menu":{"identifier":"deny-set-as-app-menu","description":"Denies the set_as_app_menu command without any pre-configured scope.","commands":{"allow":[],"deny":["set_as_app_menu"]}},"deny-set-as-help-menu-for-nsapp":{"identifier":"deny-set-as-help-menu-for-nsapp","description":"Denies the set_as_help_menu_for_nsapp command without any pre-configured scope.","commands":{"allow":[],"deny":["set_as_help_menu_for_nsapp"]}},"deny-set-as-window-menu":{"identifier":"deny-set-as-window-menu","description":"Denies the set_as_window_menu command without any pre-configured scope.","commands":{"allow":[],"deny":["set_as_window_menu"]}},"deny-set-as-windows-menu-for-nsapp":{"identifier":"deny-set-as-windows-menu-for-nsapp","description":"Denies the set_as_windows_menu_for_nsapp command without any pre-configured scope.","commands":{"allow":[],"deny":["set_as_windows_menu_for_nsapp"]}},"deny-set-checked":{"identifier":"deny-set-checked","description":"Denies the set_checked command without any pre-configured scope.","commands":{"allow":[],"deny":["set_checked"]}},"deny-set-enabled":{"identifier":"deny-set-enabled","description":"Denies the set_enabled command without any pre-configured 
scope.","commands":{"allow":[],"deny":["set_enabled"]}},"deny-set-icon":{"identifier":"deny-set-icon","description":"Denies the set_icon command without any pre-configured scope.","commands":{"allow":[],"deny":["set_icon"]}},"deny-set-text":{"identifier":"deny-set-text","description":"Denies the set_text command without any pre-configured scope.","commands":{"allow":[],"deny":["set_text"]}},"deny-text":{"identifier":"deny-text","description":"Denies the text command without any pre-configured scope.","commands":{"allow":[],"deny":["text"]}}},"permission_sets":{},"global_scope_schema":null},"core:path":{"default_permission":{"identifier":"default","description":"Default permissions for the plugin, which enables all commands.","permissions":["allow-resolve-directory","allow-resolve","allow-normalize","allow-join","allow-dirname","allow-extname","allow-basename","allow-is-absolute"]},"permissions":{"allow-basename":{"identifier":"allow-basename","description":"Enables the basename command without any pre-configured scope.","commands":{"allow":["basename"],"deny":[]}},"allow-dirname":{"identifier":"allow-dirname","description":"Enables the dirname command without any pre-configured scope.","commands":{"allow":["dirname"],"deny":[]}},"allow-extname":{"identifier":"allow-extname","description":"Enables the extname command without any pre-configured scope.","commands":{"allow":["extname"],"deny":[]}},"allow-is-absolute":{"identifier":"allow-is-absolute","description":"Enables the is_absolute command without any pre-configured scope.","commands":{"allow":["is_absolute"],"deny":[]}},"allow-join":{"identifier":"allow-join","description":"Enables the join command without any pre-configured scope.","commands":{"allow":["join"],"deny":[]}},"allow-normalize":{"identifier":"allow-normalize","description":"Enables the normalize command without any pre-configured 
scope.","commands":{"allow":["normalize"],"deny":[]}},"allow-resolve":{"identifier":"allow-resolve","description":"Enables the resolve command without any pre-configured scope.","commands":{"allow":["resolve"],"deny":[]}},"allow-resolve-directory":{"identifier":"allow-resolve-directory","description":"Enables the resolve_directory command without any pre-configured scope.","commands":{"allow":["resolve_directory"],"deny":[]}},"deny-basename":{"identifier":"deny-basename","description":"Denies the basename command without any pre-configured scope.","commands":{"allow":[],"deny":["basename"]}},"deny-dirname":{"identifier":"deny-dirname","description":"Denies the dirname command without any pre-configured scope.","commands":{"allow":[],"deny":["dirname"]}},"deny-extname":{"identifier":"deny-extname","description":"Denies the extname command without any pre-configured scope.","commands":{"allow":[],"deny":["extname"]}},"deny-is-absolute":{"identifier":"deny-is-absolute","description":"Denies the is_absolute command without any pre-configured scope.","commands":{"allow":[],"deny":["is_absolute"]}},"deny-join":{"identifier":"deny-join","description":"Denies the join command without any pre-configured scope.","commands":{"allow":[],"deny":["join"]}},"deny-normalize":{"identifier":"deny-normalize","description":"Denies the normalize command without any pre-configured scope.","commands":{"allow":[],"deny":["normalize"]}},"deny-resolve":{"identifier":"deny-resolve","description":"Denies the resolve command without any pre-configured scope.","commands":{"allow":[],"deny":["resolve"]}},"deny-resolve-directory":{"identifier":"deny-resolve-directory","description":"Denies the resolve_directory command without any pre-configured scope.","commands":{"allow":[],"deny":["resolve_directory"]}}},"permission_sets":{},"global_scope_schema":null},"core:resources":{"default_permission":{"identifier":"default","description":"Default permissions for the plugin, which enables all 
commands.","permissions":["allow-close"]},"permissions":{"allow-close":{"identifier":"allow-close","description":"Enables the close command without any pre-configured scope.","commands":{"allow":["close"],"deny":[]}},"deny-close":{"identifier":"deny-close","description":"Denies the close command without any pre-configured scope.","commands":{"allow":[],"deny":["close"]}}},"permission_sets":{},"global_scope_schema":null},"core:tray":{"default_permission":{"identifier":"default","description":"Default permissions for the plugin, which enables all commands.","permissions":["allow-new","allow-get-by-id","allow-remove-by-id","allow-set-icon","allow-set-menu","allow-set-tooltip","allow-set-title","allow-set-visible","allow-set-temp-dir-path","allow-set-icon-as-template","allow-set-show-menu-on-left-click"]},"permissions":{"allow-get-by-id":{"identifier":"allow-get-by-id","description":"Enables the get_by_id command without any pre-configured scope.","commands":{"allow":["get_by_id"],"deny":[]}},"allow-new":{"identifier":"allow-new","description":"Enables the new command without any pre-configured scope.","commands":{"allow":["new"],"deny":[]}},"allow-remove-by-id":{"identifier":"allow-remove-by-id","description":"Enables the remove_by_id command without any pre-configured scope.","commands":{"allow":["remove_by_id"],"deny":[]}},"allow-set-icon":{"identifier":"allow-set-icon","description":"Enables the set_icon command without any pre-configured scope.","commands":{"allow":["set_icon"],"deny":[]}},"allow-set-icon-as-template":{"identifier":"allow-set-icon-as-template","description":"Enables the set_icon_as_template command without any pre-configured scope.","commands":{"allow":["set_icon_as_template"],"deny":[]}},"allow-set-menu":{"identifier":"allow-set-menu","description":"Enables the set_menu command without any pre-configured 
scope.","commands":{"allow":["set_menu"],"deny":[]}},"allow-set-show-menu-on-left-click":{"identifier":"allow-set-show-menu-on-left-click","description":"Enables the set_show_menu_on_left_click command without any pre-configured scope.","commands":{"allow":["set_show_menu_on_left_click"],"deny":[]}},"allow-set-temp-dir-path":{"identifier":"allow-set-temp-dir-path","description":"Enables the set_temp_dir_path command without any pre-configured scope.","commands":{"allow":["set_temp_dir_path"],"deny":[]}},"allow-set-title":{"identifier":"allow-set-title","description":"Enables the set_title command without any pre-configured scope.","commands":{"allow":["set_title"],"deny":[]}},"allow-set-tooltip":{"identifier":"allow-set-tooltip","description":"Enables the set_tooltip command without any pre-configured scope.","commands":{"allow":["set_tooltip"],"deny":[]}},"allow-set-visible":{"identifier":"allow-set-visible","description":"Enables the set_visible command without any pre-configured scope.","commands":{"allow":["set_visible"],"deny":[]}},"deny-get-by-id":{"identifier":"deny-get-by-id","description":"Denies the get_by_id command without any pre-configured scope.","commands":{"allow":[],"deny":["get_by_id"]}},"deny-new":{"identifier":"deny-new","description":"Denies the new command without any pre-configured scope.","commands":{"allow":[],"deny":["new"]}},"deny-remove-by-id":{"identifier":"deny-remove-by-id","description":"Denies the remove_by_id command without any pre-configured scope.","commands":{"allow":[],"deny":["remove_by_id"]}},"deny-set-icon":{"identifier":"deny-set-icon","description":"Denies the set_icon command without any pre-configured scope.","commands":{"allow":[],"deny":["set_icon"]}},"deny-set-icon-as-template":{"identifier":"deny-set-icon-as-template","description":"Denies the set_icon_as_template command without any pre-configured 
scope.","commands":{"allow":[],"deny":["set_icon_as_template"]}},"deny-set-menu":{"identifier":"deny-set-menu","description":"Denies the set_menu command without any pre-configured scope.","commands":{"allow":[],"deny":["set_menu"]}},"deny-set-show-menu-on-left-click":{"identifier":"deny-set-show-menu-on-left-click","description":"Denies the set_show_menu_on_left_click command without any pre-configured scope.","commands":{"allow":[],"deny":["set_show_menu_on_left_click"]}},"deny-set-temp-dir-path":{"identifier":"deny-set-temp-dir-path","description":"Denies the set_temp_dir_path command without any pre-configured scope.","commands":{"allow":[],"deny":["set_temp_dir_path"]}},"deny-set-title":{"identifier":"deny-set-title","description":"Denies the set_title command without any pre-configured scope.","commands":{"allow":[],"deny":["set_title"]}},"deny-set-tooltip":{"identifier":"deny-set-tooltip","description":"Denies the set_tooltip command without any pre-configured scope.","commands":{"allow":[],"deny":["set_tooltip"]}},"deny-set-visible":{"identifier":"deny-set-visible","description":"Denies the set_visible command without any pre-configured scope.","commands":{"allow":[],"deny":["set_visible"]}}},"permission_sets":{},"global_scope_schema":null},"core:webview":{"default_permission":{"identifier":"default","description":"Default permissions for the plugin.","permissions":["allow-get-all-webviews","allow-webview-position","allow-webview-size","allow-internal-toggle-devtools"]},"permissions":{"allow-clear-all-browsing-data":{"identifier":"allow-clear-all-browsing-data","description":"Enables the clear_all_browsing_data command without any pre-configured scope.","commands":{"allow":["clear_all_browsing_data"],"deny":[]}},"allow-create-webview":{"identifier":"allow-create-webview","description":"Enables the create_webview command without any pre-configured 
scope.","commands":{"allow":["create_webview"],"deny":[]}},"allow-create-webview-window":{"identifier":"allow-create-webview-window","description":"Enables the create_webview_window command without any pre-configured scope.","commands":{"allow":["create_webview_window"],"deny":[]}},"allow-get-all-webviews":{"identifier":"allow-get-all-webviews","description":"Enables the get_all_webviews command without any pre-configured scope.","commands":{"allow":["get_all_webviews"],"deny":[]}},"allow-internal-toggle-devtools":{"identifier":"allow-internal-toggle-devtools","description":"Enables the internal_toggle_devtools command without any pre-configured scope.","commands":{"allow":["internal_toggle_devtools"],"deny":[]}},"allow-print":{"identifier":"allow-print","description":"Enables the print command without any pre-configured scope.","commands":{"allow":["print"],"deny":[]}},"allow-reparent":{"identifier":"allow-reparent","description":"Enables the reparent command without any pre-configured scope.","commands":{"allow":["reparent"],"deny":[]}},"allow-set-webview-auto-resize":{"identifier":"allow-set-webview-auto-resize","description":"Enables the set_webview_auto_resize command without any pre-configured scope.","commands":{"allow":["set_webview_auto_resize"],"deny":[]}},"allow-set-webview-background-color":{"identifier":"allow-set-webview-background-color","description":"Enables the set_webview_background_color command without any pre-configured scope.","commands":{"allow":["set_webview_background_color"],"deny":[]}},"allow-set-webview-focus":{"identifier":"allow-set-webview-focus","description":"Enables the set_webview_focus command without any pre-configured scope.","commands":{"allow":["set_webview_focus"],"deny":[]}},"allow-set-webview-position":{"identifier":"allow-set-webview-position","description":"Enables the set_webview_position command without any pre-configured 
scope.","commands":{"allow":["set_webview_position"],"deny":[]}},"allow-set-webview-size":{"identifier":"allow-set-webview-size","description":"Enables the set_webview_size command without any pre-configured scope.","commands":{"allow":["set_webview_size"],"deny":[]}},"allow-set-webview-zoom":{"identifier":"allow-set-webview-zoom","description":"Enables the set_webview_zoom command without any pre-configured scope.","commands":{"allow":["set_webview_zoom"],"deny":[]}},"allow-webview-close":{"identifier":"allow-webview-close","description":"Enables the webview_close command without any pre-configured scope.","commands":{"allow":["webview_close"],"deny":[]}},"allow-webview-hide":{"identifier":"allow-webview-hide","description":"Enables the webview_hide command without any pre-configured scope.","commands":{"allow":["webview_hide"],"deny":[]}},"allow-webview-position":{"identifier":"allow-webview-position","description":"Enables the webview_position command without any pre-configured scope.","commands":{"allow":["webview_position"],"deny":[]}},"allow-webview-show":{"identifier":"allow-webview-show","description":"Enables the webview_show command without any pre-configured scope.","commands":{"allow":["webview_show"],"deny":[]}},"allow-webview-size":{"identifier":"allow-webview-size","description":"Enables the webview_size command without any pre-configured scope.","commands":{"allow":["webview_size"],"deny":[]}},"deny-clear-all-browsing-data":{"identifier":"deny-clear-all-browsing-data","description":"Denies the clear_all_browsing_data command without any pre-configured scope.","commands":{"allow":[],"deny":["clear_all_browsing_data"]}},"deny-create-webview":{"identifier":"deny-create-webview","description":"Denies the create_webview command without any pre-configured scope.","commands":{"allow":[],"deny":["create_webview"]}},"deny-create-webview-window":{"identifier":"deny-create-webview-window","description":"Denies the create_webview_window command without any 
pre-configured scope.","commands":{"allow":[],"deny":["create_webview_window"]}},"deny-get-all-webviews":{"identifier":"deny-get-all-webviews","description":"Denies the get_all_webviews command without any pre-configured scope.","commands":{"allow":[],"deny":["get_all_webviews"]}},"deny-internal-toggle-devtools":{"identifier":"deny-internal-toggle-devtools","description":"Denies the internal_toggle_devtools command without any pre-configured scope.","commands":{"allow":[],"deny":["internal_toggle_devtools"]}},"deny-print":{"identifier":"deny-print","description":"Denies the print command without any pre-configured scope.","commands":{"allow":[],"deny":["print"]}},"deny-reparent":{"identifier":"deny-reparent","description":"Denies the reparent command without any pre-configured scope.","commands":{"allow":[],"deny":["reparent"]}},"deny-set-webview-auto-resize":{"identifier":"deny-set-webview-auto-resize","description":"Denies the set_webview_auto_resize command without any pre-configured scope.","commands":{"allow":[],"deny":["set_webview_auto_resize"]}},"deny-set-webview-background-color":{"identifier":"deny-set-webview-background-color","description":"Denies the set_webview_background_color command without any pre-configured scope.","commands":{"allow":[],"deny":["set_webview_background_color"]}},"deny-set-webview-focus":{"identifier":"deny-set-webview-focus","description":"Denies the set_webview_focus command without any pre-configured scope.","commands":{"allow":[],"deny":["set_webview_focus"]}},"deny-set-webview-position":{"identifier":"deny-set-webview-position","description":"Denies the set_webview_position command without any pre-configured scope.","commands":{"allow":[],"deny":["set_webview_position"]}},"deny-set-webview-size":{"identifier":"deny-set-webview-size","description":"Denies the set_webview_size command without any pre-configured 
scope.","commands":{"allow":[],"deny":["set_webview_size"]}},"deny-set-webview-zoom":{"identifier":"deny-set-webview-zoom","description":"Denies the set_webview_zoom command without any pre-configured scope.","commands":{"allow":[],"deny":["set_webview_zoom"]}},"deny-webview-close":{"identifier":"deny-webview-close","description":"Denies the webview_close command without any pre-configured scope.","commands":{"allow":[],"deny":["webview_close"]}},"deny-webview-hide":{"identifier":"deny-webview-hide","description":"Denies the webview_hide command without any pre-configured scope.","commands":{"allow":[],"deny":["webview_hide"]}},"deny-webview-position":{"identifier":"deny-webview-position","description":"Denies the webview_position command without any pre-configured scope.","commands":{"allow":[],"deny":["webview_position"]}},"deny-webview-show":{"identifier":"deny-webview-show","description":"Denies the webview_show command without any pre-configured scope.","commands":{"allow":[],"deny":["webview_show"]}},"deny-webview-size":{"identifier":"deny-webview-size","description":"Denies the webview_size command without any pre-configured scope.","commands":{"allow":[],"deny":["webview_size"]}}},"permission_sets":{},"global_scope_schema":null},"core:window":{"default_permission":{"identifier":"default","description":"Default permissions for the 
plugin.","permissions":["allow-get-all-windows","allow-scale-factor","allow-inner-position","allow-outer-position","allow-inner-size","allow-outer-size","allow-is-fullscreen","allow-is-minimized","allow-is-maximized","allow-is-focused","allow-is-decorated","allow-is-resizable","allow-is-maximizable","allow-is-minimizable","allow-is-closable","allow-is-visible","allow-is-enabled","allow-title","allow-current-monitor","allow-primary-monitor","allow-monitor-from-point","allow-available-monitors","allow-cursor-position","allow-theme","allow-is-always-on-top","allow-internal-toggle-maximize"]},"permissions":{"allow-available-monitors":{"identifier":"allow-available-monitors","description":"Enables the available_monitors command without any pre-configured scope.","commands":{"allow":["available_monitors"],"deny":[]}},"allow-center":{"identifier":"allow-center","description":"Enables the center command without any pre-configured scope.","commands":{"allow":["center"],"deny":[]}},"allow-close":{"identifier":"allow-close","description":"Enables the close command without any pre-configured scope.","commands":{"allow":["close"],"deny":[]}},"allow-create":{"identifier":"allow-create","description":"Enables the create command without any pre-configured scope.","commands":{"allow":["create"],"deny":[]}},"allow-current-monitor":{"identifier":"allow-current-monitor","description":"Enables the current_monitor command without any pre-configured scope.","commands":{"allow":["current_monitor"],"deny":[]}},"allow-cursor-position":{"identifier":"allow-cursor-position","description":"Enables the cursor_position command without any pre-configured scope.","commands":{"allow":["cursor_position"],"deny":[]}},"allow-destroy":{"identifier":"allow-destroy","description":"Enables the destroy command without any pre-configured scope.","commands":{"allow":["destroy"],"deny":[]}},"allow-get-all-windows":{"identifier":"allow-get-all-windows","description":"Enables the get_all_windows command without 
any pre-configured scope.","commands":{"allow":["get_all_windows"],"deny":[]}},"allow-hide":{"identifier":"allow-hide","description":"Enables the hide command without any pre-configured scope.","commands":{"allow":["hide"],"deny":[]}},"allow-inner-position":{"identifier":"allow-inner-position","description":"Enables the inner_position command without any pre-configured scope.","commands":{"allow":["inner_position"],"deny":[]}},"allow-inner-size":{"identifier":"allow-inner-size","description":"Enables the inner_size command without any pre-configured scope.","commands":{"allow":["inner_size"],"deny":[]}},"allow-internal-toggle-maximize":{"identifier":"allow-internal-toggle-maximize","description":"Enables the internal_toggle_maximize command without any pre-configured scope.","commands":{"allow":["internal_toggle_maximize"],"deny":[]}},"allow-is-always-on-top":{"identifier":"allow-is-always-on-top","description":"Enables the is_always_on_top command without any pre-configured scope.","commands":{"allow":["is_always_on_top"],"deny":[]}},"allow-is-closable":{"identifier":"allow-is-closable","description":"Enables the is_closable command without any pre-configured scope.","commands":{"allow":["is_closable"],"deny":[]}},"allow-is-decorated":{"identifier":"allow-is-decorated","description":"Enables the is_decorated command without any pre-configured scope.","commands":{"allow":["is_decorated"],"deny":[]}},"allow-is-enabled":{"identifier":"allow-is-enabled","description":"Enables the is_enabled command without any pre-configured scope.","commands":{"allow":["is_enabled"],"deny":[]}},"allow-is-focused":{"identifier":"allow-is-focused","description":"Enables the is_focused command without any pre-configured scope.","commands":{"allow":["is_focused"],"deny":[]}},"allow-is-fullscreen":{"identifier":"allow-is-fullscreen","description":"Enables the is_fullscreen command without any pre-configured 
scope.","commands":{"allow":["is_fullscreen"],"deny":[]}},"allow-is-maximizable":{"identifier":"allow-is-maximizable","description":"Enables the is_maximizable command without any pre-configured scope.","commands":{"allow":["is_maximizable"],"deny":[]}},"allow-is-maximized":{"identifier":"allow-is-maximized","description":"Enables the is_maximized command without any pre-configured scope.","commands":{"allow":["is_maximized"],"deny":[]}},"allow-is-minimizable":{"identifier":"allow-is-minimizable","description":"Enables the is_minimizable command without any pre-configured scope.","commands":{"allow":["is_minimizable"],"deny":[]}},"allow-is-minimized":{"identifier":"allow-is-minimized","description":"Enables the is_minimized command without any pre-configured scope.","commands":{"allow":["is_minimized"],"deny":[]}},"allow-is-resizable":{"identifier":"allow-is-resizable","description":"Enables the is_resizable command without any pre-configured scope.","commands":{"allow":["is_resizable"],"deny":[]}},"allow-is-visible":{"identifier":"allow-is-visible","description":"Enables the is_visible command without any pre-configured scope.","commands":{"allow":["is_visible"],"deny":[]}},"allow-maximize":{"identifier":"allow-maximize","description":"Enables the maximize command without any pre-configured scope.","commands":{"allow":["maximize"],"deny":[]}},"allow-minimize":{"identifier":"allow-minimize","description":"Enables the minimize command without any pre-configured scope.","commands":{"allow":["minimize"],"deny":[]}},"allow-monitor-from-point":{"identifier":"allow-monitor-from-point","description":"Enables the monitor_from_point command without any pre-configured scope.","commands":{"allow":["monitor_from_point"],"deny":[]}},"allow-outer-position":{"identifier":"allow-outer-position","description":"Enables the outer_position command without any pre-configured 
scope.","commands":{"allow":["outer_position"],"deny":[]}},"allow-outer-size":{"identifier":"allow-outer-size","description":"Enables the outer_size command without any pre-configured scope.","commands":{"allow":["outer_size"],"deny":[]}},"allow-primary-monitor":{"identifier":"allow-primary-monitor","description":"Enables the primary_monitor command without any pre-configured scope.","commands":{"allow":["primary_monitor"],"deny":[]}},"allow-request-user-attention":{"identifier":"allow-request-user-attention","description":"Enables the request_user_attention command without any pre-configured scope.","commands":{"allow":["request_user_attention"],"deny":[]}},"allow-scale-factor":{"identifier":"allow-scale-factor","description":"Enables the scale_factor command without any pre-configured scope.","commands":{"allow":["scale_factor"],"deny":[]}},"allow-set-always-on-bottom":{"identifier":"allow-set-always-on-bottom","description":"Enables the set_always_on_bottom command without any pre-configured scope.","commands":{"allow":["set_always_on_bottom"],"deny":[]}},"allow-set-always-on-top":{"identifier":"allow-set-always-on-top","description":"Enables the set_always_on_top command without any pre-configured scope.","commands":{"allow":["set_always_on_top"],"deny":[]}},"allow-set-background-color":{"identifier":"allow-set-background-color","description":"Enables the set_background_color command without any pre-configured scope.","commands":{"allow":["set_background_color"],"deny":[]}},"allow-set-badge-count":{"identifier":"allow-set-badge-count","description":"Enables the set_badge_count command without any pre-configured scope.","commands":{"allow":["set_badge_count"],"deny":[]}},"allow-set-badge-label":{"identifier":"allow-set-badge-label","description":"Enables the set_badge_label command without any pre-configured scope.","commands":{"allow":["set_badge_label"],"deny":[]}},"allow-set-closable":{"identifier":"allow-set-closable","description":"Enables the set_closable 
command without any pre-configured scope.","commands":{"allow":["set_closable"],"deny":[]}},"allow-set-content-protected":{"identifier":"allow-set-content-protected","description":"Enables the set_content_protected command without any pre-configured scope.","commands":{"allow":["set_content_protected"],"deny":[]}},"allow-set-cursor-grab":{"identifier":"allow-set-cursor-grab","description":"Enables the set_cursor_grab command without any pre-configured scope.","commands":{"allow":["set_cursor_grab"],"deny":[]}},"allow-set-cursor-icon":{"identifier":"allow-set-cursor-icon","description":"Enables the set_cursor_icon command without any pre-configured scope.","commands":{"allow":["set_cursor_icon"],"deny":[]}},"allow-set-cursor-position":{"identifier":"allow-set-cursor-position","description":"Enables the set_cursor_position command without any pre-configured scope.","commands":{"allow":["set_cursor_position"],"deny":[]}},"allow-set-cursor-visible":{"identifier":"allow-set-cursor-visible","description":"Enables the set_cursor_visible command without any pre-configured scope.","commands":{"allow":["set_cursor_visible"],"deny":[]}},"allow-set-decorations":{"identifier":"allow-set-decorations","description":"Enables the set_decorations command without any pre-configured scope.","commands":{"allow":["set_decorations"],"deny":[]}},"allow-set-effects":{"identifier":"allow-set-effects","description":"Enables the set_effects command without any pre-configured scope.","commands":{"allow":["set_effects"],"deny":[]}},"allow-set-enabled":{"identifier":"allow-set-enabled","description":"Enables the set_enabled command without any pre-configured scope.","commands":{"allow":["set_enabled"],"deny":[]}},"allow-set-focus":{"identifier":"allow-set-focus","description":"Enables the set_focus command without any pre-configured scope.","commands":{"allow":["set_focus"],"deny":[]}},"allow-set-focusable":{"identifier":"allow-set-focusable","description":"Enables the set_focusable command 
without any pre-configured scope.","commands":{"allow":["set_focusable"],"deny":[]}},"allow-set-fullscreen":{"identifier":"allow-set-fullscreen","description":"Enables the set_fullscreen command without any pre-configured scope.","commands":{"allow":["set_fullscreen"],"deny":[]}},"allow-set-icon":{"identifier":"allow-set-icon","description":"Enables the set_icon command without any pre-configured scope.","commands":{"allow":["set_icon"],"deny":[]}},"allow-set-ignore-cursor-events":{"identifier":"allow-set-ignore-cursor-events","description":"Enables the set_ignore_cursor_events command without any pre-configured scope.","commands":{"allow":["set_ignore_cursor_events"],"deny":[]}},"allow-set-max-size":{"identifier":"allow-set-max-size","description":"Enables the set_max_size command without any pre-configured scope.","commands":{"allow":["set_max_size"],"deny":[]}},"allow-set-maximizable":{"identifier":"allow-set-maximizable","description":"Enables the set_maximizable command without any pre-configured scope.","commands":{"allow":["set_maximizable"],"deny":[]}},"allow-set-min-size":{"identifier":"allow-set-min-size","description":"Enables the set_min_size command without any pre-configured scope.","commands":{"allow":["set_min_size"],"deny":[]}},"allow-set-minimizable":{"identifier":"allow-set-minimizable","description":"Enables the set_minimizable command without any pre-configured scope.","commands":{"allow":["set_minimizable"],"deny":[]}},"allow-set-overlay-icon":{"identifier":"allow-set-overlay-icon","description":"Enables the set_overlay_icon command without any pre-configured scope.","commands":{"allow":["set_overlay_icon"],"deny":[]}},"allow-set-position":{"identifier":"allow-set-position","description":"Enables the set_position command without any pre-configured scope.","commands":{"allow":["set_position"],"deny":[]}},"allow-set-progress-bar":{"identifier":"allow-set-progress-bar","description":"Enables the set_progress_bar command without any pre-configured 
scope.","commands":{"allow":["set_progress_bar"],"deny":[]}},"allow-set-resizable":{"identifier":"allow-set-resizable","description":"Enables the set_resizable command without any pre-configured scope.","commands":{"allow":["set_resizable"],"deny":[]}},"allow-set-shadow":{"identifier":"allow-set-shadow","description":"Enables the set_shadow command without any pre-configured scope.","commands":{"allow":["set_shadow"],"deny":[]}},"allow-set-simple-fullscreen":{"identifier":"allow-set-simple-fullscreen","description":"Enables the set_simple_fullscreen command without any pre-configured scope.","commands":{"allow":["set_simple_fullscreen"],"deny":[]}},"allow-set-size":{"identifier":"allow-set-size","description":"Enables the set_size command without any pre-configured scope.","commands":{"allow":["set_size"],"deny":[]}},"allow-set-size-constraints":{"identifier":"allow-set-size-constraints","description":"Enables the set_size_constraints command without any pre-configured scope.","commands":{"allow":["set_size_constraints"],"deny":[]}},"allow-set-skip-taskbar":{"identifier":"allow-set-skip-taskbar","description":"Enables the set_skip_taskbar command without any pre-configured scope.","commands":{"allow":["set_skip_taskbar"],"deny":[]}},"allow-set-theme":{"identifier":"allow-set-theme","description":"Enables the set_theme command without any pre-configured scope.","commands":{"allow":["set_theme"],"deny":[]}},"allow-set-title":{"identifier":"allow-set-title","description":"Enables the set_title command without any pre-configured scope.","commands":{"allow":["set_title"],"deny":[]}},"allow-set-title-bar-style":{"identifier":"allow-set-title-bar-style","description":"Enables the set_title_bar_style command without any pre-configured scope.","commands":{"allow":["set_title_bar_style"],"deny":[]}},"allow-set-visible-on-all-workspaces":{"identifier":"allow-set-visible-on-all-workspaces","description":"Enables the set_visible_on_all_workspaces command without any 
pre-configured scope.","commands":{"allow":["set_visible_on_all_workspaces"],"deny":[]}},"allow-show":{"identifier":"allow-show","description":"Enables the show command without any pre-configured scope.","commands":{"allow":["show"],"deny":[]}},"allow-start-dragging":{"identifier":"allow-start-dragging","description":"Enables the start_dragging command without any pre-configured scope.","commands":{"allow":["start_dragging"],"deny":[]}},"allow-start-resize-dragging":{"identifier":"allow-start-resize-dragging","description":"Enables the start_resize_dragging command without any pre-configured scope.","commands":{"allow":["start_resize_dragging"],"deny":[]}},"allow-theme":{"identifier":"allow-theme","description":"Enables the theme command without any pre-configured scope.","commands":{"allow":["theme"],"deny":[]}},"allow-title":{"identifier":"allow-title","description":"Enables the title command without any pre-configured scope.","commands":{"allow":["title"],"deny":[]}},"allow-toggle-maximize":{"identifier":"allow-toggle-maximize","description":"Enables the toggle_maximize command without any pre-configured scope.","commands":{"allow":["toggle_maximize"],"deny":[]}},"allow-unmaximize":{"identifier":"allow-unmaximize","description":"Enables the unmaximize command without any pre-configured scope.","commands":{"allow":["unmaximize"],"deny":[]}},"allow-unminimize":{"identifier":"allow-unminimize","description":"Enables the unminimize command without any pre-configured scope.","commands":{"allow":["unminimize"],"deny":[]}},"deny-available-monitors":{"identifier":"deny-available-monitors","description":"Denies the available_monitors command without any pre-configured scope.","commands":{"allow":[],"deny":["available_monitors"]}},"deny-center":{"identifier":"deny-center","description":"Denies the center command without any pre-configured scope.","commands":{"allow":[],"deny":["center"]}},"deny-close":{"identifier":"deny-close","description":"Denies the close command 
without any pre-configured scope.","commands":{"allow":[],"deny":["close"]}},"deny-create":{"identifier":"deny-create","description":"Denies the create command without any pre-configured scope.","commands":{"allow":[],"deny":["create"]}},"deny-current-monitor":{"identifier":"deny-current-monitor","description":"Denies the current_monitor command without any pre-configured scope.","commands":{"allow":[],"deny":["current_monitor"]}},"deny-cursor-position":{"identifier":"deny-cursor-position","description":"Denies the cursor_position command without any pre-configured scope.","commands":{"allow":[],"deny":["cursor_position"]}},"deny-destroy":{"identifier":"deny-destroy","description":"Denies the destroy command without any pre-configured scope.","commands":{"allow":[],"deny":["destroy"]}},"deny-get-all-windows":{"identifier":"deny-get-all-windows","description":"Denies the get_all_windows command without any pre-configured scope.","commands":{"allow":[],"deny":["get_all_windows"]}},"deny-hide":{"identifier":"deny-hide","description":"Denies the hide command without any pre-configured scope.","commands":{"allow":[],"deny":["hide"]}},"deny-inner-position":{"identifier":"deny-inner-position","description":"Denies the inner_position command without any pre-configured scope.","commands":{"allow":[],"deny":["inner_position"]}},"deny-inner-size":{"identifier":"deny-inner-size","description":"Denies the inner_size command without any pre-configured scope.","commands":{"allow":[],"deny":["inner_size"]}},"deny-internal-toggle-maximize":{"identifier":"deny-internal-toggle-maximize","description":"Denies the internal_toggle_maximize command without any pre-configured scope.","commands":{"allow":[],"deny":["internal_toggle_maximize"]}},"deny-is-always-on-top":{"identifier":"deny-is-always-on-top","description":"Denies the is_always_on_top command without any pre-configured 
scope.","commands":{"allow":[],"deny":["is_always_on_top"]}},"deny-is-closable":{"identifier":"deny-is-closable","description":"Denies the is_closable command without any pre-configured scope.","commands":{"allow":[],"deny":["is_closable"]}},"deny-is-decorated":{"identifier":"deny-is-decorated","description":"Denies the is_decorated command without any pre-configured scope.","commands":{"allow":[],"deny":["is_decorated"]}},"deny-is-enabled":{"identifier":"deny-is-enabled","description":"Denies the is_enabled command without any pre-configured scope.","commands":{"allow":[],"deny":["is_enabled"]}},"deny-is-focused":{"identifier":"deny-is-focused","description":"Denies the is_focused command without any pre-configured scope.","commands":{"allow":[],"deny":["is_focused"]}},"deny-is-fullscreen":{"identifier":"deny-is-fullscreen","description":"Denies the is_fullscreen command without any pre-configured scope.","commands":{"allow":[],"deny":["is_fullscreen"]}},"deny-is-maximizable":{"identifier":"deny-is-maximizable","description":"Denies the is_maximizable command without any pre-configured scope.","commands":{"allow":[],"deny":["is_maximizable"]}},"deny-is-maximized":{"identifier":"deny-is-maximized","description":"Denies the is_maximized command without any pre-configured scope.","commands":{"allow":[],"deny":["is_maximized"]}},"deny-is-minimizable":{"identifier":"deny-is-minimizable","description":"Denies the is_minimizable command without any pre-configured scope.","commands":{"allow":[],"deny":["is_minimizable"]}},"deny-is-minimized":{"identifier":"deny-is-minimized","description":"Denies the is_minimized command without any pre-configured scope.","commands":{"allow":[],"deny":["is_minimized"]}},"deny-is-resizable":{"identifier":"deny-is-resizable","description":"Denies the is_resizable command without any pre-configured scope.","commands":{"allow":[],"deny":["is_resizable"]}},"deny-is-visible":{"identifier":"deny-is-visible","description":"Denies the is_visible 
command without any pre-configured scope.","commands":{"allow":[],"deny":["is_visible"]}},"deny-maximize":{"identifier":"deny-maximize","description":"Denies the maximize command without any pre-configured scope.","commands":{"allow":[],"deny":["maximize"]}},"deny-minimize":{"identifier":"deny-minimize","description":"Denies the minimize command without any pre-configured scope.","commands":{"allow":[],"deny":["minimize"]}},"deny-monitor-from-point":{"identifier":"deny-monitor-from-point","description":"Denies the monitor_from_point command without any pre-configured scope.","commands":{"allow":[],"deny":["monitor_from_point"]}},"deny-outer-position":{"identifier":"deny-outer-position","description":"Denies the outer_position command without any pre-configured scope.","commands":{"allow":[],"deny":["outer_position"]}},"deny-outer-size":{"identifier":"deny-outer-size","description":"Denies the outer_size command without any pre-configured scope.","commands":{"allow":[],"deny":["outer_size"]}},"deny-primary-monitor":{"identifier":"deny-primary-monitor","description":"Denies the primary_monitor command without any pre-configured scope.","commands":{"allow":[],"deny":["primary_monitor"]}},"deny-request-user-attention":{"identifier":"deny-request-user-attention","description":"Denies the request_user_attention command without any pre-configured scope.","commands":{"allow":[],"deny":["request_user_attention"]}},"deny-scale-factor":{"identifier":"deny-scale-factor","description":"Denies the scale_factor command without any pre-configured scope.","commands":{"allow":[],"deny":["scale_factor"]}},"deny-set-always-on-bottom":{"identifier":"deny-set-always-on-bottom","description":"Denies the set_always_on_bottom command without any pre-configured scope.","commands":{"allow":[],"deny":["set_always_on_bottom"]}},"deny-set-always-on-top":{"identifier":"deny-set-always-on-top","description":"Denies the set_always_on_top command without any pre-configured 
scope.","commands":{"allow":[],"deny":["set_always_on_top"]}},"deny-set-background-color":{"identifier":"deny-set-background-color","description":"Denies the set_background_color command without any pre-configured scope.","commands":{"allow":[],"deny":["set_background_color"]}},"deny-set-badge-count":{"identifier":"deny-set-badge-count","description":"Denies the set_badge_count command without any pre-configured scope.","commands":{"allow":[],"deny":["set_badge_count"]}},"deny-set-badge-label":{"identifier":"deny-set-badge-label","description":"Denies the set_badge_label command without any pre-configured scope.","commands":{"allow":[],"deny":["set_badge_label"]}},"deny-set-closable":{"identifier":"deny-set-closable","description":"Denies the set_closable command without any pre-configured scope.","commands":{"allow":[],"deny":["set_closable"]}},"deny-set-content-protected":{"identifier":"deny-set-content-protected","description":"Denies the set_content_protected command without any pre-configured scope.","commands":{"allow":[],"deny":["set_content_protected"]}},"deny-set-cursor-grab":{"identifier":"deny-set-cursor-grab","description":"Denies the set_cursor_grab command without any pre-configured scope.","commands":{"allow":[],"deny":["set_cursor_grab"]}},"deny-set-cursor-icon":{"identifier":"deny-set-cursor-icon","description":"Denies the set_cursor_icon command without any pre-configured scope.","commands":{"allow":[],"deny":["set_cursor_icon"]}},"deny-set-cursor-position":{"identifier":"deny-set-cursor-position","description":"Denies the set_cursor_position command without any pre-configured scope.","commands":{"allow":[],"deny":["set_cursor_position"]}},"deny-set-cursor-visible":{"identifier":"deny-set-cursor-visible","description":"Denies the set_cursor_visible command without any pre-configured scope.","commands":{"allow":[],"deny":["set_cursor_visible"]}},"deny-set-decorations":{"identifier":"deny-set-decorations","description":"Denies the set_decorations 
command without any pre-configured scope.","commands":{"allow":[],"deny":["set_decorations"]}},"deny-set-effects":{"identifier":"deny-set-effects","description":"Denies the set_effects command without any pre-configured scope.","commands":{"allow":[],"deny":["set_effects"]}},"deny-set-enabled":{"identifier":"deny-set-enabled","description":"Denies the set_enabled command without any pre-configured scope.","commands":{"allow":[],"deny":["set_enabled"]}},"deny-set-focus":{"identifier":"deny-set-focus","description":"Denies the set_focus command without any pre-configured scope.","commands":{"allow":[],"deny":["set_focus"]}},"deny-set-focusable":{"identifier":"deny-set-focusable","description":"Denies the set_focusable command without any pre-configured scope.","commands":{"allow":[],"deny":["set_focusable"]}},"deny-set-fullscreen":{"identifier":"deny-set-fullscreen","description":"Denies the set_fullscreen command without any pre-configured scope.","commands":{"allow":[],"deny":["set_fullscreen"]}},"deny-set-icon":{"identifier":"deny-set-icon","description":"Denies the set_icon command without any pre-configured scope.","commands":{"allow":[],"deny":["set_icon"]}},"deny-set-ignore-cursor-events":{"identifier":"deny-set-ignore-cursor-events","description":"Denies the set_ignore_cursor_events command without any pre-configured scope.","commands":{"allow":[],"deny":["set_ignore_cursor_events"]}},"deny-set-max-size":{"identifier":"deny-set-max-size","description":"Denies the set_max_size command without any pre-configured scope.","commands":{"allow":[],"deny":["set_max_size"]}},"deny-set-maximizable":{"identifier":"deny-set-maximizable","description":"Denies the set_maximizable command without any pre-configured scope.","commands":{"allow":[],"deny":["set_maximizable"]}},"deny-set-min-size":{"identifier":"deny-set-min-size","description":"Denies the set_min_size command without any pre-configured 
scope.","commands":{"allow":[],"deny":["set_min_size"]}},"deny-set-minimizable":{"identifier":"deny-set-minimizable","description":"Denies the set_minimizable command without any pre-configured scope.","commands":{"allow":[],"deny":["set_minimizable"]}},"deny-set-overlay-icon":{"identifier":"deny-set-overlay-icon","description":"Denies the set_overlay_icon command without any pre-configured scope.","commands":{"allow":[],"deny":["set_overlay_icon"]}},"deny-set-position":{"identifier":"deny-set-position","description":"Denies the set_position command without any pre-configured scope.","commands":{"allow":[],"deny":["set_position"]}},"deny-set-progress-bar":{"identifier":"deny-set-progress-bar","description":"Denies the set_progress_bar command without any pre-configured scope.","commands":{"allow":[],"deny":["set_progress_bar"]}},"deny-set-resizable":{"identifier":"deny-set-resizable","description":"Denies the set_resizable command without any pre-configured scope.","commands":{"allow":[],"deny":["set_resizable"]}},"deny-set-shadow":{"identifier":"deny-set-shadow","description":"Denies the set_shadow command without any pre-configured scope.","commands":{"allow":[],"deny":["set_shadow"]}},"deny-set-simple-fullscreen":{"identifier":"deny-set-simple-fullscreen","description":"Denies the set_simple_fullscreen command without any pre-configured scope.","commands":{"allow":[],"deny":["set_simple_fullscreen"]}},"deny-set-size":{"identifier":"deny-set-size","description":"Denies the set_size command without any pre-configured scope.","commands":{"allow":[],"deny":["set_size"]}},"deny-set-size-constraints":{"identifier":"deny-set-size-constraints","description":"Denies the set_size_constraints command without any pre-configured scope.","commands":{"allow":[],"deny":["set_size_constraints"]}},"deny-set-skip-taskbar":{"identifier":"deny-set-skip-taskbar","description":"Denies the set_skip_taskbar command without any pre-configured 
scope.","commands":{"allow":[],"deny":["set_skip_taskbar"]}},"deny-set-theme":{"identifier":"deny-set-theme","description":"Denies the set_theme command without any pre-configured scope.","commands":{"allow":[],"deny":["set_theme"]}},"deny-set-title":{"identifier":"deny-set-title","description":"Denies the set_title command without any pre-configured scope.","commands":{"allow":[],"deny":["set_title"]}},"deny-set-title-bar-style":{"identifier":"deny-set-title-bar-style","description":"Denies the set_title_bar_style command without any pre-configured scope.","commands":{"allow":[],"deny":["set_title_bar_style"]}},"deny-set-visible-on-all-workspaces":{"identifier":"deny-set-visible-on-all-workspaces","description":"Denies the set_visible_on_all_workspaces command without any pre-configured scope.","commands":{"allow":[],"deny":["set_visible_on_all_workspaces"]}},"deny-show":{"identifier":"deny-show","description":"Denies the show command without any pre-configured scope.","commands":{"allow":[],"deny":["show"]}},"deny-start-dragging":{"identifier":"deny-start-dragging","description":"Denies the start_dragging command without any pre-configured scope.","commands":{"allow":[],"deny":["start_dragging"]}},"deny-start-resize-dragging":{"identifier":"deny-start-resize-dragging","description":"Denies the start_resize_dragging command without any pre-configured scope.","commands":{"allow":[],"deny":["start_resize_dragging"]}},"deny-theme":{"identifier":"deny-theme","description":"Denies the theme command without any pre-configured scope.","commands":{"allow":[],"deny":["theme"]}},"deny-title":{"identifier":"deny-title","description":"Denies the title command without any pre-configured scope.","commands":{"allow":[],"deny":["title"]}},"deny-toggle-maximize":{"identifier":"deny-toggle-maximize","description":"Denies the toggle_maximize command without any pre-configured 
scope.","commands":{"allow":[],"deny":["toggle_maximize"]}},"deny-unmaximize":{"identifier":"deny-unmaximize","description":"Denies the unmaximize command without any pre-configured scope.","commands":{"allow":[],"deny":["unmaximize"]}},"deny-unminimize":{"identifier":"deny-unminimize","description":"Denies the unminimize command without any pre-configured scope.","commands":{"allow":[],"deny":["unminimize"]}}},"permission_sets":{},"global_scope_schema":null},"shell":{"default_permission":{"identifier":"default","description":"This permission set configures which\nshell functionality is exposed by default.\n\n#### Granted Permissions\n\nIt allows to use the `open` functionality with a reasonable\nscope pre-configured. It will allow opening `http(s)://`,\n`tel:` and `mailto:` links.\n","permissions":["allow-open"]},"permissions":{"allow-execute":{"identifier":"allow-execute","description":"Enables the execute command without any pre-configured scope.","commands":{"allow":["execute"],"deny":[]}},"allow-kill":{"identifier":"allow-kill","description":"Enables the kill command without any pre-configured scope.","commands":{"allow":["kill"],"deny":[]}},"allow-open":{"identifier":"allow-open","description":"Enables the open command without any pre-configured scope.","commands":{"allow":["open"],"deny":[]}},"allow-spawn":{"identifier":"allow-spawn","description":"Enables the spawn command without any pre-configured scope.","commands":{"allow":["spawn"],"deny":[]}},"allow-stdin-write":{"identifier":"allow-stdin-write","description":"Enables the stdin_write command without any pre-configured scope.","commands":{"allow":["stdin_write"],"deny":[]}},"deny-execute":{"identifier":"deny-execute","description":"Denies the execute command without any pre-configured scope.","commands":{"allow":[],"deny":["execute"]}},"deny-kill":{"identifier":"deny-kill","description":"Denies the kill command without any pre-configured 
scope.","commands":{"allow":[],"deny":["kill"]}},"deny-open":{"identifier":"deny-open","description":"Denies the open command without any pre-configured scope.","commands":{"allow":[],"deny":["open"]}},"deny-spawn":{"identifier":"deny-spawn","description":"Denies the spawn command without any pre-configured scope.","commands":{"allow":[],"deny":["spawn"]}},"deny-stdin-write":{"identifier":"deny-stdin-write","description":"Denies the stdin_write command without any pre-configured scope.","commands":{"allow":[],"deny":["stdin_write"]}}},"permission_sets":{},"global_scope_schema":{"$schema":"http://json-schema.org/draft-07/schema#","anyOf":[{"additionalProperties":false,"properties":{"args":{"allOf":[{"$ref":"#/definitions/ShellScopeEntryAllowedArgs"}],"description":"The allowed arguments for the command execution."},"cmd":{"description":"The command name. It can start with a variable that resolves to a system base directory. The variables are: `$AUDIO`, `$CACHE`, `$CONFIG`, `$DATA`, `$LOCALDATA`, `$DESKTOP`, `$DOCUMENT`, `$DOWNLOAD`, `$EXE`, `$FONT`, `$HOME`, `$PICTURE`, `$PUBLIC`, `$RUNTIME`, `$TEMPLATE`, `$VIDEO`, `$RESOURCE`, `$LOG`, `$TEMP`, `$APPCONFIG`, `$APPDATA`, `$APPLOCALDATA`, `$APPCACHE`, `$APPLOG`.","type":"string"},"name":{"description":"The name for this allowed shell command configuration.\n\nThis name will be used inside of the webview API to call this command along with any specified arguments.","type":"string"}},"required":["cmd","name"],"type":"object"},{"additionalProperties":false,"properties":{"args":{"allOf":[{"$ref":"#/definitions/ShellScopeEntryAllowedArgs"}],"description":"The allowed arguments for the command execution."},"name":{"description":"The name for this allowed shell command configuration.\n\nThis name will be used inside of the webview API to call this command along with any specified arguments.","type":"string"},"sidecar":{"description":"If this command is a sidecar 
command.","type":"boolean"}},"required":["name","sidecar"],"type":"object"}],"definitions":{"ShellScopeEntryAllowedArg":{"anyOf":[{"description":"A non-configurable argument that is passed to the command in the order it was specified.","type":"string"},{"additionalProperties":false,"description":"A variable that is set while calling the command from the webview API.","properties":{"raw":{"default":false,"description":"Marks the validator as a raw regex, meaning the plugin should not make any modification at runtime.\n\nThis means the regex will not match on the entire string by default, which might be exploited if your regex allow unexpected input to be considered valid. When using this option, make sure your regex is correct.","type":"boolean"},"validator":{"description":"[regex] validator to require passed values to conform to an expected input.\n\nThis will require the argument value passed to this variable to match the `validator` regex before it will be executed.\n\nThe regex string is by default surrounded by `^...$` to match the full string. For example the `https?://\\w+` regex would be registered as `^https?://\\w+$`.\n\n[regex]: ","type":"string"}},"required":["validator"],"type":"object"}],"description":"A command argument allowed to be executed by the webview API."},"ShellScopeEntryAllowedArgs":{"anyOf":[{"description":"Use a simple boolean to allow all or disable all arguments to this command configuration.","type":"boolean"},{"description":"A specific set of [`ShellScopeEntryAllowedArg`] that are valid to call for the command configuration.","items":{"$ref":"#/definitions/ShellScopeEntryAllowedArg"},"type":"array"}],"description":"A set of command arguments allowed to be executed by the webview API.\n\nA value of `true` will allow any arguments to be passed to the command. `false` will disable all arguments. 
A list of [`ShellScopeEntryAllowedArg`] will set those arguments as the only valid arguments to be passed to the attached command configuration."}},"description":"Shell scope entry.","title":"ShellScopeEntry"}}} \ No newline at end of file diff --git a/frontend/src-tauri/gen/schemas/capabilities.json b/frontend/src-tauri/gen/schemas/capabilities.json new file mode 100644 index 000000000..f3bd464e5 --- /dev/null +++ b/frontend/src-tauri/gen/schemas/capabilities.json @@ -0,0 +1 @@ +{"default":{"identifier":"default","description":"Default capability for PlotPilot","local":true,"windows":["main"],"permissions":["core:default","shell:allow-spawn","shell:allow-execute","shell:allow-kill","shell:allow-open","shell:allow-stdin-write","core:window:allow-set-title","core:window:allow-close","core:window:allow-minimize","core:window:allow-maximize","core:window:allow-start-dragging","core:window:allow-is-maximized","core:window:allow-is-visible","core:window:allow-inner-position","core:window:allow-inner-size","core:window:allow-outer-position","core:window:allow-outer-size","core:window:allow-is-fullscreen","core:window:allow-set-fullscreen","core:window:allow-set-focus","core:webview:allow-print"]}} \ No newline at end of file diff --git a/frontend/src-tauri/gen/schemas/desktop-schema.json b/frontend/src-tauri/gen/schemas/desktop-schema.json new file mode 100644 index 000000000..f827fe175 --- /dev/null +++ b/frontend/src-tauri/gen/schemas/desktop-schema.json @@ -0,0 +1,2564 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "CapabilityFile", + "description": "Capability formats accepted in a capability file.", + "anyOf": [ + { + "description": "A single capability.", + "allOf": [ + { + "$ref": "#/definitions/Capability" + } + ] + }, + { + "description": "A list of capabilities.", + "type": "array", + "items": { + "$ref": "#/definitions/Capability" + } + }, + { + "description": "A list of capabilities.", + "type": "object", + "required": [ + 
"capabilities" + ], + "properties": { + "capabilities": { + "description": "The list of capabilities.", + "type": "array", + "items": { + "$ref": "#/definitions/Capability" + } + } + } + } + ], + "definitions": { + "Capability": { + "description": "A grouping and boundary mechanism developers can use to isolate access to the IPC layer.\n\nIt controls application windows' and webviews' fine grained access to the Tauri core, application, or plugin commands. If a webview or its window is not matching any capability then it has no access to the IPC layer at all.\n\nThis can be done to create groups of windows, based on their required system access, which can reduce impact of frontend vulnerabilities in less privileged windows. Windows can be added to a capability by exact name (e.g. `main-window`) or glob patterns like `*` or `admin-*`. A Window can have none, one, or multiple associated capabilities.\n\n## Example\n\n```json { \"identifier\": \"main-user-files-write\", \"description\": \"This capability allows the `main` window on macOS and Windows access to `filesystem` write related commands and `dialog` commands to enable programmatic access to files selected by the user.\", \"windows\": [ \"main\" ], \"permissions\": [ \"core:default\", \"dialog:open\", { \"identifier\": \"fs:allow-write-text-file\", \"allow\": [{ \"path\": \"$HOME/test.txt\" }] }, ], \"platforms\": [\"macOS\",\"windows\"] } ```", + "type": "object", + "required": [ + "identifier", + "permissions" + ], + "properties": { + "identifier": { + "description": "Identifier of the capability.\n\n## Example\n\n`main-user-files-write`", + "type": "string" + }, + "description": { + "description": "Description of what the capability is intended to allow on associated windows.\n\nIt should contain a description of what the grouped permissions should allow.\n\n## Example\n\nThis capability allows the `main` window access to `filesystem` write related commands and `dialog` commands to enable programmatic access 
to files selected by the user.", + "default": "", + "type": "string" + }, + "remote": { + "description": "Configure remote URLs that can use the capability permissions.\n\nThis setting is optional and defaults to not being set, as our default use case is that the content is served from our local application.\n\n:::caution Make sure you understand the security implications of providing remote sources with local system access. :::\n\n## Example\n\n```json { \"urls\": [\"https://*.mydomain.dev\"] } ```", + "anyOf": [ + { + "$ref": "#/definitions/CapabilityRemote" + }, + { + "type": "null" + } + ] + }, + "local": { + "description": "Whether this capability is enabled for local app URLs or not. Defaults to `true`.", + "default": true, + "type": "boolean" + }, + "windows": { + "description": "List of windows that are affected by this capability. Can be a glob pattern.\n\nIf a window label matches any of the patterns in this list, the capability will be enabled on all the webviews of that window, regardless of the value of [`Self::webviews`].\n\nOn multiwebview windows, prefer specifying [`Self::webviews`] and omitting [`Self::windows`] for a fine grained access control.\n\n## Example\n\n`[\"main\"]`", + "type": "array", + "items": { + "type": "string" + } + }, + "webviews": { + "description": "List of webviews that are affected by this capability. Can be a glob pattern.\n\nThe capability will be enabled on all the webviews whose label matches any of the patterns in this list, regardless of whether the webview's window label matches a pattern in [`Self::windows`].\n\n## Example\n\n`[\"sub-webview-one\", \"sub-webview-two\"]`", + "type": "array", + "items": { + "type": "string" + } + }, + "permissions": { + "description": "List of permissions attached to this capability.\n\nMust include the plugin name as prefix in the form of `${plugin-name}:${permission-name}`. 
For commands directly implemented in the application itself only `${permission-name}` is required.\n\n## Example\n\n```json [ \"core:default\", \"shell:allow-open\", \"dialog:open\", { \"identifier\": \"fs:allow-write-text-file\", \"allow\": [{ \"path\": \"$HOME/test.txt\" }] } ] ```", + "type": "array", + "items": { + "$ref": "#/definitions/PermissionEntry" + }, + "uniqueItems": true + }, + "platforms": { + "description": "Limit which target platforms this capability applies to.\n\nBy default all platforms are targeted.\n\n## Example\n\n`[\"macOS\",\"windows\"]`", + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/definitions/Target" + } + } + } + }, + "CapabilityRemote": { + "description": "Configuration for remote URLs that are associated with the capability.", + "type": "object", + "required": [ + "urls" + ], + "properties": { + "urls": { + "description": "Remote domains this capability refers to using the [URLPattern standard](https://urlpattern.spec.whatwg.org/).\n\n## Examples\n\n- \"https://*.mydomain.dev\": allows subdomains of mydomain.dev - \"https://mydomain.dev/api/*\": allows any subpath of mydomain.dev/api", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "PermissionEntry": { + "description": "An entry for a permission value in a [`Capability`] can be either a raw permission [`Identifier`] or an object that references a permission and extends its scope.", + "anyOf": [ + { + "description": "Reference a permission or permission set by identifier.", + "allOf": [ + { + "$ref": "#/definitions/Identifier" + } + ] + }, + { + "description": "Reference a permission or permission set by identifier and extends its scope.", + "type": "object", + "allOf": [ + { + "if": { + "properties": { + "identifier": { + "anyOf": [ + { + "description": "This permission set configures which\nshell functionality is exposed by default.\n\n#### Granted Permissions\n\nIt allows to use the `open` functionality with a reasonable\nscope 
pre-configured. It will allow opening `http(s)://`,\n`tel:` and `mailto:` links.\n\n#### This default permission set includes:\n\n- `allow-open`", + "type": "string", + "const": "shell:default", + "markdownDescription": "This permission set configures which\nshell functionality is exposed by default.\n\n#### Granted Permissions\n\nIt allows to use the `open` functionality with a reasonable\nscope pre-configured. It will allow opening `http(s)://`,\n`tel:` and `mailto:` links.\n\n#### This default permission set includes:\n\n- `allow-open`" + }, + { + "description": "Enables the execute command without any pre-configured scope.", + "type": "string", + "const": "shell:allow-execute", + "markdownDescription": "Enables the execute command without any pre-configured scope." + }, + { + "description": "Enables the kill command without any pre-configured scope.", + "type": "string", + "const": "shell:allow-kill", + "markdownDescription": "Enables the kill command without any pre-configured scope." + }, + { + "description": "Enables the open command without any pre-configured scope.", + "type": "string", + "const": "shell:allow-open", + "markdownDescription": "Enables the open command without any pre-configured scope." + }, + { + "description": "Enables the spawn command without any pre-configured scope.", + "type": "string", + "const": "shell:allow-spawn", + "markdownDescription": "Enables the spawn command without any pre-configured scope." + }, + { + "description": "Enables the stdin_write command without any pre-configured scope.", + "type": "string", + "const": "shell:allow-stdin-write", + "markdownDescription": "Enables the stdin_write command without any pre-configured scope." + }, + { + "description": "Denies the execute command without any pre-configured scope.", + "type": "string", + "const": "shell:deny-execute", + "markdownDescription": "Denies the execute command without any pre-configured scope." 
+ }, + { + "description": "Denies the kill command without any pre-configured scope.", + "type": "string", + "const": "shell:deny-kill", + "markdownDescription": "Denies the kill command without any pre-configured scope." + }, + { + "description": "Denies the open command without any pre-configured scope.", + "type": "string", + "const": "shell:deny-open", + "markdownDescription": "Denies the open command without any pre-configured scope." + }, + { + "description": "Denies the spawn command without any pre-configured scope.", + "type": "string", + "const": "shell:deny-spawn", + "markdownDescription": "Denies the spawn command without any pre-configured scope." + }, + { + "description": "Denies the stdin_write command without any pre-configured scope.", + "type": "string", + "const": "shell:deny-stdin-write", + "markdownDescription": "Denies the stdin_write command without any pre-configured scope." + } + ] + } + } + }, + "then": { + "properties": { + "allow": { + "items": { + "title": "ShellScopeEntry", + "description": "Shell scope entry.", + "anyOf": [ + { + "type": "object", + "required": [ + "cmd", + "name" + ], + "properties": { + "args": { + "description": "The allowed arguments for the command execution.", + "allOf": [ + { + "$ref": "#/definitions/ShellScopeEntryAllowedArgs" + } + ] + }, + "cmd": { + "description": "The command name. It can start with a variable that resolves to a system base directory. 
The variables are: `$AUDIO`, `$CACHE`, `$CONFIG`, `$DATA`, `$LOCALDATA`, `$DESKTOP`, `$DOCUMENT`, `$DOWNLOAD`, `$EXE`, `$FONT`, `$HOME`, `$PICTURE`, `$PUBLIC`, `$RUNTIME`, `$TEMPLATE`, `$VIDEO`, `$RESOURCE`, `$LOG`, `$TEMP`, `$APPCONFIG`, `$APPDATA`, `$APPLOCALDATA`, `$APPCACHE`, `$APPLOG`.", + "type": "string" + }, + "name": { + "description": "The name for this allowed shell command configuration.\n\nThis name will be used inside of the webview API to call this command along with any specified arguments.", + "type": "string" + } + }, + "additionalProperties": false + }, + { + "type": "object", + "required": [ + "name", + "sidecar" + ], + "properties": { + "args": { + "description": "The allowed arguments for the command execution.", + "allOf": [ + { + "$ref": "#/definitions/ShellScopeEntryAllowedArgs" + } + ] + }, + "name": { + "description": "The name for this allowed shell command configuration.\n\nThis name will be used inside of the webview API to call this command along with any specified arguments.", + "type": "string" + }, + "sidecar": { + "description": "If this command is a sidecar command.", + "type": "boolean" + } + }, + "additionalProperties": false + } + ] + } + }, + "deny": { + "items": { + "title": "ShellScopeEntry", + "description": "Shell scope entry.", + "anyOf": [ + { + "type": "object", + "required": [ + "cmd", + "name" + ], + "properties": { + "args": { + "description": "The allowed arguments for the command execution.", + "allOf": [ + { + "$ref": "#/definitions/ShellScopeEntryAllowedArgs" + } + ] + }, + "cmd": { + "description": "The command name. It can start with a variable that resolves to a system base directory. 
The variables are: `$AUDIO`, `$CACHE`, `$CONFIG`, `$DATA`, `$LOCALDATA`, `$DESKTOP`, `$DOCUMENT`, `$DOWNLOAD`, `$EXE`, `$FONT`, `$HOME`, `$PICTURE`, `$PUBLIC`, `$RUNTIME`, `$TEMPLATE`, `$VIDEO`, `$RESOURCE`, `$LOG`, `$TEMP`, `$APPCONFIG`, `$APPDATA`, `$APPLOCALDATA`, `$APPCACHE`, `$APPLOG`.", + "type": "string" + }, + "name": { + "description": "The name for this allowed shell command configuration.\n\nThis name will be used inside of the webview API to call this command along with any specified arguments.", + "type": "string" + } + }, + "additionalProperties": false + }, + { + "type": "object", + "required": [ + "name", + "sidecar" + ], + "properties": { + "args": { + "description": "The allowed arguments for the command execution.", + "allOf": [ + { + "$ref": "#/definitions/ShellScopeEntryAllowedArgs" + } + ] + }, + "name": { + "description": "The name for this allowed shell command configuration.\n\nThis name will be used inside of the webview API to call this command along with any specified arguments.", + "type": "string" + }, + "sidecar": { + "description": "If this command is a sidecar command.", + "type": "boolean" + } + }, + "additionalProperties": false + } + ] + } + } + } + }, + "properties": { + "identifier": { + "description": "Identifier of the permission or permission set.", + "allOf": [ + { + "$ref": "#/definitions/Identifier" + } + ] + } + } + }, + { + "properties": { + "identifier": { + "description": "Identifier of the permission or permission set.", + "allOf": [ + { + "$ref": "#/definitions/Identifier" + } + ] + }, + "allow": { + "description": "Data that defines what is allowed by the scope.", + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/definitions/Value" + } + }, + "deny": { + "description": "Data that defines what is denied by the scope. 
This should be prioritized by validation logic.", + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/definitions/Value" + } + } + } + } + ], + "required": [ + "identifier" + ] + } + ] + }, + "Identifier": { + "description": "Permission identifier", + "oneOf": [ + { + "description": "Default core plugins set.\n#### This default permission set includes:\n\n- `core:path:default`\n- `core:event:default`\n- `core:window:default`\n- `core:webview:default`\n- `core:app:default`\n- `core:image:default`\n- `core:resources:default`\n- `core:menu:default`\n- `core:tray:default`", + "type": "string", + "const": "core:default", + "markdownDescription": "Default core plugins set.\n#### This default permission set includes:\n\n- `core:path:default`\n- `core:event:default`\n- `core:window:default`\n- `core:webview:default`\n- `core:app:default`\n- `core:image:default`\n- `core:resources:default`\n- `core:menu:default`\n- `core:tray:default`" + }, + { + "description": "Default permissions for the plugin.\n#### This default permission set includes:\n\n- `allow-version`\n- `allow-name`\n- `allow-tauri-version`\n- `allow-identifier`\n- `allow-bundle-type`\n- `allow-register-listener`\n- `allow-remove-listener`", + "type": "string", + "const": "core:app:default", + "markdownDescription": "Default permissions for the plugin.\n#### This default permission set includes:\n\n- `allow-version`\n- `allow-name`\n- `allow-tauri-version`\n- `allow-identifier`\n- `allow-bundle-type`\n- `allow-register-listener`\n- `allow-remove-listener`" + }, + { + "description": "Enables the app_hide command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-app-hide", + "markdownDescription": "Enables the app_hide command without any pre-configured scope." 
+ }, + { + "description": "Enables the app_show command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-app-show", + "markdownDescription": "Enables the app_show command without any pre-configured scope." + }, + { + "description": "Enables the bundle_type command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-bundle-type", + "markdownDescription": "Enables the bundle_type command without any pre-configured scope." + }, + { + "description": "Enables the default_window_icon command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-default-window-icon", + "markdownDescription": "Enables the default_window_icon command without any pre-configured scope." + }, + { + "description": "Enables the fetch_data_store_identifiers command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-fetch-data-store-identifiers", + "markdownDescription": "Enables the fetch_data_store_identifiers command without any pre-configured scope." + }, + { + "description": "Enables the identifier command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-identifier", + "markdownDescription": "Enables the identifier command without any pre-configured scope." + }, + { + "description": "Enables the name command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-name", + "markdownDescription": "Enables the name command without any pre-configured scope." + }, + { + "description": "Enables the register_listener command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-register-listener", + "markdownDescription": "Enables the register_listener command without any pre-configured scope." 
+ }, + { + "description": "Enables the remove_data_store command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-remove-data-store", + "markdownDescription": "Enables the remove_data_store command without any pre-configured scope." + }, + { + "description": "Enables the remove_listener command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-remove-listener", + "markdownDescription": "Enables the remove_listener command without any pre-configured scope." + }, + { + "description": "Enables the set_app_theme command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-set-app-theme", + "markdownDescription": "Enables the set_app_theme command without any pre-configured scope." + }, + { + "description": "Enables the set_dock_visibility command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-set-dock-visibility", + "markdownDescription": "Enables the set_dock_visibility command without any pre-configured scope." + }, + { + "description": "Enables the tauri_version command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-tauri-version", + "markdownDescription": "Enables the tauri_version command without any pre-configured scope." + }, + { + "description": "Enables the version command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-version", + "markdownDescription": "Enables the version command without any pre-configured scope." + }, + { + "description": "Denies the app_hide command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-app-hide", + "markdownDescription": "Denies the app_hide command without any pre-configured scope." 
+ }, + { + "description": "Denies the app_show command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-app-show", + "markdownDescription": "Denies the app_show command without any pre-configured scope." + }, + { + "description": "Denies the bundle_type command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-bundle-type", + "markdownDescription": "Denies the bundle_type command without any pre-configured scope." + }, + { + "description": "Denies the default_window_icon command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-default-window-icon", + "markdownDescription": "Denies the default_window_icon command without any pre-configured scope." + }, + { + "description": "Denies the fetch_data_store_identifiers command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-fetch-data-store-identifiers", + "markdownDescription": "Denies the fetch_data_store_identifiers command without any pre-configured scope." + }, + { + "description": "Denies the identifier command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-identifier", + "markdownDescription": "Denies the identifier command without any pre-configured scope." + }, + { + "description": "Denies the name command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-name", + "markdownDescription": "Denies the name command without any pre-configured scope." + }, + { + "description": "Denies the register_listener command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-register-listener", + "markdownDescription": "Denies the register_listener command without any pre-configured scope." 
+ }, + { + "description": "Denies the remove_data_store command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-remove-data-store", + "markdownDescription": "Denies the remove_data_store command without any pre-configured scope." + }, + { + "description": "Denies the remove_listener command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-remove-listener", + "markdownDescription": "Denies the remove_listener command without any pre-configured scope." + }, + { + "description": "Denies the set_app_theme command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-set-app-theme", + "markdownDescription": "Denies the set_app_theme command without any pre-configured scope." + }, + { + "description": "Denies the set_dock_visibility command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-set-dock-visibility", + "markdownDescription": "Denies the set_dock_visibility command without any pre-configured scope." + }, + { + "description": "Denies the tauri_version command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-tauri-version", + "markdownDescription": "Denies the tauri_version command without any pre-configured scope." + }, + { + "description": "Denies the version command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-version", + "markdownDescription": "Denies the version command without any pre-configured scope." 
+ }, + { + "description": "Default permissions for the plugin, which enables all commands.\n#### This default permission set includes:\n\n- `allow-listen`\n- `allow-unlisten`\n- `allow-emit`\n- `allow-emit-to`", + "type": "string", + "const": "core:event:default", + "markdownDescription": "Default permissions for the plugin, which enables all commands.\n#### This default permission set includes:\n\n- `allow-listen`\n- `allow-unlisten`\n- `allow-emit`\n- `allow-emit-to`" + }, + { + "description": "Enables the emit command without any pre-configured scope.", + "type": "string", + "const": "core:event:allow-emit", + "markdownDescription": "Enables the emit command without any pre-configured scope." + }, + { + "description": "Enables the emit_to command without any pre-configured scope.", + "type": "string", + "const": "core:event:allow-emit-to", + "markdownDescription": "Enables the emit_to command without any pre-configured scope." + }, + { + "description": "Enables the listen command without any pre-configured scope.", + "type": "string", + "const": "core:event:allow-listen", + "markdownDescription": "Enables the listen command without any pre-configured scope." + }, + { + "description": "Enables the unlisten command without any pre-configured scope.", + "type": "string", + "const": "core:event:allow-unlisten", + "markdownDescription": "Enables the unlisten command without any pre-configured scope." + }, + { + "description": "Denies the emit command without any pre-configured scope.", + "type": "string", + "const": "core:event:deny-emit", + "markdownDescription": "Denies the emit command without any pre-configured scope." + }, + { + "description": "Denies the emit_to command without any pre-configured scope.", + "type": "string", + "const": "core:event:deny-emit-to", + "markdownDescription": "Denies the emit_to command without any pre-configured scope." 
+ }, + { + "description": "Denies the listen command without any pre-configured scope.", + "type": "string", + "const": "core:event:deny-listen", + "markdownDescription": "Denies the listen command without any pre-configured scope." + }, + { + "description": "Denies the unlisten command without any pre-configured scope.", + "type": "string", + "const": "core:event:deny-unlisten", + "markdownDescription": "Denies the unlisten command without any pre-configured scope." + }, + { + "description": "Default permissions for the plugin, which enables all commands.\n#### This default permission set includes:\n\n- `allow-new`\n- `allow-from-bytes`\n- `allow-from-path`\n- `allow-rgba`\n- `allow-size`", + "type": "string", + "const": "core:image:default", + "markdownDescription": "Default permissions for the plugin, which enables all commands.\n#### This default permission set includes:\n\n- `allow-new`\n- `allow-from-bytes`\n- `allow-from-path`\n- `allow-rgba`\n- `allow-size`" + }, + { + "description": "Enables the from_bytes command without any pre-configured scope.", + "type": "string", + "const": "core:image:allow-from-bytes", + "markdownDescription": "Enables the from_bytes command without any pre-configured scope." + }, + { + "description": "Enables the from_path command without any pre-configured scope.", + "type": "string", + "const": "core:image:allow-from-path", + "markdownDescription": "Enables the from_path command without any pre-configured scope." + }, + { + "description": "Enables the new command without any pre-configured scope.", + "type": "string", + "const": "core:image:allow-new", + "markdownDescription": "Enables the new command without any pre-configured scope." + }, + { + "description": "Enables the rgba command without any pre-configured scope.", + "type": "string", + "const": "core:image:allow-rgba", + "markdownDescription": "Enables the rgba command without any pre-configured scope." 
+ }, + { + "description": "Enables the size command without any pre-configured scope.", + "type": "string", + "const": "core:image:allow-size", + "markdownDescription": "Enables the size command without any pre-configured scope." + }, + { + "description": "Denies the from_bytes command without any pre-configured scope.", + "type": "string", + "const": "core:image:deny-from-bytes", + "markdownDescription": "Denies the from_bytes command without any pre-configured scope." + }, + { + "description": "Denies the from_path command without any pre-configured scope.", + "type": "string", + "const": "core:image:deny-from-path", + "markdownDescription": "Denies the from_path command without any pre-configured scope." + }, + { + "description": "Denies the new command without any pre-configured scope.", + "type": "string", + "const": "core:image:deny-new", + "markdownDescription": "Denies the new command without any pre-configured scope." + }, + { + "description": "Denies the rgba command without any pre-configured scope.", + "type": "string", + "const": "core:image:deny-rgba", + "markdownDescription": "Denies the rgba command without any pre-configured scope." + }, + { + "description": "Denies the size command without any pre-configured scope.", + "type": "string", + "const": "core:image:deny-size", + "markdownDescription": "Denies the size command without any pre-configured scope." 
+ }, + { + "description": "Default permissions for the plugin, which enables all commands.\n#### This default permission set includes:\n\n- `allow-new`\n- `allow-append`\n- `allow-prepend`\n- `allow-insert`\n- `allow-remove`\n- `allow-remove-at`\n- `allow-items`\n- `allow-get`\n- `allow-popup`\n- `allow-create-default`\n- `allow-set-as-app-menu`\n- `allow-set-as-window-menu`\n- `allow-text`\n- `allow-set-text`\n- `allow-is-enabled`\n- `allow-set-enabled`\n- `allow-set-accelerator`\n- `allow-set-as-windows-menu-for-nsapp`\n- `allow-set-as-help-menu-for-nsapp`\n- `allow-is-checked`\n- `allow-set-checked`\n- `allow-set-icon`", + "type": "string", + "const": "core:menu:default", + "markdownDescription": "Default permissions for the plugin, which enables all commands.\n#### This default permission set includes:\n\n- `allow-new`\n- `allow-append`\n- `allow-prepend`\n- `allow-insert`\n- `allow-remove`\n- `allow-remove-at`\n- `allow-items`\n- `allow-get`\n- `allow-popup`\n- `allow-create-default`\n- `allow-set-as-app-menu`\n- `allow-set-as-window-menu`\n- `allow-text`\n- `allow-set-text`\n- `allow-is-enabled`\n- `allow-set-enabled`\n- `allow-set-accelerator`\n- `allow-set-as-windows-menu-for-nsapp`\n- `allow-set-as-help-menu-for-nsapp`\n- `allow-is-checked`\n- `allow-set-checked`\n- `allow-set-icon`" + }, + { + "description": "Enables the append command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-append", + "markdownDescription": "Enables the append command without any pre-configured scope." + }, + { + "description": "Enables the create_default command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-create-default", + "markdownDescription": "Enables the create_default command without any pre-configured scope." 
+ }, + { + "description": "Enables the get command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-get", + "markdownDescription": "Enables the get command without any pre-configured scope." + }, + { + "description": "Enables the insert command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-insert", + "markdownDescription": "Enables the insert command without any pre-configured scope." + }, + { + "description": "Enables the is_checked command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-is-checked", + "markdownDescription": "Enables the is_checked command without any pre-configured scope." + }, + { + "description": "Enables the is_enabled command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-is-enabled", + "markdownDescription": "Enables the is_enabled command without any pre-configured scope." + }, + { + "description": "Enables the items command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-items", + "markdownDescription": "Enables the items command without any pre-configured scope." + }, + { + "description": "Enables the new command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-new", + "markdownDescription": "Enables the new command without any pre-configured scope." + }, + { + "description": "Enables the popup command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-popup", + "markdownDescription": "Enables the popup command without any pre-configured scope." + }, + { + "description": "Enables the prepend command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-prepend", + "markdownDescription": "Enables the prepend command without any pre-configured scope." 
+ }, + { + "description": "Enables the remove command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-remove", + "markdownDescription": "Enables the remove command without any pre-configured scope." + }, + { + "description": "Enables the remove_at command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-remove-at", + "markdownDescription": "Enables the remove_at command without any pre-configured scope." + }, + { + "description": "Enables the set_accelerator command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-set-accelerator", + "markdownDescription": "Enables the set_accelerator command without any pre-configured scope." + }, + { + "description": "Enables the set_as_app_menu command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-set-as-app-menu", + "markdownDescription": "Enables the set_as_app_menu command without any pre-configured scope." + }, + { + "description": "Enables the set_as_help_menu_for_nsapp command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-set-as-help-menu-for-nsapp", + "markdownDescription": "Enables the set_as_help_menu_for_nsapp command without any pre-configured scope." + }, + { + "description": "Enables the set_as_window_menu command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-set-as-window-menu", + "markdownDescription": "Enables the set_as_window_menu command without any pre-configured scope." + }, + { + "description": "Enables the set_as_windows_menu_for_nsapp command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-set-as-windows-menu-for-nsapp", + "markdownDescription": "Enables the set_as_windows_menu_for_nsapp command without any pre-configured scope." 
+ }, + { + "description": "Enables the set_checked command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-set-checked", + "markdownDescription": "Enables the set_checked command without any pre-configured scope." + }, + { + "description": "Enables the set_enabled command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-set-enabled", + "markdownDescription": "Enables the set_enabled command without any pre-configured scope." + }, + { + "description": "Enables the set_icon command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-set-icon", + "markdownDescription": "Enables the set_icon command without any pre-configured scope." + }, + { + "description": "Enables the set_text command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-set-text", + "markdownDescription": "Enables the set_text command without any pre-configured scope." + }, + { + "description": "Enables the text command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-text", + "markdownDescription": "Enables the text command without any pre-configured scope." + }, + { + "description": "Denies the append command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-append", + "markdownDescription": "Denies the append command without any pre-configured scope." + }, + { + "description": "Denies the create_default command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-create-default", + "markdownDescription": "Denies the create_default command without any pre-configured scope." + }, + { + "description": "Denies the get command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-get", + "markdownDescription": "Denies the get command without any pre-configured scope." 
+ }, + { + "description": "Denies the insert command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-insert", + "markdownDescription": "Denies the insert command without any pre-configured scope." + }, + { + "description": "Denies the is_checked command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-is-checked", + "markdownDescription": "Denies the is_checked command without any pre-configured scope." + }, + { + "description": "Denies the is_enabled command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-is-enabled", + "markdownDescription": "Denies the is_enabled command without any pre-configured scope." + }, + { + "description": "Denies the items command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-items", + "markdownDescription": "Denies the items command without any pre-configured scope." + }, + { + "description": "Denies the new command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-new", + "markdownDescription": "Denies the new command without any pre-configured scope." + }, + { + "description": "Denies the popup command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-popup", + "markdownDescription": "Denies the popup command without any pre-configured scope." + }, + { + "description": "Denies the prepend command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-prepend", + "markdownDescription": "Denies the prepend command without any pre-configured scope." + }, + { + "description": "Denies the remove command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-remove", + "markdownDescription": "Denies the remove command without any pre-configured scope." 
+ }, + { + "description": "Denies the remove_at command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-remove-at", + "markdownDescription": "Denies the remove_at command without any pre-configured scope." + }, + { + "description": "Denies the set_accelerator command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-set-accelerator", + "markdownDescription": "Denies the set_accelerator command without any pre-configured scope." + }, + { + "description": "Denies the set_as_app_menu command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-set-as-app-menu", + "markdownDescription": "Denies the set_as_app_menu command without any pre-configured scope." + }, + { + "description": "Denies the set_as_help_menu_for_nsapp command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-set-as-help-menu-for-nsapp", + "markdownDescription": "Denies the set_as_help_menu_for_nsapp command without any pre-configured scope." + }, + { + "description": "Denies the set_as_window_menu command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-set-as-window-menu", + "markdownDescription": "Denies the set_as_window_menu command without any pre-configured scope." + }, + { + "description": "Denies the set_as_windows_menu_for_nsapp command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-set-as-windows-menu-for-nsapp", + "markdownDescription": "Denies the set_as_windows_menu_for_nsapp command without any pre-configured scope." + }, + { + "description": "Denies the set_checked command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-set-checked", + "markdownDescription": "Denies the set_checked command without any pre-configured scope." 
+ }, + { + "description": "Denies the set_enabled command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-set-enabled", + "markdownDescription": "Denies the set_enabled command without any pre-configured scope." + }, + { + "description": "Denies the set_icon command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-set-icon", + "markdownDescription": "Denies the set_icon command without any pre-configured scope." + }, + { + "description": "Denies the set_text command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-set-text", + "markdownDescription": "Denies the set_text command without any pre-configured scope." + }, + { + "description": "Denies the text command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-text", + "markdownDescription": "Denies the text command without any pre-configured scope." + }, + { + "description": "Default permissions for the plugin, which enables all commands.\n#### This default permission set includes:\n\n- `allow-resolve-directory`\n- `allow-resolve`\n- `allow-normalize`\n- `allow-join`\n- `allow-dirname`\n- `allow-extname`\n- `allow-basename`\n- `allow-is-absolute`", + "type": "string", + "const": "core:path:default", + "markdownDescription": "Default permissions for the plugin, which enables all commands.\n#### This default permission set includes:\n\n- `allow-resolve-directory`\n- `allow-resolve`\n- `allow-normalize`\n- `allow-join`\n- `allow-dirname`\n- `allow-extname`\n- `allow-basename`\n- `allow-is-absolute`" + }, + { + "description": "Enables the basename command without any pre-configured scope.", + "type": "string", + "const": "core:path:allow-basename", + "markdownDescription": "Enables the basename command without any pre-configured scope." 
+ }, + { + "description": "Enables the dirname command without any pre-configured scope.", + "type": "string", + "const": "core:path:allow-dirname", + "markdownDescription": "Enables the dirname command without any pre-configured scope." + }, + { + "description": "Enables the extname command without any pre-configured scope.", + "type": "string", + "const": "core:path:allow-extname", + "markdownDescription": "Enables the extname command without any pre-configured scope." + }, + { + "description": "Enables the is_absolute command without any pre-configured scope.", + "type": "string", + "const": "core:path:allow-is-absolute", + "markdownDescription": "Enables the is_absolute command without any pre-configured scope." + }, + { + "description": "Enables the join command without any pre-configured scope.", + "type": "string", + "const": "core:path:allow-join", + "markdownDescription": "Enables the join command without any pre-configured scope." + }, + { + "description": "Enables the normalize command without any pre-configured scope.", + "type": "string", + "const": "core:path:allow-normalize", + "markdownDescription": "Enables the normalize command without any pre-configured scope." + }, + { + "description": "Enables the resolve command without any pre-configured scope.", + "type": "string", + "const": "core:path:allow-resolve", + "markdownDescription": "Enables the resolve command without any pre-configured scope." + }, + { + "description": "Enables the resolve_directory command without any pre-configured scope.", + "type": "string", + "const": "core:path:allow-resolve-directory", + "markdownDescription": "Enables the resolve_directory command without any pre-configured scope." + }, + { + "description": "Denies the basename command without any pre-configured scope.", + "type": "string", + "const": "core:path:deny-basename", + "markdownDescription": "Denies the basename command without any pre-configured scope." 
+ }, + { + "description": "Denies the dirname command without any pre-configured scope.", + "type": "string", + "const": "core:path:deny-dirname", + "markdownDescription": "Denies the dirname command without any pre-configured scope." + }, + { + "description": "Denies the extname command without any pre-configured scope.", + "type": "string", + "const": "core:path:deny-extname", + "markdownDescription": "Denies the extname command without any pre-configured scope." + }, + { + "description": "Denies the is_absolute command without any pre-configured scope.", + "type": "string", + "const": "core:path:deny-is-absolute", + "markdownDescription": "Denies the is_absolute command without any pre-configured scope." + }, + { + "description": "Denies the join command without any pre-configured scope.", + "type": "string", + "const": "core:path:deny-join", + "markdownDescription": "Denies the join command without any pre-configured scope." + }, + { + "description": "Denies the normalize command without any pre-configured scope.", + "type": "string", + "const": "core:path:deny-normalize", + "markdownDescription": "Denies the normalize command without any pre-configured scope." + }, + { + "description": "Denies the resolve command without any pre-configured scope.", + "type": "string", + "const": "core:path:deny-resolve", + "markdownDescription": "Denies the resolve command without any pre-configured scope." + }, + { + "description": "Denies the resolve_directory command without any pre-configured scope.", + "type": "string", + "const": "core:path:deny-resolve-directory", + "markdownDescription": "Denies the resolve_directory command without any pre-configured scope." 
+ }, + { + "description": "Default permissions for the plugin, which enables all commands.\n#### This default permission set includes:\n\n- `allow-close`", + "type": "string", + "const": "core:resources:default", + "markdownDescription": "Default permissions for the plugin, which enables all commands.\n#### This default permission set includes:\n\n- `allow-close`" + }, + { + "description": "Enables the close command without any pre-configured scope.", + "type": "string", + "const": "core:resources:allow-close", + "markdownDescription": "Enables the close command without any pre-configured scope." + }, + { + "description": "Denies the close command without any pre-configured scope.", + "type": "string", + "const": "core:resources:deny-close", + "markdownDescription": "Denies the close command without any pre-configured scope." + }, + { + "description": "Default permissions for the plugin, which enables all commands.\n#### This default permission set includes:\n\n- `allow-new`\n- `allow-get-by-id`\n- `allow-remove-by-id`\n- `allow-set-icon`\n- `allow-set-menu`\n- `allow-set-tooltip`\n- `allow-set-title`\n- `allow-set-visible`\n- `allow-set-temp-dir-path`\n- `allow-set-icon-as-template`\n- `allow-set-show-menu-on-left-click`", + "type": "string", + "const": "core:tray:default", + "markdownDescription": "Default permissions for the plugin, which enables all commands.\n#### This default permission set includes:\n\n- `allow-new`\n- `allow-get-by-id`\n- `allow-remove-by-id`\n- `allow-set-icon`\n- `allow-set-menu`\n- `allow-set-tooltip`\n- `allow-set-title`\n- `allow-set-visible`\n- `allow-set-temp-dir-path`\n- `allow-set-icon-as-template`\n- `allow-set-show-menu-on-left-click`" + }, + { + "description": "Enables the get_by_id command without any pre-configured scope.", + "type": "string", + "const": "core:tray:allow-get-by-id", + "markdownDescription": "Enables the get_by_id command without any pre-configured scope." 
+ }, + { + "description": "Enables the new command without any pre-configured scope.", + "type": "string", + "const": "core:tray:allow-new", + "markdownDescription": "Enables the new command without any pre-configured scope." + }, + { + "description": "Enables the remove_by_id command without any pre-configured scope.", + "type": "string", + "const": "core:tray:allow-remove-by-id", + "markdownDescription": "Enables the remove_by_id command without any pre-configured scope." + }, + { + "description": "Enables the set_icon command without any pre-configured scope.", + "type": "string", + "const": "core:tray:allow-set-icon", + "markdownDescription": "Enables the set_icon command without any pre-configured scope." + }, + { + "description": "Enables the set_icon_as_template command without any pre-configured scope.", + "type": "string", + "const": "core:tray:allow-set-icon-as-template", + "markdownDescription": "Enables the set_icon_as_template command without any pre-configured scope." + }, + { + "description": "Enables the set_menu command without any pre-configured scope.", + "type": "string", + "const": "core:tray:allow-set-menu", + "markdownDescription": "Enables the set_menu command without any pre-configured scope." + }, + { + "description": "Enables the set_show_menu_on_left_click command without any pre-configured scope.", + "type": "string", + "const": "core:tray:allow-set-show-menu-on-left-click", + "markdownDescription": "Enables the set_show_menu_on_left_click command without any pre-configured scope." + }, + { + "description": "Enables the set_temp_dir_path command without any pre-configured scope.", + "type": "string", + "const": "core:tray:allow-set-temp-dir-path", + "markdownDescription": "Enables the set_temp_dir_path command without any pre-configured scope." 
+ }, + { + "description": "Enables the set_title command without any pre-configured scope.", + "type": "string", + "const": "core:tray:allow-set-title", + "markdownDescription": "Enables the set_title command without any pre-configured scope." + }, + { + "description": "Enables the set_tooltip command without any pre-configured scope.", + "type": "string", + "const": "core:tray:allow-set-tooltip", + "markdownDescription": "Enables the set_tooltip command without any pre-configured scope." + }, + { + "description": "Enables the set_visible command without any pre-configured scope.", + "type": "string", + "const": "core:tray:allow-set-visible", + "markdownDescription": "Enables the set_visible command without any pre-configured scope." + }, + { + "description": "Denies the get_by_id command without any pre-configured scope.", + "type": "string", + "const": "core:tray:deny-get-by-id", + "markdownDescription": "Denies the get_by_id command without any pre-configured scope." + }, + { + "description": "Denies the new command without any pre-configured scope.", + "type": "string", + "const": "core:tray:deny-new", + "markdownDescription": "Denies the new command without any pre-configured scope." + }, + { + "description": "Denies the remove_by_id command without any pre-configured scope.", + "type": "string", + "const": "core:tray:deny-remove-by-id", + "markdownDescription": "Denies the remove_by_id command without any pre-configured scope." + }, + { + "description": "Denies the set_icon command without any pre-configured scope.", + "type": "string", + "const": "core:tray:deny-set-icon", + "markdownDescription": "Denies the set_icon command without any pre-configured scope." + }, + { + "description": "Denies the set_icon_as_template command without any pre-configured scope.", + "type": "string", + "const": "core:tray:deny-set-icon-as-template", + "markdownDescription": "Denies the set_icon_as_template command without any pre-configured scope." 
+ }, + { + "description": "Denies the set_menu command without any pre-configured scope.", + "type": "string", + "const": "core:tray:deny-set-menu", + "markdownDescription": "Denies the set_menu command without any pre-configured scope." + }, + { + "description": "Denies the set_show_menu_on_left_click command without any pre-configured scope.", + "type": "string", + "const": "core:tray:deny-set-show-menu-on-left-click", + "markdownDescription": "Denies the set_show_menu_on_left_click command without any pre-configured scope." + }, + { + "description": "Denies the set_temp_dir_path command without any pre-configured scope.", + "type": "string", + "const": "core:tray:deny-set-temp-dir-path", + "markdownDescription": "Denies the set_temp_dir_path command without any pre-configured scope." + }, + { + "description": "Denies the set_title command without any pre-configured scope.", + "type": "string", + "const": "core:tray:deny-set-title", + "markdownDescription": "Denies the set_title command without any pre-configured scope." + }, + { + "description": "Denies the set_tooltip command without any pre-configured scope.", + "type": "string", + "const": "core:tray:deny-set-tooltip", + "markdownDescription": "Denies the set_tooltip command without any pre-configured scope." + }, + { + "description": "Denies the set_visible command without any pre-configured scope.", + "type": "string", + "const": "core:tray:deny-set-visible", + "markdownDescription": "Denies the set_visible command without any pre-configured scope." 
+ }, + { + "description": "Default permissions for the plugin.\n#### This default permission set includes:\n\n- `allow-get-all-webviews`\n- `allow-webview-position`\n- `allow-webview-size`\n- `allow-internal-toggle-devtools`", + "type": "string", + "const": "core:webview:default", + "markdownDescription": "Default permissions for the plugin.\n#### This default permission set includes:\n\n- `allow-get-all-webviews`\n- `allow-webview-position`\n- `allow-webview-size`\n- `allow-internal-toggle-devtools`" + }, + { + "description": "Enables the clear_all_browsing_data command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-clear-all-browsing-data", + "markdownDescription": "Enables the clear_all_browsing_data command without any pre-configured scope." + }, + { + "description": "Enables the create_webview command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-create-webview", + "markdownDescription": "Enables the create_webview command without any pre-configured scope." + }, + { + "description": "Enables the create_webview_window command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-create-webview-window", + "markdownDescription": "Enables the create_webview_window command without any pre-configured scope." + }, + { + "description": "Enables the get_all_webviews command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-get-all-webviews", + "markdownDescription": "Enables the get_all_webviews command without any pre-configured scope." + }, + { + "description": "Enables the internal_toggle_devtools command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-internal-toggle-devtools", + "markdownDescription": "Enables the internal_toggle_devtools command without any pre-configured scope." 
+ }, + { + "description": "Enables the print command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-print", + "markdownDescription": "Enables the print command without any pre-configured scope." + }, + { + "description": "Enables the reparent command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-reparent", + "markdownDescription": "Enables the reparent command without any pre-configured scope." + }, + { + "description": "Enables the set_webview_auto_resize command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-set-webview-auto-resize", + "markdownDescription": "Enables the set_webview_auto_resize command without any pre-configured scope." + }, + { + "description": "Enables the set_webview_background_color command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-set-webview-background-color", + "markdownDescription": "Enables the set_webview_background_color command without any pre-configured scope." + }, + { + "description": "Enables the set_webview_focus command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-set-webview-focus", + "markdownDescription": "Enables the set_webview_focus command without any pre-configured scope." + }, + { + "description": "Enables the set_webview_position command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-set-webview-position", + "markdownDescription": "Enables the set_webview_position command without any pre-configured scope." + }, + { + "description": "Enables the set_webview_size command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-set-webview-size", + "markdownDescription": "Enables the set_webview_size command without any pre-configured scope." 
+ }, + { + "description": "Enables the set_webview_zoom command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-set-webview-zoom", + "markdownDescription": "Enables the set_webview_zoom command without any pre-configured scope." + }, + { + "description": "Enables the webview_close command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-webview-close", + "markdownDescription": "Enables the webview_close command without any pre-configured scope." + }, + { + "description": "Enables the webview_hide command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-webview-hide", + "markdownDescription": "Enables the webview_hide command without any pre-configured scope." + }, + { + "description": "Enables the webview_position command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-webview-position", + "markdownDescription": "Enables the webview_position command without any pre-configured scope." + }, + { + "description": "Enables the webview_show command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-webview-show", + "markdownDescription": "Enables the webview_show command without any pre-configured scope." + }, + { + "description": "Enables the webview_size command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-webview-size", + "markdownDescription": "Enables the webview_size command without any pre-configured scope." + }, + { + "description": "Denies the clear_all_browsing_data command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-clear-all-browsing-data", + "markdownDescription": "Denies the clear_all_browsing_data command without any pre-configured scope." 
+ }, + { + "description": "Denies the create_webview command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-create-webview", + "markdownDescription": "Denies the create_webview command without any pre-configured scope." + }, + { + "description": "Denies the create_webview_window command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-create-webview-window", + "markdownDescription": "Denies the create_webview_window command without any pre-configured scope." + }, + { + "description": "Denies the get_all_webviews command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-get-all-webviews", + "markdownDescription": "Denies the get_all_webviews command without any pre-configured scope." + }, + { + "description": "Denies the internal_toggle_devtools command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-internal-toggle-devtools", + "markdownDescription": "Denies the internal_toggle_devtools command without any pre-configured scope." + }, + { + "description": "Denies the print command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-print", + "markdownDescription": "Denies the print command without any pre-configured scope." + }, + { + "description": "Denies the reparent command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-reparent", + "markdownDescription": "Denies the reparent command without any pre-configured scope." + }, + { + "description": "Denies the set_webview_auto_resize command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-set-webview-auto-resize", + "markdownDescription": "Denies the set_webview_auto_resize command without any pre-configured scope." 
+ }, + { + "description": "Denies the set_webview_background_color command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-set-webview-background-color", + "markdownDescription": "Denies the set_webview_background_color command without any pre-configured scope." + }, + { + "description": "Denies the set_webview_focus command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-set-webview-focus", + "markdownDescription": "Denies the set_webview_focus command without any pre-configured scope." + }, + { + "description": "Denies the set_webview_position command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-set-webview-position", + "markdownDescription": "Denies the set_webview_position command without any pre-configured scope." + }, + { + "description": "Denies the set_webview_size command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-set-webview-size", + "markdownDescription": "Denies the set_webview_size command without any pre-configured scope." + }, + { + "description": "Denies the set_webview_zoom command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-set-webview-zoom", + "markdownDescription": "Denies the set_webview_zoom command without any pre-configured scope." + }, + { + "description": "Denies the webview_close command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-webview-close", + "markdownDescription": "Denies the webview_close command without any pre-configured scope." + }, + { + "description": "Denies the webview_hide command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-webview-hide", + "markdownDescription": "Denies the webview_hide command without any pre-configured scope." 
+ }, + { + "description": "Denies the webview_position command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-webview-position", + "markdownDescription": "Denies the webview_position command without any pre-configured scope." + }, + { + "description": "Denies the webview_show command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-webview-show", + "markdownDescription": "Denies the webview_show command without any pre-configured scope." + }, + { + "description": "Denies the webview_size command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-webview-size", + "markdownDescription": "Denies the webview_size command without any pre-configured scope." + }, + { + "description": "Default permissions for the plugin.\n#### This default permission set includes:\n\n- `allow-get-all-windows`\n- `allow-scale-factor`\n- `allow-inner-position`\n- `allow-outer-position`\n- `allow-inner-size`\n- `allow-outer-size`\n- `allow-is-fullscreen`\n- `allow-is-minimized`\n- `allow-is-maximized`\n- `allow-is-focused`\n- `allow-is-decorated`\n- `allow-is-resizable`\n- `allow-is-maximizable`\n- `allow-is-minimizable`\n- `allow-is-closable`\n- `allow-is-visible`\n- `allow-is-enabled`\n- `allow-title`\n- `allow-current-monitor`\n- `allow-primary-monitor`\n- `allow-monitor-from-point`\n- `allow-available-monitors`\n- `allow-cursor-position`\n- `allow-theme`\n- `allow-is-always-on-top`\n- `allow-internal-toggle-maximize`", + "type": "string", + "const": "core:window:default", + "markdownDescription": "Default permissions for the plugin.\n#### This default permission set includes:\n\n- `allow-get-all-windows`\n- `allow-scale-factor`\n- `allow-inner-position`\n- `allow-outer-position`\n- `allow-inner-size`\n- `allow-outer-size`\n- `allow-is-fullscreen`\n- `allow-is-minimized`\n- `allow-is-maximized`\n- `allow-is-focused`\n- `allow-is-decorated`\n- `allow-is-resizable`\n- 
`allow-is-maximizable`\n- `allow-is-minimizable`\n- `allow-is-closable`\n- `allow-is-visible`\n- `allow-is-enabled`\n- `allow-title`\n- `allow-current-monitor`\n- `allow-primary-monitor`\n- `allow-monitor-from-point`\n- `allow-available-monitors`\n- `allow-cursor-position`\n- `allow-theme`\n- `allow-is-always-on-top`\n- `allow-internal-toggle-maximize`" + }, + { + "description": "Enables the available_monitors command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-available-monitors", + "markdownDescription": "Enables the available_monitors command without any pre-configured scope." + }, + { + "description": "Enables the center command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-center", + "markdownDescription": "Enables the center command without any pre-configured scope." + }, + { + "description": "Enables the close command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-close", + "markdownDescription": "Enables the close command without any pre-configured scope." + }, + { + "description": "Enables the create command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-create", + "markdownDescription": "Enables the create command without any pre-configured scope." + }, + { + "description": "Enables the current_monitor command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-current-monitor", + "markdownDescription": "Enables the current_monitor command without any pre-configured scope." + }, + { + "description": "Enables the cursor_position command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-cursor-position", + "markdownDescription": "Enables the cursor_position command without any pre-configured scope." 
+ }, + { + "description": "Enables the destroy command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-destroy", + "markdownDescription": "Enables the destroy command without any pre-configured scope." + }, + { + "description": "Enables the get_all_windows command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-get-all-windows", + "markdownDescription": "Enables the get_all_windows command without any pre-configured scope." + }, + { + "description": "Enables the hide command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-hide", + "markdownDescription": "Enables the hide command without any pre-configured scope." + }, + { + "description": "Enables the inner_position command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-inner-position", + "markdownDescription": "Enables the inner_position command without any pre-configured scope." + }, + { + "description": "Enables the inner_size command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-inner-size", + "markdownDescription": "Enables the inner_size command without any pre-configured scope." + }, + { + "description": "Enables the internal_toggle_maximize command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-internal-toggle-maximize", + "markdownDescription": "Enables the internal_toggle_maximize command without any pre-configured scope." + }, + { + "description": "Enables the is_always_on_top command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-is-always-on-top", + "markdownDescription": "Enables the is_always_on_top command without any pre-configured scope." 
+ }, + { + "description": "Enables the is_closable command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-is-closable", + "markdownDescription": "Enables the is_closable command without any pre-configured scope." + }, + { + "description": "Enables the is_decorated command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-is-decorated", + "markdownDescription": "Enables the is_decorated command without any pre-configured scope." + }, + { + "description": "Enables the is_enabled command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-is-enabled", + "markdownDescription": "Enables the is_enabled command without any pre-configured scope." + }, + { + "description": "Enables the is_focused command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-is-focused", + "markdownDescription": "Enables the is_focused command without any pre-configured scope." + }, + { + "description": "Enables the is_fullscreen command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-is-fullscreen", + "markdownDescription": "Enables the is_fullscreen command without any pre-configured scope." + }, + { + "description": "Enables the is_maximizable command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-is-maximizable", + "markdownDescription": "Enables the is_maximizable command without any pre-configured scope." + }, + { + "description": "Enables the is_maximized command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-is-maximized", + "markdownDescription": "Enables the is_maximized command without any pre-configured scope." 
+ }, + { + "description": "Enables the is_minimizable command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-is-minimizable", + "markdownDescription": "Enables the is_minimizable command without any pre-configured scope." + }, + { + "description": "Enables the is_minimized command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-is-minimized", + "markdownDescription": "Enables the is_minimized command without any pre-configured scope." + }, + { + "description": "Enables the is_resizable command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-is-resizable", + "markdownDescription": "Enables the is_resizable command without any pre-configured scope." + }, + { + "description": "Enables the is_visible command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-is-visible", + "markdownDescription": "Enables the is_visible command without any pre-configured scope." + }, + { + "description": "Enables the maximize command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-maximize", + "markdownDescription": "Enables the maximize command without any pre-configured scope." + }, + { + "description": "Enables the minimize command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-minimize", + "markdownDescription": "Enables the minimize command without any pre-configured scope." + }, + { + "description": "Enables the monitor_from_point command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-monitor-from-point", + "markdownDescription": "Enables the monitor_from_point command without any pre-configured scope." 
+ }, + { + "description": "Enables the outer_position command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-outer-position", + "markdownDescription": "Enables the outer_position command without any pre-configured scope." + }, + { + "description": "Enables the outer_size command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-outer-size", + "markdownDescription": "Enables the outer_size command without any pre-configured scope." + }, + { + "description": "Enables the primary_monitor command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-primary-monitor", + "markdownDescription": "Enables the primary_monitor command without any pre-configured scope." + }, + { + "description": "Enables the request_user_attention command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-request-user-attention", + "markdownDescription": "Enables the request_user_attention command without any pre-configured scope." + }, + { + "description": "Enables the scale_factor command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-scale-factor", + "markdownDescription": "Enables the scale_factor command without any pre-configured scope." + }, + { + "description": "Enables the set_always_on_bottom command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-always-on-bottom", + "markdownDescription": "Enables the set_always_on_bottom command without any pre-configured scope." + }, + { + "description": "Enables the set_always_on_top command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-always-on-top", + "markdownDescription": "Enables the set_always_on_top command without any pre-configured scope." 
+ }, + { + "description": "Enables the set_background_color command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-background-color", + "markdownDescription": "Enables the set_background_color command without any pre-configured scope." + }, + { + "description": "Enables the set_badge_count command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-badge-count", + "markdownDescription": "Enables the set_badge_count command without any pre-configured scope." + }, + { + "description": "Enables the set_badge_label command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-badge-label", + "markdownDescription": "Enables the set_badge_label command without any pre-configured scope." + }, + { + "description": "Enables the set_closable command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-closable", + "markdownDescription": "Enables the set_closable command without any pre-configured scope." + }, + { + "description": "Enables the set_content_protected command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-content-protected", + "markdownDescription": "Enables the set_content_protected command without any pre-configured scope." + }, + { + "description": "Enables the set_cursor_grab command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-cursor-grab", + "markdownDescription": "Enables the set_cursor_grab command without any pre-configured scope." + }, + { + "description": "Enables the set_cursor_icon command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-cursor-icon", + "markdownDescription": "Enables the set_cursor_icon command without any pre-configured scope." 
+ }, + { + "description": "Enables the set_cursor_position command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-cursor-position", + "markdownDescription": "Enables the set_cursor_position command without any pre-configured scope." + }, + { + "description": "Enables the set_cursor_visible command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-cursor-visible", + "markdownDescription": "Enables the set_cursor_visible command without any pre-configured scope." + }, + { + "description": "Enables the set_decorations command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-decorations", + "markdownDescription": "Enables the set_decorations command without any pre-configured scope." + }, + { + "description": "Enables the set_effects command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-effects", + "markdownDescription": "Enables the set_effects command without any pre-configured scope." + }, + { + "description": "Enables the set_enabled command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-enabled", + "markdownDescription": "Enables the set_enabled command without any pre-configured scope." + }, + { + "description": "Enables the set_focus command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-focus", + "markdownDescription": "Enables the set_focus command without any pre-configured scope." + }, + { + "description": "Enables the set_focusable command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-focusable", + "markdownDescription": "Enables the set_focusable command without any pre-configured scope." 
+ }, + { + "description": "Enables the set_fullscreen command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-fullscreen", + "markdownDescription": "Enables the set_fullscreen command without any pre-configured scope." + }, + { + "description": "Enables the set_icon command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-icon", + "markdownDescription": "Enables the set_icon command without any pre-configured scope." + }, + { + "description": "Enables the set_ignore_cursor_events command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-ignore-cursor-events", + "markdownDescription": "Enables the set_ignore_cursor_events command without any pre-configured scope." + }, + { + "description": "Enables the set_max_size command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-max-size", + "markdownDescription": "Enables the set_max_size command without any pre-configured scope." + }, + { + "description": "Enables the set_maximizable command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-maximizable", + "markdownDescription": "Enables the set_maximizable command without any pre-configured scope." + }, + { + "description": "Enables the set_min_size command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-min-size", + "markdownDescription": "Enables the set_min_size command without any pre-configured scope." + }, + { + "description": "Enables the set_minimizable command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-minimizable", + "markdownDescription": "Enables the set_minimizable command without any pre-configured scope." 
+ }, + { + "description": "Enables the set_overlay_icon command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-overlay-icon", + "markdownDescription": "Enables the set_overlay_icon command without any pre-configured scope." + }, + { + "description": "Enables the set_position command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-position", + "markdownDescription": "Enables the set_position command without any pre-configured scope." + }, + { + "description": "Enables the set_progress_bar command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-progress-bar", + "markdownDescription": "Enables the set_progress_bar command without any pre-configured scope." + }, + { + "description": "Enables the set_resizable command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-resizable", + "markdownDescription": "Enables the set_resizable command without any pre-configured scope." + }, + { + "description": "Enables the set_shadow command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-shadow", + "markdownDescription": "Enables the set_shadow command without any pre-configured scope." + }, + { + "description": "Enables the set_simple_fullscreen command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-simple-fullscreen", + "markdownDescription": "Enables the set_simple_fullscreen command without any pre-configured scope." + }, + { + "description": "Enables the set_size command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-size", + "markdownDescription": "Enables the set_size command without any pre-configured scope." 
+ }, + { + "description": "Enables the set_size_constraints command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-size-constraints", + "markdownDescription": "Enables the set_size_constraints command without any pre-configured scope." + }, + { + "description": "Enables the set_skip_taskbar command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-skip-taskbar", + "markdownDescription": "Enables the set_skip_taskbar command without any pre-configured scope." + }, + { + "description": "Enables the set_theme command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-theme", + "markdownDescription": "Enables the set_theme command without any pre-configured scope." + }, + { + "description": "Enables the set_title command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-title", + "markdownDescription": "Enables the set_title command without any pre-configured scope." + }, + { + "description": "Enables the set_title_bar_style command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-title-bar-style", + "markdownDescription": "Enables the set_title_bar_style command without any pre-configured scope." + }, + { + "description": "Enables the set_visible_on_all_workspaces command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-visible-on-all-workspaces", + "markdownDescription": "Enables the set_visible_on_all_workspaces command without any pre-configured scope." + }, + { + "description": "Enables the show command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-show", + "markdownDescription": "Enables the show command without any pre-configured scope." 
+ }, + { + "description": "Enables the start_dragging command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-start-dragging", + "markdownDescription": "Enables the start_dragging command without any pre-configured scope." + }, + { + "description": "Enables the start_resize_dragging command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-start-resize-dragging", + "markdownDescription": "Enables the start_resize_dragging command without any pre-configured scope." + }, + { + "description": "Enables the theme command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-theme", + "markdownDescription": "Enables the theme command without any pre-configured scope." + }, + { + "description": "Enables the title command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-title", + "markdownDescription": "Enables the title command without any pre-configured scope." + }, + { + "description": "Enables the toggle_maximize command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-toggle-maximize", + "markdownDescription": "Enables the toggle_maximize command without any pre-configured scope." + }, + { + "description": "Enables the unmaximize command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-unmaximize", + "markdownDescription": "Enables the unmaximize command without any pre-configured scope." + }, + { + "description": "Enables the unminimize command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-unminimize", + "markdownDescription": "Enables the unminimize command without any pre-configured scope." 
+ }, + { + "description": "Denies the available_monitors command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-available-monitors", + "markdownDescription": "Denies the available_monitors command without any pre-configured scope." + }, + { + "description": "Denies the center command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-center", + "markdownDescription": "Denies the center command without any pre-configured scope." + }, + { + "description": "Denies the close command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-close", + "markdownDescription": "Denies the close command without any pre-configured scope." + }, + { + "description": "Denies the create command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-create", + "markdownDescription": "Denies the create command without any pre-configured scope." + }, + { + "description": "Denies the current_monitor command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-current-monitor", + "markdownDescription": "Denies the current_monitor command without any pre-configured scope." + }, + { + "description": "Denies the cursor_position command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-cursor-position", + "markdownDescription": "Denies the cursor_position command without any pre-configured scope." + }, + { + "description": "Denies the destroy command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-destroy", + "markdownDescription": "Denies the destroy command without any pre-configured scope." + }, + { + "description": "Denies the get_all_windows command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-get-all-windows", + "markdownDescription": "Denies the get_all_windows command without any pre-configured scope." 
+ }, + { + "description": "Denies the hide command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-hide", + "markdownDescription": "Denies the hide command without any pre-configured scope." + }, + { + "description": "Denies the inner_position command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-inner-position", + "markdownDescription": "Denies the inner_position command without any pre-configured scope." + }, + { + "description": "Denies the inner_size command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-inner-size", + "markdownDescription": "Denies the inner_size command without any pre-configured scope." + }, + { + "description": "Denies the internal_toggle_maximize command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-internal-toggle-maximize", + "markdownDescription": "Denies the internal_toggle_maximize command without any pre-configured scope." + }, + { + "description": "Denies the is_always_on_top command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-is-always-on-top", + "markdownDescription": "Denies the is_always_on_top command without any pre-configured scope." + }, + { + "description": "Denies the is_closable command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-is-closable", + "markdownDescription": "Denies the is_closable command without any pre-configured scope." + }, + { + "description": "Denies the is_decorated command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-is-decorated", + "markdownDescription": "Denies the is_decorated command without any pre-configured scope." 
+ }, + { + "description": "Denies the is_enabled command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-is-enabled", + "markdownDescription": "Denies the is_enabled command without any pre-configured scope." + }, + { + "description": "Denies the is_focused command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-is-focused", + "markdownDescription": "Denies the is_focused command without any pre-configured scope." + }, + { + "description": "Denies the is_fullscreen command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-is-fullscreen", + "markdownDescription": "Denies the is_fullscreen command without any pre-configured scope." + }, + { + "description": "Denies the is_maximizable command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-is-maximizable", + "markdownDescription": "Denies the is_maximizable command without any pre-configured scope." + }, + { + "description": "Denies the is_maximized command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-is-maximized", + "markdownDescription": "Denies the is_maximized command without any pre-configured scope." + }, + { + "description": "Denies the is_minimizable command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-is-minimizable", + "markdownDescription": "Denies the is_minimizable command without any pre-configured scope." + }, + { + "description": "Denies the is_minimized command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-is-minimized", + "markdownDescription": "Denies the is_minimized command without any pre-configured scope." 
+ }, + { + "description": "Denies the is_resizable command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-is-resizable", + "markdownDescription": "Denies the is_resizable command without any pre-configured scope." + }, + { + "description": "Denies the is_visible command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-is-visible", + "markdownDescription": "Denies the is_visible command without any pre-configured scope." + }, + { + "description": "Denies the maximize command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-maximize", + "markdownDescription": "Denies the maximize command without any pre-configured scope." + }, + { + "description": "Denies the minimize command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-minimize", + "markdownDescription": "Denies the minimize command without any pre-configured scope." + }, + { + "description": "Denies the monitor_from_point command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-monitor-from-point", + "markdownDescription": "Denies the monitor_from_point command without any pre-configured scope." + }, + { + "description": "Denies the outer_position command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-outer-position", + "markdownDescription": "Denies the outer_position command without any pre-configured scope." + }, + { + "description": "Denies the outer_size command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-outer-size", + "markdownDescription": "Denies the outer_size command without any pre-configured scope." 
+ }, + { + "description": "Denies the primary_monitor command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-primary-monitor", + "markdownDescription": "Denies the primary_monitor command without any pre-configured scope." + }, + { + "description": "Denies the request_user_attention command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-request-user-attention", + "markdownDescription": "Denies the request_user_attention command without any pre-configured scope." + }, + { + "description": "Denies the scale_factor command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-scale-factor", + "markdownDescription": "Denies the scale_factor command without any pre-configured scope." + }, + { + "description": "Denies the set_always_on_bottom command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-always-on-bottom", + "markdownDescription": "Denies the set_always_on_bottom command without any pre-configured scope." + }, + { + "description": "Denies the set_always_on_top command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-always-on-top", + "markdownDescription": "Denies the set_always_on_top command without any pre-configured scope." + }, + { + "description": "Denies the set_background_color command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-background-color", + "markdownDescription": "Denies the set_background_color command without any pre-configured scope." + }, + { + "description": "Denies the set_badge_count command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-badge-count", + "markdownDescription": "Denies the set_badge_count command without any pre-configured scope." 
+ }, + { + "description": "Denies the set_badge_label command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-badge-label", + "markdownDescription": "Denies the set_badge_label command without any pre-configured scope." + }, + { + "description": "Denies the set_closable command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-closable", + "markdownDescription": "Denies the set_closable command without any pre-configured scope." + }, + { + "description": "Denies the set_content_protected command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-content-protected", + "markdownDescription": "Denies the set_content_protected command without any pre-configured scope." + }, + { + "description": "Denies the set_cursor_grab command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-cursor-grab", + "markdownDescription": "Denies the set_cursor_grab command without any pre-configured scope." + }, + { + "description": "Denies the set_cursor_icon command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-cursor-icon", + "markdownDescription": "Denies the set_cursor_icon command without any pre-configured scope." + }, + { + "description": "Denies the set_cursor_position command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-cursor-position", + "markdownDescription": "Denies the set_cursor_position command without any pre-configured scope." + }, + { + "description": "Denies the set_cursor_visible command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-cursor-visible", + "markdownDescription": "Denies the set_cursor_visible command without any pre-configured scope." 
+ }, + { + "description": "Denies the set_decorations command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-decorations", + "markdownDescription": "Denies the set_decorations command without any pre-configured scope." + }, + { + "description": "Denies the set_effects command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-effects", + "markdownDescription": "Denies the set_effects command without any pre-configured scope." + }, + { + "description": "Denies the set_enabled command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-enabled", + "markdownDescription": "Denies the set_enabled command without any pre-configured scope." + }, + { + "description": "Denies the set_focus command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-focus", + "markdownDescription": "Denies the set_focus command without any pre-configured scope." + }, + { + "description": "Denies the set_focusable command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-focusable", + "markdownDescription": "Denies the set_focusable command without any pre-configured scope." + }, + { + "description": "Denies the set_fullscreen command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-fullscreen", + "markdownDescription": "Denies the set_fullscreen command without any pre-configured scope." + }, + { + "description": "Denies the set_icon command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-icon", + "markdownDescription": "Denies the set_icon command without any pre-configured scope." 
+ }, + { + "description": "Denies the set_ignore_cursor_events command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-ignore-cursor-events", + "markdownDescription": "Denies the set_ignore_cursor_events command without any pre-configured scope." + }, + { + "description": "Denies the set_max_size command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-max-size", + "markdownDescription": "Denies the set_max_size command without any pre-configured scope." + }, + { + "description": "Denies the set_maximizable command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-maximizable", + "markdownDescription": "Denies the set_maximizable command without any pre-configured scope." + }, + { + "description": "Denies the set_min_size command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-min-size", + "markdownDescription": "Denies the set_min_size command without any pre-configured scope." + }, + { + "description": "Denies the set_minimizable command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-minimizable", + "markdownDescription": "Denies the set_minimizable command without any pre-configured scope." + }, + { + "description": "Denies the set_overlay_icon command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-overlay-icon", + "markdownDescription": "Denies the set_overlay_icon command without any pre-configured scope." + }, + { + "description": "Denies the set_position command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-position", + "markdownDescription": "Denies the set_position command without any pre-configured scope." 
+ }, + { + "description": "Denies the set_progress_bar command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-progress-bar", + "markdownDescription": "Denies the set_progress_bar command without any pre-configured scope." + }, + { + "description": "Denies the set_resizable command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-resizable", + "markdownDescription": "Denies the set_resizable command without any pre-configured scope." + }, + { + "description": "Denies the set_shadow command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-shadow", + "markdownDescription": "Denies the set_shadow command without any pre-configured scope." + }, + { + "description": "Denies the set_simple_fullscreen command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-simple-fullscreen", + "markdownDescription": "Denies the set_simple_fullscreen command without any pre-configured scope." + }, + { + "description": "Denies the set_size command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-size", + "markdownDescription": "Denies the set_size command without any pre-configured scope." + }, + { + "description": "Denies the set_size_constraints command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-size-constraints", + "markdownDescription": "Denies the set_size_constraints command without any pre-configured scope." + }, + { + "description": "Denies the set_skip_taskbar command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-skip-taskbar", + "markdownDescription": "Denies the set_skip_taskbar command without any pre-configured scope." 
+ }, + { + "description": "Denies the set_theme command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-theme", + "markdownDescription": "Denies the set_theme command without any pre-configured scope." + }, + { + "description": "Denies the set_title command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-title", + "markdownDescription": "Denies the set_title command without any pre-configured scope." + }, + { + "description": "Denies the set_title_bar_style command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-title-bar-style", + "markdownDescription": "Denies the set_title_bar_style command without any pre-configured scope." + }, + { + "description": "Denies the set_visible_on_all_workspaces command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-visible-on-all-workspaces", + "markdownDescription": "Denies the set_visible_on_all_workspaces command without any pre-configured scope." + }, + { + "description": "Denies the show command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-show", + "markdownDescription": "Denies the show command without any pre-configured scope." + }, + { + "description": "Denies the start_dragging command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-start-dragging", + "markdownDescription": "Denies the start_dragging command without any pre-configured scope." + }, + { + "description": "Denies the start_resize_dragging command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-start-resize-dragging", + "markdownDescription": "Denies the start_resize_dragging command without any pre-configured scope." 
+ }, + { + "description": "Denies the theme command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-theme", + "markdownDescription": "Denies the theme command without any pre-configured scope." + }, + { + "description": "Denies the title command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-title", + "markdownDescription": "Denies the title command without any pre-configured scope." + }, + { + "description": "Denies the toggle_maximize command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-toggle-maximize", + "markdownDescription": "Denies the toggle_maximize command without any pre-configured scope." + }, + { + "description": "Denies the unmaximize command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-unmaximize", + "markdownDescription": "Denies the unmaximize command without any pre-configured scope." + }, + { + "description": "Denies the unminimize command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-unminimize", + "markdownDescription": "Denies the unminimize command without any pre-configured scope." + }, + { + "description": "This permission set configures which\nshell functionality is exposed by default.\n\n#### Granted Permissions\n\nIt allows to use the `open` functionality with a reasonable\nscope pre-configured. It will allow opening `http(s)://`,\n`tel:` and `mailto:` links.\n\n#### This default permission set includes:\n\n- `allow-open`", + "type": "string", + "const": "shell:default", + "markdownDescription": "This permission set configures which\nshell functionality is exposed by default.\n\n#### Granted Permissions\n\nIt allows to use the `open` functionality with a reasonable\nscope pre-configured. 
It will allow opening `http(s)://`,\n`tel:` and `mailto:` links.\n\n#### This default permission set includes:\n\n- `allow-open`" + }, + { + "description": "Enables the execute command without any pre-configured scope.", + "type": "string", + "const": "shell:allow-execute", + "markdownDescription": "Enables the execute command without any pre-configured scope." + }, + { + "description": "Enables the kill command without any pre-configured scope.", + "type": "string", + "const": "shell:allow-kill", + "markdownDescription": "Enables the kill command without any pre-configured scope." + }, + { + "description": "Enables the open command without any pre-configured scope.", + "type": "string", + "const": "shell:allow-open", + "markdownDescription": "Enables the open command without any pre-configured scope." + }, + { + "description": "Enables the spawn command without any pre-configured scope.", + "type": "string", + "const": "shell:allow-spawn", + "markdownDescription": "Enables the spawn command without any pre-configured scope." + }, + { + "description": "Enables the stdin_write command without any pre-configured scope.", + "type": "string", + "const": "shell:allow-stdin-write", + "markdownDescription": "Enables the stdin_write command without any pre-configured scope." + }, + { + "description": "Denies the execute command without any pre-configured scope.", + "type": "string", + "const": "shell:deny-execute", + "markdownDescription": "Denies the execute command without any pre-configured scope." + }, + { + "description": "Denies the kill command without any pre-configured scope.", + "type": "string", + "const": "shell:deny-kill", + "markdownDescription": "Denies the kill command without any pre-configured scope." + }, + { + "description": "Denies the open command without any pre-configured scope.", + "type": "string", + "const": "shell:deny-open", + "markdownDescription": "Denies the open command without any pre-configured scope." 
+ }, + { + "description": "Denies the spawn command without any pre-configured scope.", + "type": "string", + "const": "shell:deny-spawn", + "markdownDescription": "Denies the spawn command without any pre-configured scope." + }, + { + "description": "Denies the stdin_write command without any pre-configured scope.", + "type": "string", + "const": "shell:deny-stdin-write", + "markdownDescription": "Denies the stdin_write command without any pre-configured scope." + } + ] + }, + "Value": { + "description": "All supported ACL values.", + "anyOf": [ + { + "description": "Represents a null JSON value.", + "type": "null" + }, + { + "description": "Represents a [`bool`].", + "type": "boolean" + }, + { + "description": "Represents a valid ACL [`Number`].", + "allOf": [ + { + "$ref": "#/definitions/Number" + } + ] + }, + { + "description": "Represents a [`String`].", + "type": "string" + }, + { + "description": "Represents a list of other [`Value`]s.", + "type": "array", + "items": { + "$ref": "#/definitions/Value" + } + }, + { + "description": "Represents a map of [`String`] keys to [`Value`]s.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/Value" + } + } + ] + }, + "Number": { + "description": "A valid ACL number.", + "anyOf": [ + { + "description": "Represents an [`i64`].", + "type": "integer", + "format": "int64" + }, + { + "description": "Represents a [`f64`].", + "type": "number", + "format": "double" + } + ] + }, + "Target": { + "description": "Platform target.", + "oneOf": [ + { + "description": "MacOS.", + "type": "string", + "enum": [ + "macOS" + ] + }, + { + "description": "Windows.", + "type": "string", + "enum": [ + "windows" + ] + }, + { + "description": "Linux.", + "type": "string", + "enum": [ + "linux" + ] + }, + { + "description": "Android.", + "type": "string", + "enum": [ + "android" + ] + }, + { + "description": "iOS.", + "type": "string", + "enum": [ + "iOS" + ] + } + ] + }, + "ShellScopeEntryAllowedArg": { + "description": 
"A command argument allowed to be executed by the webview API.", + "anyOf": [ + { + "description": "A non-configurable argument that is passed to the command in the order it was specified.", + "type": "string" + }, + { + "description": "A variable that is set while calling the command from the webview API.", + "type": "object", + "required": [ + "validator" + ], + "properties": { + "raw": { + "description": "Marks the validator as a raw regex, meaning the plugin should not make any modification at runtime.\n\nThis means the regex will not match on the entire string by default, which might be exploited if your regex allow unexpected input to be considered valid. When using this option, make sure your regex is correct.", + "default": false, + "type": "boolean" + }, + "validator": { + "description": "[regex] validator to require passed values to conform to an expected input.\n\nThis will require the argument value passed to this variable to match the `validator` regex before it will be executed.\n\nThe regex string is by default surrounded by `^...$` to match the full string. For example the `https?://\\w+` regex would be registered as `^https?://\\w+$`.\n\n[regex]: ", + "type": "string" + } + }, + "additionalProperties": false + } + ] + }, + "ShellScopeEntryAllowedArgs": { + "description": "A set of command arguments allowed to be executed by the webview API.\n\nA value of `true` will allow any arguments to be passed to the command. `false` will disable all arguments. 
A list of [`ShellScopeEntryAllowedArg`] will set those arguments as the only valid arguments to be passed to the attached command configuration.", + "anyOf": [ + { + "description": "Use a simple boolean to allow all or disable all arguments to this command configuration.", + "type": "boolean" + }, + { + "description": "A specific set of [`ShellScopeEntryAllowedArg`] that are valid to call for the command configuration.", + "type": "array", + "items": { + "$ref": "#/definitions/ShellScopeEntryAllowedArg" + } + } + ] + } + } +} \ No newline at end of file diff --git a/frontend/src-tauri/gen/schemas/windows-schema.json b/frontend/src-tauri/gen/schemas/windows-schema.json new file mode 100644 index 000000000..f827fe175 --- /dev/null +++ b/frontend/src-tauri/gen/schemas/windows-schema.json @@ -0,0 +1,2564 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "CapabilityFile", + "description": "Capability formats accepted in a capability file.", + "anyOf": [ + { + "description": "A single capability.", + "allOf": [ + { + "$ref": "#/definitions/Capability" + } + ] + }, + { + "description": "A list of capabilities.", + "type": "array", + "items": { + "$ref": "#/definitions/Capability" + } + }, + { + "description": "A list of capabilities.", + "type": "object", + "required": [ + "capabilities" + ], + "properties": { + "capabilities": { + "description": "The list of capabilities.", + "type": "array", + "items": { + "$ref": "#/definitions/Capability" + } + } + } + } + ], + "definitions": { + "Capability": { + "description": "A grouping and boundary mechanism developers can use to isolate access to the IPC layer.\n\nIt controls application windows' and webviews' fine grained access to the Tauri core, application, or plugin commands. 
If a webview or its window is not matching any capability then it has no access to the IPC layer at all.\n\nThis can be done to create groups of windows, based on their required system access, which can reduce impact of frontend vulnerabilities in less privileged windows. Windows can be added to a capability by exact name (e.g. `main-window`) or glob patterns like `*` or `admin-*`. A Window can have none, one, or multiple associated capabilities.\n\n## Example\n\n```json { \"identifier\": \"main-user-files-write\", \"description\": \"This capability allows the `main` window on macOS and Windows access to `filesystem` write related commands and `dialog` commands to enable programmatic access to files selected by the user.\", \"windows\": [ \"main\" ], \"permissions\": [ \"core:default\", \"dialog:open\", { \"identifier\": \"fs:allow-write-text-file\", \"allow\": [{ \"path\": \"$HOME/test.txt\" }] }, ], \"platforms\": [\"macOS\",\"windows\"] } ```", + "type": "object", + "required": [ + "identifier", + "permissions" + ], + "properties": { + "identifier": { + "description": "Identifier of the capability.\n\n## Example\n\n`main-user-files-write`", + "type": "string" + }, + "description": { + "description": "Description of what the capability is intended to allow on associated windows.\n\nIt should contain a description of what the grouped permissions should allow.\n\n## Example\n\nThis capability allows the `main` window access to `filesystem` write related commands and `dialog` commands to enable programmatic access to files selected by the user.", + "default": "", + "type": "string" + }, + "remote": { + "description": "Configure remote URLs that can use the capability permissions.\n\nThis setting is optional and defaults to not being set, as our default use case is that the content is served from our local application.\n\n:::caution Make sure you understand the security implications of providing remote sources with local system access. 
:::\n\n## Example\n\n```json { \"urls\": [\"https://*.mydomain.dev\"] } ```", + "anyOf": [ + { + "$ref": "#/definitions/CapabilityRemote" + }, + { + "type": "null" + } + ] + }, + "local": { + "description": "Whether this capability is enabled for local app URLs or not. Defaults to `true`.", + "default": true, + "type": "boolean" + }, + "windows": { + "description": "List of windows that are affected by this capability. Can be a glob pattern.\n\nIf a window label matches any of the patterns in this list, the capability will be enabled on all the webviews of that window, regardless of the value of [`Self::webviews`].\n\nOn multiwebview windows, prefer specifying [`Self::webviews`] and omitting [`Self::windows`] for a fine grained access control.\n\n## Example\n\n`[\"main\"]`", + "type": "array", + "items": { + "type": "string" + } + }, + "webviews": { + "description": "List of webviews that are affected by this capability. Can be a glob pattern.\n\nThe capability will be enabled on all the webviews whose label matches any of the patterns in this list, regardless of whether the webview's window label matches a pattern in [`Self::windows`].\n\n## Example\n\n`[\"sub-webview-one\", \"sub-webview-two\"]`", + "type": "array", + "items": { + "type": "string" + } + }, + "permissions": { + "description": "List of permissions attached to this capability.\n\nMust include the plugin name as prefix in the form of `${plugin-name}:${permission-name}`. 
For commands directly implemented in the application itself only `${permission-name}` is required.\n\n## Example\n\n```json [ \"core:default\", \"shell:allow-open\", \"dialog:open\", { \"identifier\": \"fs:allow-write-text-file\", \"allow\": [{ \"path\": \"$HOME/test.txt\" }] } ] ```", + "type": "array", + "items": { + "$ref": "#/definitions/PermissionEntry" + }, + "uniqueItems": true + }, + "platforms": { + "description": "Limit which target platforms this capability applies to.\n\nBy default all platforms are targeted.\n\n## Example\n\n`[\"macOS\",\"windows\"]`", + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/definitions/Target" + } + } + } + }, + "CapabilityRemote": { + "description": "Configuration for remote URLs that are associated with the capability.", + "type": "object", + "required": [ + "urls" + ], + "properties": { + "urls": { + "description": "Remote domains this capability refers to using the [URLPattern standard](https://urlpattern.spec.whatwg.org/).\n\n## Examples\n\n- \"https://*.mydomain.dev\": allows subdomains of mydomain.dev - \"https://mydomain.dev/api/*\": allows any subpath of mydomain.dev/api", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "PermissionEntry": { + "description": "An entry for a permission value in a [`Capability`] can be either a raw permission [`Identifier`] or an object that references a permission and extends its scope.", + "anyOf": [ + { + "description": "Reference a permission or permission set by identifier.", + "allOf": [ + { + "$ref": "#/definitions/Identifier" + } + ] + }, + { + "description": "Reference a permission or permission set by identifier and extends its scope.", + "type": "object", + "allOf": [ + { + "if": { + "properties": { + "identifier": { + "anyOf": [ + { + "description": "This permission set configures which\nshell functionality is exposed by default.\n\n#### Granted Permissions\n\nIt allows to use the `open` functionality with a reasonable\nscope 
pre-configured. It will allow opening `http(s)://`,\n`tel:` and `mailto:` links.\n\n#### This default permission set includes:\n\n- `allow-open`", + "type": "string", + "const": "shell:default", + "markdownDescription": "This permission set configures which\nshell functionality is exposed by default.\n\n#### Granted Permissions\n\nIt allows to use the `open` functionality with a reasonable\nscope pre-configured. It will allow opening `http(s)://`,\n`tel:` and `mailto:` links.\n\n#### This default permission set includes:\n\n- `allow-open`" + }, + { + "description": "Enables the execute command without any pre-configured scope.", + "type": "string", + "const": "shell:allow-execute", + "markdownDescription": "Enables the execute command without any pre-configured scope." + }, + { + "description": "Enables the kill command without any pre-configured scope.", + "type": "string", + "const": "shell:allow-kill", + "markdownDescription": "Enables the kill command without any pre-configured scope." + }, + { + "description": "Enables the open command without any pre-configured scope.", + "type": "string", + "const": "shell:allow-open", + "markdownDescription": "Enables the open command without any pre-configured scope." + }, + { + "description": "Enables the spawn command without any pre-configured scope.", + "type": "string", + "const": "shell:allow-spawn", + "markdownDescription": "Enables the spawn command without any pre-configured scope." + }, + { + "description": "Enables the stdin_write command without any pre-configured scope.", + "type": "string", + "const": "shell:allow-stdin-write", + "markdownDescription": "Enables the stdin_write command without any pre-configured scope." + }, + { + "description": "Denies the execute command without any pre-configured scope.", + "type": "string", + "const": "shell:deny-execute", + "markdownDescription": "Denies the execute command without any pre-configured scope." 
+ }, + { + "description": "Denies the kill command without any pre-configured scope.", + "type": "string", + "const": "shell:deny-kill", + "markdownDescription": "Denies the kill command without any pre-configured scope." + }, + { + "description": "Denies the open command without any pre-configured scope.", + "type": "string", + "const": "shell:deny-open", + "markdownDescription": "Denies the open command without any pre-configured scope." + }, + { + "description": "Denies the spawn command without any pre-configured scope.", + "type": "string", + "const": "shell:deny-spawn", + "markdownDescription": "Denies the spawn command without any pre-configured scope." + }, + { + "description": "Denies the stdin_write command without any pre-configured scope.", + "type": "string", + "const": "shell:deny-stdin-write", + "markdownDescription": "Denies the stdin_write command without any pre-configured scope." + } + ] + } + } + }, + "then": { + "properties": { + "allow": { + "items": { + "title": "ShellScopeEntry", + "description": "Shell scope entry.", + "anyOf": [ + { + "type": "object", + "required": [ + "cmd", + "name" + ], + "properties": { + "args": { + "description": "The allowed arguments for the command execution.", + "allOf": [ + { + "$ref": "#/definitions/ShellScopeEntryAllowedArgs" + } + ] + }, + "cmd": { + "description": "The command name. It can start with a variable that resolves to a system base directory. 
The variables are: `$AUDIO`, `$CACHE`, `$CONFIG`, `$DATA`, `$LOCALDATA`, `$DESKTOP`, `$DOCUMENT`, `$DOWNLOAD`, `$EXE`, `$FONT`, `$HOME`, `$PICTURE`, `$PUBLIC`, `$RUNTIME`, `$TEMPLATE`, `$VIDEO`, `$RESOURCE`, `$LOG`, `$TEMP`, `$APPCONFIG`, `$APPDATA`, `$APPLOCALDATA`, `$APPCACHE`, `$APPLOG`.", + "type": "string" + }, + "name": { + "description": "The name for this allowed shell command configuration.\n\nThis name will be used inside of the webview API to call this command along with any specified arguments.", + "type": "string" + } + }, + "additionalProperties": false + }, + { + "type": "object", + "required": [ + "name", + "sidecar" + ], + "properties": { + "args": { + "description": "The allowed arguments for the command execution.", + "allOf": [ + { + "$ref": "#/definitions/ShellScopeEntryAllowedArgs" + } + ] + }, + "name": { + "description": "The name for this allowed shell command configuration.\n\nThis name will be used inside of the webview API to call this command along with any specified arguments.", + "type": "string" + }, + "sidecar": { + "description": "If this command is a sidecar command.", + "type": "boolean" + } + }, + "additionalProperties": false + } + ] + } + }, + "deny": { + "items": { + "title": "ShellScopeEntry", + "description": "Shell scope entry.", + "anyOf": [ + { + "type": "object", + "required": [ + "cmd", + "name" + ], + "properties": { + "args": { + "description": "The allowed arguments for the command execution.", + "allOf": [ + { + "$ref": "#/definitions/ShellScopeEntryAllowedArgs" + } + ] + }, + "cmd": { + "description": "The command name. It can start with a variable that resolves to a system base directory. 
The variables are: `$AUDIO`, `$CACHE`, `$CONFIG`, `$DATA`, `$LOCALDATA`, `$DESKTOP`, `$DOCUMENT`, `$DOWNLOAD`, `$EXE`, `$FONT`, `$HOME`, `$PICTURE`, `$PUBLIC`, `$RUNTIME`, `$TEMPLATE`, `$VIDEO`, `$RESOURCE`, `$LOG`, `$TEMP`, `$APPCONFIG`, `$APPDATA`, `$APPLOCALDATA`, `$APPCACHE`, `$APPLOG`.", + "type": "string" + }, + "name": { + "description": "The name for this allowed shell command configuration.\n\nThis name will be used inside of the webview API to call this command along with any specified arguments.", + "type": "string" + } + }, + "additionalProperties": false + }, + { + "type": "object", + "required": [ + "name", + "sidecar" + ], + "properties": { + "args": { + "description": "The allowed arguments for the command execution.", + "allOf": [ + { + "$ref": "#/definitions/ShellScopeEntryAllowedArgs" + } + ] + }, + "name": { + "description": "The name for this allowed shell command configuration.\n\nThis name will be used inside of the webview API to call this command along with any specified arguments.", + "type": "string" + }, + "sidecar": { + "description": "If this command is a sidecar command.", + "type": "boolean" + } + }, + "additionalProperties": false + } + ] + } + } + } + }, + "properties": { + "identifier": { + "description": "Identifier of the permission or permission set.", + "allOf": [ + { + "$ref": "#/definitions/Identifier" + } + ] + } + } + }, + { + "properties": { + "identifier": { + "description": "Identifier of the permission or permission set.", + "allOf": [ + { + "$ref": "#/definitions/Identifier" + } + ] + }, + "allow": { + "description": "Data that defines what is allowed by the scope.", + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/definitions/Value" + } + }, + "deny": { + "description": "Data that defines what is denied by the scope. 
This should be prioritized by validation logic.", + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/definitions/Value" + } + } + } + } + ], + "required": [ + "identifier" + ] + } + ] + }, + "Identifier": { + "description": "Permission identifier", + "oneOf": [ + { + "description": "Default core plugins set.\n#### This default permission set includes:\n\n- `core:path:default`\n- `core:event:default`\n- `core:window:default`\n- `core:webview:default`\n- `core:app:default`\n- `core:image:default`\n- `core:resources:default`\n- `core:menu:default`\n- `core:tray:default`", + "type": "string", + "const": "core:default", + "markdownDescription": "Default core plugins set.\n#### This default permission set includes:\n\n- `core:path:default`\n- `core:event:default`\n- `core:window:default`\n- `core:webview:default`\n- `core:app:default`\n- `core:image:default`\n- `core:resources:default`\n- `core:menu:default`\n- `core:tray:default`" + }, + { + "description": "Default permissions for the plugin.\n#### This default permission set includes:\n\n- `allow-version`\n- `allow-name`\n- `allow-tauri-version`\n- `allow-identifier`\n- `allow-bundle-type`\n- `allow-register-listener`\n- `allow-remove-listener`", + "type": "string", + "const": "core:app:default", + "markdownDescription": "Default permissions for the plugin.\n#### This default permission set includes:\n\n- `allow-version`\n- `allow-name`\n- `allow-tauri-version`\n- `allow-identifier`\n- `allow-bundle-type`\n- `allow-register-listener`\n- `allow-remove-listener`" + }, + { + "description": "Enables the app_hide command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-app-hide", + "markdownDescription": "Enables the app_hide command without any pre-configured scope." 
+ }, + { + "description": "Enables the app_show command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-app-show", + "markdownDescription": "Enables the app_show command without any pre-configured scope." + }, + { + "description": "Enables the bundle_type command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-bundle-type", + "markdownDescription": "Enables the bundle_type command without any pre-configured scope." + }, + { + "description": "Enables the default_window_icon command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-default-window-icon", + "markdownDescription": "Enables the default_window_icon command without any pre-configured scope." + }, + { + "description": "Enables the fetch_data_store_identifiers command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-fetch-data-store-identifiers", + "markdownDescription": "Enables the fetch_data_store_identifiers command without any pre-configured scope." + }, + { + "description": "Enables the identifier command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-identifier", + "markdownDescription": "Enables the identifier command without any pre-configured scope." + }, + { + "description": "Enables the name command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-name", + "markdownDescription": "Enables the name command without any pre-configured scope." + }, + { + "description": "Enables the register_listener command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-register-listener", + "markdownDescription": "Enables the register_listener command without any pre-configured scope." 
+ }, + { + "description": "Enables the remove_data_store command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-remove-data-store", + "markdownDescription": "Enables the remove_data_store command without any pre-configured scope." + }, + { + "description": "Enables the remove_listener command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-remove-listener", + "markdownDescription": "Enables the remove_listener command without any pre-configured scope." + }, + { + "description": "Enables the set_app_theme command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-set-app-theme", + "markdownDescription": "Enables the set_app_theme command without any pre-configured scope." + }, + { + "description": "Enables the set_dock_visibility command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-set-dock-visibility", + "markdownDescription": "Enables the set_dock_visibility command without any pre-configured scope." + }, + { + "description": "Enables the tauri_version command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-tauri-version", + "markdownDescription": "Enables the tauri_version command without any pre-configured scope." + }, + { + "description": "Enables the version command without any pre-configured scope.", + "type": "string", + "const": "core:app:allow-version", + "markdownDescription": "Enables the version command without any pre-configured scope." + }, + { + "description": "Denies the app_hide command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-app-hide", + "markdownDescription": "Denies the app_hide command without any pre-configured scope." 
+ }, + { + "description": "Denies the app_show command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-app-show", + "markdownDescription": "Denies the app_show command without any pre-configured scope." + }, + { + "description": "Denies the bundle_type command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-bundle-type", + "markdownDescription": "Denies the bundle_type command without any pre-configured scope." + }, + { + "description": "Denies the default_window_icon command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-default-window-icon", + "markdownDescription": "Denies the default_window_icon command without any pre-configured scope." + }, + { + "description": "Denies the fetch_data_store_identifiers command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-fetch-data-store-identifiers", + "markdownDescription": "Denies the fetch_data_store_identifiers command without any pre-configured scope." + }, + { + "description": "Denies the identifier command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-identifier", + "markdownDescription": "Denies the identifier command without any pre-configured scope." + }, + { + "description": "Denies the name command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-name", + "markdownDescription": "Denies the name command without any pre-configured scope." + }, + { + "description": "Denies the register_listener command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-register-listener", + "markdownDescription": "Denies the register_listener command without any pre-configured scope." 
+ }, + { + "description": "Denies the remove_data_store command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-remove-data-store", + "markdownDescription": "Denies the remove_data_store command without any pre-configured scope." + }, + { + "description": "Denies the remove_listener command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-remove-listener", + "markdownDescription": "Denies the remove_listener command without any pre-configured scope." + }, + { + "description": "Denies the set_app_theme command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-set-app-theme", + "markdownDescription": "Denies the set_app_theme command without any pre-configured scope." + }, + { + "description": "Denies the set_dock_visibility command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-set-dock-visibility", + "markdownDescription": "Denies the set_dock_visibility command without any pre-configured scope." + }, + { + "description": "Denies the tauri_version command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-tauri-version", + "markdownDescription": "Denies the tauri_version command without any pre-configured scope." + }, + { + "description": "Denies the version command without any pre-configured scope.", + "type": "string", + "const": "core:app:deny-version", + "markdownDescription": "Denies the version command without any pre-configured scope." 
+ }, + { + "description": "Default permissions for the plugin, which enables all commands.\n#### This default permission set includes:\n\n- `allow-listen`\n- `allow-unlisten`\n- `allow-emit`\n- `allow-emit-to`", + "type": "string", + "const": "core:event:default", + "markdownDescription": "Default permissions for the plugin, which enables all commands.\n#### This default permission set includes:\n\n- `allow-listen`\n- `allow-unlisten`\n- `allow-emit`\n- `allow-emit-to`" + }, + { + "description": "Enables the emit command without any pre-configured scope.", + "type": "string", + "const": "core:event:allow-emit", + "markdownDescription": "Enables the emit command without any pre-configured scope." + }, + { + "description": "Enables the emit_to command without any pre-configured scope.", + "type": "string", + "const": "core:event:allow-emit-to", + "markdownDescription": "Enables the emit_to command without any pre-configured scope." + }, + { + "description": "Enables the listen command without any pre-configured scope.", + "type": "string", + "const": "core:event:allow-listen", + "markdownDescription": "Enables the listen command without any pre-configured scope." + }, + { + "description": "Enables the unlisten command without any pre-configured scope.", + "type": "string", + "const": "core:event:allow-unlisten", + "markdownDescription": "Enables the unlisten command without any pre-configured scope." + }, + { + "description": "Denies the emit command without any pre-configured scope.", + "type": "string", + "const": "core:event:deny-emit", + "markdownDescription": "Denies the emit command without any pre-configured scope." + }, + { + "description": "Denies the emit_to command without any pre-configured scope.", + "type": "string", + "const": "core:event:deny-emit-to", + "markdownDescription": "Denies the emit_to command without any pre-configured scope." 
+ }, + { + "description": "Denies the listen command without any pre-configured scope.", + "type": "string", + "const": "core:event:deny-listen", + "markdownDescription": "Denies the listen command without any pre-configured scope." + }, + { + "description": "Denies the unlisten command without any pre-configured scope.", + "type": "string", + "const": "core:event:deny-unlisten", + "markdownDescription": "Denies the unlisten command without any pre-configured scope." + }, + { + "description": "Default permissions for the plugin, which enables all commands.\n#### This default permission set includes:\n\n- `allow-new`\n- `allow-from-bytes`\n- `allow-from-path`\n- `allow-rgba`\n- `allow-size`", + "type": "string", + "const": "core:image:default", + "markdownDescription": "Default permissions for the plugin, which enables all commands.\n#### This default permission set includes:\n\n- `allow-new`\n- `allow-from-bytes`\n- `allow-from-path`\n- `allow-rgba`\n- `allow-size`" + }, + { + "description": "Enables the from_bytes command without any pre-configured scope.", + "type": "string", + "const": "core:image:allow-from-bytes", + "markdownDescription": "Enables the from_bytes command without any pre-configured scope." + }, + { + "description": "Enables the from_path command without any pre-configured scope.", + "type": "string", + "const": "core:image:allow-from-path", + "markdownDescription": "Enables the from_path command without any pre-configured scope." + }, + { + "description": "Enables the new command without any pre-configured scope.", + "type": "string", + "const": "core:image:allow-new", + "markdownDescription": "Enables the new command without any pre-configured scope." + }, + { + "description": "Enables the rgba command without any pre-configured scope.", + "type": "string", + "const": "core:image:allow-rgba", + "markdownDescription": "Enables the rgba command without any pre-configured scope." 
+ }, + { + "description": "Enables the size command without any pre-configured scope.", + "type": "string", + "const": "core:image:allow-size", + "markdownDescription": "Enables the size command without any pre-configured scope." + }, + { + "description": "Denies the from_bytes command without any pre-configured scope.", + "type": "string", + "const": "core:image:deny-from-bytes", + "markdownDescription": "Denies the from_bytes command without any pre-configured scope." + }, + { + "description": "Denies the from_path command without any pre-configured scope.", + "type": "string", + "const": "core:image:deny-from-path", + "markdownDescription": "Denies the from_path command without any pre-configured scope." + }, + { + "description": "Denies the new command without any pre-configured scope.", + "type": "string", + "const": "core:image:deny-new", + "markdownDescription": "Denies the new command without any pre-configured scope." + }, + { + "description": "Denies the rgba command without any pre-configured scope.", + "type": "string", + "const": "core:image:deny-rgba", + "markdownDescription": "Denies the rgba command without any pre-configured scope." + }, + { + "description": "Denies the size command without any pre-configured scope.", + "type": "string", + "const": "core:image:deny-size", + "markdownDescription": "Denies the size command without any pre-configured scope." 
+ }, + { + "description": "Default permissions for the plugin, which enables all commands.\n#### This default permission set includes:\n\n- `allow-new`\n- `allow-append`\n- `allow-prepend`\n- `allow-insert`\n- `allow-remove`\n- `allow-remove-at`\n- `allow-items`\n- `allow-get`\n- `allow-popup`\n- `allow-create-default`\n- `allow-set-as-app-menu`\n- `allow-set-as-window-menu`\n- `allow-text`\n- `allow-set-text`\n- `allow-is-enabled`\n- `allow-set-enabled`\n- `allow-set-accelerator`\n- `allow-set-as-windows-menu-for-nsapp`\n- `allow-set-as-help-menu-for-nsapp`\n- `allow-is-checked`\n- `allow-set-checked`\n- `allow-set-icon`", + "type": "string", + "const": "core:menu:default", + "markdownDescription": "Default permissions for the plugin, which enables all commands.\n#### This default permission set includes:\n\n- `allow-new`\n- `allow-append`\n- `allow-prepend`\n- `allow-insert`\n- `allow-remove`\n- `allow-remove-at`\n- `allow-items`\n- `allow-get`\n- `allow-popup`\n- `allow-create-default`\n- `allow-set-as-app-menu`\n- `allow-set-as-window-menu`\n- `allow-text`\n- `allow-set-text`\n- `allow-is-enabled`\n- `allow-set-enabled`\n- `allow-set-accelerator`\n- `allow-set-as-windows-menu-for-nsapp`\n- `allow-set-as-help-menu-for-nsapp`\n- `allow-is-checked`\n- `allow-set-checked`\n- `allow-set-icon`" + }, + { + "description": "Enables the append command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-append", + "markdownDescription": "Enables the append command without any pre-configured scope." + }, + { + "description": "Enables the create_default command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-create-default", + "markdownDescription": "Enables the create_default command without any pre-configured scope." 
+ }, + { + "description": "Enables the get command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-get", + "markdownDescription": "Enables the get command without any pre-configured scope." + }, + { + "description": "Enables the insert command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-insert", + "markdownDescription": "Enables the insert command without any pre-configured scope." + }, + { + "description": "Enables the is_checked command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-is-checked", + "markdownDescription": "Enables the is_checked command without any pre-configured scope." + }, + { + "description": "Enables the is_enabled command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-is-enabled", + "markdownDescription": "Enables the is_enabled command without any pre-configured scope." + }, + { + "description": "Enables the items command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-items", + "markdownDescription": "Enables the items command without any pre-configured scope." + }, + { + "description": "Enables the new command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-new", + "markdownDescription": "Enables the new command without any pre-configured scope." + }, + { + "description": "Enables the popup command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-popup", + "markdownDescription": "Enables the popup command without any pre-configured scope." + }, + { + "description": "Enables the prepend command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-prepend", + "markdownDescription": "Enables the prepend command without any pre-configured scope." 
+ }, + { + "description": "Enables the remove command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-remove", + "markdownDescription": "Enables the remove command without any pre-configured scope." + }, + { + "description": "Enables the remove_at command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-remove-at", + "markdownDescription": "Enables the remove_at command without any pre-configured scope." + }, + { + "description": "Enables the set_accelerator command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-set-accelerator", + "markdownDescription": "Enables the set_accelerator command without any pre-configured scope." + }, + { + "description": "Enables the set_as_app_menu command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-set-as-app-menu", + "markdownDescription": "Enables the set_as_app_menu command without any pre-configured scope." + }, + { + "description": "Enables the set_as_help_menu_for_nsapp command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-set-as-help-menu-for-nsapp", + "markdownDescription": "Enables the set_as_help_menu_for_nsapp command without any pre-configured scope." + }, + { + "description": "Enables the set_as_window_menu command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-set-as-window-menu", + "markdownDescription": "Enables the set_as_window_menu command without any pre-configured scope." + }, + { + "description": "Enables the set_as_windows_menu_for_nsapp command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-set-as-windows-menu-for-nsapp", + "markdownDescription": "Enables the set_as_windows_menu_for_nsapp command without any pre-configured scope." 
+ }, + { + "description": "Enables the set_checked command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-set-checked", + "markdownDescription": "Enables the set_checked command without any pre-configured scope." + }, + { + "description": "Enables the set_enabled command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-set-enabled", + "markdownDescription": "Enables the set_enabled command without any pre-configured scope." + }, + { + "description": "Enables the set_icon command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-set-icon", + "markdownDescription": "Enables the set_icon command without any pre-configured scope." + }, + { + "description": "Enables the set_text command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-set-text", + "markdownDescription": "Enables the set_text command without any pre-configured scope." + }, + { + "description": "Enables the text command without any pre-configured scope.", + "type": "string", + "const": "core:menu:allow-text", + "markdownDescription": "Enables the text command without any pre-configured scope." + }, + { + "description": "Denies the append command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-append", + "markdownDescription": "Denies the append command without any pre-configured scope." + }, + { + "description": "Denies the create_default command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-create-default", + "markdownDescription": "Denies the create_default command without any pre-configured scope." + }, + { + "description": "Denies the get command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-get", + "markdownDescription": "Denies the get command without any pre-configured scope." 
+ }, + { + "description": "Denies the insert command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-insert", + "markdownDescription": "Denies the insert command without any pre-configured scope." + }, + { + "description": "Denies the is_checked command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-is-checked", + "markdownDescription": "Denies the is_checked command without any pre-configured scope." + }, + { + "description": "Denies the is_enabled command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-is-enabled", + "markdownDescription": "Denies the is_enabled command without any pre-configured scope." + }, + { + "description": "Denies the items command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-items", + "markdownDescription": "Denies the items command without any pre-configured scope." + }, + { + "description": "Denies the new command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-new", + "markdownDescription": "Denies the new command without any pre-configured scope." + }, + { + "description": "Denies the popup command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-popup", + "markdownDescription": "Denies the popup command without any pre-configured scope." + }, + { + "description": "Denies the prepend command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-prepend", + "markdownDescription": "Denies the prepend command without any pre-configured scope." + }, + { + "description": "Denies the remove command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-remove", + "markdownDescription": "Denies the remove command without any pre-configured scope." 
+ }, + { + "description": "Denies the remove_at command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-remove-at", + "markdownDescription": "Denies the remove_at command without any pre-configured scope." + }, + { + "description": "Denies the set_accelerator command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-set-accelerator", + "markdownDescription": "Denies the set_accelerator command without any pre-configured scope." + }, + { + "description": "Denies the set_as_app_menu command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-set-as-app-menu", + "markdownDescription": "Denies the set_as_app_menu command without any pre-configured scope." + }, + { + "description": "Denies the set_as_help_menu_for_nsapp command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-set-as-help-menu-for-nsapp", + "markdownDescription": "Denies the set_as_help_menu_for_nsapp command without any pre-configured scope." + }, + { + "description": "Denies the set_as_window_menu command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-set-as-window-menu", + "markdownDescription": "Denies the set_as_window_menu command without any pre-configured scope." + }, + { + "description": "Denies the set_as_windows_menu_for_nsapp command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-set-as-windows-menu-for-nsapp", + "markdownDescription": "Denies the set_as_windows_menu_for_nsapp command without any pre-configured scope." + }, + { + "description": "Denies the set_checked command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-set-checked", + "markdownDescription": "Denies the set_checked command without any pre-configured scope." 
+ }, + { + "description": "Denies the set_enabled command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-set-enabled", + "markdownDescription": "Denies the set_enabled command without any pre-configured scope." + }, + { + "description": "Denies the set_icon command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-set-icon", + "markdownDescription": "Denies the set_icon command without any pre-configured scope." + }, + { + "description": "Denies the set_text command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-set-text", + "markdownDescription": "Denies the set_text command without any pre-configured scope." + }, + { + "description": "Denies the text command without any pre-configured scope.", + "type": "string", + "const": "core:menu:deny-text", + "markdownDescription": "Denies the text command without any pre-configured scope." + }, + { + "description": "Default permissions for the plugin, which enables all commands.\n#### This default permission set includes:\n\n- `allow-resolve-directory`\n- `allow-resolve`\n- `allow-normalize`\n- `allow-join`\n- `allow-dirname`\n- `allow-extname`\n- `allow-basename`\n- `allow-is-absolute`", + "type": "string", + "const": "core:path:default", + "markdownDescription": "Default permissions for the plugin, which enables all commands.\n#### This default permission set includes:\n\n- `allow-resolve-directory`\n- `allow-resolve`\n- `allow-normalize`\n- `allow-join`\n- `allow-dirname`\n- `allow-extname`\n- `allow-basename`\n- `allow-is-absolute`" + }, + { + "description": "Enables the basename command without any pre-configured scope.", + "type": "string", + "const": "core:path:allow-basename", + "markdownDescription": "Enables the basename command without any pre-configured scope." 
+ }, + { + "description": "Enables the dirname command without any pre-configured scope.", + "type": "string", + "const": "core:path:allow-dirname", + "markdownDescription": "Enables the dirname command without any pre-configured scope." + }, + { + "description": "Enables the extname command without any pre-configured scope.", + "type": "string", + "const": "core:path:allow-extname", + "markdownDescription": "Enables the extname command without any pre-configured scope." + }, + { + "description": "Enables the is_absolute command without any pre-configured scope.", + "type": "string", + "const": "core:path:allow-is-absolute", + "markdownDescription": "Enables the is_absolute command without any pre-configured scope." + }, + { + "description": "Enables the join command without any pre-configured scope.", + "type": "string", + "const": "core:path:allow-join", + "markdownDescription": "Enables the join command without any pre-configured scope." + }, + { + "description": "Enables the normalize command without any pre-configured scope.", + "type": "string", + "const": "core:path:allow-normalize", + "markdownDescription": "Enables the normalize command without any pre-configured scope." + }, + { + "description": "Enables the resolve command without any pre-configured scope.", + "type": "string", + "const": "core:path:allow-resolve", + "markdownDescription": "Enables the resolve command without any pre-configured scope." + }, + { + "description": "Enables the resolve_directory command without any pre-configured scope.", + "type": "string", + "const": "core:path:allow-resolve-directory", + "markdownDescription": "Enables the resolve_directory command without any pre-configured scope." + }, + { + "description": "Denies the basename command without any pre-configured scope.", + "type": "string", + "const": "core:path:deny-basename", + "markdownDescription": "Denies the basename command without any pre-configured scope." 
+ }, + { + "description": "Denies the dirname command without any pre-configured scope.", + "type": "string", + "const": "core:path:deny-dirname", + "markdownDescription": "Denies the dirname command without any pre-configured scope." + }, + { + "description": "Denies the extname command without any pre-configured scope.", + "type": "string", + "const": "core:path:deny-extname", + "markdownDescription": "Denies the extname command without any pre-configured scope." + }, + { + "description": "Denies the is_absolute command without any pre-configured scope.", + "type": "string", + "const": "core:path:deny-is-absolute", + "markdownDescription": "Denies the is_absolute command without any pre-configured scope." + }, + { + "description": "Denies the join command without any pre-configured scope.", + "type": "string", + "const": "core:path:deny-join", + "markdownDescription": "Denies the join command without any pre-configured scope." + }, + { + "description": "Denies the normalize command without any pre-configured scope.", + "type": "string", + "const": "core:path:deny-normalize", + "markdownDescription": "Denies the normalize command without any pre-configured scope." + }, + { + "description": "Denies the resolve command without any pre-configured scope.", + "type": "string", + "const": "core:path:deny-resolve", + "markdownDescription": "Denies the resolve command without any pre-configured scope." + }, + { + "description": "Denies the resolve_directory command without any pre-configured scope.", + "type": "string", + "const": "core:path:deny-resolve-directory", + "markdownDescription": "Denies the resolve_directory command without any pre-configured scope." 
+ }, + { + "description": "Default permissions for the plugin, which enables all commands.\n#### This default permission set includes:\n\n- `allow-close`", + "type": "string", + "const": "core:resources:default", + "markdownDescription": "Default permissions for the plugin, which enables all commands.\n#### This default permission set includes:\n\n- `allow-close`" + }, + { + "description": "Enables the close command without any pre-configured scope.", + "type": "string", + "const": "core:resources:allow-close", + "markdownDescription": "Enables the close command without any pre-configured scope." + }, + { + "description": "Denies the close command without any pre-configured scope.", + "type": "string", + "const": "core:resources:deny-close", + "markdownDescription": "Denies the close command without any pre-configured scope." + }, + { + "description": "Default permissions for the plugin, which enables all commands.\n#### This default permission set includes:\n\n- `allow-new`\n- `allow-get-by-id`\n- `allow-remove-by-id`\n- `allow-set-icon`\n- `allow-set-menu`\n- `allow-set-tooltip`\n- `allow-set-title`\n- `allow-set-visible`\n- `allow-set-temp-dir-path`\n- `allow-set-icon-as-template`\n- `allow-set-show-menu-on-left-click`", + "type": "string", + "const": "core:tray:default", + "markdownDescription": "Default permissions for the plugin, which enables all commands.\n#### This default permission set includes:\n\n- `allow-new`\n- `allow-get-by-id`\n- `allow-remove-by-id`\n- `allow-set-icon`\n- `allow-set-menu`\n- `allow-set-tooltip`\n- `allow-set-title`\n- `allow-set-visible`\n- `allow-set-temp-dir-path`\n- `allow-set-icon-as-template`\n- `allow-set-show-menu-on-left-click`" + }, + { + "description": "Enables the get_by_id command without any pre-configured scope.", + "type": "string", + "const": "core:tray:allow-get-by-id", + "markdownDescription": "Enables the get_by_id command without any pre-configured scope." 
+ }, + { + "description": "Enables the new command without any pre-configured scope.", + "type": "string", + "const": "core:tray:allow-new", + "markdownDescription": "Enables the new command without any pre-configured scope." + }, + { + "description": "Enables the remove_by_id command without any pre-configured scope.", + "type": "string", + "const": "core:tray:allow-remove-by-id", + "markdownDescription": "Enables the remove_by_id command without any pre-configured scope." + }, + { + "description": "Enables the set_icon command without any pre-configured scope.", + "type": "string", + "const": "core:tray:allow-set-icon", + "markdownDescription": "Enables the set_icon command without any pre-configured scope." + }, + { + "description": "Enables the set_icon_as_template command without any pre-configured scope.", + "type": "string", + "const": "core:tray:allow-set-icon-as-template", + "markdownDescription": "Enables the set_icon_as_template command without any pre-configured scope." + }, + { + "description": "Enables the set_menu command without any pre-configured scope.", + "type": "string", + "const": "core:tray:allow-set-menu", + "markdownDescription": "Enables the set_menu command without any pre-configured scope." + }, + { + "description": "Enables the set_show_menu_on_left_click command without any pre-configured scope.", + "type": "string", + "const": "core:tray:allow-set-show-menu-on-left-click", + "markdownDescription": "Enables the set_show_menu_on_left_click command without any pre-configured scope." + }, + { + "description": "Enables the set_temp_dir_path command without any pre-configured scope.", + "type": "string", + "const": "core:tray:allow-set-temp-dir-path", + "markdownDescription": "Enables the set_temp_dir_path command without any pre-configured scope." 
+ }, + { + "description": "Enables the set_title command without any pre-configured scope.", + "type": "string", + "const": "core:tray:allow-set-title", + "markdownDescription": "Enables the set_title command without any pre-configured scope." + }, + { + "description": "Enables the set_tooltip command without any pre-configured scope.", + "type": "string", + "const": "core:tray:allow-set-tooltip", + "markdownDescription": "Enables the set_tooltip command without any pre-configured scope." + }, + { + "description": "Enables the set_visible command without any pre-configured scope.", + "type": "string", + "const": "core:tray:allow-set-visible", + "markdownDescription": "Enables the set_visible command without any pre-configured scope." + }, + { + "description": "Denies the get_by_id command without any pre-configured scope.", + "type": "string", + "const": "core:tray:deny-get-by-id", + "markdownDescription": "Denies the get_by_id command without any pre-configured scope." + }, + { + "description": "Denies the new command without any pre-configured scope.", + "type": "string", + "const": "core:tray:deny-new", + "markdownDescription": "Denies the new command without any pre-configured scope." + }, + { + "description": "Denies the remove_by_id command without any pre-configured scope.", + "type": "string", + "const": "core:tray:deny-remove-by-id", + "markdownDescription": "Denies the remove_by_id command without any pre-configured scope." + }, + { + "description": "Denies the set_icon command without any pre-configured scope.", + "type": "string", + "const": "core:tray:deny-set-icon", + "markdownDescription": "Denies the set_icon command without any pre-configured scope." + }, + { + "description": "Denies the set_icon_as_template command without any pre-configured scope.", + "type": "string", + "const": "core:tray:deny-set-icon-as-template", + "markdownDescription": "Denies the set_icon_as_template command without any pre-configured scope." 
+ }, + { + "description": "Denies the set_menu command without any pre-configured scope.", + "type": "string", + "const": "core:tray:deny-set-menu", + "markdownDescription": "Denies the set_menu command without any pre-configured scope." + }, + { + "description": "Denies the set_show_menu_on_left_click command without any pre-configured scope.", + "type": "string", + "const": "core:tray:deny-set-show-menu-on-left-click", + "markdownDescription": "Denies the set_show_menu_on_left_click command without any pre-configured scope." + }, + { + "description": "Denies the set_temp_dir_path command without any pre-configured scope.", + "type": "string", + "const": "core:tray:deny-set-temp-dir-path", + "markdownDescription": "Denies the set_temp_dir_path command without any pre-configured scope." + }, + { + "description": "Denies the set_title command without any pre-configured scope.", + "type": "string", + "const": "core:tray:deny-set-title", + "markdownDescription": "Denies the set_title command without any pre-configured scope." + }, + { + "description": "Denies the set_tooltip command without any pre-configured scope.", + "type": "string", + "const": "core:tray:deny-set-tooltip", + "markdownDescription": "Denies the set_tooltip command without any pre-configured scope." + }, + { + "description": "Denies the set_visible command without any pre-configured scope.", + "type": "string", + "const": "core:tray:deny-set-visible", + "markdownDescription": "Denies the set_visible command without any pre-configured scope." 
+ }, + { + "description": "Default permissions for the plugin.\n#### This default permission set includes:\n\n- `allow-get-all-webviews`\n- `allow-webview-position`\n- `allow-webview-size`\n- `allow-internal-toggle-devtools`", + "type": "string", + "const": "core:webview:default", + "markdownDescription": "Default permissions for the plugin.\n#### This default permission set includes:\n\n- `allow-get-all-webviews`\n- `allow-webview-position`\n- `allow-webview-size`\n- `allow-internal-toggle-devtools`" + }, + { + "description": "Enables the clear_all_browsing_data command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-clear-all-browsing-data", + "markdownDescription": "Enables the clear_all_browsing_data command without any pre-configured scope." + }, + { + "description": "Enables the create_webview command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-create-webview", + "markdownDescription": "Enables the create_webview command without any pre-configured scope." + }, + { + "description": "Enables the create_webview_window command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-create-webview-window", + "markdownDescription": "Enables the create_webview_window command without any pre-configured scope." + }, + { + "description": "Enables the get_all_webviews command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-get-all-webviews", + "markdownDescription": "Enables the get_all_webviews command without any pre-configured scope." + }, + { + "description": "Enables the internal_toggle_devtools command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-internal-toggle-devtools", + "markdownDescription": "Enables the internal_toggle_devtools command without any pre-configured scope." 
+ }, + { + "description": "Enables the print command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-print", + "markdownDescription": "Enables the print command without any pre-configured scope." + }, + { + "description": "Enables the reparent command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-reparent", + "markdownDescription": "Enables the reparent command without any pre-configured scope." + }, + { + "description": "Enables the set_webview_auto_resize command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-set-webview-auto-resize", + "markdownDescription": "Enables the set_webview_auto_resize command without any pre-configured scope." + }, + { + "description": "Enables the set_webview_background_color command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-set-webview-background-color", + "markdownDescription": "Enables the set_webview_background_color command without any pre-configured scope." + }, + { + "description": "Enables the set_webview_focus command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-set-webview-focus", + "markdownDescription": "Enables the set_webview_focus command without any pre-configured scope." + }, + { + "description": "Enables the set_webview_position command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-set-webview-position", + "markdownDescription": "Enables the set_webview_position command without any pre-configured scope." + }, + { + "description": "Enables the set_webview_size command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-set-webview-size", + "markdownDescription": "Enables the set_webview_size command without any pre-configured scope." 
+ }, + { + "description": "Enables the set_webview_zoom command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-set-webview-zoom", + "markdownDescription": "Enables the set_webview_zoom command without any pre-configured scope." + }, + { + "description": "Enables the webview_close command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-webview-close", + "markdownDescription": "Enables the webview_close command without any pre-configured scope." + }, + { + "description": "Enables the webview_hide command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-webview-hide", + "markdownDescription": "Enables the webview_hide command without any pre-configured scope." + }, + { + "description": "Enables the webview_position command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-webview-position", + "markdownDescription": "Enables the webview_position command without any pre-configured scope." + }, + { + "description": "Enables the webview_show command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-webview-show", + "markdownDescription": "Enables the webview_show command without any pre-configured scope." + }, + { + "description": "Enables the webview_size command without any pre-configured scope.", + "type": "string", + "const": "core:webview:allow-webview-size", + "markdownDescription": "Enables the webview_size command without any pre-configured scope." + }, + { + "description": "Denies the clear_all_browsing_data command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-clear-all-browsing-data", + "markdownDescription": "Denies the clear_all_browsing_data command without any pre-configured scope." 
+ }, + { + "description": "Denies the create_webview command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-create-webview", + "markdownDescription": "Denies the create_webview command without any pre-configured scope." + }, + { + "description": "Denies the create_webview_window command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-create-webview-window", + "markdownDescription": "Denies the create_webview_window command without any pre-configured scope." + }, + { + "description": "Denies the get_all_webviews command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-get-all-webviews", + "markdownDescription": "Denies the get_all_webviews command without any pre-configured scope." + }, + { + "description": "Denies the internal_toggle_devtools command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-internal-toggle-devtools", + "markdownDescription": "Denies the internal_toggle_devtools command without any pre-configured scope." + }, + { + "description": "Denies the print command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-print", + "markdownDescription": "Denies the print command without any pre-configured scope." + }, + { + "description": "Denies the reparent command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-reparent", + "markdownDescription": "Denies the reparent command without any pre-configured scope." + }, + { + "description": "Denies the set_webview_auto_resize command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-set-webview-auto-resize", + "markdownDescription": "Denies the set_webview_auto_resize command without any pre-configured scope." 
+ }, + { + "description": "Denies the set_webview_background_color command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-set-webview-background-color", + "markdownDescription": "Denies the set_webview_background_color command without any pre-configured scope." + }, + { + "description": "Denies the set_webview_focus command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-set-webview-focus", + "markdownDescription": "Denies the set_webview_focus command without any pre-configured scope." + }, + { + "description": "Denies the set_webview_position command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-set-webview-position", + "markdownDescription": "Denies the set_webview_position command without any pre-configured scope." + }, + { + "description": "Denies the set_webview_size command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-set-webview-size", + "markdownDescription": "Denies the set_webview_size command without any pre-configured scope." + }, + { + "description": "Denies the set_webview_zoom command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-set-webview-zoom", + "markdownDescription": "Denies the set_webview_zoom command without any pre-configured scope." + }, + { + "description": "Denies the webview_close command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-webview-close", + "markdownDescription": "Denies the webview_close command without any pre-configured scope." + }, + { + "description": "Denies the webview_hide command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-webview-hide", + "markdownDescription": "Denies the webview_hide command without any pre-configured scope." 
+ }, + { + "description": "Denies the webview_position command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-webview-position", + "markdownDescription": "Denies the webview_position command without any pre-configured scope." + }, + { + "description": "Denies the webview_show command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-webview-show", + "markdownDescription": "Denies the webview_show command without any pre-configured scope." + }, + { + "description": "Denies the webview_size command without any pre-configured scope.", + "type": "string", + "const": "core:webview:deny-webview-size", + "markdownDescription": "Denies the webview_size command without any pre-configured scope." + }, + { + "description": "Default permissions for the plugin.\n#### This default permission set includes:\n\n- `allow-get-all-windows`\n- `allow-scale-factor`\n- `allow-inner-position`\n- `allow-outer-position`\n- `allow-inner-size`\n- `allow-outer-size`\n- `allow-is-fullscreen`\n- `allow-is-minimized`\n- `allow-is-maximized`\n- `allow-is-focused`\n- `allow-is-decorated`\n- `allow-is-resizable`\n- `allow-is-maximizable`\n- `allow-is-minimizable`\n- `allow-is-closable`\n- `allow-is-visible`\n- `allow-is-enabled`\n- `allow-title`\n- `allow-current-monitor`\n- `allow-primary-monitor`\n- `allow-monitor-from-point`\n- `allow-available-monitors`\n- `allow-cursor-position`\n- `allow-theme`\n- `allow-is-always-on-top`\n- `allow-internal-toggle-maximize`", + "type": "string", + "const": "core:window:default", + "markdownDescription": "Default permissions for the plugin.\n#### This default permission set includes:\n\n- `allow-get-all-windows`\n- `allow-scale-factor`\n- `allow-inner-position`\n- `allow-outer-position`\n- `allow-inner-size`\n- `allow-outer-size`\n- `allow-is-fullscreen`\n- `allow-is-minimized`\n- `allow-is-maximized`\n- `allow-is-focused`\n- `allow-is-decorated`\n- `allow-is-resizable`\n- 
`allow-is-maximizable`\n- `allow-is-minimizable`\n- `allow-is-closable`\n- `allow-is-visible`\n- `allow-is-enabled`\n- `allow-title`\n- `allow-current-monitor`\n- `allow-primary-monitor`\n- `allow-monitor-from-point`\n- `allow-available-monitors`\n- `allow-cursor-position`\n- `allow-theme`\n- `allow-is-always-on-top`\n- `allow-internal-toggle-maximize`" + }, + { + "description": "Enables the available_monitors command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-available-monitors", + "markdownDescription": "Enables the available_monitors command without any pre-configured scope." + }, + { + "description": "Enables the center command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-center", + "markdownDescription": "Enables the center command without any pre-configured scope." + }, + { + "description": "Enables the close command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-close", + "markdownDescription": "Enables the close command without any pre-configured scope." + }, + { + "description": "Enables the create command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-create", + "markdownDescription": "Enables the create command without any pre-configured scope." + }, + { + "description": "Enables the current_monitor command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-current-monitor", + "markdownDescription": "Enables the current_monitor command without any pre-configured scope." + }, + { + "description": "Enables the cursor_position command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-cursor-position", + "markdownDescription": "Enables the cursor_position command without any pre-configured scope." 
+ }, + { + "description": "Enables the destroy command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-destroy", + "markdownDescription": "Enables the destroy command without any pre-configured scope." + }, + { + "description": "Enables the get_all_windows command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-get-all-windows", + "markdownDescription": "Enables the get_all_windows command without any pre-configured scope." + }, + { + "description": "Enables the hide command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-hide", + "markdownDescription": "Enables the hide command without any pre-configured scope." + }, + { + "description": "Enables the inner_position command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-inner-position", + "markdownDescription": "Enables the inner_position command without any pre-configured scope." + }, + { + "description": "Enables the inner_size command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-inner-size", + "markdownDescription": "Enables the inner_size command without any pre-configured scope." + }, + { + "description": "Enables the internal_toggle_maximize command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-internal-toggle-maximize", + "markdownDescription": "Enables the internal_toggle_maximize command without any pre-configured scope." + }, + { + "description": "Enables the is_always_on_top command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-is-always-on-top", + "markdownDescription": "Enables the is_always_on_top command without any pre-configured scope." 
+ }, + { + "description": "Enables the is_closable command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-is-closable", + "markdownDescription": "Enables the is_closable command without any pre-configured scope." + }, + { + "description": "Enables the is_decorated command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-is-decorated", + "markdownDescription": "Enables the is_decorated command without any pre-configured scope." + }, + { + "description": "Enables the is_enabled command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-is-enabled", + "markdownDescription": "Enables the is_enabled command without any pre-configured scope." + }, + { + "description": "Enables the is_focused command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-is-focused", + "markdownDescription": "Enables the is_focused command without any pre-configured scope." + }, + { + "description": "Enables the is_fullscreen command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-is-fullscreen", + "markdownDescription": "Enables the is_fullscreen command without any pre-configured scope." + }, + { + "description": "Enables the is_maximizable command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-is-maximizable", + "markdownDescription": "Enables the is_maximizable command without any pre-configured scope." + }, + { + "description": "Enables the is_maximized command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-is-maximized", + "markdownDescription": "Enables the is_maximized command without any pre-configured scope." 
+ }, + { + "description": "Enables the is_minimizable command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-is-minimizable", + "markdownDescription": "Enables the is_minimizable command without any pre-configured scope." + }, + { + "description": "Enables the is_minimized command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-is-minimized", + "markdownDescription": "Enables the is_minimized command without any pre-configured scope." + }, + { + "description": "Enables the is_resizable command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-is-resizable", + "markdownDescription": "Enables the is_resizable command without any pre-configured scope." + }, + { + "description": "Enables the is_visible command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-is-visible", + "markdownDescription": "Enables the is_visible command without any pre-configured scope." + }, + { + "description": "Enables the maximize command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-maximize", + "markdownDescription": "Enables the maximize command without any pre-configured scope." + }, + { + "description": "Enables the minimize command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-minimize", + "markdownDescription": "Enables the minimize command without any pre-configured scope." + }, + { + "description": "Enables the monitor_from_point command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-monitor-from-point", + "markdownDescription": "Enables the monitor_from_point command without any pre-configured scope." 
+ }, + { + "description": "Enables the outer_position command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-outer-position", + "markdownDescription": "Enables the outer_position command without any pre-configured scope." + }, + { + "description": "Enables the outer_size command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-outer-size", + "markdownDescription": "Enables the outer_size command without any pre-configured scope." + }, + { + "description": "Enables the primary_monitor command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-primary-monitor", + "markdownDescription": "Enables the primary_monitor command without any pre-configured scope." + }, + { + "description": "Enables the request_user_attention command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-request-user-attention", + "markdownDescription": "Enables the request_user_attention command without any pre-configured scope." + }, + { + "description": "Enables the scale_factor command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-scale-factor", + "markdownDescription": "Enables the scale_factor command without any pre-configured scope." + }, + { + "description": "Enables the set_always_on_bottom command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-always-on-bottom", + "markdownDescription": "Enables the set_always_on_bottom command without any pre-configured scope." + }, + { + "description": "Enables the set_always_on_top command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-always-on-top", + "markdownDescription": "Enables the set_always_on_top command without any pre-configured scope." 
+ }, + { + "description": "Enables the set_background_color command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-background-color", + "markdownDescription": "Enables the set_background_color command without any pre-configured scope." + }, + { + "description": "Enables the set_badge_count command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-badge-count", + "markdownDescription": "Enables the set_badge_count command without any pre-configured scope." + }, + { + "description": "Enables the set_badge_label command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-badge-label", + "markdownDescription": "Enables the set_badge_label command without any pre-configured scope." + }, + { + "description": "Enables the set_closable command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-closable", + "markdownDescription": "Enables the set_closable command without any pre-configured scope." + }, + { + "description": "Enables the set_content_protected command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-content-protected", + "markdownDescription": "Enables the set_content_protected command without any pre-configured scope." + }, + { + "description": "Enables the set_cursor_grab command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-cursor-grab", + "markdownDescription": "Enables the set_cursor_grab command without any pre-configured scope." + }, + { + "description": "Enables the set_cursor_icon command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-cursor-icon", + "markdownDescription": "Enables the set_cursor_icon command without any pre-configured scope." 
+ }, + { + "description": "Enables the set_cursor_position command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-cursor-position", + "markdownDescription": "Enables the set_cursor_position command without any pre-configured scope." + }, + { + "description": "Enables the set_cursor_visible command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-cursor-visible", + "markdownDescription": "Enables the set_cursor_visible command without any pre-configured scope." + }, + { + "description": "Enables the set_decorations command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-decorations", + "markdownDescription": "Enables the set_decorations command without any pre-configured scope." + }, + { + "description": "Enables the set_effects command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-effects", + "markdownDescription": "Enables the set_effects command without any pre-configured scope." + }, + { + "description": "Enables the set_enabled command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-enabled", + "markdownDescription": "Enables the set_enabled command without any pre-configured scope." + }, + { + "description": "Enables the set_focus command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-focus", + "markdownDescription": "Enables the set_focus command without any pre-configured scope." + }, + { + "description": "Enables the set_focusable command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-focusable", + "markdownDescription": "Enables the set_focusable command without any pre-configured scope." 
+ }, + { + "description": "Enables the set_fullscreen command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-fullscreen", + "markdownDescription": "Enables the set_fullscreen command without any pre-configured scope." + }, + { + "description": "Enables the set_icon command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-icon", + "markdownDescription": "Enables the set_icon command without any pre-configured scope." + }, + { + "description": "Enables the set_ignore_cursor_events command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-ignore-cursor-events", + "markdownDescription": "Enables the set_ignore_cursor_events command without any pre-configured scope." + }, + { + "description": "Enables the set_max_size command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-max-size", + "markdownDescription": "Enables the set_max_size command without any pre-configured scope." + }, + { + "description": "Enables the set_maximizable command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-maximizable", + "markdownDescription": "Enables the set_maximizable command without any pre-configured scope." + }, + { + "description": "Enables the set_min_size command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-min-size", + "markdownDescription": "Enables the set_min_size command without any pre-configured scope." + }, + { + "description": "Enables the set_minimizable command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-minimizable", + "markdownDescription": "Enables the set_minimizable command without any pre-configured scope." 
+ }, + { + "description": "Enables the set_overlay_icon command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-overlay-icon", + "markdownDescription": "Enables the set_overlay_icon command without any pre-configured scope." + }, + { + "description": "Enables the set_position command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-position", + "markdownDescription": "Enables the set_position command without any pre-configured scope." + }, + { + "description": "Enables the set_progress_bar command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-progress-bar", + "markdownDescription": "Enables the set_progress_bar command without any pre-configured scope." + }, + { + "description": "Enables the set_resizable command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-resizable", + "markdownDescription": "Enables the set_resizable command without any pre-configured scope." + }, + { + "description": "Enables the set_shadow command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-shadow", + "markdownDescription": "Enables the set_shadow command without any pre-configured scope." + }, + { + "description": "Enables the set_simple_fullscreen command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-simple-fullscreen", + "markdownDescription": "Enables the set_simple_fullscreen command without any pre-configured scope." + }, + { + "description": "Enables the set_size command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-size", + "markdownDescription": "Enables the set_size command without any pre-configured scope." 
+ }, + { + "description": "Enables the set_size_constraints command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-size-constraints", + "markdownDescription": "Enables the set_size_constraints command without any pre-configured scope." + }, + { + "description": "Enables the set_skip_taskbar command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-skip-taskbar", + "markdownDescription": "Enables the set_skip_taskbar command without any pre-configured scope." + }, + { + "description": "Enables the set_theme command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-theme", + "markdownDescription": "Enables the set_theme command without any pre-configured scope." + }, + { + "description": "Enables the set_title command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-title", + "markdownDescription": "Enables the set_title command without any pre-configured scope." + }, + { + "description": "Enables the set_title_bar_style command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-title-bar-style", + "markdownDescription": "Enables the set_title_bar_style command without any pre-configured scope." + }, + { + "description": "Enables the set_visible_on_all_workspaces command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-set-visible-on-all-workspaces", + "markdownDescription": "Enables the set_visible_on_all_workspaces command without any pre-configured scope." + }, + { + "description": "Enables the show command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-show", + "markdownDescription": "Enables the show command without any pre-configured scope." 
+ }, + { + "description": "Enables the start_dragging command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-start-dragging", + "markdownDescription": "Enables the start_dragging command without any pre-configured scope." + }, + { + "description": "Enables the start_resize_dragging command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-start-resize-dragging", + "markdownDescription": "Enables the start_resize_dragging command without any pre-configured scope." + }, + { + "description": "Enables the theme command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-theme", + "markdownDescription": "Enables the theme command without any pre-configured scope." + }, + { + "description": "Enables the title command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-title", + "markdownDescription": "Enables the title command without any pre-configured scope." + }, + { + "description": "Enables the toggle_maximize command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-toggle-maximize", + "markdownDescription": "Enables the toggle_maximize command without any pre-configured scope." + }, + { + "description": "Enables the unmaximize command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-unmaximize", + "markdownDescription": "Enables the unmaximize command without any pre-configured scope." + }, + { + "description": "Enables the unminimize command without any pre-configured scope.", + "type": "string", + "const": "core:window:allow-unminimize", + "markdownDescription": "Enables the unminimize command without any pre-configured scope." 
+ }, + { + "description": "Denies the available_monitors command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-available-monitors", + "markdownDescription": "Denies the available_monitors command without any pre-configured scope." + }, + { + "description": "Denies the center command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-center", + "markdownDescription": "Denies the center command without any pre-configured scope." + }, + { + "description": "Denies the close command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-close", + "markdownDescription": "Denies the close command without any pre-configured scope." + }, + { + "description": "Denies the create command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-create", + "markdownDescription": "Denies the create command without any pre-configured scope." + }, + { + "description": "Denies the current_monitor command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-current-monitor", + "markdownDescription": "Denies the current_monitor command without any pre-configured scope." + }, + { + "description": "Denies the cursor_position command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-cursor-position", + "markdownDescription": "Denies the cursor_position command without any pre-configured scope." + }, + { + "description": "Denies the destroy command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-destroy", + "markdownDescription": "Denies the destroy command without any pre-configured scope." + }, + { + "description": "Denies the get_all_windows command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-get-all-windows", + "markdownDescription": "Denies the get_all_windows command without any pre-configured scope." 
+ }, + { + "description": "Denies the hide command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-hide", + "markdownDescription": "Denies the hide command without any pre-configured scope." + }, + { + "description": "Denies the inner_position command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-inner-position", + "markdownDescription": "Denies the inner_position command without any pre-configured scope." + }, + { + "description": "Denies the inner_size command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-inner-size", + "markdownDescription": "Denies the inner_size command without any pre-configured scope." + }, + { + "description": "Denies the internal_toggle_maximize command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-internal-toggle-maximize", + "markdownDescription": "Denies the internal_toggle_maximize command without any pre-configured scope." + }, + { + "description": "Denies the is_always_on_top command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-is-always-on-top", + "markdownDescription": "Denies the is_always_on_top command without any pre-configured scope." + }, + { + "description": "Denies the is_closable command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-is-closable", + "markdownDescription": "Denies the is_closable command without any pre-configured scope." + }, + { + "description": "Denies the is_decorated command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-is-decorated", + "markdownDescription": "Denies the is_decorated command without any pre-configured scope." 
+ }, + { + "description": "Denies the is_enabled command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-is-enabled", + "markdownDescription": "Denies the is_enabled command without any pre-configured scope." + }, + { + "description": "Denies the is_focused command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-is-focused", + "markdownDescription": "Denies the is_focused command without any pre-configured scope." + }, + { + "description": "Denies the is_fullscreen command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-is-fullscreen", + "markdownDescription": "Denies the is_fullscreen command without any pre-configured scope." + }, + { + "description": "Denies the is_maximizable command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-is-maximizable", + "markdownDescription": "Denies the is_maximizable command without any pre-configured scope." + }, + { + "description": "Denies the is_maximized command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-is-maximized", + "markdownDescription": "Denies the is_maximized command without any pre-configured scope." + }, + { + "description": "Denies the is_minimizable command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-is-minimizable", + "markdownDescription": "Denies the is_minimizable command without any pre-configured scope." + }, + { + "description": "Denies the is_minimized command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-is-minimized", + "markdownDescription": "Denies the is_minimized command without any pre-configured scope." 
+ }, + { + "description": "Denies the is_resizable command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-is-resizable", + "markdownDescription": "Denies the is_resizable command without any pre-configured scope." + }, + { + "description": "Denies the is_visible command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-is-visible", + "markdownDescription": "Denies the is_visible command without any pre-configured scope." + }, + { + "description": "Denies the maximize command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-maximize", + "markdownDescription": "Denies the maximize command without any pre-configured scope." + }, + { + "description": "Denies the minimize command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-minimize", + "markdownDescription": "Denies the minimize command without any pre-configured scope." + }, + { + "description": "Denies the monitor_from_point command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-monitor-from-point", + "markdownDescription": "Denies the monitor_from_point command without any pre-configured scope." + }, + { + "description": "Denies the outer_position command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-outer-position", + "markdownDescription": "Denies the outer_position command without any pre-configured scope." + }, + { + "description": "Denies the outer_size command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-outer-size", + "markdownDescription": "Denies the outer_size command without any pre-configured scope." 
+ }, + { + "description": "Denies the primary_monitor command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-primary-monitor", + "markdownDescription": "Denies the primary_monitor command without any pre-configured scope." + }, + { + "description": "Denies the request_user_attention command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-request-user-attention", + "markdownDescription": "Denies the request_user_attention command without any pre-configured scope." + }, + { + "description": "Denies the scale_factor command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-scale-factor", + "markdownDescription": "Denies the scale_factor command without any pre-configured scope." + }, + { + "description": "Denies the set_always_on_bottom command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-always-on-bottom", + "markdownDescription": "Denies the set_always_on_bottom command without any pre-configured scope." + }, + { + "description": "Denies the set_always_on_top command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-always-on-top", + "markdownDescription": "Denies the set_always_on_top command without any pre-configured scope." + }, + { + "description": "Denies the set_background_color command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-background-color", + "markdownDescription": "Denies the set_background_color command without any pre-configured scope." + }, + { + "description": "Denies the set_badge_count command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-badge-count", + "markdownDescription": "Denies the set_badge_count command without any pre-configured scope." 
+ }, + { + "description": "Denies the set_badge_label command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-badge-label", + "markdownDescription": "Denies the set_badge_label command without any pre-configured scope." + }, + { + "description": "Denies the set_closable command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-closable", + "markdownDescription": "Denies the set_closable command without any pre-configured scope." + }, + { + "description": "Denies the set_content_protected command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-content-protected", + "markdownDescription": "Denies the set_content_protected command without any pre-configured scope." + }, + { + "description": "Denies the set_cursor_grab command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-cursor-grab", + "markdownDescription": "Denies the set_cursor_grab command without any pre-configured scope." + }, + { + "description": "Denies the set_cursor_icon command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-cursor-icon", + "markdownDescription": "Denies the set_cursor_icon command without any pre-configured scope." + }, + { + "description": "Denies the set_cursor_position command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-cursor-position", + "markdownDescription": "Denies the set_cursor_position command without any pre-configured scope." + }, + { + "description": "Denies the set_cursor_visible command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-cursor-visible", + "markdownDescription": "Denies the set_cursor_visible command without any pre-configured scope." 
+ }, + { + "description": "Denies the set_decorations command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-decorations", + "markdownDescription": "Denies the set_decorations command without any pre-configured scope." + }, + { + "description": "Denies the set_effects command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-effects", + "markdownDescription": "Denies the set_effects command without any pre-configured scope." + }, + { + "description": "Denies the set_enabled command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-enabled", + "markdownDescription": "Denies the set_enabled command without any pre-configured scope." + }, + { + "description": "Denies the set_focus command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-focus", + "markdownDescription": "Denies the set_focus command without any pre-configured scope." + }, + { + "description": "Denies the set_focusable command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-focusable", + "markdownDescription": "Denies the set_focusable command without any pre-configured scope." + }, + { + "description": "Denies the set_fullscreen command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-fullscreen", + "markdownDescription": "Denies the set_fullscreen command without any pre-configured scope." + }, + { + "description": "Denies the set_icon command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-icon", + "markdownDescription": "Denies the set_icon command without any pre-configured scope." 
+ }, + { + "description": "Denies the set_ignore_cursor_events command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-ignore-cursor-events", + "markdownDescription": "Denies the set_ignore_cursor_events command without any pre-configured scope." + }, + { + "description": "Denies the set_max_size command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-max-size", + "markdownDescription": "Denies the set_max_size command without any pre-configured scope." + }, + { + "description": "Denies the set_maximizable command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-maximizable", + "markdownDescription": "Denies the set_maximizable command without any pre-configured scope." + }, + { + "description": "Denies the set_min_size command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-min-size", + "markdownDescription": "Denies the set_min_size command without any pre-configured scope." + }, + { + "description": "Denies the set_minimizable command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-minimizable", + "markdownDescription": "Denies the set_minimizable command without any pre-configured scope." + }, + { + "description": "Denies the set_overlay_icon command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-overlay-icon", + "markdownDescription": "Denies the set_overlay_icon command without any pre-configured scope." + }, + { + "description": "Denies the set_position command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-position", + "markdownDescription": "Denies the set_position command without any pre-configured scope." 
+ }, + { + "description": "Denies the set_progress_bar command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-progress-bar", + "markdownDescription": "Denies the set_progress_bar command without any pre-configured scope." + }, + { + "description": "Denies the set_resizable command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-resizable", + "markdownDescription": "Denies the set_resizable command without any pre-configured scope." + }, + { + "description": "Denies the set_shadow command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-shadow", + "markdownDescription": "Denies the set_shadow command without any pre-configured scope." + }, + { + "description": "Denies the set_simple_fullscreen command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-simple-fullscreen", + "markdownDescription": "Denies the set_simple_fullscreen command without any pre-configured scope." + }, + { + "description": "Denies the set_size command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-size", + "markdownDescription": "Denies the set_size command without any pre-configured scope." + }, + { + "description": "Denies the set_size_constraints command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-size-constraints", + "markdownDescription": "Denies the set_size_constraints command without any pre-configured scope." + }, + { + "description": "Denies the set_skip_taskbar command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-skip-taskbar", + "markdownDescription": "Denies the set_skip_taskbar command without any pre-configured scope." 
+ }, + { + "description": "Denies the set_theme command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-theme", + "markdownDescription": "Denies the set_theme command without any pre-configured scope." + }, + { + "description": "Denies the set_title command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-title", + "markdownDescription": "Denies the set_title command without any pre-configured scope." + }, + { + "description": "Denies the set_title_bar_style command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-title-bar-style", + "markdownDescription": "Denies the set_title_bar_style command without any pre-configured scope." + }, + { + "description": "Denies the set_visible_on_all_workspaces command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-set-visible-on-all-workspaces", + "markdownDescription": "Denies the set_visible_on_all_workspaces command without any pre-configured scope." + }, + { + "description": "Denies the show command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-show", + "markdownDescription": "Denies the show command without any pre-configured scope." + }, + { + "description": "Denies the start_dragging command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-start-dragging", + "markdownDescription": "Denies the start_dragging command without any pre-configured scope." + }, + { + "description": "Denies the start_resize_dragging command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-start-resize-dragging", + "markdownDescription": "Denies the start_resize_dragging command without any pre-configured scope." 
+ }, + { + "description": "Denies the theme command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-theme", + "markdownDescription": "Denies the theme command without any pre-configured scope." + }, + { + "description": "Denies the title command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-title", + "markdownDescription": "Denies the title command without any pre-configured scope." + }, + { + "description": "Denies the toggle_maximize command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-toggle-maximize", + "markdownDescription": "Denies the toggle_maximize command without any pre-configured scope." + }, + { + "description": "Denies the unmaximize command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-unmaximize", + "markdownDescription": "Denies the unmaximize command without any pre-configured scope." + }, + { + "description": "Denies the unminimize command without any pre-configured scope.", + "type": "string", + "const": "core:window:deny-unminimize", + "markdownDescription": "Denies the unminimize command without any pre-configured scope." + }, + { + "description": "This permission set configures which\nshell functionality is exposed by default.\n\n#### Granted Permissions\n\nIt allows to use the `open` functionality with a reasonable\nscope pre-configured. It will allow opening `http(s)://`,\n`tel:` and `mailto:` links.\n\n#### This default permission set includes:\n\n- `allow-open`", + "type": "string", + "const": "shell:default", + "markdownDescription": "This permission set configures which\nshell functionality is exposed by default.\n\n#### Granted Permissions\n\nIt allows to use the `open` functionality with a reasonable\nscope pre-configured. 
It will allow opening `http(s)://`,\n`tel:` and `mailto:` links.\n\n#### This default permission set includes:\n\n- `allow-open`" + }, + { + "description": "Enables the execute command without any pre-configured scope.", + "type": "string", + "const": "shell:allow-execute", + "markdownDescription": "Enables the execute command without any pre-configured scope." + }, + { + "description": "Enables the kill command without any pre-configured scope.", + "type": "string", + "const": "shell:allow-kill", + "markdownDescription": "Enables the kill command without any pre-configured scope." + }, + { + "description": "Enables the open command without any pre-configured scope.", + "type": "string", + "const": "shell:allow-open", + "markdownDescription": "Enables the open command without any pre-configured scope." + }, + { + "description": "Enables the spawn command without any pre-configured scope.", + "type": "string", + "const": "shell:allow-spawn", + "markdownDescription": "Enables the spawn command without any pre-configured scope." + }, + { + "description": "Enables the stdin_write command without any pre-configured scope.", + "type": "string", + "const": "shell:allow-stdin-write", + "markdownDescription": "Enables the stdin_write command without any pre-configured scope." + }, + { + "description": "Denies the execute command without any pre-configured scope.", + "type": "string", + "const": "shell:deny-execute", + "markdownDescription": "Denies the execute command without any pre-configured scope." + }, + { + "description": "Denies the kill command without any pre-configured scope.", + "type": "string", + "const": "shell:deny-kill", + "markdownDescription": "Denies the kill command without any pre-configured scope." + }, + { + "description": "Denies the open command without any pre-configured scope.", + "type": "string", + "const": "shell:deny-open", + "markdownDescription": "Denies the open command without any pre-configured scope." 
+ }, + { + "description": "Denies the spawn command without any pre-configured scope.", + "type": "string", + "const": "shell:deny-spawn", + "markdownDescription": "Denies the spawn command without any pre-configured scope." + }, + { + "description": "Denies the stdin_write command without any pre-configured scope.", + "type": "string", + "const": "shell:deny-stdin-write", + "markdownDescription": "Denies the stdin_write command without any pre-configured scope." + } + ] + }, + "Value": { + "description": "All supported ACL values.", + "anyOf": [ + { + "description": "Represents a null JSON value.", + "type": "null" + }, + { + "description": "Represents a [`bool`].", + "type": "boolean" + }, + { + "description": "Represents a valid ACL [`Number`].", + "allOf": [ + { + "$ref": "#/definitions/Number" + } + ] + }, + { + "description": "Represents a [`String`].", + "type": "string" + }, + { + "description": "Represents a list of other [`Value`]s.", + "type": "array", + "items": { + "$ref": "#/definitions/Value" + } + }, + { + "description": "Represents a map of [`String`] keys to [`Value`]s.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/Value" + } + } + ] + }, + "Number": { + "description": "A valid ACL number.", + "anyOf": [ + { + "description": "Represents an [`i64`].", + "type": "integer", + "format": "int64" + }, + { + "description": "Represents a [`f64`].", + "type": "number", + "format": "double" + } + ] + }, + "Target": { + "description": "Platform target.", + "oneOf": [ + { + "description": "MacOS.", + "type": "string", + "enum": [ + "macOS" + ] + }, + { + "description": "Windows.", + "type": "string", + "enum": [ + "windows" + ] + }, + { + "description": "Linux.", + "type": "string", + "enum": [ + "linux" + ] + }, + { + "description": "Android.", + "type": "string", + "enum": [ + "android" + ] + }, + { + "description": "iOS.", + "type": "string", + "enum": [ + "iOS" + ] + } + ] + }, + "ShellScopeEntryAllowedArg": { + "description": 
"A command argument allowed to be executed by the webview API.", + "anyOf": [ + { + "description": "A non-configurable argument that is passed to the command in the order it was specified.", + "type": "string" + }, + { + "description": "A variable that is set while calling the command from the webview API.", + "type": "object", + "required": [ + "validator" + ], + "properties": { + "raw": { + "description": "Marks the validator as a raw regex, meaning the plugin should not make any modification at runtime.\n\nThis means the regex will not match on the entire string by default, which might be exploited if your regex allow unexpected input to be considered valid. When using this option, make sure your regex is correct.", + "default": false, + "type": "boolean" + }, + "validator": { + "description": "[regex] validator to require passed values to conform to an expected input.\n\nThis will require the argument value passed to this variable to match the `validator` regex before it will be executed.\n\nThe regex string is by default surrounded by `^...$` to match the full string. For example the `https?://\\w+` regex would be registered as `^https?://\\w+$`.\n\n[regex]: ", + "type": "string" + } + }, + "additionalProperties": false + } + ] + }, + "ShellScopeEntryAllowedArgs": { + "description": "A set of command arguments allowed to be executed by the webview API.\n\nA value of `true` will allow any arguments to be passed to the command. `false` will disable all arguments. 
A list of [`ShellScopeEntryAllowedArg`] will set those arguments as the only valid arguments to be passed to the attached command configuration.", + "anyOf": [ + { + "description": "Use a simple boolean to allow all or disable all arguments to this command configuration.", + "type": "boolean" + }, + { + "description": "A specific set of [`ShellScopeEntryAllowedArg`] that are valid to call for the command configuration.", + "type": "array", + "items": { + "$ref": "#/definitions/ShellScopeEntryAllowedArg" + } + } + ] + } + } +} \ No newline at end of file diff --git a/frontend/src-tauri/icons/128x128.png b/frontend/src-tauri/icons/128x128.png new file mode 100644 index 000000000..85cb2d58a Binary files /dev/null and b/frontend/src-tauri/icons/128x128.png differ diff --git a/frontend/src-tauri/icons/128x128@2x.png b/frontend/src-tauri/icons/128x128@2x.png new file mode 100644 index 000000000..d6e26a006 Binary files /dev/null and b/frontend/src-tauri/icons/128x128@2x.png differ diff --git a/frontend/src-tauri/icons/32x32.png b/frontend/src-tauri/icons/32x32.png new file mode 100644 index 000000000..e997d0668 Binary files /dev/null and b/frontend/src-tauri/icons/32x32.png differ diff --git a/frontend/src-tauri/icons/64x64.png b/frontend/src-tauri/icons/64x64.png new file mode 100644 index 000000000..9a26d1971 Binary files /dev/null and b/frontend/src-tauri/icons/64x64.png differ diff --git a/frontend/src-tauri/icons/GENERATE_ICONS.md b/frontend/src-tauri/icons/GENERATE_ICONS.md new file mode 100644 index 000000000..658273c1d --- /dev/null +++ b/frontend/src-tauri/icons/GENERATE_ICONS.md @@ -0,0 +1,11 @@ +# PlotPilot 图标 + +请将应用图标文件放置在此目录: +- 32x32.png +- 128x128.png +- 128x128@2x.png (256x256) +- icon.icns (macOS) +- icon.ico (Windows) + +可以使用 tauri icon 命令从源图标自动生成所有尺寸: +npx tauri icon diff --git a/frontend/src-tauri/icons/Square107x107Logo.png b/frontend/src-tauri/icons/Square107x107Logo.png new file mode 100644 index 000000000..900812011 Binary files /dev/null and 
b/frontend/src-tauri/icons/Square107x107Logo.png differ diff --git a/frontend/src-tauri/icons/Square142x142Logo.png b/frontend/src-tauri/icons/Square142x142Logo.png new file mode 100644 index 000000000..301abb787 Binary files /dev/null and b/frontend/src-tauri/icons/Square142x142Logo.png differ diff --git a/frontend/src-tauri/icons/Square150x150Logo.png b/frontend/src-tauri/icons/Square150x150Logo.png new file mode 100644 index 000000000..dc1692d23 Binary files /dev/null and b/frontend/src-tauri/icons/Square150x150Logo.png differ diff --git a/frontend/src-tauri/icons/Square284x284Logo.png b/frontend/src-tauri/icons/Square284x284Logo.png new file mode 100644 index 000000000..a0e0bc29f Binary files /dev/null and b/frontend/src-tauri/icons/Square284x284Logo.png differ diff --git a/frontend/src-tauri/icons/Square30x30Logo.png b/frontend/src-tauri/icons/Square30x30Logo.png new file mode 100644 index 000000000..4a4be5bd6 Binary files /dev/null and b/frontend/src-tauri/icons/Square30x30Logo.png differ diff --git a/frontend/src-tauri/icons/Square310x310Logo.png b/frontend/src-tauri/icons/Square310x310Logo.png new file mode 100644 index 000000000..e95ff2d8e Binary files /dev/null and b/frontend/src-tauri/icons/Square310x310Logo.png differ diff --git a/frontend/src-tauri/icons/Square44x44Logo.png b/frontend/src-tauri/icons/Square44x44Logo.png new file mode 100644 index 000000000..ecdf63bc6 Binary files /dev/null and b/frontend/src-tauri/icons/Square44x44Logo.png differ diff --git a/frontend/src-tauri/icons/Square71x71Logo.png b/frontend/src-tauri/icons/Square71x71Logo.png new file mode 100644 index 000000000..599688794 Binary files /dev/null and b/frontend/src-tauri/icons/Square71x71Logo.png differ diff --git a/frontend/src-tauri/icons/Square89x89Logo.png b/frontend/src-tauri/icons/Square89x89Logo.png new file mode 100644 index 000000000..a615ecefa Binary files /dev/null and b/frontend/src-tauri/icons/Square89x89Logo.png differ diff --git 
a/frontend/src-tauri/icons/StoreLogo.png b/frontend/src-tauri/icons/StoreLogo.png new file mode 100644 index 000000000..c95341836 Binary files /dev/null and b/frontend/src-tauri/icons/StoreLogo.png differ diff --git a/frontend/src-tauri/icons/android/mipmap-anydpi-v26/ic_launcher.xml b/frontend/src-tauri/icons/android/mipmap-anydpi-v26/ic_launcher.xml new file mode 100644 index 000000000..2ffbf24b6 --- /dev/null +++ b/frontend/src-tauri/icons/android/mipmap-anydpi-v26/ic_launcher.xml @@ -0,0 +1,5 @@ + + + + + \ No newline at end of file diff --git a/frontend/src-tauri/icons/android/mipmap-hdpi/ic_launcher.png b/frontend/src-tauri/icons/android/mipmap-hdpi/ic_launcher.png new file mode 100644 index 000000000..ae291c545 Binary files /dev/null and b/frontend/src-tauri/icons/android/mipmap-hdpi/ic_launcher.png differ diff --git a/frontend/src-tauri/icons/android/mipmap-hdpi/ic_launcher_foreground.png b/frontend/src-tauri/icons/android/mipmap-hdpi/ic_launcher_foreground.png new file mode 100644 index 000000000..23c522a3b Binary files /dev/null and b/frontend/src-tauri/icons/android/mipmap-hdpi/ic_launcher_foreground.png differ diff --git a/frontend/src-tauri/icons/android/mipmap-hdpi/ic_launcher_round.png b/frontend/src-tauri/icons/android/mipmap-hdpi/ic_launcher_round.png new file mode 100644 index 000000000..bb352b97f Binary files /dev/null and b/frontend/src-tauri/icons/android/mipmap-hdpi/ic_launcher_round.png differ diff --git a/frontend/src-tauri/icons/android/mipmap-mdpi/ic_launcher.png b/frontend/src-tauri/icons/android/mipmap-mdpi/ic_launcher.png new file mode 100644 index 000000000..a8a3047d7 Binary files /dev/null and b/frontend/src-tauri/icons/android/mipmap-mdpi/ic_launcher.png differ diff --git a/frontend/src-tauri/icons/android/mipmap-mdpi/ic_launcher_foreground.png b/frontend/src-tauri/icons/android/mipmap-mdpi/ic_launcher_foreground.png new file mode 100644 index 000000000..181f78826 Binary files /dev/null and 
b/frontend/src-tauri/icons/android/mipmap-mdpi/ic_launcher_foreground.png differ diff --git a/frontend/src-tauri/icons/android/mipmap-mdpi/ic_launcher_round.png b/frontend/src-tauri/icons/android/mipmap-mdpi/ic_launcher_round.png new file mode 100644 index 000000000..0b3607bd8 Binary files /dev/null and b/frontend/src-tauri/icons/android/mipmap-mdpi/ic_launcher_round.png differ diff --git a/frontend/src-tauri/icons/android/mipmap-xhdpi/ic_launcher.png b/frontend/src-tauri/icons/android/mipmap-xhdpi/ic_launcher.png new file mode 100644 index 000000000..c9ece534c Binary files /dev/null and b/frontend/src-tauri/icons/android/mipmap-xhdpi/ic_launcher.png differ diff --git a/frontend/src-tauri/icons/android/mipmap-xhdpi/ic_launcher_foreground.png b/frontend/src-tauri/icons/android/mipmap-xhdpi/ic_launcher_foreground.png new file mode 100644 index 000000000..f435f0f62 Binary files /dev/null and b/frontend/src-tauri/icons/android/mipmap-xhdpi/ic_launcher_foreground.png differ diff --git a/frontend/src-tauri/icons/android/mipmap-xhdpi/ic_launcher_round.png b/frontend/src-tauri/icons/android/mipmap-xhdpi/ic_launcher_round.png new file mode 100644 index 000000000..1e0d61e2a Binary files /dev/null and b/frontend/src-tauri/icons/android/mipmap-xhdpi/ic_launcher_round.png differ diff --git a/frontend/src-tauri/icons/android/mipmap-xxhdpi/ic_launcher.png b/frontend/src-tauri/icons/android/mipmap-xxhdpi/ic_launcher.png new file mode 100644 index 000000000..82e28f288 Binary files /dev/null and b/frontend/src-tauri/icons/android/mipmap-xxhdpi/ic_launcher.png differ diff --git a/frontend/src-tauri/icons/android/mipmap-xxhdpi/ic_launcher_foreground.png b/frontend/src-tauri/icons/android/mipmap-xxhdpi/ic_launcher_foreground.png new file mode 100644 index 000000000..86b09e1e4 Binary files /dev/null and b/frontend/src-tauri/icons/android/mipmap-xxhdpi/ic_launcher_foreground.png differ diff --git a/frontend/src-tauri/icons/android/mipmap-xxhdpi/ic_launcher_round.png 
b/frontend/src-tauri/icons/android/mipmap-xxhdpi/ic_launcher_round.png new file mode 100644 index 000000000..3a4670bce Binary files /dev/null and b/frontend/src-tauri/icons/android/mipmap-xxhdpi/ic_launcher_round.png differ diff --git a/frontend/src-tauri/icons/android/mipmap-xxxhdpi/ic_launcher.png b/frontend/src-tauri/icons/android/mipmap-xxxhdpi/ic_launcher.png new file mode 100644 index 000000000..e7a4957cf Binary files /dev/null and b/frontend/src-tauri/icons/android/mipmap-xxxhdpi/ic_launcher.png differ diff --git a/frontend/src-tauri/icons/android/mipmap-xxxhdpi/ic_launcher_foreground.png b/frontend/src-tauri/icons/android/mipmap-xxxhdpi/ic_launcher_foreground.png new file mode 100644 index 000000000..33d427d00 Binary files /dev/null and b/frontend/src-tauri/icons/android/mipmap-xxxhdpi/ic_launcher_foreground.png differ diff --git a/frontend/src-tauri/icons/android/mipmap-xxxhdpi/ic_launcher_round.png b/frontend/src-tauri/icons/android/mipmap-xxxhdpi/ic_launcher_round.png new file mode 100644 index 000000000..8812ea5a5 Binary files /dev/null and b/frontend/src-tauri/icons/android/mipmap-xxxhdpi/ic_launcher_round.png differ diff --git a/frontend/src-tauri/icons/android/values/ic_launcher_background.xml b/frontend/src-tauri/icons/android/values/ic_launcher_background.xml new file mode 100644 index 000000000..ea9c223a6 --- /dev/null +++ b/frontend/src-tauri/icons/android/values/ic_launcher_background.xml @@ -0,0 +1,4 @@ + + + #fff + \ No newline at end of file diff --git a/frontend/src-tauri/icons/icon-source-1024.png b/frontend/src-tauri/icons/icon-source-1024.png new file mode 100644 index 000000000..32afb80b5 Binary files /dev/null and b/frontend/src-tauri/icons/icon-source-1024.png differ diff --git a/frontend/src-tauri/icons/icon-source.png b/frontend/src-tauri/icons/icon-source.png new file mode 100644 index 000000000..bcd45c880 Binary files /dev/null and b/frontend/src-tauri/icons/icon-source.png differ diff --git a/frontend/src-tauri/icons/icon.icns 
b/frontend/src-tauri/icons/icon.icns new file mode 100644 index 000000000..778f49796 Binary files /dev/null and b/frontend/src-tauri/icons/icon.icns differ diff --git a/frontend/src-tauri/icons/icon.ico b/frontend/src-tauri/icons/icon.ico new file mode 100644 index 000000000..f62c1e3d3 Binary files /dev/null and b/frontend/src-tauri/icons/icon.ico differ diff --git a/frontend/src-tauri/icons/icon.png b/frontend/src-tauri/icons/icon.png new file mode 100644 index 000000000..817295818 Binary files /dev/null and b/frontend/src-tauri/icons/icon.png differ diff --git a/frontend/src-tauri/icons/ios/AppIcon-20x20@1x.png b/frontend/src-tauri/icons/ios/AppIcon-20x20@1x.png new file mode 100644 index 000000000..831530fc6 Binary files /dev/null and b/frontend/src-tauri/icons/ios/AppIcon-20x20@1x.png differ diff --git a/frontend/src-tauri/icons/ios/AppIcon-20x20@2x-1.png b/frontend/src-tauri/icons/ios/AppIcon-20x20@2x-1.png new file mode 100644 index 000000000..3a03db3e3 Binary files /dev/null and b/frontend/src-tauri/icons/ios/AppIcon-20x20@2x-1.png differ diff --git a/frontend/src-tauri/icons/ios/AppIcon-20x20@2x.png b/frontend/src-tauri/icons/ios/AppIcon-20x20@2x.png new file mode 100644 index 000000000..3a03db3e3 Binary files /dev/null and b/frontend/src-tauri/icons/ios/AppIcon-20x20@2x.png differ diff --git a/frontend/src-tauri/icons/ios/AppIcon-20x20@3x.png b/frontend/src-tauri/icons/ios/AppIcon-20x20@3x.png new file mode 100644 index 000000000..a89689426 Binary files /dev/null and b/frontend/src-tauri/icons/ios/AppIcon-20x20@3x.png differ diff --git a/frontend/src-tauri/icons/ios/AppIcon-29x29@1x.png b/frontend/src-tauri/icons/ios/AppIcon-29x29@1x.png new file mode 100644 index 000000000..0a0ae3a53 Binary files /dev/null and b/frontend/src-tauri/icons/ios/AppIcon-29x29@1x.png differ diff --git a/frontend/src-tauri/icons/ios/AppIcon-29x29@2x-1.png b/frontend/src-tauri/icons/ios/AppIcon-29x29@2x-1.png new file mode 100644 index 000000000..2fd63cec5 Binary files /dev/null 
and b/frontend/src-tauri/icons/ios/AppIcon-29x29@2x-1.png differ diff --git a/frontend/src-tauri/icons/ios/AppIcon-29x29@2x.png b/frontend/src-tauri/icons/ios/AppIcon-29x29@2x.png new file mode 100644 index 000000000..2fd63cec5 Binary files /dev/null and b/frontend/src-tauri/icons/ios/AppIcon-29x29@2x.png differ diff --git a/frontend/src-tauri/icons/ios/AppIcon-29x29@3x.png b/frontend/src-tauri/icons/ios/AppIcon-29x29@3x.png new file mode 100644 index 000000000..aacd544e0 Binary files /dev/null and b/frontend/src-tauri/icons/ios/AppIcon-29x29@3x.png differ diff --git a/frontend/src-tauri/icons/ios/AppIcon-40x40@1x.png b/frontend/src-tauri/icons/ios/AppIcon-40x40@1x.png new file mode 100644 index 000000000..3a03db3e3 Binary files /dev/null and b/frontend/src-tauri/icons/ios/AppIcon-40x40@1x.png differ diff --git a/frontend/src-tauri/icons/ios/AppIcon-40x40@2x-1.png b/frontend/src-tauri/icons/ios/AppIcon-40x40@2x-1.png new file mode 100644 index 000000000..293119d67 Binary files /dev/null and b/frontend/src-tauri/icons/ios/AppIcon-40x40@2x-1.png differ diff --git a/frontend/src-tauri/icons/ios/AppIcon-40x40@2x.png b/frontend/src-tauri/icons/ios/AppIcon-40x40@2x.png new file mode 100644 index 000000000..293119d67 Binary files /dev/null and b/frontend/src-tauri/icons/ios/AppIcon-40x40@2x.png differ diff --git a/frontend/src-tauri/icons/ios/AppIcon-40x40@3x.png b/frontend/src-tauri/icons/ios/AppIcon-40x40@3x.png new file mode 100644 index 000000000..fee00bb56 Binary files /dev/null and b/frontend/src-tauri/icons/ios/AppIcon-40x40@3x.png differ diff --git a/frontend/src-tauri/icons/ios/AppIcon-512@2x.png b/frontend/src-tauri/icons/ios/AppIcon-512@2x.png new file mode 100644 index 000000000..fd3ef907e Binary files /dev/null and b/frontend/src-tauri/icons/ios/AppIcon-512@2x.png differ diff --git a/frontend/src-tauri/icons/ios/AppIcon-60x60@2x.png b/frontend/src-tauri/icons/ios/AppIcon-60x60@2x.png new file mode 100644 index 000000000..fee00bb56 Binary files /dev/null and 
b/frontend/src-tauri/icons/ios/AppIcon-60x60@2x.png differ diff --git a/frontend/src-tauri/icons/ios/AppIcon-60x60@3x.png b/frontend/src-tauri/icons/ios/AppIcon-60x60@3x.png new file mode 100644 index 000000000..ddc877d73 Binary files /dev/null and b/frontend/src-tauri/icons/ios/AppIcon-60x60@3x.png differ diff --git a/frontend/src-tauri/icons/ios/AppIcon-76x76@1x.png b/frontend/src-tauri/icons/ios/AppIcon-76x76@1x.png new file mode 100644 index 000000000..3c24bb0e3 Binary files /dev/null and b/frontend/src-tauri/icons/ios/AppIcon-76x76@1x.png differ diff --git a/frontend/src-tauri/icons/ios/AppIcon-76x76@2x.png b/frontend/src-tauri/icons/ios/AppIcon-76x76@2x.png new file mode 100644 index 000000000..ef29316c5 Binary files /dev/null and b/frontend/src-tauri/icons/ios/AppIcon-76x76@2x.png differ diff --git a/frontend/src-tauri/icons/ios/AppIcon-83.5x83.5@2x.png b/frontend/src-tauri/icons/ios/AppIcon-83.5x83.5@2x.png new file mode 100644 index 000000000..0d1ecdb89 Binary files /dev/null and b/frontend/src-tauri/icons/ios/AppIcon-83.5x83.5@2x.png differ diff --git a/frontend/src-tauri/src/backend.rs b/frontend/src-tauri/src/backend.rs new file mode 100644 index 000000000..f9a288cc9 --- /dev/null +++ b/frontend/src-tauri/src/backend.rs @@ -0,0 +1,640 @@ +//! FastAPI 后端进程管理器 +//! +//! 职责: +//! 1. 优先启动 PyInstaller 冻结产物 `plotpilot-backend.exe`(发布推荐) +//! 2. 否则回退:内嵌 / venv / 系统 Python + `python -m uvicorn`(开发) +//! 3. 健康检查轮询,等待 HTTP 就绪 +//! 4. 
管理子进程生命周期(退出时自动清理) + +use std::path::PathBuf; +use std::process::{Child, Command, Stdio}; +use std::sync::Mutex; +use std::thread; +use std::time::{Duration, Instant}; +use tauri::path::BaseDirectory; +use tauri::{AppHandle, Manager}; +use ureq::Agent; + +#[cfg(target_os = "windows")] +use std::os::windows::io::AsRawHandle; +#[cfg(target_os = "windows")] +use std::os::windows::process::CommandExt; +#[cfg(target_os = "windows")] +use win32job::Job; + +/// 后端管理器 +pub struct BackendManager { + /// 预留:后续若需从 Rust 侧发事件到前端会用到 + pub(crate) _app_handle: AppHandle, + child: Mutex>, + port: Mutex, + pub(crate) project_root: PathBuf, + /// Windows:子进程纳入 Job,句柄关闭时随 JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE 一并结束 + #[cfg(target_os = "windows")] + job_kill_tree: Mutex>, +} + +impl BackendManager { + pub fn new(app_handle: AppHandle) -> Self { + // 确定项目根目录(Tauri exe 所在位置的上级或 resource 目录) + let project_root = Self::detect_project_root(&app_handle); + + Self { + _app_handle: app_handle, + child: Mutex::new(None), + port: Mutex::new(0), + project_root, + #[cfg(target_os = "windows")] + job_kill_tree: Mutex::new(None), + } + } + + /// 是否在启动 Python 时注入 AITEXT_PROD_DATA_DIR(release 默认开启;debug 需设 AITEXT_FORCE_PROD_DATA=1) + fn should_inject_prod_data_dir() -> bool { + if cfg!(debug_assertions) { + std::env::var("AITEXT_FORCE_PROD_DATA") + .map(|v| v == "1" || v.eq_ignore_ascii_case("true")) + .unwrap_or(false) + } else { + true + } + } + + /// Tauri 2:用户可写应用数据目录下的 `data/`(与 Python `application/paths.py` 约定一致) + fn resolve_prod_data_dir(handle: &AppHandle) -> Result { + let base = handle + .path() + .app_data_dir() + .map_err(|e| format!("无法解析 app_data_dir: {}", e))?; + let data = base.join("data"); + std::fs::create_dir_all(&data) + .map_err(|e| format!("无法创建数据目录 {}: {}", data.display(), e))?; + Ok(data) + } + + fn inject_prod_data_env(cmd: &mut Command, handle: &AppHandle) -> Result<(), String> { + if !Self::should_inject_prod_data_dir() { + return Ok(()); + } + let path = 
Self::resolve_prod_data_dir(handle)?; + log::info!( + "📁 注入 {}={}", + "AITEXT_PROD_DATA_DIR", + path.display() + ); + cmd.env("AITEXT_PROD_DATA_DIR", path.as_os_str()); + + let logs_dir = path.join("logs"); + std::fs::create_dir_all(&logs_dir) + .map_err(|e| format!("无法创建日志目录 {}: {}", logs_dir.display(), e))?; + let log_file = logs_dir.join("aitext.log"); + cmd.env("LOG_FILE", log_file.as_os_str()); + + Ok(()) + } + + /// PyInstaller onedir:`$RESOURCE/plotpilot-backend/plotpilot-backend.exe`(见 tauri.conf resources 映射) + fn find_frozen_backend_exe(handle: &AppHandle) -> Option { + // 方案 1a:与 bundle.resources 映射一致(推荐;安装包与 tauri build 均可用) + if let Ok(p) = handle.path().resolve( + "plotpilot-backend/plotpilot-backend.exe", + BaseDirectory::Resource, + ) { + if p.is_file() { + log::info!("📦 后端路径 (resolve Resource): {}", p.display()); + return Some(p); + } + } + + // 方案 1b:旧配置曾用数组 + `../**/*`,打包后实际路径经 Tauri 归一化;用同一字符串解析 + if let Ok(p) = handle.path().resolve( + "../../out/tauri/plotpilot-backend/plotpilot-backend.exe", + BaseDirectory::Resource, + ) { + if p.is_file() { + log::info!("📦 后端路径 (resolve legacy rel): {}", p.display()); + return Some(p); + } + } + + // 方案 1c:resource_dir 下直接探测(手工拷贝或扁平布局) + if let Ok(rd) = handle.path().resource_dir() { + let nested = rd.join("plotpilot-backend").join("plotpilot-backend.exe"); + if nested.is_file() { + log::info!("📦 后端路径 (resource_dir nested): {}", nested.display()); + return Some(nested); + } + let flat = rd.join("plotpilot-backend.exe"); + if flat.is_file() { + log::info!("📦 后端路径 (resource_dir flat): {}", flat.display()); + return Some(flat); + } + } + + // 方案 2:从 exe 父目录逐级向上找 out/tauri/...(裸 cargo build / 未拷贝 resources 时) + if let Some(exe_path) = std::env::current_exe().ok().and_then(|p| p.canonicalize().ok()) { + let mut dir = exe_path.parent().map(PathBuf::from); + for _ in 0..32 { + let Some(ref d) = dir else { break }; + let candidate = d + .join("out") + .join("tauri") + .join("plotpilot-backend") + 
.join("plotpilot-backend.exe"); + if candidate.is_file() { + log::info!("📦 后端路径 (dev walk-up): {}", candidate.display()); + return Some(candidate); + } + dir = d.parent().map(PathBuf::from); + } + + // 方案 3:与 plotpilot.exe 同目录下的 plotpilot-backend/(便携解压布局) + if let Some(parent) = exe_path.parent() { + let sibling = parent + .join("plotpilot-backend") + .join("plotpilot-backend.exe"); + if sibling.is_file() { + log::info!("📦 后端路径 (sibling dir): {}", sibling.display()); + return Some(sibling); + } + } + } + + None + } + + /// 检测项目根目录(松散源码)或资源根(仅冻结后端时无 main.py) + fn detect_project_root(handle: &AppHandle) -> PathBuf { + if let Ok(resource_dir) = handle.path().resource_dir() { + if Self::find_frozen_backend_exe(handle).is_some() { + log::info!("📂 资源根目录(冻结后端): {}", resource_dir.display()); + return resource_dir; + } + for candidate in [ + resource_dir.join("../../../"), + resource_dir.join("../../"), + resource_dir.clone(), + ] { + if candidate.join("interfaces/main.py").exists() { + log::info!("📂 项目根目录: {}", candidate.display()); + return candidate.canonicalize().unwrap_or(candidate); + } + } + } + + handle + .path() + .resource_dir() + .unwrap_or_else(|_| PathBuf::from(".")) + } + + /// 从指定端口开始,找到一个可用端口 + fn pick_free_port(start: u16) -> Option { + (start..start + 100).find(|&port| { + std::net::TcpListener::bind(("127.0.0.1", port)).is_ok() + }) + } + + /// 查找 Python 解释器,优先使用内嵌 Python + pub(crate) fn find_python(&self) -> Option { + // 优先级:已解压的内嵌 Python > 资源目录中的内嵌 Python > 虚拟环境 > 系统 PATH + + // 1) 检查项目目录下的内嵌 Python + let embedded = self.project_root.join("tools/python_embed/python.exe"); + if embedded.exists() { + log::info!("🐍 使用项目目录下内嵌 Python: {}", embedded.display()); + return Some(embedded); + } + + // 2) 尝试从资源目录获取内嵌 Python + if let Ok(resource_dir) = self._app_handle.path().resource_dir() { + let resource_python = resource_dir.join("python_embed/python.exe"); + if resource_python.exists() { + // 复制到项目目录 + if let Err(e) = 
self.extract_embedded_python(&resource_dir) { + log::warn!("从资源目录提取内嵌 Python 失败: {}", e); + } else { + if embedded.exists() { + log::info!("🐍 使用资源目录内嵌 Python: {}", embedded.display()); + return Some(embedded); + } + } + } + } + + // 3) 尝试从资源目录的 zip 解压 + if let Ok(resource_dir) = self._app_handle.path().resource_dir() { + let zip_path = resource_dir.join("python-3.11.9-embed-amd64.zip"); + if zip_path.exists() { + log::info!("📦 发现内嵌 Python zip,正在解压..."); + if let Err(e) = self.extract_python_from_zip(&zip_path, &embedded) { + log::warn!("解压内嵌 Python 失败: {}", e); + } else { + if embedded.exists() { + log::info!("🐍 使用内嵌 Python (从zip解压): {}", embedded.display()); + return Some(embedded); + } + } + } + } + + // 4) 虚拟环境 + let venv = self.project_root.join(".venv/Scripts/python.exe"); + if venv.exists() { + log::info!("🐍 使用虚拟环境 Python: {}", venv.display()); + return Some(venv); + } + + // 5) 系统 PATH + if let Ok(path) = which::which("python") { + log::info!("🐍 使用系统 Python: {}", path.display()); + return Some(path); + } + if let Ok(path) = which::which("python3") { + log::info!("🐍 使用系统 python3: {}", path.display()); + return Some(path); + } + + None + } + + /// 启动后端并等待就绪(重启后端等需在同一线程内连续完成的场景) + pub fn start_and_wait(&mut self, timeout_secs: u64) -> Result { + let port = self.spawn_only()?; + Self::wait_for_ready(port, timeout_secs)?; + Ok(port) + } + + /// 仅启动子进程并写入端口(快速返回,不阻塞健康检查)。 + /// 用于在独立线程中先释放 `Mutex`,避免与关窗逻辑长时间争锁。 + pub fn spawn_only(&mut self) -> Result { + let port = Self::pick_free_port(8005).ok_or("无法分配空闲端口")?; + log::info!("🔌 分配端口: {}", port); + + let frozen = Self::find_frozen_backend_exe(&self._app_handle); + + match &frozen { + Some(p) => eprintln!("[DEBUG] ✅ 找到冻结后端: {}", p.display()), + None => eprintln!("[DEBUG] ❌ 未找到冻结后端!将尝试 Python 解释器路线"), + } + + let mut cmd = if let Some(ref exe) = frozen { + let work_dir = exe + .parent() + .ok_or_else(|| "冻结后端路径无效".to_string())?; + log::info!("📦 启动冻结后端: {}", exe.display()); + let mut c = Command::new(exe); + 
c.arg(port.to_string()) + .current_dir(work_dir) + .env("HF_HUB_OFFLINE", "1") + .env("TRANSFORMERS_OFFLINE", "1") + .env("HF_DATASETS_OFFLINE", "1") + .stdout(Stdio::piped()) + .stderr(Stdio::piped()); + #[cfg(target_os = "windows")] + { + c.creation_flags(windows_subsystem_flag()); + } + c + } else { + let python = self.find_python().ok_or_else(|| { + "未找到 plotpilot-backend.exe,也未找到 Python。发布构建请运行 scripts/build_backend_pyinstaller.py;开发请安装 Python 3.10+".to_string() + })?; + log::info!("🐍 启动 uvicorn(解释器): {}", python.display()); + let mut c = Command::new(&python); + c.arg("-m") + .arg("uvicorn") + .arg("interfaces.main:app") + .arg("--host") + .arg("127.0.0.1") + .arg("--port") + .arg(port.to_string()) + .arg("--log-level") + .arg("info") + .current_dir(&self.project_root) + .env("PYTHONIOENCODING", "utf-8") + .env("PYTHONUNBUFFERED", "1") + .env("HF_HUB_OFFLINE", "1") + .env("TRANSFORMERS_OFFLINE", "1") + .env("HF_DATASETS_OFFLINE", "1") + .stdout(Stdio::piped()) + .stderr(Stdio::piped()); + #[cfg(target_os = "windows")] + { + c.creation_flags(windows_subsystem_flag()); + } + c + }; + + Self::inject_prod_data_env(&mut cmd, &self._app_handle)?; + + let child = cmd + .spawn() + .map_err(|e| format!("启动后端失败: {}", e))?; + + #[cfg(target_os = "windows")] + { + let job = Job::create().map_err(|e| format!("创建 Job Object 失败: {}", e))?; + let mut info = job + .query_extended_limit_info() + .map_err(|e| format!("Job 查询限制信息失败: {}", e))?; + info.limit_kill_on_job_close(); + job.set_extended_limit_info(&mut info) + .map_err(|e| format!("Job 设置限制失败: {}", e))?; + let h = child.as_raw_handle() as isize; + job.assign_process(h) + .map_err(|e| format!("Job 绑定子进程失败: {}", e))?; + *self.job_kill_tree.lock().unwrap() = Some(job); + log::info!("🔗 已将后端子进程纳入 Job(KILL_ON_JOB_CLOSE)"); + } + + let pid = child.id(); + log::info!("▶️ 后端子进程已启动 (PID={})", pid); + + *self.child.lock().unwrap() = Some(child); + *self.port.lock().unwrap() = port; + + Ok(port) + } + + /// 轮询等待后端就绪(不持有 
`BackendManager` 的独占锁,可与关窗路径并行) + pub fn wait_for_ready(port: u16, timeout_secs: u64) -> Result<(), String> { + let health_url = format!("http://127.0.0.1:{}/health", port); + let deadline = std::time::Instant::now() + Duration::from_secs(timeout_secs); + + // 第一阶段:等端口监听 + log::info!("⏳ 等待后端端口 {} 监听...", port); + loop { + if std::time::Instant::now() > deadline { + return Err(format!("超时:后端在 {}s 内未开始监听端口", timeout_secs)); + } + if Self::is_port_listening(port) { + break; + } + thread::sleep(Duration::from_millis(400)); + } + + // 第二阶段:等 HTTP 响应(ureq 3:超时在 Agent 上配置) + log::info!("⏳ 等待 HTTP 健康检查..."); + let agent: Agent = Agent::config_builder() + .timeout_global(Some(Duration::from_secs(2))) + .build() + .into(); + loop { + if std::time::Instant::now() > deadline { + return Err(format!( + "超时:端口已监听但 HTTP 无响应 ({}s)", + timeout_secs + )); + } + + match agent.get(&health_url).call() { + Ok(resp) if resp.status().as_u16() == 200 => { + log::info!("✅ 后端健康检查通过!"); + return Ok(()); + } + _ => { + thread::sleep(Duration::from_millis(400)); + } + } + } + } + + /// 检查端口是否在监听 + fn is_port_listening(port: u16) -> bool { + std::net::TcpStream::connect_timeout( + &format!("127.0.0.1:{}", port).parse().unwrap(), + Duration::from_millis(300), + ) + .is_ok() + } + + /// 获取当前端口号 + pub fn get_port(&self) -> u16 { + *self.port.lock().unwrap() + } + + /// 获取运行状态 + pub fn is_running(&self) -> bool { + let mut guard = self.child.lock().unwrap(); + match guard.as_mut() { + None => false, + Some(child) => match child.try_wait() { + Ok(None) => true, // 还在运行 + Ok(Some(_)) => false, // 已退出 + Err(_) => true, // 无法判断,假设还在运行 + }, + } + } + + /// 从资源目录提取内嵌 Python + fn extract_embedded_python(&self, resource_dir: &PathBuf) -> Result<(), String> { + let source_dir = resource_dir.join("python_embed"); + let target_dir = self.project_root.join("tools/python_embed"); + + log::info!("📂 从资源目录复制内嵌 Python: {} -> {}", source_dir.display(), target_dir.display()); + + // 确保目标目录存在 + if let Some(parent) = 
target_dir.parent() { + std::fs::create_dir_all(parent).map_err(|e| format!("创建目录失败: {}", e))?; + } + + // 复制整个目录 + self.copy_directory(&source_dir, &target_dir)?; + + Ok(()) + } + + /// 从 zip 文件解压内嵌 Python + pub(crate) fn extract_python_from_zip( + &self, + zip_path: &PathBuf, + target_python: &PathBuf, + ) -> Result<(), String> { + log::info!("📦 从 zip 解压内嵌 Python: {}", zip_path.display()); + + // 确保目标目录存在 + let target_dir = target_python.parent().unwrap(); + if let Some(parent) = target_dir.parent() { + std::fs::create_dir_all(parent).map_err(|e| format!("创建目录失败: {}", e))?; + } + + // 解压 zip 文件 + let zip_content = std::fs::read(zip_path) + .map_err(|e| format!("读取 zip 文件失败: {}", e))?; + + let mut archive = zip::ZipArchive::new(std::io::Cursor::new(zip_content)) + .map_err(|e| format!("打开 zip 文件失败: {}", e))?; + + for i in 0..archive.len() { + let mut file = archive.by_index(i) + .map_err(|e| format!("读取 zip 条目失败: {}", e))?; + + if file.is_dir() { + continue; + } + + let outpath = target_dir.join(file.name()); + if let Some(parent) = outpath.parent() { + std::fs::create_dir_all(parent) + .map_err(|e| format!("创建目录失败: {}", e))?; + } + + let mut outfile = std::fs::File::create(&outpath) + .map_err(|e| format!("创建文件失败: {}", e))?; + + std::io::copy(&mut file, &mut outfile) + .map_err(|e| format!("复制文件内容失败: {}", e))?; + } + + log::info!("✅ 内嵌 Python 解压完成"); + Ok(()) + } + + /// 递归复制目录 + fn copy_directory(&self, src: &PathBuf, dst: &PathBuf) -> Result<(), String> { + if !src.exists() { + return Err(format!("源目录不存在: {}", src.display())); + } + + std::fs::create_dir_all(dst).map_err(|e| format!("创建目标目录失败: {}", e))?; + + for entry in std::fs::read_dir(src).map_err(|e| format!("读取源目录失败: {}", e))? 
{ + let entry = entry.map_err(|e| format!("读取目录条目失败: {}", e))?; + let ty = entry.file_type().map_err(|e| format!("获取文件类型失败: {}", e))?; + let src_path = entry.path(); + let dst_path = dst.join(entry.file_name()); + + if ty.is_dir() { + self.copy_directory(&src_path, &dst_path)?; + } else { + std::fs::copy(&src_path, &dst_path) + .map_err(|e| format!("复制文件失败 {} -> {}: {}", src_path.display(), dst_path.display(), e))?; + } + } + + Ok(()) + } + + /// 优雅关闭:先 POST `/internal/shutdown`,等待子进程退出;超时后 [`Self::terminate`]。 + pub fn graceful_shutdown(&self, timeout: Duration) { + let port = *self.port.lock().unwrap(); + if port > 0 && self.is_running() { + let url = format!("http://127.0.0.1:{}/internal/shutdown", port); + let agent: Agent = Agent::config_builder() + .timeout_global(Some(Duration::from_secs(2))) + .build() + .into(); + match agent.post(&url).send_empty() { + Ok(resp) => { + log::info!( + "📤 已请求后端优雅关闭 (HTTP {})", + resp.status().as_u16() + ); + } + Err(e) => { + log::warn!("优雅关闭 POST 失败(将等待超时后强杀): {}", e); + } + } + } else { + log::info!("跳过后端优雅关闭(未运行或未分配端口)"); + } + + let deadline = Instant::now() + timeout; + loop { + { + let mut guard = self.child.lock().unwrap(); + match guard.as_mut() { + None => { + log::info!("后端子进程已释放"); + return; + } + Some(child) => match child.try_wait() { + Ok(Some(status)) => { + log::info!("✅ 后端已退出: {:?}", status); + guard.take(); + return; + } + Ok(None) => {} + Err(e) => log::warn!("try_wait: {}", e), + }, + } + } + if Instant::now() > deadline { + break; + } + thread::sleep(Duration::from_millis(50)); + } + + log::warn!("优雅关闭超时,执行强杀"); + self.terminate_hard(); + } + + fn terminate_hard(&self) { + #[cfg(target_os = "windows")] + { + // 方法1: 使用 Job Object 杀死进程树 + let job = self.job_kill_tree.lock().unwrap().take(); + drop(job); + + // 方法2: 使用 taskkill 强制杀死整个进程树(双保险) + let guard = self.child.lock().unwrap(); + if let Some(child) = guard.as_ref() { + let pid = child.id(); + log::info!("🛑 使用 taskkill 强制终止进程树 (PID={})...", pid); + + 
// /F 强制终止 /T 包含子进程 + let kill_result = Command::new("taskkill") + .args(["/F", "/T", "/PID", &pid.to_string()]) + .output(); + + match kill_result { + Ok(output) => { + if output.status.success() { + log::info!("✅ taskkill 成功终止进程树"); + } else { + log::warn!( + "taskkill 返回非零状态: {}", + String::from_utf8_lossy(&output.stderr) + ); + } + } + Err(e) => { + log::warn!("taskkill 执行失败: {}", e); + } + } + } + } + + let mut guard = self.child.lock().unwrap(); + if let Some(mut child) = guard.take() { + log::info!("🛑 正在强杀后端进程..."); + let _ = child.kill(); + let _ = child.wait(); + log::info!("✅ 后端进程已终止"); + } + } + + /// 立即强杀子进程(重启后端等场景)。 + pub fn terminate(&self) { + self.terminate_hard(); + } +} + +impl Drop for BackendManager { + fn drop(&mut self) { + self.terminate_hard(); + } +} + +/// Windows 上隐藏子进程控制台窗口的 flag +#[cfg(target_os = "windows")] +fn windows_subsystem_flag() -> u32 { + const CREATE_NO_WINDOW: u32 = 0x08000000; + CREATE_NO_WINDOW +} + +#[cfg(not(target_os = "windows"))] +fn windows_subsystem_flag() -> u32 { + 0 +} diff --git a/frontend/src-tauri/src/commands.rs b/frontend/src-tauri/src/commands.rs new file mode 100644 index 000000000..be4fd3b9d --- /dev/null +++ b/frontend/src-tauri/src/commands.rs @@ -0,0 +1,171 @@ +//! Tauri IPC 命令 —— 前端通过 invoke 调用这些函数 +//! +//! 这些命令暴露给 Vue3 前端,用于: +//! - 查询后端端口 +//! - 查询后端状态 +//! - 重启后端 +//! 
- 打开外部浏览器 + +use crate::backend::BackendManager; +use tauri::{Manager, State}; +use std::sync::Mutex; + +/// 获取后端端口号(前端需要这个来构造 API 请求地址) +#[tauri::command] +pub fn get_backend_port(port: State<'_, Mutex>) -> Result { + let p = port.lock().map_err(|e| e.to_string())?; + Ok(*p) +} + +/// 获取后端运行状态 +#[tauri::command] +pub fn get_backend_status( + manager: State<'_, Mutex>, +) -> Result { + let mgr = manager.lock().map_err(|e| e.to_string())?; + Ok(BackendStatus { + running: mgr.is_running(), + port: mgr.get_port(), + }) +} + +/// 重启后端 +#[tauri::command] +pub async fn restart_backend( + manager: State<'_, Mutex>, + port_state: State<'_, Mutex>, +) -> Result { + // 先停旧的 + { + let mgr = manager.lock().map_err(|e| e.to_string())?; + mgr.terminate(); + } + // 给一点时间释放端口 + tokio::time::sleep(std::time::Duration::from_secs(2)).await; + + // 再启动新的 + let mut mgr = manager.lock().map_err(|e| e.to_string())?; + match mgr.start_and_wait(120) { + Ok(new_port) => { + *port_state.lock().unwrap() = new_port; + Ok(new_port) + } + Err(e) => Err(e), + } +} + +/// 在系统浏览器中打开 URL +#[tauri::command] +pub fn open_in_browser(url: String) -> Result<(), String> { + webbrowser::open(&url).map_err(|e| format!("打开浏览器失败: {}", e)) +} + +/// 运行安装流程 +#[tauri::command] +pub fn run_installation( + manager: State<'_, Mutex>, +) -> Result { + let mgr = manager.lock().map_err(|e| e.to_string())?; + + // 检查是否需要安装 + let python_path = mgr.find_python(); + let needs_install = python_path.is_none(); + + // 尝试提取内嵌 Python + let embedded_extracted = if needs_install { + if let Ok(resource_dir) = mgr._app_handle.path().resource_dir() { + let zip_path = resource_dir.join("python-3.11.9-embed-amd64.zip"); + if zip_path.exists() { + let target_python = mgr.project_root.join("tools/python_embed/python.exe"); + mgr.extract_python_from_zip(&zip_path, &target_python).is_ok() + } else { + false + } + } else { + false + } + } else { + true + }; + + Ok(InstallationStatus { + needs_install: !embedded_extracted, + 
python_available: python_path.is_some() || embedded_extracted, + embedded_extracted, + python_path: python_path.map(|p| p.to_string_lossy().to_string()), + }) +} + +/// 检查环境状态 +#[tauri::command] +pub fn check_environment( + manager: State<'_, Mutex>, +) -> Result { + let mgr = manager.lock().map_err(|e| e.to_string())?; + + let python_available = mgr.find_python().is_some(); + let has_embedded = { + if let Ok(resource_dir) = mgr._app_handle.path().resource_dir() { + resource_dir.join("python-3.11.9-embed-amd64.zip").exists() || + resource_dir.join("python_embed").exists() + } else { + false + } + }; + + let project_root = mgr.project_root.to_string_lossy().to_string(); + + Ok(EnvironmentInfo { + python_available, + has_embedded_python: has_embedded, + project_root, + }) +} + +/// 手动提取内嵌 Python +#[tauri::command] +pub fn extract_embedded_python( + manager: State<'_, Mutex>, +) -> Result { + let mgr = manager.lock().map_err(|e| e.to_string())?; + + if let Ok(resource_dir) = mgr._app_handle.path().resource_dir() { + let zip_path = resource_dir.join("python-3.11.9-embed-amd64.zip"); + let target_python = mgr.project_root.join("tools/python_embed/python.exe"); + + if zip_path.exists() { + match mgr.extract_python_from_zip(&zip_path, &target_python) { + Ok(()) => Ok(true), + Err(e) => Err(e), + } + } else { + Err("未找到内嵌 Python zip 文件".to_string()) + } + } else { + Err("无法访问资源目录".to_string()) + } +} + +/// 后端状态返回结构 +#[derive(serde::Serialize, Clone)] +pub struct BackendStatus { + running: bool, + port: u16, +} + +/// 安装状态返回结构 +#[derive(serde::Serialize, Clone)] +pub struct InstallationStatus { + needs_install: bool, + python_available: bool, + embedded_extracted: bool, + python_path: Option, +} + +/// 环境信息返回结构 +#[derive(serde::Serialize, Clone)] +pub struct EnvironmentInfo { + python_available: bool, + has_embedded_python: bool, + project_root: String, +} diff --git a/frontend/src-tauri/src/lib.rs b/frontend/src-tauri/src/lib.rs new file mode 100644 index 
000000000..c4897b378 --- /dev/null +++ b/frontend/src-tauri/src/lib.rs @@ -0,0 +1,118 @@ +//! PlotPilot Tauri 主入口 +//! +//! 架构概览: +//! 用户双击 exe → Tauri WebView 渲染 Vue3 前端 +//! → Rust 端自动查找/启动 Python FastAPI 后端 +//! → 前端通过 HTTP 请求后端 API(同 localhost) +//! +//! 核心设计原则: +//! 1. 零配置:用户不需要安装 Python、不需要命令行 +//! 2. Sidecar 模式:Python 作为子进程被管理 +//! 3. 动态端口:自动寻找可用端口,避免冲突 +//! 4. 生产数据目录:release 构建向子进程注入 `AITEXT_PROD_DATA_DIR`(见 `application/paths.py`) +//! 5. Windows:子进程纳入 Job Object(KILL_ON_JOB_CLOSE),与 `Drop`/显式 terminate 双保险 +//! 6. 关闭窗口:拦截 CloseRequested → 后端 HTTP 优雅停机 → 超时强杀 → `exit(0)` + +mod backend; +mod commands; + +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Mutex; +use std::time::Duration; + +use tauri::{Manager, WindowEvent}; +use backend::BackendManager; + +/// 防止重复 spawn 多条优雅退出线程(用户连点关闭) +static GRACEFUL_SHUTDOWN_STARTED: AtomicBool = AtomicBool::new(false); + +#[cfg_attr(mobile, tauri::mobile_entry_point)] +pub fn run() { + // 初始化日志 + env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")) + .init(); + + log::info!("🚀 PlotPilot (墨枢) 启动中..."); + + tauri::Builder::default() + .plugin(tauri_plugin_single_instance::init(|app, _argv, _cwd| { + if let Some(win) = app.get_webview_window("main") { + let _ = win.unminimize(); + let _ = win.set_focus(); + } + })) + .plugin(tauri_plugin_shell::init()) + .on_window_event(|window, event| { + if let WindowEvent::CloseRequested { api, .. 
} = event { + // 已经在关闭中,直接忽略后续点击 + if GRACEFUL_SHUTDOWN_STARTED.swap(true, Ordering::SeqCst) { + api.prevent_close(); + return; + } + api.prevent_close(); + + // 最小化窗口给用户反馈(关闭正在进行) + let _ = window.minimize(); + + let app_handle = window.app_handle().clone(); + std::thread::spawn(move || { + let backend = app_handle.state::>(); + if let Ok(mgr) = backend.lock() { + // 减少超时时间,快速关闭 + mgr.graceful_shutdown(Duration::from_secs(3)); + } + app_handle.exit(0); + }); + } + }) + .setup(|app| { + let handle = app.handle().clone(); + + // IPC 端口:就绪前为 0,前端轮询直至 >0(避免在 setup 里阻塞导致 WebView 白屏) + app.manage(std::sync::Mutex::new(0u16)); + let manager = BackendManager::new(handle.clone()); + app.manage(std::sync::Mutex::new(manager)); + + let app_handle = app.handle().clone(); + std::thread::spawn(move || { + let backend = app_handle.state::>(); + let port = match backend.lock() { + Ok(mut mgr) => match mgr.spawn_only() { + Ok(p) => p, + Err(e) => { + log::error!("❌ 后端进程启动失败: {}", e); + return; + } + }, + Err(e) => { + log::error!("后端管理器锁 poisoned: {}", e); + return; + } + }; + + match BackendManager::wait_for_ready(port, 120) { + Ok(()) => { + let ipc_port = app_handle.state::>(); + if let Ok(mut g) = ipc_port.lock() { + *g = port; + } + log::info!("✅ 后端已就绪,端口: {}", port); + } + Err(e) => log::error!("❌ 后端就绪超时或失败: {}", e), + } + }); + + Ok(()) + }) + .invoke_handler(tauri::generate_handler![ + commands::get_backend_port, + commands::get_backend_status, + commands::restart_backend, + commands::open_in_browser, + commands::run_installation, + commands::check_environment, + commands::extract_embedded_python, + ]) + .run(tauri::generate_context!()) + .expect("error while running PlotPilot"); +} diff --git a/frontend/src-tauri/src/main.rs b/frontend/src-tauri/src/main.rs new file mode 100644 index 000000000..c2431dee3 --- /dev/null +++ b/frontend/src-tauri/src/main.rs @@ -0,0 +1,6 @@ +// Prevents additional console window on Windows in release, DO NOT REMOVE!! 
+#![cfg_attr(not(debug_assertions), windows_subsystem = "windows")] + +fn main() { + plotpilot_lib::run() +} diff --git a/frontend/src-tauri/tauri.conf.json b/frontend/src-tauri/tauri.conf.json index d94bf825e..418cda943 100644 --- a/frontend/src-tauri/tauri.conf.json +++ b/frontend/src-tauri/tauri.conf.json @@ -1,7 +1,7 @@ { "$schema": "https://raw.githubusercontent.com/tauri-apps/tauri/dev/crates/tauri-cli/schema.json", "productName": "PlotPilot", - "version": "1.0.2", + "version": "1.0.4", "identifier": "com.plotpilot.app", "build": { "frontendDist": "../dist", @@ -30,7 +30,9 @@ }, "bundle": { "active": true, - "targets": ["nsis"], + "targets": [ + "nsis" + ], "icon": [ "icons/32x32.png", "icons/128x128.png", @@ -52,7 +54,10 @@ "displayLanguageSelector": false, "installerIcon": "icons/icon.ico", "installMode": "both", - "languages": ["SimpChinese", "English"] + "languages": [ + "SimpChinese", + "English" + ] } }, "shortDescription": "AI 驱动的智能小说创作平台", @@ -62,4 +67,4 @@ "copyright": "© 2025 PlotPilot Team. All rights reserved." 
}, "plugins": {} -} \ No newline at end of file +} diff --git a/frontend/src/App.vue b/frontend/src/App.vue index d21e1307a..e5796306b 100644 --- a/frontend/src/App.vue +++ b/frontend/src/App.vue @@ -3,6 +3,8 @@ import { computed } from 'vue' import { NConfigProvider, NMessageProvider, NDialogProvider, zhCN, dateZhCN, darkTheme } from 'naive-ui' import type { GlobalThemeOverrides } from 'naive-ui' import { useThemeStore } from './stores/themeStore' +import GlobalLLMFloatingButton from './components/global/GlobalLLMFloatingButton.vue' +import PromptPlazaFAB from './components/global/PromptPlazaFAB.vue' const themeStore = useThemeStore() @@ -150,6 +152,8 @@ const themeOverrides = computed(() => { + + diff --git a/frontend/src/api/chapter.ts b/frontend/src/api/chapter.ts index 9342547e2..d170daba3 100644 --- a/frontend/src/api/chapter.ts +++ b/frontend/src/api/chapter.ts @@ -38,6 +38,183 @@ export interface ChapterReviewAiResponse { saved: boolean } +export interface ChapterCandidateDraftDTO { + id: string + novel_id: string + chapter_number: number + branch_name: string + source: string + status: string + title: string + content: string + rationale: string + metadata: Record + created_at: string + updated_at: string +} + +export interface CreateChapterCandidateDraftRequest { + source: string + title?: string + content: string + rationale?: string + metadata?: Record + branch_name?: string +} + +export interface AcceptChapterCandidateDraftResponse { + draft: ChapterCandidateDraftDTO + chapter: ChapterDTO + snapshot_id: string +} + +export interface CandidateBranchSummary { + branch_name: string + draft_count: number + accepted_count: number + updated_at: string +} + +export interface CandidateParagraphCompareItem { + index: number + type: 'unchanged' | 'added' | 'removed' | 'modified' + primary: string + candidate: string + similarity: number +} + +export interface CandidateDraftCompareResponse { + draft: ChapterCandidateDraftDTO + primary_word_count: number + 
candidate_word_count: number + similarity: number + paragraphs: CandidateParagraphCompareItem[] +} + +export interface BranchMemoryImpactItem { + label: string + level: 'info' | 'warning' | 'success' | 'error' | string + detail: string +} + +export interface BranchMemoryDiffResponse { + novel_id: string + chapter_number: number + source_branch: string + target_branch: string + source_draft_count: number + target_draft_count: number + source_latest_draft_id: string + target_latest_draft_id: string + similarity: number + memory_impacts: BranchMemoryImpactItem[] +} + +export interface ExternalModelTaskDTO { + id: string + novel_id: string + chapter_number: number + model: string + prompt: string + instruction: string + source_draft_id: string + candidate_draft_id: string + response_preview: string + status: 'prompted' | 'imported' | 'accepted' | string + execution_mode: 'copy_paste' | 'direct_api' | string + created_at: string + updated_at: string +} + +export interface UpsertExternalModelTaskRequest { + id?: string + chapter_number: number + model?: string + prompt?: string + instruction?: string + source_draft_id?: string + candidate_draft_id?: string + response_preview?: string + status?: string + execution_mode?: string +} + +export interface GenerateCandidateDraftRequest { + chapter_number: number + outline: string + current_content?: string + branch_name?: string + title?: string + source?: string + model_label?: string + llm_profile_id?: string + task_prompt?: string + max_tokens?: number + temperature?: number +} + +export interface GenerateCandidateDraftResponse { + draft: ChapterCandidateDraftDTO + task: ExternalModelTaskDTO +} + +export interface EditorialReviewForPolishDTO { + summary: string + scores: { + opening: number + conflict: number + character: number + dialogue: number + hook: number + pacing: number + } + strengths: string[] + problems: string[] + actions: string[] + verdict: string +} + +export interface GenerateEditorialPolishCandidateRequest 
{ + chapter_number: number + outline: string + current_content: string + editorial_review: EditorialReviewForPolishDTO + target_word_count?: number + branch_name?: string + title?: string + model_label?: string + max_tokens?: number + temperature?: number +} + +export interface CreateWebWritingPromptRequest { + chapter_number: number + outline: string + current_content?: string + model_label?: string + task_prompt?: string +} + +export interface WebWritingPromptResponse { + prompt: string + task: ExternalModelTaskDTO +} + +export interface SupervisorReviewCandidateDraftRequest { + model_label?: string + llm_profile_id?: string + focus?: string + max_tokens?: number + temperature?: number +} + +export interface SupervisorReviewCandidateDraftResponse { + draft_id: string + model_label: string + review: string + task: ExternalModelTaskDTO +} + export const chapterApi = { /** * List all chapters for a novel @@ -94,4 +271,80 @@ export const chapterApi = { */ ensureChapter: (novelId: string, chapterNumber: number, title = '') => apiClient.post(`/novels/${novelId}/chapters/${chapterNumber}/ensure`, { title }) as Promise, + + /** + * GET /api/v1/novels/{novelId}/chapters/{chapterNumber}/candidate-drafts + */ + listCandidateDrafts: (novelId: string, chapterNumber: number, branchName?: string) => + apiClient.get( + `/novels/${novelId}/chapters/${chapterNumber}/candidate-drafts`, + { + params: branchName ? 
{ branch_name: branchName } : undefined, + } + ) as Promise, + + /** + * POST /api/v1/novels/{novelId}/chapters/{chapterNumber}/candidate-drafts + */ + createCandidateDraft: (novelId: string, chapterNumber: number, data: CreateChapterCandidateDraftRequest) => + apiClient.post(`/novels/${novelId}/chapters/${chapterNumber}/candidate-drafts`, data) as Promise, + + /** + * POST /api/v1/novels/{novelId}/chapters/{chapterNumber}/candidate-drafts/{draftId}/accept + */ + acceptCandidateDraft: (novelId: string, chapterNumber: number, draftId: string) => + apiClient.post(`/novels/${novelId}/chapters/${chapterNumber}/candidate-drafts/${draftId}/accept`, {}) as Promise, + + /** + * POST /api/v1/novels/{novelId}/chapters/{chapterNumber}/candidate-drafts/{draftId}/reject + */ + rejectCandidateDraft: (novelId: string, chapterNumber: number, draftId: string) => + apiClient.post(`/novels/${novelId}/chapters/${chapterNumber}/candidate-drafts/${draftId}/reject`, {}) as Promise, + + listCandidateBranches: (novelId: string, chapterNumber: number) => + apiClient.get(`/novels/${novelId}/chapters/${chapterNumber}/candidate-drafts/branches`) as Promise, + + compareCandidateDraft: (novelId: string, chapterNumber: number, draftId: string) => + apiClient.get(`/novels/${novelId}/chapters/${chapterNumber}/candidate-drafts/${draftId}/compare`) as Promise, + + mergeCandidateBranch: (novelId: string, chapterNumber: number, sourceBranch: string, targetBranch = 'main', rule = 'latest_candidate') => + apiClient.post( + `/novels/${novelId}/chapters/${chapterNumber}/candidate-drafts/merge-branch`, + { source_branch: sourceBranch, target_branch: targetBranch, rule } + ) as Promise, + + getBranchMemoryDiff: (novelId: string, chapterNumber: number, sourceBranch: string, targetBranch = 'main') => + apiClient.get( + `/novels/${novelId}/chapters/${chapterNumber}/candidate-drafts/branch-memory-diff`, + { params: { source_branch: sourceBranch, target_branch: targetBranch } } + ) as Promise, + + 
listExternalModelTasks: (novelId: string, chapterNumber?: number) => + apiClient.get( + `/novels/${novelId}/external-model-tasks`, + { params: chapterNumber ? { chapter_number: chapterNumber } : undefined } + ) as Promise, + + upsertExternalModelTask: (novelId: string, data: UpsertExternalModelTaskRequest) => + apiClient.post(`/novels/${novelId}/external-model-tasks`, data) as Promise, + + generateCandidateDraft: (novelId: string, data: GenerateCandidateDraftRequest) => + apiClient.post(`/novels/${novelId}/candidate-drafts/generate`, data) as Promise, + + generateEditorialPolishCandidate: (novelId: string, data: GenerateEditorialPolishCandidateRequest) => + apiClient.post(`/novels/${novelId}/candidate-drafts/editorial-polish`, data) as Promise, + + createWebWritingPrompt: (novelId: string, data: CreateWebWritingPromptRequest) => + apiClient.post(`/novels/${novelId}/candidate-drafts/web-writing-prompt`, data) as Promise, + + reviewCandidateDraft: ( + novelId: string, + chapterNumber: number, + draftId: string, + data: SupervisorReviewCandidateDraftRequest, + ) => + apiClient.post( + `/novels/${novelId}/chapters/${chapterNumber}/candidate-drafts/${draftId}/supervisor-review`, + data, + ) as Promise, } diff --git a/frontend/src/api/chronicles.ts b/frontend/src/api/chronicles.ts index 5d1dc0d65..04b1cbced 100644 --- a/frontend/src/api/chronicles.ts +++ b/frontend/src/api/chronicles.ts @@ -20,6 +20,8 @@ export interface ChronicleSnapshot { created_at: string | null description: string | null anchor_chapter: number | null + origin_type: string + candidate_source: string | null } export interface ChronicleRow { diff --git a/frontend/src/api/cocCanon.ts b/frontend/src/api/cocCanon.ts new file mode 100644 index 000000000..c38de5f85 --- /dev/null +++ b/frontend/src/api/cocCanon.ts @@ -0,0 +1,99 @@ +import { apiClient } from './config' + +export interface CocCanonEntry { + id: string + novel_id: string + canon_type: string + title: string + lock_level: 'soft' | 'strict' | 
'absolute' | string + public_facts: string + hidden_truth: string + mutable_notes: string + status: 'active' | 'draft' | 'archived' | string + created_at: string + updated_at: string +} + +export interface CocCanonEvent { + id: string + novel_id: string + entry_id: string | null + title: string + chapter_number: number + event_type: string + evidence: string + notes: string + created_at: string +} + +export interface CocCanonOverview { + novel_id: string + entries: CocCanonEntry[] + recent_events: CocCanonEvent[] + cognition_layers: { + author_truth: string[] + reader_known: string[] + author_truth_snippets: string[] + } +} + +export interface CocPresetTemplate { + key: string + name: string + description: string + source_novel_id: string + canon_count: number + clue_count: number + prop_count: number +} + +export interface ApplyCocPresetRequest { + preset_key?: string + overwrite_existing?: boolean +} + +export interface ApplyCocPresetResponse { + preset_key: string + novel_id: string + created_canon: number + created_clues: number + created_props: number + skipped: number + overwrite_existing: boolean +} + +export interface UpsertCocCanonEntryRequest { + canon_type: string + title: string + lock_level?: string + public_facts?: string + hidden_truth?: string + mutable_notes?: string + status?: string +} + +export interface CreateCocCanonEventRequest { + title?: string + entry_id?: string + chapter_number: number + event_type?: string + evidence?: string + notes?: string +} + +export const cocCanonApi = { + getOverview: (novelId: string) => + apiClient.get(`/novels/${novelId}/coc-canon/overview`) as Promise, + + upsertEntry: (novelId: string, data: UpsertCocCanonEntryRequest) => + apiClient.post(`/novels/${novelId}/coc-canon/entries`, data) as Promise, + + createEvent: (novelId: string, data: CreateCocCanonEventRequest) => + apiClient.post(`/novels/${novelId}/coc-canon/events`, data) as Promise, + + listPresetTemplates: (novelId: string) => + 
apiClient.get(`/novels/${novelId}/coc-preset/templates`) as Promise, + + applyPreset: (novelId: string, data: ApplyCocPresetRequest = {}) => + apiClient.post(`/novels/${novelId}/coc-preset/apply`, data) as Promise, +} diff --git a/frontend/src/api/cocClue.ts b/frontend/src/api/cocClue.ts new file mode 100644 index 000000000..b95873b16 --- /dev/null +++ b/frontend/src/api/cocClue.ts @@ -0,0 +1,72 @@ +import { apiClient } from './config' + +export interface CocClueItem { + id: string + novel_id: string + clue_key: string + clue_text: string + visibility: string + reveal_chapter: number | null + known_by: string[] | string + confidence: number | null + lock_level: string + status: string + notes: string + created_at: string + updated_at: string +} + +export interface CocClueEvent { + id: string + novel_id: string + clue_id: string | null + clue_key: string + chapter_number: number + event_type: string + evidence: string + notes: string + created_at: string +} + +export interface CocClueOverview { + novel_id: string + items: CocClueItem[] + recent_events: CocClueEvent[] + cognition_layers: { + author_truth: string[] + character_known: string[] + reader_known: string[] + } +} + +export interface UpsertCocClueItemRequest { + clue_key: string + clue_text: string + visibility?: string + reveal_chapter?: number | null + known_by?: string + confidence?: number | null + lock_level?: string + status?: string + notes?: string +} + +export interface CreateCocClueEventRequest { + clue_id?: string + clue_key?: string + chapter_number: number + event_type?: string + evidence?: string + notes?: string +} + +export const cocClueApi = { + getOverview: (novelId: string) => + apiClient.get(`/novels/${novelId}/coc-clues/overview`) as Promise, + + upsertItem: (novelId: string, data: UpsertCocClueItemRequest) => + apiClient.post(`/novels/${novelId}/coc-clues/items`, data) as Promise, + + createEvent: (novelId: string, data: CreateCocClueEventRequest) => + 
apiClient.post(`/novels/${novelId}/coc-clues/events`, data) as Promise, +} diff --git a/frontend/src/api/config.ts b/frontend/src/api/config.ts index 02b65821d..19f9fb46a 100644 --- a/frontend/src/api/config.ts +++ b/frontend/src/api/config.ts @@ -105,7 +105,7 @@ async function initTauriConnection(): Promise { /** 桌面壳:后端在后台线程就绪,IPC 端口在健康检查通过前可能为 0 */ const TAURI_BACKEND_POLL_MS = 200 -const TAURI_BACKEND_WAIT_MS = 125_000 +const TAURI_BACKEND_WAIT_MS = 30_000 // 30秒,避免长时间卡住 async function waitForTauriBackendPort( invoke: (cmd: string) => Promise, @@ -136,19 +136,34 @@ export async function initApiClient(): Promise { if (first > 0) { port = first } else if (isTauri()) { + console.log('[API] 等待后端就绪...') port = await waitForTauriBackendPort( cmd => invoke(cmd), TAURI_BACKEND_WAIT_MS, TAURI_BACKEND_POLL_MS, ) } - } catch { - // 浏览器 / 无 IPC + } catch (e) { + console.warn('[API] Tauri IPC 调用失败:', e) } if (port != null && port > 0) { - axiosInstance.defaults.baseURL = `http://127.0.0.1:${port}/api/v1` - console.log(`[API] 桌面模式 baseURL: ${axiosInstance.defaults.baseURL}`) + const newBaseURL = `http://127.0.0.1:${port}/api/v1` + axiosInstance.defaults.baseURL = newBaseURL + console.log(`[API] 桌面模式 baseURL: ${newBaseURL}`) + + // 验证后端是否真的响应 + try { + const healthCheck = await fetch(`http://127.0.0.1:${port}/health`, { + method: 'GET', + signal: AbortSignal.timeout(5000) + }) + if (!healthCheck.ok) { + console.warn('[API] 后端健康检查失败,状态码:', healthCheck.status) + } + } catch (e) { + console.warn('[API] 后端健康检查异常:', e) + } } else if (isTauri()) { axiosInstance.defaults.baseURL = 'http://127.0.0.1:8005/api/v1' console.warn('[API] Tauri 下未能通过 IPC 取得端口,回退 8005') diff --git a/frontend/src/api/continuity.ts b/frontend/src/api/continuity.ts new file mode 100644 index 000000000..59a5bd7da --- /dev/null +++ b/frontend/src/api/continuity.ts @@ -0,0 +1,154 @@ +import { apiClient } from './config' + +export interface CharacterDropoutItem { + character_id: string + character_name: string + 
last_appearance_chapter: number + chapters_absent: number + appearance_count: number + severity: 'low' | 'medium' | 'high' | string + tracked_relationship_count: number + stale_relationship_count: number + stale_relationship_targets: string[] + dropout_scope: 'solo' | 'tracked' | 'linked' | string +} + +export interface RelationshipSpotlightItem { + source_character: string + target_character: string + relation: string + description: string +} + +export interface RelationshipSignalItem { + source_character: string + target_character: string + relation: string + description: string + last_joint_chapter: number + joint_appearance_count: number + change_signal: string + signal_excerpt: string + severity: 'info' | 'success' | 'warning' | 'error' | string + source: 'structured' | 'heuristic' | string +} + +export interface StaleRelationshipItem { + source_character: string + target_character: string + relation: string + description: string + last_joint_chapter: number + chapters_since_joint: number + severity: 'info' | 'success' | 'warning' | 'error' | string +} + +export interface RelationshipTrackingSummary { + source: 'structured' | 'heuristic' | string + tracked_pairs: number + active_signals: RelationshipSignalItem[] + stale_pairs: StaleRelationshipItem[] +} + +export interface TimelineEventItem { + id: string + chapter_number: number + event: string + timestamp: string + timestamp_type: string +} + +export interface TimelineSummary { + total_events: number + current_chapter_has_event: boolean + current_chapter_events: TimelineEventItem[] + recent_events: TimelineEventItem[] +} + +export interface VoiceDriftSummary { + drift_alert: boolean + latest_similarity_score: number | null + scored_chapters: number + alert_threshold: number + alert_consecutive: number +} + +export interface OutlineNodeStatusItem { + node_key: string + outline_text: string + status: 'pending' | 'completed' | 'matched' | 'changed' | 'missing' | 'blocked' | string + note: string + evidence: 
string +} + +export interface OutlineDeviationSummary { + source: 'structured' | 'heuristic' | string + status: 'aligned' | 'watch' | 'warning' | 'unavailable' | string + overlap_score: number | null + outline_excerpt: string + summary_excerpt: string + warning_reasons: string[] + outline_nodes: OutlineNodeStatusItem[] +} + +export interface ContinuityOverviewResponse { + novel_id: string + chapter_number: number + latest_chapter_number: number + character_dropouts: CharacterDropoutItem[] + relationship_spotlights: RelationshipSpotlightItem[] + relationship_tracking: RelationshipTrackingSummary + voice_drift: VoiceDriftSummary + timeline: TimelineSummary + outline_deviation: OutlineDeviationSummary +} + +export interface RelationshipEventRequest { + chapter_number: number + source_character: string + target_character?: string + relation?: string + event_type?: string + description?: string + evidence?: string + severity?: string +} + +export interface RelationshipEventResponse extends Required { + id: string + novel_id: string +} + +export interface OutlineNodeStatusRequest { + chapter_number: number + node_key: string + outline_text: string + status?: string + note?: string + evidence?: string +} + +export interface OutlineNodeStatusResponse extends Required { + id: string + novel_id: string +} + +export const continuityApi = { + getOverview: (novelId: string, chapterNumber?: number | null) => + apiClient.get( + `/novels/${novelId}/continuity/overview`, + { + params: chapterNumber ? 
{ chapter_number: chapterNumber } : undefined, + }, + ) as Promise, + recordRelationshipEvent: (novelId: string, payload: RelationshipEventRequest) => + apiClient.post( + `/novels/${novelId}/continuity/relationship-events`, + payload, + ) as Promise, + upsertOutlineNodeStatus: (novelId: string, payload: OutlineNodeStatusRequest) => + apiClient.put( + `/novels/${novelId}/continuity/outline-nodes`, + payload, + ) as Promise, +} diff --git a/frontend/src/api/index.ts b/frontend/src/api/index.ts index 2db5aee5b..402592ca3 100644 --- a/frontend/src/api/index.ts +++ b/frontend/src/api/index.ts @@ -12,6 +12,7 @@ export type { export * from './bible' export * from './workflow' export * from './chronicles' +export * from './styleBible' // Legacy API exports export * from './book' diff --git a/frontend/src/api/llmControl.ts b/frontend/src/api/llmControl.ts index 7cc79ed17..70c749bf1 100644 --- a/frontend/src/api/llmControl.ts +++ b/frontend/src/api/llmControl.ts @@ -78,6 +78,7 @@ export interface FetchModelsPayload { protocol: string base_url: string api_key: string + model?: string timeout_ms?: number } diff --git a/frontend/src/api/novelproMonitor.ts b/frontend/src/api/novelproMonitor.ts new file mode 100644 index 000000000..8fac4edcb --- /dev/null +++ b/frontend/src/api/novelproMonitor.ts @@ -0,0 +1,86 @@ +import { apiClient } from './config' + +export interface MonitorHealth { + status: 'ok' | 'warning' | 'error' | string + score: number + error_count: number + warning_count: number + alert_count: number +} + +export interface ObsidianMemorySummary { + primary_memory: boolean + premise_locked: boolean + fact_count: number + chapter_count: number + relationship_graph_path: string + vault_path: string + vault_configured: boolean + obsidian_app_installed: boolean +} + +export interface KnowledgeGraphSummary { + fact_count: number + relationship_count: number + entity_count: number +} + +export interface ContinuityMonitorSummary { + dropout_count: number + 
stale_relationship_count: number + active_relationship_signal_count: number + voice_drift_alert: boolean + timeline_conflict_count: number + current_chapter_has_timeline_event: boolean + outline_status: string +} + +export interface PowerMonitorSummary { + profile_count: number + warning_count: number +} + +export interface NovelProMonitorAlert { + severity: 'info' | 'success' | 'warning' | 'error' | string + source: 'obsidian' | 'knowledge' | 'continuity' | 'power' | string + title: string + message: string + action: string +} + +export interface NovelProMonitorOverview { + novel_id: string + chapter_number: number + health: MonitorHealth + obsidian: ObsidianMemorySummary + knowledge_graph: KnowledgeGraphSummary + continuity: ContinuityMonitorSummary + power: PowerMonitorSummary + alerts: NovelProMonitorAlert[] +} + +export interface ObsidianSyncResponse { + synced: boolean + reason: string + vault_path: string + chapter_note: string + fact_count: number +} + +export const novelproMonitorApi = { + getOverview: (novelId: string, chapterNumber?: number | null) => + apiClient.get( + `/novels/${novelId}/novelpro/monitor`, + { + params: chapterNumber ? 
{ chapter_number: chapterNumber } : undefined, + }, + ) as Promise, + syncObsidianChapter: (novelId: string, chapterNumber: number) => + apiClient.post( + `/novels/${novelId}/novelpro/obsidian/sync`, + null, + { + params: { chapter_number: chapterNumber }, + }, + ) as Promise, +} diff --git a/frontend/src/api/novelproSuggestions.ts b/frontend/src/api/novelproSuggestions.ts new file mode 100644 index 000000000..7c7bb8e12 --- /dev/null +++ b/frontend/src/api/novelproSuggestions.ts @@ -0,0 +1,24 @@ +import { apiClient } from './config' + +export interface NovelProSuggestionRequest { + suggestion_type: string + fields: string[] + chapter_number?: number | null + target?: Record + current_values?: Record + instruction?: string +} + +export interface NovelProSuggestionResponse { + suggestion_type: string + fields: Record + rationale: string +} + +export const novelproSuggestionsApi = { + suggestFields: (novelId: string, data: NovelProSuggestionRequest) => + apiClient.post( + `/novels/${novelId}/novelpro/suggestions`, + data, + ) as Promise, +} diff --git a/frontend/src/api/powerSystem.ts b/frontend/src/api/powerSystem.ts new file mode 100644 index 000000000..867342169 --- /dev/null +++ b/frontend/src/api/powerSystem.ts @@ -0,0 +1,99 @@ +import { apiClient } from './config' + +export interface PowerSystemRules { + id: string + novel_id: string + genre_type: string + tier_schema: string + core_rules: string + taboo_rules: string + escalation_rules: string + created_at: string + updated_at: string +} + +export interface PowerCharacterProfile { + id: string + novel_id: string + character_name: string + tier: string + rank_score: number + abilities: string + limitations: string + growth_stage: string + last_verified_chapter: number | null + notes: string + created_at: string + updated_at: string +} + +export interface PowerProgressionEvent { + id: string + novel_id: string + chapter_number: number + character_name: string + event_type: string + opponent: string + outcome: 
string + power_delta: number + evidence: string + created_at: string +} + +export interface PowerWarning { + severity: 'info' | 'warning' | 'error' | string + title: string + message: string +} + +export interface PowerSystemOverview { + novel_id: string + standard: string + rules: PowerSystemRules + profiles: PowerCharacterProfile[] + recent_events: PowerProgressionEvent[] + warnings: PowerWarning[] +} + +export interface UpsertPowerRulesRequest { + genre_type?: string + tier_schema?: string + core_rules?: string + taboo_rules?: string + escalation_rules?: string +} + +export interface UpsertPowerProfileRequest { + character_name: string + tier?: string + rank_score?: number + abilities?: string + limitations?: string + growth_stage?: string + last_verified_chapter?: number | null + notes?: string +} + +export interface CreatePowerEventRequest { + chapter_number: number + character_name: string + event_type?: string + opponent?: string + outcome?: string + power_delta?: number + evidence?: string +} + +export const powerSystemApi = { + getOverview: (novelId: string) => + apiClient.get(`/novels/${novelId}/power-system/overview`) as Promise, + + saveRules: (novelId: string, data: UpsertPowerRulesRequest) => + apiClient.put(`/novels/${novelId}/power-system/rules`, data) as Promise, + + saveProfile: (novelId: string, data: UpsertPowerProfileRequest) => + apiClient.post(`/novels/${novelId}/power-system/profiles`, data) as Promise, + + createEvent: (novelId: string, data: CreatePowerEventRequest) => + apiClient.post(`/novels/${novelId}/power-system/events`, data) as Promise, +} diff --git a/frontend/src/api/propLedger.ts b/frontend/src/api/propLedger.ts new file mode 100644 index 000000000..02510ce6c --- /dev/null +++ b/frontend/src/api/propLedger.ts @@ -0,0 +1,104 @@ +import { apiClient } from './config' + +export interface PropLedgerItem { + id: string + novel_id: string + name: string + category: string + status: string + current_holder: string + current_location: 
string + first_seen_chapter: number | null + last_seen_chapter: number | null + importance: string + description: string + notes: string + created_at: string + updated_at: string +} + +export interface PropLedgerEvent { + id: string + novel_id: string + prop_id: string + prop_name: string + chapter_number: number + event_type: string + holder: string + location: string + status: string + evidence: string + notes: string + created_at: string +} + +export interface PropLedgerWarning { + severity: 'info' | 'warning' | 'error' | string + title: string + message: string +} + +export interface PropLedgerOverview { + novel_id: string + items: PropLedgerItem[] + recent_events: PropLedgerEvent[] + warnings: PropLedgerWarning[] +} + +export interface PropLedgerEventSuggestion { + prop_name: string + chapter_number: number + event_type: string + holder: string + location: string + status: string + evidence: string + reason: string + confidence: number + is_new_prop: boolean + category: string + importance: string +} + +export interface UpsertPropItemRequest { + name: string + category?: string + status?: string + current_holder?: string + current_location?: string + first_seen_chapter?: number | null + last_seen_chapter?: number | null + importance?: string + description?: string + notes?: string +} + +export interface CreatePropEventRequest { + prop_name: string + chapter_number: number + event_type?: string + holder?: string + location?: string + status?: string + evidence?: string + notes?: string +} + +export interface SuggestPropEventsRequest { + chapter_number: number + content: string +} + +export const propLedgerApi = { + getOverview: (novelId: string) => + apiClient.get(`/novels/${novelId}/prop-ledger/overview`) as Promise, + + saveItem: (novelId: string, data: UpsertPropItemRequest) => + apiClient.post(`/novels/${novelId}/prop-ledger/items`, data) as Promise, + + createEvent: (novelId: string, data: CreatePropEventRequest) => + 
apiClient.post(`/novels/${novelId}/prop-ledger/events`, data) as Promise, + + suggestEvents: (novelId: string, data: SuggestPropEventsRequest) => + apiClient.post(`/novels/${novelId}/prop-ledger/events/suggestions`, data) as Promise, +} diff --git a/frontend/src/api/styleBible.ts b/frontend/src/api/styleBible.ts new file mode 100644 index 000000000..1252f5d37 --- /dev/null +++ b/frontend/src/api/styleBible.ts @@ -0,0 +1,150 @@ +import { apiClient } from './config' + +export interface StyleSampleDTO { + id: string + title: string + content: string + source_type: string + genre: string + scene_type: string + pov: string + allowed_for_generation: boolean + novel_id: string + profile_id: string + content_hash: string + char_count: number +} + +export interface StyleChunkDTO { + id: string + sample_id: string + chunk_type: 'chapter' | 'scene' | 'paragraph' | string + sequence: number + chapter_number: number + title: string + content: string + char_count: number + metrics: Record +} + +export interface StyleTechniqueCardDTO { + id: string + profile_id: string + title: string + category: string + scene_type: string + rule_text: string + example_summary: string + prompt_instruction: string + enabled: boolean + weight: number +} + +export interface StyleProfileDTO { + id: string + name: string + description: string + status: string + novel_id: string + profile: Record + metrics: Record + rules: any[] + forbidden_patterns: string[] + version: number +} + +export interface StyleProfileDetail { + profile: StyleProfileDTO + cards: StyleTechniqueCardDTO[] +} + +export interface StyleSampleImportResultDTO { + sample: StyleSampleDTO + chunks: StyleChunkDTO[] + profile?: StyleProfileDTO | null + cards: StyleTechniqueCardDTO[] +} + +export interface StyleProfileGenerateResultDTO { + profile: StyleProfileDTO + cards: StyleTechniqueCardDTO[] +} + +export interface StyleProfileMatchReportDTO { + profile_id: string + score: number + metrics: Record + issues: string[] +} + +export 
interface StylePromptOverlayDTO { + prompt: string + profile_id: string + profile_name: string + card_ids: string[] +} + +export interface ImportStyleSamplePayload { + title: string + content: string + source_type?: string + genre?: string + scene_type?: string + pov?: string + allowed_for_generation?: boolean + novel_id?: string + profile_id?: string + create_profile?: boolean + profile_name?: string +} + +export interface GenerateStyleProfilePayload { + novel_id?: string + name: string + description?: string + sample_ids?: string[] + use_llm?: boolean + llm_profile_id?: string +} + +export interface UpdateTechniqueCardPayload { + title?: string + category?: string + scene_type?: string + rule_text?: string + example_summary?: string + prompt_instruction?: string + enabled?: boolean + weight?: number +} + +export interface MatchStyleProfilePayload { + novel_id?: string + content: string +} + +export const styleBibleApi = { + importSample: (payload: ImportStyleSamplePayload) => + apiClient.post('/style-bible/samples', payload) as Promise, + + listSamples: (params?: { novel_id?: string; profile_id?: string }) => + apiClient.get('/style-bible/samples', { params }) as Promise, + + generateProfile: (payload: GenerateStyleProfilePayload) => + apiClient.post('/style-bible/profiles', payload) as Promise, + + listProfiles: (params?: { novel_id?: string; status?: string }) => + apiClient.get('/style-bible/profiles', { params }) as Promise, + + getProfile: (profileId: string) => + apiClient.get(`/style-bible/profiles/${profileId}`) as Promise, + + updateCard: (cardId: string, payload: UpdateTechniqueCardPayload) => + apiClient.patch(`/style-bible/cards/${cardId}`, payload) as Promise, + + matchProfile: (profileId: string, payload: MatchStyleProfilePayload) => + apiClient.post(`/style-bible/profiles/${profileId}/match`, payload) as Promise, + + previewOverlay: (payload: { novel_id?: string; style_profile_id: string; scene_type?: string; max_cards?: number }) => + 
apiClient.post('/style-bible/overlay/preview', payload) as Promise, +} diff --git a/frontend/src/api/topic.ts b/frontend/src/api/topic.ts new file mode 100644 index 000000000..44378d13f --- /dev/null +++ b/frontend/src/api/topic.ts @@ -0,0 +1,215 @@ +import { apiClient } from './config' +import type { NovelDTO } from './novel' + +export type TopicIdeaStatus = 'draft' | 'adopted' | 'archived' +export type TopicLengthTier = 'short' | 'standard' | 'epic' + +export interface TopicIdea { + id: string + title: string + genre: string + world_preset: string + length_tier: TopicLengthTier | string + logline: string + premise: string + protagonist_hook: string + core_conflict: string + opening_hook: string + selling_points: string[] + long_term_potential: string + risk_notes: string[] + market_tags: string[] + score: number + status: TopicIdeaStatus + adopted_novel_id?: string | null + source_brief: Record + development_notes: Record + evaluation: Record + created_at: string + updated_at: string +} + +export interface TopicCompareRanking { + topic_id: string + title: string + score: number + reason: string + risks: string[] +} + +export interface TopicCompareResult { + recommended_topic_id: string + summary: string + rankings: TopicCompareRanking[] +} + +export interface TopicMarketSignal { + id: string + source: string + title: string + genre: string + tags: string[] + summary: string + raw_text: string + created_at: string +} + +export type TopicMarketSignalSourceType = 'public_page' | 'api' | 'authenticated_source' + +export interface TopicMarketSignalSource { + key: string + name: string + url: string + category: string + source_type?: TopicMarketSignalSourceType + requires_auth?: boolean + rank_urls?: Record +} + +export interface TopicMarketSignalSummary { + total: number + source_counts: Record + genre_counts: Record + tag_counts: Record + category_counts: Record + window_days: number + weighted_source_scores: Record + weighted_genre_scores: Record + 
weighted_tag_scores: Record + comic_opportunities: string[] + daily_counts: Array<{ date: string; count: number }> + recent_samples: TopicMarketSignal[] +} + +export interface TopicMarketSignalAutomationSettings { + enabled: boolean + interval_minutes: number + limit_per_source: number + lookback_days: number + source_weights: Record + selected_source_keys: string[] + last_run_at: string + last_status: string + last_error: string + updated_at: string +} + +export interface TopicMarketSignalSourceCredentialStatus { + source_key: string + api_key_configured: boolean + cookie_configured: boolean + endpoint_configured: boolean + header_keys: string[] + updated_at: string +} + +export interface TopicMarketSignalSourceConnection { + source_key: string + source_name: string + ok: boolean + count: number + message: string + sample_titles: string[] +} + +export interface TopicMarketSignalSourceHealth { + source_key: string + source_name: string + status: 'success' | 'error' | 'unknown' | string + last_run_at: string + last_success_at: string + last_count: number + last_error: string + next_run_at: string +} + +export interface TopicGeneratePayload { + brief?: string + genre?: string + world_preset?: string + keywords?: string[] + desired_selling_points?: string[] + avoid_patterns?: string[] + market_signals?: Array> + length_tier?: TopicLengthTier + count?: number +} + +export interface TopicUpdatePayload { + status?: TopicIdeaStatus + title?: string + genre?: string + world_preset?: string + length_tier?: TopicLengthTier | string + logline?: string + premise?: string + protagonist_hook?: string + core_conflict?: string + opening_hook?: string + selling_points?: string[] + long_term_potential?: string + risk_notes?: string[] + market_tags?: string[] + score?: number + development_notes?: Record + evaluation?: Record +} + +export const topicApi = { + generate: (data: TopicGeneratePayload) => + apiClient.post('/topics/generate', data, { timeout: 300000 }) as Promise, + + 
list: (status?: TopicIdeaStatus) => + apiClient.get('/topics', { params: status ? { status } : undefined }) as Promise, + + updateStatus: (topicId: string, status: TopicIdeaStatus) => + apiClient.patch(`/topics/${topicId}`, { status }) as Promise, + + update: (topicId: string, data: TopicUpdatePayload) => + apiClient.patch(`/topics/${topicId}`, data) as Promise, + + deepen: (topicId: string) => + apiClient.post(`/topics/${topicId}/deepen`, undefined, { timeout: 300000 }) as Promise, + + evaluate: (topicId: string) => + apiClient.post(`/topics/${topicId}/evaluate`, undefined, { timeout: 300000 }) as Promise, + + compare: (topicIds: string[]) => + apiClient.post('/topics/compare', { topic_ids: topicIds }, { timeout: 300000 }) as Promise, + + importSignals: (data: { raw_text: string; source?: string }) => + apiClient.post('/topics/signals/import', data) as Promise, + + collectSignals: (data: { source_keys: string[]; limit_per_source?: number }) => + apiClient.post('/topics/signals/collect', data) as Promise, + + testSignalSources: (data: { source_keys: string[]; limit_per_source?: number }) => + apiClient.post('/topics/signals/sources/test', data) as Promise, + + listSignalSourceHealth: () => + apiClient.get('/topics/signals/source-health') as Promise, + + listSignalSources: () => + apiClient.get('/topics/signals/sources') as Promise, + + listSignals: (limit = 20) => + apiClient.get('/topics/signals', { params: { limit } }) as Promise, + + signalSummary: (limit = 100) => + apiClient.get('/topics/signals/summary', { params: { limit } }) as Promise, + + getAutomationSettings: () => + apiClient.get('/topics/signals/automation') as Promise, + + updateAutomationSettings: (data: Partial) => + apiClient.patch('/topics/signals/automation', data) as Promise, + + listSourceCredentials: () => + apiClient.get('/topics/signals/source-credentials') as Promise, + + updateSourceCredentials: (sourceKey: string, data: { api_key?: string; cookie?: string; endpoint_url?: string; 
headers?: Record }) => + apiClient.patch(`/topics/signals/sources/${sourceKey}/credentials`, data) as Promise, + + adopt: (topicId: string) => + apiClient.post(`/topics/${topicId}/adopt`) as Promise, +} diff --git a/frontend/src/api/workflow.ts b/frontend/src/api/workflow.ts index e95d9bd34..2de207b5f 100644 --- a/frontend/src/api/workflow.ts +++ b/frontend/src/api/workflow.ts @@ -69,6 +69,96 @@ export interface GenerateChapterWithContextPayload { chapter_number: number outline: string scene_director_result?: Record + style_profile_id?: string + scene_type?: string + avoid_compressed_expression?: boolean + target_word_count?: number + word_tolerance_percent?: number + direct_writing_mode?: boolean + direct_light_polish?: boolean + chapter_strategy?: ChapterStrategyPreviewDTO + long_draft_mode?: boolean + long_draft_split_count?: number +} + +export interface ChapterStrategyDramaticTaskDTO { + goal: string + obstacle: string + reader_expectation: string + ending_hook: string +} + +export interface ChapterContractDTO { + chapter_question: string + protagonist_want: string + opposition: string + reader_expectation: string + required_information_change: string + required_relationship_change: string + ending_question: string + show_dont_tell_rules: string[] +} + +export interface ChapterStrategySceneDTO { + label: string + task: string + resistance: string + info_shift: string + relationship_shift: string + anchor: string + visible_action?: string + subtext_dialogue?: string + unspoken_emotion?: string + object_or_clue_change?: string + hook: string + target_words: number +} + +export interface ChapterStrategyPreviewDTO { + chapter_contract: ChapterContractDTO + dramatic_task: ChapterStrategyDramaticTaskDTO + scene_plan: ChapterStrategySceneDTO[] + writing_focus: string[] +} + +export interface ChapterEditorialReviewScoresDTO { + opening: number + conflict: number + character: number + dialogue: number + hook: number + pacing: number + showing: number +} + +export 
interface ChapterEditorialReviewDTO { + summary: string + scores: ChapterEditorialReviewScoresDTO + strengths: string[] + problems: string[] + actions: string[] + verdict: string +} + +export interface CocCognitionPrecheckDTO { + checked: boolean + allow_generate: boolean + risk_level: 'none' | 'warning' | 'block' | string + blocking_issues: string[] + warnings: string[] + matched_tokens: string[] + chapter_number: number +} + +export interface CocCognitionRewriteResultDTO { + original_outline: string + rewritten_outline: string + changed: boolean + rewrite_mode: 'conservative' | 'aggressive' | string + rewrite_style: 'generic' | 'suspense' | 'coc' | string + applied_rules: string[] + precheck_before: CocCognitionPrecheckDTO + precheck_after: CocCognitionPrecheckDTO } export interface SceneDirectorAnalysis { @@ -96,6 +186,63 @@ export async function analyzeScene( ) as unknown as Promise } +export async function previewChapterStrategy( + novelId: string, + chapterNumber: number, + data: { + outline: string + scene_director_result?: Record + style_profile_id?: string + scene_type?: string + target_word_count?: number + word_tolerance_percent?: number + } +): Promise { + return apiClient.post( + `/novels/${novelId}/chapters/${chapterNumber}/strategy-preview`, + data + ) as unknown as Promise +} + +export async function reviewGeneratedChapterEditorially( + novelId: string, + chapterNumber: number, + data: { + outline: string + content: string + chapter_strategy?: ChapterStrategyPreviewDTO | null + } +): Promise { + return apiClient.post( + `/novels/${novelId}/chapters/${chapterNumber}/editorial-review`, + data + ) as unknown as Promise +} + +export async function precheckCocCognitionBoundary( + novelId: string, + chapterNumber: number, + outline: string, +): Promise { + return apiClient.post( + `/novels/${novelId}/chapters/${chapterNumber}/coc-cognition-precheck`, + { outline }, + ) as unknown as Promise +} + +export async function rewriteOutlineByCocBoundary( + 
novelId: string, + chapterNumber: number, + outline: string, + rewriteMode: 'conservative' | 'aggressive' = 'conservative', + rewriteStyle: 'generic' | 'suspense' | 'coc' = 'generic', +): Promise { + return apiClient.post( + `/novels/${novelId}/chapters/${chapterNumber}/coc-cognition-rewrite-outline`, + { outline, rewrite_mode: rewriteMode, rewrite_style: rewriteStyle }, + ) as unknown as Promise +} + /** 与 `interfaces/api/v1/generation.py` GenerateChapterResponse 对齐 */ export interface ConsistencyIssueDTO { type: string @@ -124,6 +271,10 @@ export interface GenerateChapterWorkflowResponse { token_count: number style_warnings?: StyleWarning[] ghost_annotations?: unknown[] + direct_writing_mode?: boolean + direct_light_polish?: boolean + long_draft_mode?: boolean + long_draft_split_count?: number | null } export interface ChunkStats { @@ -133,9 +284,10 @@ export interface ChunkStats { } export type GenerateChapterStreamEvent = - | { type: 'phase'; phase: 'planning' | 'context' | 'llm' | 'post' } + | { type: 'phase'; phase: 'planning' | 'context' | 'llm' | 'polish' | 'post' } + | { type: 'long_draft_plan'; enabled: boolean; split_count: number; target_word_count: number } | { type: 'chunk'; text: string; stats: ChunkStats } - | { type: 'done'; content: string; consistency_report: ConsistencyReportDTO; token_count: number; output_tokens: number; total_tokens: number; chars: number; style_warnings?: StyleWarning[]; ghost_annotations?: unknown[] } + | { type: 'done'; content: string; consistency_report: ConsistencyReportDTO; token_count: number; output_tokens: number; total_tokens: number; chars: number; style_warnings?: StyleWarning[]; ghost_annotations?: unknown[]; long_draft_mode?: boolean; long_draft_split_count?: number | null } | { type: 'error'; message: string } function parseSseDataLine(line: string): unknown | null { @@ -196,6 +348,14 @@ export async function consumeGenerateChapterStream( const ev: GenerateChapterStreamEvent = { type: 'phase', phase: ph as 
'planning' | 'context' | 'llm' | 'post' } handlers.onEvent?.(ev) handlers.onPhase?.(ph) + } else if (typ === 'long_draft_plan') { + const ev: GenerateChapterStreamEvent = { + type: 'long_draft_plan', + enabled: Boolean(o.enabled ?? false), + split_count: Number(o.split_count ?? 2), + target_word_count: Number(o.target_word_count ?? 0), + } + handlers.onEvent?.(ev) } else if (typ === 'chunk') { const text = String(o.text ?? '') const stats = o.stats as ChunkStats | undefined @@ -212,6 +372,10 @@ export async function consumeGenerateChapterStream( content: String(o.content ?? ''), consistency_report, token_count: Number(o.token_count ?? 0), + direct_writing_mode: Boolean(o.direct_writing_mode ?? false), + direct_light_polish: Boolean(o.direct_light_polish ?? false), + long_draft_mode: Boolean(o.long_draft_mode ?? false), + long_draft_split_count: o.long_draft_split_count == null ? null : Number(o.long_draft_split_count), } if (Array.isArray(o.style_warnings)) { result.style_warnings = o.style_warnings as StyleWarning[] diff --git a/frontend/src/components/autopilot/AutopilotPanel.vue b/frontend/src/components/autopilot/AutopilotPanel.vue index 063bfadc3..6d675e2d0 100644 --- a/frontend/src/components/autopilot/AutopilotPanel.vue +++ b/frontend/src/components/autopilot/AutopilotPanel.vue @@ -248,12 +248,20 @@ const dotClass = computed(() => ({ })) const stageLabel = computed(() => { + const stage = status.value?.current_stage + if (stage === 'auditing') { + const progress = status.value?.audit_progress + if (progress === 'voice_check') return '审计中(文风检查)' + if (progress === 'aftermath_pipeline') return '审计中(章后管线)' + if (progress === 'tension_scoring') return '审计中(张力打分)' + return '审计中' + } const m = { macro_planning: '宏观规划', act_planning: '幕级规划', writing: '撰写中', auditing: '审计中', paused_for_review: '待审阅', completed: '已完成', } - return m[status.value?.current_stage] || '待机' + return m[stage] || '待机' }) const stageTagClass = computed(() => ({ @@ -291,16 +299,23 @@ function 
formatWords(n) { const autopilotApiRoot = () => `/api/v1/autopilot/${props.novelId}` async function fetchStatus() { - const res = await fetch(resolveHttpUrl(`${autopilotApiRoot()}/status`)) - if (res.status === 404) { - clearStatusPoll() - status.value = null - statusPollDisabled.value = true - return - } - if (res.ok) { - status.value = await res.json() - emit('status-change', status.value) + try { + const res = await fetch(resolveHttpUrl(`${autopilotApiRoot()}/status`)) + if (res.status === 404) { + clearStatusPoll() + status.value = null + statusPollDisabled.value = true + return + } + if (res.ok) { + status.value = await res.json() + emit('status-change', status.value) + } else { + console.warn('[AutopilotPanel] fetchStatus failed:', res.status, res.statusText) + } + } catch (err) { + console.error('[AutopilotPanel] fetchStatus error:', err) + // 不清除 status,保留上一次的有效数据 } } @@ -312,14 +327,13 @@ function clearStatusPoll() { } watch( - () => [isRunning.value, needsReview.value], - ([running, review]) => { + () => [isRunning.value, needsReview.value, statusPollDisabled.value], + ([running, review, disabled]) => { clearStatusPoll() - if (statusPollDisabled.value) return - if (running || review) { - statusPollTimer = setInterval(() => fetchStatus(), 3000) - void fetchStatus() - } + if (disabled) return + // 始终开始轮询,即使初始状态未知,以便从临时故障中恢复 + statusPollTimer = setInterval(() => fetchStatus(), 3000) + void fetchStatus() }, { immediate: true } ) @@ -412,8 +426,16 @@ async function start() { if (res.ok) { const modeText = startConfig.value.auto_approve_mode ? 
'(全自动模式)' : '' message.success(`自动驾驶已启动${modeText}`) + } else { + let detail = '启动失败' + try { + const err = await res.json() + detail = err?.detail || err?.message || detail + } catch { + // ignore parse errors and keep fallback message + } + message.error(detail) } - else message.error('启动失败') await fetchStatus() } finally { toggling.value = false @@ -461,6 +483,7 @@ async function clearCircuitBreaker() { // 章节内容流订阅(用于推送内容到编辑框) let chapterStreamCtrl = null +let chapterStreamReconnectTimer = null // 写作内容状态(传递给 AutopilotWritingStream) const writingContent = ref('') @@ -471,6 +494,10 @@ function startChapterStream() { if (chapterStreamCtrl) { chapterStreamCtrl.abort() } + if (chapterStreamReconnectTimer) { + clearTimeout(chapterStreamReconnectTimer) + chapterStreamReconnectTimer = null + } writingContent.value = '' writingChapterNumber.value = 0 writingBeatIndex.value = 0 @@ -497,17 +524,39 @@ function startChapterStream() { }, onConnected: () => { // SSE连接成功 + console.log('[AutopilotPanel] SSE 流已连接') }, onDisconnected: () => { - // SSE连接断开 + // SSE连接断开,如果仍在运行状态则尝试重连 + if (isRunning.value) { + console.log('[AutopilotPanel] SSE 断开,3秒后重连...') + chapterStreamReconnectTimer = setTimeout(() => { + if (isRunning.value) { + startChapterStream() + } + }, 3000) + } }, onError: (err) => { - console.error('Chapter stream error:', err) + console.error('[AutopilotPanel] Chapter stream error:', err) + // 错误时也尝试重连 + if (isRunning.value) { + console.log('[AutopilotPanel] SSE 错误,5秒后重连...') + chapterStreamReconnectTimer = setTimeout(() => { + if (isRunning.value) { + startChapterStream() + } + }, 5000) + } } }) } function stopChapterStream() { + if (chapterStreamReconnectTimer) { + clearTimeout(chapterStreamReconnectTimer) + chapterStreamReconnectTimer = null + } if (chapterStreamCtrl) { chapterStreamCtrl.abort() chapterStreamCtrl = null diff --git a/frontend/src/components/global/GlobalLLMFloatingButton.vue b/frontend/src/components/global/GlobalLLMFloatingButton.vue index 
a5bdeaac9..300b5f8e0 100644 --- a/frontend/src/components/global/GlobalLLMFloatingButton.vue +++ b/frontend/src/components/global/GlobalLLMFloatingButton.vue @@ -54,7 +54,7 @@ AI 控制台 - LLM Gateway · OpenAI / Claude / Gemini + LLM Gateway · Kimi / DS / OpenAI-compatible diff --git a/frontend/src/components/knowledge/KnowledgePanel.vue b/frontend/src/components/knowledge/KnowledgePanel.vue index 5a0d090b7..9190fc76a 100644 --- a/frontend/src/components/knowledge/KnowledgePanel.vue +++ b/frontend/src/components/knowledge/KnowledgePanel.vue @@ -7,6 +7,9 @@ 可在「检索与编辑」「叙事知识」「关系图」间切换:检索与编辑含全书知识检索、三元组图谱与表格编辑;叙事含分章叙事与实体状态;梗概锁定已迁至右侧「剧本基建 → 作品设定」关系图从知识库三元组自动生成(人物网 / 地点图全页与工作台均可打开「三元组表格」编辑)。书目级梗概以 manifest 为准。

+

+ Obsidian 长期记忆会在章节保存和候选稿采纳后的章后管线中自动同步;PP 知识库仍是权威源,Obsidian 只作为可阅读、可链接的长期记忆镜像。 +

+ + + 检测到之前的进度,已自动跳至第 {{ resumedFromStep }} 步。您可以继续完成剩余设置。 +
@@ -471,6 +475,7 @@ const modalOpen = computed({ const currentStep = ref(1) const stepStatus = ref<'process' | 'finish' | 'error' | 'wait'>('process') +const resumedFromStep = ref(0) // 0 表示新会话,>0 表示从该步续传 // 第1步:生成世界观和文风 const generatingBible = ref(false) @@ -772,6 +777,74 @@ function resetWizardStateForOpen() { plotSuggestError.value = '' charactersError.value = '' locationsError.value = '' + resumedFromStep.value = 0 +} + +/** 检查已存在数据,确定向导应从哪一步继续 */ +async function detectWizardProgress(): Promise { + try { + // 检查 Bible 数据 + const bible = await bibleApi.getBible(props.novelId, { timeout: 30_000 }) + bibleData.value = bible + + // 解析世界观 + let fromApi = emptyWorldbuildingShape() + try { + const w = await worldbuildingApi.getWorldbuilding(props.novelId) + fromApi = normalizeWorldbuildingFromApi(w as unknown as Record) + } catch { + /* 404 忽略 */ + } + const fromWs = worldbuildingFromWorldSettings(bible.world_settings) + worldbuildingData.value = mergeWorldbuildingDisplay(fromApi, fromWs) + + const hasWorldbuilding = bible.world_settings?.length > 0 || Object.values(worldbuildingData.value).some(dim => Object.keys(dim).length > 0) + const hasStyle = styleConventionFromBible(bible).length > 0 + const hasCharacters = (bible.characters?.length ?? 0) > 0 + const hasLocations = (bible.locations?.length ?? 
0) > 0 + + // 检查主线是否存在 + let hasMainPlot = false + try { + const storylines = await workflowApi.getStorylines(props.novelId) + hasMainPlot = storylines.some(s => s.storyline_type === 'main_plot') + if (hasMainPlot) { + mainPlotCommitted.value = true + } + } catch { + /* 忽略 */ + } + + // 确定当前步骤 + if (!hasWorldbuilding && !hasStyle) { + resumedFromStep.value = 0 // 新会话 + return 1 // 世界观未生成 + } + bibleGenerated.value = true + + if (!hasCharacters) { + resumedFromStep.value = 2 // 从人物步骤续传 + return 2 // 人物未生成 + } + charactersGenerated.value = true + + if (!hasLocations) { + resumedFromStep.value = 3 // 从地点步骤续传 + return 3 // 地点未生成 + } + locationsGenerated.value = true + + if (!hasMainPlot) { + resumedFromStep.value = 4 // 从主线步骤续传 + return 4 // 主线未设定 + } + + resumedFromStep.value = 5 // 全部完成 + return 5 + } catch (err) { + console.warn('[NovelSetupGuide] detectWizardProgress failed:', err) + return 1 // 出错时从头开始 + } } function stopGenerationOnClose() { @@ -784,20 +857,30 @@ function stopGenerationOnClose() { watch( () => props.show, - (val) => { + async (val) => { if (val) { resetWizardStateForOpen() - void startBibleGeneration() + // 检查已有进度,确定从哪一步继续 + const step = await detectWizardProgress() + currentStep.value = step + // 只有在第 1 步且世界观未生成时才启动生成 + if (step === 1 && !bibleGenerated.value) { + void startBibleGeneration() + } } else { stopGenerationOnClose() } } ) -onMounted(() => { +onMounted(async () => { if (props.show) { resetWizardStateForOpen() - void startBibleGeneration() + const step = await detectWizardProgress() + currentStep.value = step + if (step === 1 && !bibleGenerated.value) { + void startBibleGeneration() + } } }) @@ -806,7 +889,8 @@ onUnmounted(() => { }) watch(currentStep, (step) => { - if (step === 4 && props.show && plotOptions.value.length === 0 && !plotSuggesting.value) { + // 第 4 步:主线未提交且无候选时才加载 + if (step === 4 && props.show && !mainPlotCommitted.value && plotOptions.value.length === 0 && !plotSuggesting.value) { void loadPlotSuggestions() } }) @@ 
-816,6 +900,10 @@ const handleNext = async () => { step2PollEpoch.value += 1 const epoch2 = step2PollEpoch.value currentStep.value = 2 + // 如果人物已存在,跳过生成 + if (charactersGenerated.value) { + return + } generatingCharacters.value = true charactersGenerated.value = false charactersError.value = '' @@ -854,6 +942,10 @@ const handleNext = async () => { step3PollEpoch.value += 1 const epoch3 = step3PollEpoch.value currentStep.value = 3 + // 如果地点已存在,跳过生成 + if (locationsGenerated.value) { + return + } generatingLocations.value = true locationsGenerated.value = false locationsError.value = '' diff --git a/frontend/src/components/topic/TopicIdeaPanel.vue b/frontend/src/components/topic/TopicIdeaPanel.vue new file mode 100644 index 000000000..05d57ad1d --- /dev/null +++ b/frontend/src/components/topic/TopicIdeaPanel.vue @@ -0,0 +1,1941 @@ + + + + + diff --git a/frontend/src/components/workbench/CandidateDraftBranchSwitcher.vue b/frontend/src/components/workbench/CandidateDraftBranchSwitcher.vue new file mode 100644 index 000000000..4f7627844 --- /dev/null +++ b/frontend/src/components/workbench/CandidateDraftBranchSwitcher.vue @@ -0,0 +1,61 @@ + + + diff --git a/frontend/src/components/workbench/CandidateRefinePanel.vue b/frontend/src/components/workbench/CandidateRefinePanel.vue new file mode 100644 index 000000000..3f1e8d042 --- /dev/null +++ b/frontend/src/components/workbench/CandidateRefinePanel.vue @@ -0,0 +1,241 @@ + + + + + diff --git a/frontend/src/components/workbench/CocCanonPanel.vue b/frontend/src/components/workbench/CocCanonPanel.vue new file mode 100644 index 000000000..4db22e219 --- /dev/null +++ b/frontend/src/components/workbench/CocCanonPanel.vue @@ -0,0 +1,556 @@ + + + + + diff --git a/frontend/src/components/workbench/CocCluePanel.vue b/frontend/src/components/workbench/CocCluePanel.vue new file mode 100644 index 000000000..ca10db1b3 --- /dev/null +++ b/frontend/src/components/workbench/CocCluePanel.vue @@ -0,0 +1,615 @@ + + + + + diff --git 
a/frontend/src/components/workbench/ContinuityPanel.vue b/frontend/src/components/workbench/ContinuityPanel.vue new file mode 100644 index 000000000..bf043ea0a --- /dev/null +++ b/frontend/src/components/workbench/ContinuityPanel.vue @@ -0,0 +1,1103 @@ + + + + + diff --git a/frontend/src/components/workbench/HolographicChroniclesPanel.vue b/frontend/src/components/workbench/HolographicChroniclesPanel.vue index 6167fa447..9d3710417 100644 --- a/frontend/src/components/workbench/HolographicChroniclesPanel.vue +++ b/frontend/src/components/workbench/HolographicChroniclesPanel.vue @@ -8,7 +8,12 @@ 悬浮右侧快照节点时高亮本行左侧剧情;回滚将删除快照未包含的章节(不可撤销)。

- 刷新 + + + {{ activeBranchName ? `当前分支 · ${activeBranchName}` : '当前分支 · 全部' }} + + 刷新 + @@ -22,9 +27,11 @@
-
+
@@ -38,7 +45,7 @@
{{ sn.kind === 'MANUAL' ? '🟣 Manual' : '🔵 Auto' }} + + 候选稿采纳{{ sn.candidate_source ? ` · ${sn.candidate_source}` : '' }} + {{ sn.name }} + + diff --git a/frontend/src/components/workbench/NovelProMonitorPanel.vue b/frontend/src/components/workbench/NovelProMonitorPanel.vue new file mode 100644 index 000000000..245728045 --- /dev/null +++ b/frontend/src/components/workbench/NovelProMonitorPanel.vue @@ -0,0 +1,385 @@ + + + + + diff --git a/frontend/src/components/workbench/PowerSystemPanel.vue b/frontend/src/components/workbench/PowerSystemPanel.vue new file mode 100644 index 000000000..6f5e4b652 --- /dev/null +++ b/frontend/src/components/workbench/PowerSystemPanel.vue @@ -0,0 +1,532 @@ + + + + + diff --git a/frontend/src/components/workbench/PropLedgerPanel.vue b/frontend/src/components/workbench/PropLedgerPanel.vue new file mode 100644 index 000000000..bdcd1d12e --- /dev/null +++ b/frontend/src/components/workbench/PropLedgerPanel.vue @@ -0,0 +1,528 @@ + + + + + diff --git a/frontend/src/components/workbench/SandboxDialoguePanel.vue b/frontend/src/components/workbench/SandboxDialoguePanel.vue index 646b4b79e..8a0845c60 100644 --- a/frontend/src/components/workbench/SandboxDialoguePanel.vue +++ b/frontend/src/components/workbench/SandboxDialoguePanel.vue @@ -67,6 +67,14 @@ :autosize="{ minRows: 2, maxRows: 4 }" /> + + AI 生成语气/场景 + () const message = useMessage() +const contextStore = useWorkbenchContextStore() +const { sandboxDraft, sandboxDraftVersion } = storeToRefs(contextStore) // 状态 const loading = ref(false) @@ -193,17 +205,23 @@ const anchor = ref(null) const anchorLoading = ref(false) const genLoading = ref(false) const saveLoading = ref(false) +const suggestLoading = ref(false) const editMental = ref('') const editVerbal = ref('') const editIdle = ref('') const scenePrompt = ref('') const generatedLine = ref('') +const lastConsumedSandboxVersion = ref(0) // 角色选项 const characterOptions = computed(() => characters.value.map(c => ({ label: c.name || c.id, 
value: c.id })) ) +const selectedCharacter = computed(() => + characters.value.find(character => character.id === selectedCharacterId.value) || null, +) + // 章节选项(从已有对话中提取) const chapterOptions = computed(() => { if (!result.value) return [] @@ -287,6 +305,21 @@ async function onCharacterSelect(charId: string | null) { } } +async function applySandboxDraftContext() { + const draft = sandboxDraft.value + if (!draft || draft.slug !== props.slug) return + if (sandboxDraftVersion.value === lastConsumedSandboxVersion.value) return + + lastConsumedSandboxVersion.value = sandboxDraftVersion.value + selectedCharacterId.value = draft.characterId + await onCharacterSelect(draft.characterId) + + editMental.value = draft.mentalState || editMental.value || 'NORMAL' + editVerbal.value = draft.verbalTic || editVerbal.value || '' + editIdle.value = draft.idleBehavior || editIdle.value || '' + scenePrompt.value = draft.scenePrompt || scenePrompt.value +} + // 保存锚点 async function saveAnchors() { const id = selectedCharacterId.value @@ -307,6 +340,47 @@ async function saveAnchors() { } } +function suggestionText(fields: Record, key: string) { + const value = fields[key] + if (value == null) return '' + return String(value) +} + +async function suggestSandboxSetup() { + const id = selectedCharacterId.value + if (!id) { + message.warning('先选择角色再生成语气和场景') + return + } + suggestLoading.value = true + try { + const result = await novelproSuggestionsApi.suggestFields(props.slug, { + suggestion_type: 'voice_anchor', + fields: ['mental_state', 'verbal_tic', 'idle_behavior', 'scene_prompt'], + target: { + character_id: id, + character_name: selectedCharacter.value?.name || id, + }, + current_values: { + mental_state: editMental.value, + verbal_tic: editVerbal.value, + idle_behavior: editIdle.value, + scene_prompt: scenePrompt.value, + }, + instruction: '根据当前设定、掉线提醒和 OOC 风险,生成角色语气锚点和一条适合试写的对话场景。', + }) + editMental.value = suggestionText(result.fields, 'mental_state') || editMental.value + 
editVerbal.value = suggestionText(result.fields, 'verbal_tic') || editVerbal.value + editIdle.value = suggestionText(result.fields, 'idle_behavior') || editIdle.value + scenePrompt.value = suggestionText(result.fields, 'scene_prompt') || scenePrompt.value + message.success(result.rationale || '已生成语气和场景建议') + } catch { + message.error('生成语气和场景失败,请稍后重试') + } finally { + suggestLoading.value = false + } +} + // 生成对话 async function runGenerate() { const id = selectedCharacterId.value @@ -362,6 +436,7 @@ watch( onMounted(() => { loadCharacters() loadWhitelist() + void applySandboxDraftContext() }) // 刷新监听 @@ -371,6 +446,13 @@ watch(deskTick, () => { loadCharacters() loadWhitelist() }) + +watch( + [sandboxDraftVersion, () => props.slug], + () => { + void applySandboxDraftContext() + }, +) diff --git a/frontend/src/components/workbench/VoiceLockPanel.vue b/frontend/src/components/workbench/VoiceLockPanel.vue new file mode 100644 index 000000000..b6ec30243 --- /dev/null +++ b/frontend/src/components/workbench/VoiceLockPanel.vue @@ -0,0 +1,701 @@ + + + + + diff --git a/frontend/src/components/workbench/WorkArea.vue b/frontend/src/components/workbench/WorkArea.vue index 1b181c1a6..296ab9ef9 100644 --- a/frontend/src/components/workbench/WorkArea.vue +++ b/frontend/src/components/workbench/WorkArea.vue @@ -45,6 +45,17 @@ 重新加载 + + 候选稿 + + 字数: {{ wordCount }} + + 生成风格 + - + + + + 套用CoC结构模板 + + + 手动规划会随生成一起读取 Bible、正典、线索和道具账本。 + + + + + + + + + CoC 认知预检 + + {{ + cocPrecheckResult?.risk_level === 'block' + ? '阻断' + : cocPrecheckResult?.risk_level === 'warning' + ? '提醒' + : cocPrecheckResult?.checked + ? '通过' + : '未检查' + }} + + + + + + + 立即预检 + + + 一键安全改写 + + + + + + 生成前检查是否越过“读者已知 / 角色已知 / 作者真相”边界,命中阻断项会默认禁止生成。 + + + + + + - {{ item }} + + + + + + + - {{ item }} + + + + + + + + + 仅本次生成忽略阻断(建议仅用于实验) + + + + + + + + 本次改写模式 + + {{ cocRewriteResult.rewrite_mode === 'aggressive' ? '激进' : '保守' }} + + + {{ + cocRewriteResult.rewrite_style === 'coc' + ? 
'CoC向' + : cocRewriteResult.rewrite_style === 'suspense' + ? '悬疑向' + : '通用' + }} + + + + - {{ item }} + + + + + + @@ -227,10 +416,229 @@ + + + + + 避免把对话、动作和心理转折压成一句概括 + + + + + + + + + + + + {{ targetWordRangeHint }} + + + + + + + + + + {{ longDraftMode ? `先写连续母稿,预计拆成 ${longDraftSplitCount || 2} 章` : '灰度功能:先写长稿再拆章(默认关闭)' }} + + + + + + + + + 对照测试:跳过节拍拆分、AI味后处理和章后质检,只让模型按上下文直接写一版 + + + + + + 直接写作模式不会自动生成一致性报告,也不会套用手法档案后处理;适合拿去检测,判断 PP 流程是否影响正文质感。 + + + + + + + 直接写完后只做 10%-20% 局部编辑,压低 AI 特征但不进入完整 PP 后处理 + + + + + + + + + + + 场记分析失败(不影响生成):{{ sceneDirectorError }} + + + + 本章写作策略 + 已预览 + + + + {{ chapterStrategy ? '重新生成策略' : '生成策略预览' }} + + + 清空策略 + + + +
+ + + 本章问题:{{ chapterStrategy.chapter_contract.chapter_question }} + 主角想要:{{ chapterStrategy.chapter_contract.protagonist_want }} + 阻力来源:{{ chapterStrategy.chapter_contract.opposition }} + 信息变化:{{ chapterStrategy.chapter_contract.required_information_change }} + 章末追问:{{ chapterStrategy.chapter_contract.ending_question }} + + 展示优先 + + - {{ rule }} + + + + +
+
+ 角色想要 + {{ chapterStrategy.dramatic_task.goal }} +
+
+ 主要阻碍 + {{ chapterStrategy.dramatic_task.obstacle }} +
+
+ 读者期待 + {{ chapterStrategy.dramatic_task.reader_expectation }} +
+
+ 章末钩子 + {{ chapterStrategy.dramatic_task.ending_hook }} +
+
+ +
+ + {{ index + 1 }}. {{ scene.label }} + {{ scene.target_words }} 字 + + 任务:{{ scene.task }} + 阻力:{{ scene.resistance }} + 变化:{{ scene.info_shift }} + 关系:{{ scene.relationship_shift }} + 锚点:{{ scene.anchor }} + 动作:{{ scene.visible_action }} + 潜台词:{{ scene.subtext_dialogue }} + 不直说:{{ scene.unspoken_emotion }} + 线索/道具:{{ scene.object_or_clue_change }} + 钩子:{{ scene.hook }} +
+
+
+ + 先生成一份可见策略,再让正文按“戏剧任务 + 场景推进”写,会比只丢大纲更稳。 + +
+ + + + + + + + + + + + 精修任务会先进入候选稿区;生成和采纳仍复用现有候选稿、快照和章后记忆更新流程。 + + + + + + + + + + + + + + + + + + + + + + 候选稿不会直接改写主稿。点击“采纳为主稿”后,才会复用现有章节保存、快照和章后记忆更新链路。 + + + + + + 当前章节:{{ currentChapter ? `第${currentChapter.number}章` : '未选择章节' }} + + + 候选稿分支与全局切换保持同步;留空表示查看全部,新建时会回落到 `main`。 + + + + + + PP AI 生成候选稿 + + + Web 写作 + + + 合并到 main + + + 刷新 + + + + + + + + + {{ branch.branch_name }} · {{ branch.draft_count }}稿 / 已采纳{{ branch.accepted_count }} + + + 模型任务台账 {{ externalModelTasks.length }} 条 + + + + + 分支记忆差异:相似度 {{ Math.round(branchMemoryDiff.similarity * 100) }}% + + + {{ item.label }} + + + + + + + + + + + + + + + + + {{ candidateDraftSourceLabel(draft.source) }} + + {{ draft.branch_name }} + + {{ draft.status }} + + + {{ formatDraftTime(draft.created_at) }} + + {{ draft.title || `第${draft.chapter_number}章候选稿` }} + + + {{ tag }} + + + + + {{ tag }} + + + + {{ draft.rationale || '无说明' }} + + + + 预览 + + + 按任务生成 + + + 采纳为主稿 + + + 拒绝 + + + + + + + + + + + + 候选稿预览 + + 审稿/记忆检查 + + + + 与当前主稿对比:候选稿 {{ selectedCandidateDiffSummary.candidateWordCount }} 字, + {{ selectedCandidateDiffSummary.wordDelta >= 0 ? '增加' : '减少' }} + {{ Math.abs(selectedCandidateDiffSummary.wordDelta) }} 字, + 相似度 {{ selectedCandidateDiffSummary.similarityPercent }}%。 + + + 采纳影响:{{ candidateDraftMemoryImpactHints(selectedCandidateDraft).join(';') }}。 + + + + {{ item.label }} + + + + PP AI 检查: + {{ selectedCandidateSupervisorReview.review }} + + + + + A/B 对照:主稿 {{ selectedCandidateCompare.primary_word_count }} 字, + 候选 {{ selectedCandidateCompare.candidate_word_count }} 字, + 相似度 {{ Math.round(selectedCandidateCompare.similarity * 100) }}%。 + + + + 勾选候选段落后,可生成一版“部分采纳候选稿”,再走原采纳链路。 + + + 保存所选段落为候选稿 + + + + +
+ + + + + {{ paragraphDiffLabel(item.type) }} + + + 第 {{ item.index + 1 }} 段 · 相似度 {{ item.similarityPercent }}% + + + + + + 主稿 +

+ {{ item.baseParagraph || '(无)' }} +

+
+ + 候选 +

+ {{ item.candidateParagraph || '(删除该段)' }} +

+
+
+
+
+
+
+
+ +
+
+
+
+ + +
+ + + + + PP 只生成提示词和管理候选稿,不调用写作 API。你把提示词复制到 ChatGPT / Kimi / DeepSeek 网页,生成后把正文粘回这里保存为候选稿。 + + + + + + + + + + + + + + + + + + + + + + 当前章节:{{ currentChapter ? `第${currentChapter.number}章 ${currentChapter.title || ''}` : '未选择章节' }} + + + + 生成提示词 + + + 复制提示词 + + + + + + + + + + + + + + + +