From 48881437a7ee942a15d0ed0efd7883bc7af7897c Mon Sep 17 00:00:00 2001 From: web4zn Date: Thu, 7 May 2026 21:47:21 +0800 Subject: [PATCH] feat: dual-stream mindmap output with JSON mode and mindmap-as-context - Single API call produces both chat answer and full updated mindmap tree - JSON mode (response_format: json_object) for providers that support it - Fallback marker mode for other providers - Mindmap tree replaces raw conversation history as LLM context (60-80% token savings) - Removed corpus, batch generation, knowledge-applier, and all intermediate types - Simplified MindMap data model (monitoredConversationIds only) - Conversation-mindmap association via new dialog and panel - Edited node preservation via findEditedNodes/mergeEditedNodes - Increased dagre layout spacing for dense trees --- AGENTS.md | 68 ++ opencode.json | 4 + .../.openspec.yaml | 2 + .../2026-05-07-dual-stream-mindmap/design.md | 101 ++ .../proposal.md | 36 + .../specs/chat-interface/spec.md | 39 + .../specs/conversation-management/spec.md | 22 + .../specs/knowledge-stream-extraction/spec.md | 99 ++ .../specs/mindmap-as-context/spec.md | 64 ++ .../specs/mindmap-corpus/spec.md | 24 + .../specs/mindmap-generation/spec.md | 42 + .../2026-05-07-dual-stream-mindmap/tasks.md | 68 ++ package-lock.json | 545 ---------- src/features/chat/ChatPage.tsx | 213 ++-- src/features/chat/MessageBubble.tsx | 56 +- src/features/chat/NewConversationDialog.tsx | 75 +- src/features/mindmap/MindMapPanel.tsx | 772 ++------------ src/features/mindmap/MindMapTree.tsx | 2 +- src/lib/__tests__/mindmap-generator.test.ts | 981 +++--------------- src/lib/llm-client.ts | 22 + src/lib/mindmap-generator.ts | 872 +++------------- src/lib/mindmap-layout.ts | 2 +- src/stores/__tests__/mindmapStore.test.ts | 174 ---- src/stores/mindmapStore.ts | 128 +-- src/types/index.ts | 9 +- src/types/mindmap.ts | 55 - 26 files changed, 1107 insertions(+), 3368 deletions(-) create mode 100644 AGENTS.md create mode 100644 opencode.json 
create mode 100644 openspec/changes/archive/2026-05-07-dual-stream-mindmap/.openspec.yaml create mode 100644 openspec/changes/archive/2026-05-07-dual-stream-mindmap/design.md create mode 100644 openspec/changes/archive/2026-05-07-dual-stream-mindmap/proposal.md create mode 100644 openspec/changes/archive/2026-05-07-dual-stream-mindmap/specs/chat-interface/spec.md create mode 100644 openspec/changes/archive/2026-05-07-dual-stream-mindmap/specs/conversation-management/spec.md create mode 100644 openspec/changes/archive/2026-05-07-dual-stream-mindmap/specs/knowledge-stream-extraction/spec.md create mode 100644 openspec/changes/archive/2026-05-07-dual-stream-mindmap/specs/mindmap-as-context/spec.md create mode 100644 openspec/changes/archive/2026-05-07-dual-stream-mindmap/specs/mindmap-corpus/spec.md create mode 100644 openspec/changes/archive/2026-05-07-dual-stream-mindmap/specs/mindmap-generation/spec.md create mode 100644 openspec/changes/archive/2026-05-07-dual-stream-mindmap/tasks.md diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 0000000..d9f5eaf --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,68 @@ +# Progressive Mindmap — Agent Guide + +## Commands (run in order) + +```bash +npm run dev # Vite dev server → http://localhost:5173 +npm run build # tsc -b && vite build (typecheck then build) +npm test # vitest run (happy-dom, no jsdom) +npm run lint # eslint . (TypeScript + react-hooks + react-refresh) +npm run format # prettier --write . (no semi, singleQuote, trailingComma: all) +``` + +CI pipeline (`.github/workflows/ci.yml`): `npm ci → npx tsc --noEmit → npx eslint . → npm test` + +## Architecture + +- Fully client-side SPA (no backend). All data in **IndexedDB** via `idb` library. +- **Zustand** stores persisted through IndexedDB adapter (`src/lib/indexeddb-storage-adapter.ts`). Stores: `providerStore`, `conversationStore`, `mindmapStore`, `chatStore`. +- DB version 5 (`src/lib/db.ts`). 
Object stores: `providers`, `conversations`, `messages`, `mindmaps`, `zustand-persist`. +- 4 feature modules: `chat/`, `mindmap/`, `conversation/`, `provider/`. Pure logic in `lib/`. Types in `types/`. +- Path alias `@/` → `./src/` (configured in both `vite.config.ts` and `vitest.config.ts`). +- UI: React 18 + Tailwind CSS v4 (`@tailwindcss/vite` plugin) + shadcn/ui (base-nova style) + `@xyflow/react` + dagre layout. + +## Mindmap Generation + +- `src/lib/mindmap-generator.ts` handles all generation logic. +- Two modes: **full** (rebuild entire tree) and **incremental** (surgical operations). +- Incremental operations: `add_child`, `update`, `merge`, `delete_leaf`, `noop`. +- `editedByUser: true` nodes are **never** overwritten by AI (protected in `applyOperations`). +- Source tracking via `[源: convId/msgId]` annotations in prompts. +- `maxDepth=0` means "auto depth" (no hard limit). Default is 3. +- All generation prompts are in **Chinese**. + +## IndexedDB Persistence + +- Zustand stores use `createIndexedDBStorage()` (NOT `localStorage`). +- If IndexedDB is unavailable, falls back to memory-only with a console warning. +- `providerStore` rehydrate hook pre-seeds OpenRouter as default provider on first load. + +## TypeScript & Linting Rules + +- `strict: true`, `noUncheckedIndexedAccess: true` — all array/object access must be guarded. +- `noUnusedLocals: true`, `noUnusedParameters: true` — no dead code. +- `noUnusedLocals` is a **compile error**, not just lint warning. +- `noUncheckedIndexedAccess` means array access `arr[i]` returns `T | undefined` — always guard. +- ESLint: `@typescript-eslint/no-unused-vars: error` with `argsIgnorePattern: ^_`. +- `react-refresh/only-export-components: warn` (off for `src/components/ui/`). +- No `as any`, no `// @ts-ignore` — project convention (per CONTRIBUTING.md). + +## Testing + +- Vitest with `happy-dom` environment (`vitest.config.ts`). 
+- Tests in `__tests__/` dirs next to source: `src/lib/__tests__/`, `src/stores/__tests__/`, `src/features/*/__tests__/`. +- `test-setup.ts` imports `@testing-library/jest-dom/vitest`. +- No jsdom — happy-dom only. Some DOM APIs may differ. + +## Prettier + +```json +{ "semi": false, "singleQuote": true, "tabWidth": 2, "trailingComma": "all", "printWidth": 100 } +``` + +## OpenCode Workflow + +- Work on `opencode` branch only. Never commit to `main` (`.opencode/rules.md`). +- Never push to remote without asking. +- OpenSpec workflow in `openspec/` dir. +- Plugins in `.opencode/plugins/`, skills in `.opencode/skills/`. diff --git a/opencode.json b/opencode.json new file mode 100644 index 0000000..9cc8361 --- /dev/null +++ b/opencode.json @@ -0,0 +1,4 @@ +{ + "$schema": "https://opencode.ai/config.json", + "lsp": true +} diff --git a/openspec/changes/archive/2026-05-07-dual-stream-mindmap/.openspec.yaml b/openspec/changes/archive/2026-05-07-dual-stream-mindmap/.openspec.yaml new file mode 100644 index 0000000..2188dbd --- /dev/null +++ b/openspec/changes/archive/2026-05-07-dual-stream-mindmap/.openspec.yaml @@ -0,0 +1,2 @@ +schema: spec-driven +created: 2026-05-06 diff --git a/openspec/changes/archive/2026-05-07-dual-stream-mindmap/design.md b/openspec/changes/archive/2026-05-07-dual-stream-mindmap/design.md new file mode 100644 index 0000000..8bd9896 --- /dev/null +++ b/openspec/changes/archive/2026-05-07-dual-stream-mindmap/design.md @@ -0,0 +1,101 @@ +## Context + +Current mindmap generation is a two-phase batch process: + +``` +User asks -> AI streams answer -> answer complete -> 5s debounce + -> separate LLM call (stream: false, full/incremental) + -> parse JSON -> update mindmap tree +``` + +Problems: +- **Latency**: 10-30s extra wait after chat completes +- **Quality**: One LLM call must read full conversation + extract concepts + build hierarchy + output valid JSON - too many tasks +- **UX gap**: No real-time mindmap growth during conversation +- **Token 
waste**: Separate generation call re-reads conversation history already seen by chat model +- **Node ID fragility**: Incremental mode requires LLM to output exact node IDs - any mismatch fails silently + +The proposal calls for a dual-stream approach where every chat response simultaneously produces both a natural language answer and structured knowledge, with mindmap-as-context replacing raw conversation history for long-term memory. + +## Goals / Non-Goals + +**Goals:** +- Chat responses carry both Markdown answer and structured knowledge JSON in a single streaming call +- Mindmap updates in real-time as each chat response arrives (no separate generation step) +- Mindmap tree replaces raw conversation history as LLM long-term context (hybrid: tree + last 1-2 raw rounds) +- Merge algorithm handles dedup and path matching (eliminate fragile node ID dependency) +- Graceful fallback when LLM does not output knowledge blocks +- Backward compatible: existing corpus + manual full rebuild still works + +**Non-Goals:** +- Realtime streaming of individual knowledge nodes during LLM response (knowledge is extracted only after the complete response) +- Cross-mindmap knowledge linking +- Multi-modal knowledge extraction (images, audio) +- Plugin/extensibility system for custom extractors + +## Decisions + +### D1: Knowledge block via post-pended delimiter (not interleaved) + +**Approach**: The LLM outputs normal Markdown answer text, then appends `...` at the end. + +| Option | Pro | Con | +|--------|-----|-----| +| Post-pended delimiter | Simple, answer streams normally | Knowledge only arrives at end of response | +| Interleaved JSON | Knowledge arrives mid-stream | Complex streaming JSON parser needed | +| Function calling | Structured, reliable | Requires function-calling API, not universal | +| Separate API call | Full streaming, no prompt change | 2x API cost, extra latency | + +**Decision**: Post-pended delimiter. The simpler approach wins for v1. 
Knowledge arriving at end is acceptable since typical response time is 5-15s. + +### D2: Algorithmic merge (not LLM-based) + +Knowledge blocks from each response are independent. Merging them into the existing mindmap tree is an algorithmic task: +- Match existing tree nodes by `category` path + fuzzy label comparison (edit distance < 0.3) +- Same path + same label -> update node (unless editedByUser) +- Same path + different label -> add as sibling +- Different path + same/similar concept -> treat as independent branch + +**Why not LLM**: Tree merge is a deterministic tree operation. Using an LLM would add latency, cost, and potential inconsistency. + +### D3: Mindmap-as-context with hybrid strategy + +LLM context = mindmap tree serialized as Markdown + last 1-2 raw Q&A rounds. + +Rule: +- If mindmap.tree is empty -> pass full raw conversation history (legacy behavior) +- If mindmap.tree is non-empty -> pass tree + last 2 messages + +The mindmap tree is serialized as flat Markdown headings (same format as treeToMarkdown()). Token cost: ~8 tokens per node (vs ~50 tokens per raw message). + +### D4: Knowledge data model + +```typescript +interface KnowledgeNode { + label: string // concept name + category: string[] // hierarchical path, e.g. ["React", "Hooks"] + summary: string // one-line description + content?: string // optional Markdown content + contentType?: text | markdown +} +``` + +The `category` path replaces fragile deterministic node IDs as the primary location mechanism. This is more robust because: +- LLM can express paths naturally using concept names +- Fuzzy matching allows for minor wording variations +- New paths create new branches, no ID collision + +### D5: Fallback strategy + +After each chat response, if no `` block is detected in the stream, schedule the existing batch generation flow with a reduced 2s debounce (down from 5s). This ensures backward compatibility with models that do not follow the knowledge extraction instruction. 
+ +## Risks / Trade-offs + +| Risk | Impact | Mitigation | +|------|--------|------------| +| LLM does not output knowledge block | No mindmap updates | Fallback to batch generation | +| Knowledge JSON malformed | Parse error, node lost | Try partial parse; fallback to batch for this response only | +| Category path inconsistent across responses | Duplicate branches | Merge algorithm with fuzzy path matching | +| Knowledge extraction distracts from answer quality | Poorer chat responses | A/B test with/without extraction prompt; keep extraction instructions minimal | +| Mindmap serialization in context consumes prompt tokens | Higher per-request cost | Token monitoring; cap at 200 nodes serialized | +| User expects single batch-style generation (all at once) | Confusion with incremental growth | Toast on first dual-stream response explaining real-time updates | diff --git a/openspec/changes/archive/2026-05-07-dual-stream-mindmap/proposal.md b/openspec/changes/archive/2026-05-07-dual-stream-mindmap/proposal.md new file mode 100644 index 0000000..56bbe14 --- /dev/null +++ b/openspec/changes/archive/2026-05-07-dual-stream-mindmap/proposal.md @@ -0,0 +1,36 @@ +## Why + +Current mindmap generation is a two-phase batch process: chat completes -> user triggers -> separate LLM call generates mindmap. Dual-stream output eliminates the separate generation step: every chat response carries a full updated mindmap in the same API call, with zero extra latency. Using JSON mode (response_format: "json_object") ensures the mindmap JSON is always valid. The accumulated mindmap tree replaces raw conversation history as LLM context, reducing prompt tokens by 60-80%. 
+ +## What Changes + +- **Single-call mindmap output**: Chat response includes both answer text and full updated mindmap JSON in one API call +- **JSON mode**: Providers supporting response_format: "json_object" output {"answer": "...", "mindmap": {"nodes": [...]}} - guaranteed valid JSON +- **Fallback marker mode**: For providers without JSON mode, delimiters separate answer from mindmap +- **Mindmap-as-context**: The existing mindmap tree (not raw history) is fed to the LLM as context, with last recent messages retained +- **Simplified data model**: Corpus, batch generation, knowledge-applier, and all intermediate types removed +- **Edited node preservation**: findEditedNodes/mergeEditedNodes preserve user-edited nodes across regenerations +- Auto-triggered generation (5s debounce) and corpus curation are **removed** + +## Capabilities + +### New Capabilities + +- `full-mindmap-output`: Single API call produces both chat answer and complete updated mindmap tree +- `mindmap-as-context`: Using accumulated mindmap tree as LLM context instead of raw conversation history + +### Modified Capabilities + +- `chat-interface`: Response is now non-streaming single call; JSON mode with answer/mindmap fields; fallback marker parsing +- `mindmap-generation`: Full tree output replaces batch generation; editedByUser nodes preserved via findEditedNodes/mergeEditedNodes +- `mindmap-corpus`: **REMOVED** - corpus and all related UI/data are deleted +- `conversation-management`: Hybrid context construction (mindmap tree + last messages) replaces full history + +## Impact + +- src/features/chat/ChatPage.tsx: JSON mode + marker mode response processing; mindmap-as-context; association dialog +- src/lib/llm-client.ts: chat() with useJsonMode parameter +- src/lib/mindmap-generator.ts: buildFullMindmapPrompt() dual-mode; parseJsonToTree with comma repair; mindmapTreeToContext; findEditedNodes/mergeEditedNodes +- src/features/mindmap/MindMapPanel.tsx: Simplified - remove 
corpus/generate/settings; add linked conversation list +- src/stores/mindmapStore.ts: Remove corpus actions; simplify MindMap type +- src/types/mindmap.ts: Remove CorpusEntry, KnowledgeNode, IncrementalOperation, etc. diff --git a/openspec/changes/archive/2026-05-07-dual-stream-mindmap/specs/chat-interface/spec.md b/openspec/changes/archive/2026-05-07-dual-stream-mindmap/specs/chat-interface/spec.md new file mode 100644 index 0000000..fe710d6 --- /dev/null +++ b/openspec/changes/archive/2026-05-07-dual-stream-mindmap/specs/chat-interface/spec.md @@ -0,0 +1,39 @@ +## MODIFIED Requirements + +### Requirement: Streaming response display +The system SHALL display LLM responses as streaming Markdown text in the chat bubble. The stream SHALL be processed through a dual-stream parser that: +- Detects `` and `` delimiters +- Strips delimiters and their content from the displayed text +- Accumulates knowledge JSON between delimiters for mindmap application +- Renders only the non-knowledge portion as Markdown in the chat bubble + +**Change from previous**: Stream format changes from pure Markdown to Markdown with optional knowledge block. The knowledge block is invisible to the user. + +#### Scenario: Knowledge block stripped from display +- **WHEN** LLM streams `Some answer text.[...]` +- **THEN** chat bubble displays only `Some answer text.` with no visible marker or JSON + +#### Scenario: Knowledge block arrives before display content +- **WHEN** first stream chunk is `[{"label":"X"}]\n\nAnswer text` +- **THEN** system buffers knowledge block, applies it, and displays only `Answer text` + +### Requirement: Stop generation +The stop generation function SHALL also discard any partially-received knowledge block. If the knowledge block was partially buffered when generation stops, it SHALL be discarded and not applied to the mindmap. + +**Change from previous**: Knowledge blocks add partial state that must be cleaned up on abort. 
+ +#### Scenario: Stop during knowledge block +- **WHEN** user stops generation mid-stream while `<knowledge>[...` has been received but `</knowledge>` has not +- **THEN** partial knowledge buffer is discarded, mindmap is not updated with partial data + +### Requirement: Auto-sync mode (REMOVED) +**Reason**: Replaced by inline knowledge extraction during chat streaming. +**Migration**: Mindmap updates happen automatically during chat response, no separate auto-sync trigger needed. + +### Requirement: Monitored conversation auto-generation (REMOVED) +**Reason**: Replaced by inline knowledge extraction. Each monitored conversation response inherently updates the mindmap via its knowledge block. +**Migration**: No action needed. Mindmap updates are now implicit. + +### Requirement: Message display and layout +**Change**: The max-width of the chat area SHALL remain unchanged. The knowledge block stripping does not affect layout. + diff --git a/openspec/changes/archive/2026-05-07-dual-stream-mindmap/specs/conversation-management/spec.md b/openspec/changes/archive/2026-05-07-dual-stream-mindmap/specs/conversation-management/spec.md new file mode 100644 index 0000000..6d727de --- /dev/null +++ b/openspec/changes/archive/2026-05-07-dual-stream-mindmap/specs/conversation-management/spec.md @@ -0,0 +1,22 @@ +## MODIFIED Requirements + +### Requirement: Chat history construction for LLM context +The system SHALL construct the LLM context using a hybrid strategy when `mindmap-as-context` mode is active: + +- Serialized mindmap tree (as Markdown headings) +- Last 1-2 raw Q&A messages (for tone and wording nuance) +- Current user question + +When `mindmap-as-context` is OFF (legacy mode or mindmap.tree is empty): +- Full raw conversation history is passed as before + +**Change from previous**: Conversation history is no longer always passed in full. A hybrid context replaces it when mindmap is available.
+ +#### Scenario: Hybrid context with existing mindmap +- **WHEN** conversation has 20 messages and mindmap has 50 nodes +- **THEN** LLM context = mindmap serialization + last 2 messages + current question + +#### Scenario: Full history when mindmap is empty +- **WHEN** conversation has 20 messages and mindmap.tree is empty +- **THEN** LLM context = full 20 messages (legacy behavior) + diff --git a/openspec/changes/archive/2026-05-07-dual-stream-mindmap/specs/knowledge-stream-extraction/spec.md b/openspec/changes/archive/2026-05-07-dual-stream-mindmap/specs/knowledge-stream-extraction/spec.md new file mode 100644 index 0000000..5726d6c --- /dev/null +++ b/openspec/changes/archive/2026-05-07-dual-stream-mindmap/specs/knowledge-stream-extraction/spec.md @@ -0,0 +1,99 @@ +## Purpose + +Enable real-time extraction of structured knowledge from streaming chat responses. Every chat response carries a knowledge block alongside the natural language answer, eliminating the need for a separate mindmap generation step. + +## Requirements + +### Requirement: Knowledge block format +The system SHALL define a knowledge block format appended to each chat response. 
The knowledge block SHALL use the following delimiter markers: + +- Start marker: `<knowledge>` +- End marker: `</knowledge>` +- Content between markers SHALL be valid JSON array of KnowledgeNode objects + +KnowledgeNode schema: +```typescript +interface KnowledgeNode { + label: string // concept name, max 30 chars + category: string[] // hierarchical path from root to this concept + summary: string // one-line description, max 100 chars + content?: string // optional detailed Markdown content + contentType?: "text" | "markdown" +} +``` + +#### Scenario: Well-formed knowledge block +- **WHEN** LLM response contains `<knowledge>[{"label":"useState","category":["React","Hooks"],"summary":"..."}]</knowledge>` +- **THEN** system parses the JSON array and extracts each KnowledgeNode for mindmap application + +#### Scenario: Empty knowledge block +- **WHEN** LLM response contains `<knowledge>[]</knowledge>` +- **THEN** system recognizes the block but applies zero nodes (no mindmap update needed) + +#### Scenario: No knowledge block in response +- **WHEN** LLM response does not contain `<knowledge>` markers +- **THEN** system falls back to the batch generation flow with 2s debounce + +### Requirement: Stream parsing state machine +The chat stream handler SHALL implement a state machine to detect, buffer, and strip knowledge blocks during streaming.
+ +States: +- `normal`: Buffering chat text for display; scanning for `<knowledge>` +- `knowledge`: Buffering content between `<knowledge>` and `</knowledge>`; NOT displaying this content to user +- `complete`: Full knowledge block received; parsed JSON applied to mindmap + +#### Scenario: State transition on start marker +- **WHEN** stream chunk contains `<knowledge>` +- **THEN** system transitions from `normal` to `knowledge`, strips the marker from display content, and begins buffering the knowledge JSON + +#### Scenario: State transition on end marker +- **WHEN** stream chunk contains `</knowledge>` +- **THEN** system transitions from `knowledge` to `complete`, parses the buffered JSON, and updates the mindmap + +#### Scenario: Knowledge block split across multiple chunks +- **WHEN** `<knowledge>` and `</knowledge>` arrive in different stream chunks +- **THEN** system correctly accumulates the partial JSON across chunks and only parses when end marker is received + +### Requirement: Knowledge extraction prompt injection +The system SHALL inject knowledge extraction instructions into the chat system prompt. The instructions SHALL be appended after the user-configured system prompt. + +The injected instructions SHALL: +- Define the `<knowledge>...</knowledge>` delimiter format +- Provide the KnowledgeNode schema +- Instruct the LLM to include every distinct concept mentioned in the answer +- Instruct the LLM to use consistent `category` paths across responses for the same concept +- Instruct the LLM to omit the knowledge block entirely if no substantive concepts are mentioned + +#### Scenario: Dual prompt with existing system prompt +- **WHEN** user has configured a custom system prompt "You are a React expert" +- **THEN** the effective prompt becomes: user prompt + knowledge extraction instructions +- **AND** the knowledge extraction instructions do not override the user system prompt + +### Requirement: Knowledge node dedup and merge +System SHALL apply knowledge nodes to the mindmap tree using algorithmic merge (not LLM). Merge rules: + +1.
For each KnowledgeNode, traverse the tree following its `category` path +2. At each path level, find matching node by fuzzy label comparison (Levenshtein distance < 30% of label length) +3. If matching node exists at leaf level: update label/summary/content (unless `editedByUser`) +4. If no matching node at leaf level: create new node +5. If intermediate path node missing: create intermediate nodes + +#### Scenario: Add new concept to existing branch +- **WHEN** knowledge node `{label:"useEffect", category:["React","Hooks"]}` arrives and tree has `React > Hooks` with no `useEffect` child +- **THEN** system adds `useEffect` as a child of `Hooks` node + +#### Scenario: Update existing concept +- **WHEN** knowledge node `{label:"useState", category:["React","Hooks"], summary:"Updated summary"}` and `React > Hooks > useState` already exists +- **THEN** system updates summary to "Updated summary" (unless editedByUser) + +#### Scenario: Create new branch for new category path +- **WHEN** knowledge node `{label:"ref", category:["Vue"]}` and no `Vue` node exists +- **THEN** system creates `Vue` as a new root node with `ref` as child + +#### Scenario: Fuzzy match for minor label variation +- **WHEN** knowledge node `{label:"useState Hook", category:["React","Hooks"]}` and existing node label is `useState` +- **THEN** fuzzy comparison (edit distance < 30%) matches them as same concept + +#### Scenario: editedByUser protection +- **WHEN** knowledge node tries to update a node where `editedByUser === true` +- **THEN** system skips the update and preserves the user-edited content diff --git a/openspec/changes/archive/2026-05-07-dual-stream-mindmap/specs/mindmap-as-context/spec.md b/openspec/changes/archive/2026-05-07-dual-stream-mindmap/specs/mindmap-as-context/spec.md new file mode 100644 index 0000000..dab6b24 --- /dev/null +++ b/openspec/changes/archive/2026-05-07-dual-stream-mindmap/specs/mindmap-as-context/spec.md @@ -0,0 +1,64 @@ +## Purpose + +Use the accumulated mindmap 
tree as LLM long-term context instead of raw conversation history. This reduces prompt token usage by 60-80% while providing better-organized context for more accurate responses. + +## Requirements + +### Requirement: Mindmap tree serialization for context +The system SHALL serialize the mindmap tree into a compact Markdown format for use as LLM context. The serialization SHALL: +- Use Markdown headings (`#`/`##`/`###`) to represent tree depth +- Include node label and summary for each node +- Omit source tracking, content field, and metadata +- Limit to the first 200 nodes (configurable ceiling) +- Prefix with a header: "## Knowledge Graph Context" + +#### Scenario: Serialize tree to context +- **WHEN** mindmap has 3 nodes: `React` > `Hooks` > `useState` +- **THEN** serialized context looks like: `## Knowledge Graph Context\n# React\n## Hooks\n### useState -- basic state hook` + +#### Scenario: Truncate at node limit +- **WHEN** mindmap has 300 nodes and ceiling is 200 +- **THEN** serialization includes first 200 nodes (breadth-first), omits the rest, and appends "... 
(truncated)" + +### Requirement: Hybrid context construction +The system SHALL construct the LLM context as a hybrid of mindmap tree and recent raw conversation: + +- If `mindmap.tree` is empty: pass full raw conversation history (legacy behavior) +- If `mindmap.tree` is non-empty: pass serialized mindmap context + last 2 raw messages (1 user + 1 assistant) + +The hybrid context SHALL be structured as: +``` + + +## Recent Conversation Context + + +## Current Question + +``` + +#### Scenario: First message uses legacy history +- **WHEN** user sends first message and mindmap is empty +- **THEN** context includes full raw conversation history (empty in this case) + +#### Scenario: Subsequent message uses hybrid context +- **WHEN** user sends 5th message and mindmap has 10 nodes +- **THEN** context includes serialized mindmap + last 2 raw messages (messages 3-4) only + +#### Scenario: Tree exists but conversation is new +- **WHEN** user creates new conversation while mindmap already has nodes from earlier conversations +- **THEN** context still includes serialized mindmap (cross-conversation knowledge) + empty recent context + +### Requirement: Configurable context mode +The system SHALL provide a per-conversation toggle for context mode: +- `auto` (default): Use hybrid strategy based on mindmap.tree state +- `full-history`: Always pass full raw conversation history (legacy behavior) +- `mindmap-only`: Only pass mindmap tree, skip raw history entirely + +#### Scenario: Toggle to full-history mode +- **WHEN** user enables `full-history` mode in conversation settings +- **THEN** system ignores mindmap-as-context and passes full conversation history regardless of mindmap state + +#### Scenario: Toggle to mindmap-only mode +- **WHEN** user enables `mindmap-only` mode +- **THEN** system passes only the serialized mindmap context with no raw messages diff --git a/openspec/changes/archive/2026-05-07-dual-stream-mindmap/specs/mindmap-corpus/spec.md 
b/openspec/changes/archive/2026-05-07-dual-stream-mindmap/specs/mindmap-corpus/spec.md new file mode 100644 index 0000000..26cfaef --- /dev/null +++ b/openspec/changes/archive/2026-05-07-dual-stream-mindmap/specs/mindmap-corpus/spec.md @@ -0,0 +1,24 @@ +## MODIFIED Requirements + +### Requirement: MindMap corpus field +The `MindMap.corpus` field is retained as a secondary data source. Its role changes: + +- **Inline extraction**: New knowledge from chat responses bypasses the corpus entirely. Knowledge blocks go directly into the mindmap tree without creating CorpusEntry records. +- **Manual batch rebuild**: The corpus remains the input for the "从语料重构" feature. Users can still curate corpus entries and trigger full regeneration. +- **Monitoring**: The `monitoredConversationIds` field is retained. Monitored conversations have their responses automatically extracted via knowledge blocks (no corpus entry created). + +**Change from previous**: Corpus is no longer the primary pathway for live mindmap generation. It becomes a curation tool for manual rebuilds only. + +#### Scenario: New chat response adds knowledge directly +- **WHEN** AI responds in a monitored conversation +- **THEN** knowledge block is parsed and applied to mindmap directly (no CorpusEntry created, no corpus involved) + +#### Scenario: User curates corpus for manual rebuild +- **WHEN** user explicitly adds messages to corpus via "加入语料库" button +- **THEN** CorpusEntry is created as before. These entries are available for manual "从语料重构". + +### Requirement: Corpus UI in mindmap panel +The corpus list UI is retained but its purpose shifts from "source for live generation" to "source for manual rebuild curation". The corpus panel SHALL show only manually-curated entries (not auto-generated from monitored conversations). + +**Change from previous**: Corpus list no longer auto-populates from monitored conversations. 
+ diff --git a/openspec/changes/archive/2026-05-07-dual-stream-mindmap/specs/mindmap-generation/spec.md b/openspec/changes/archive/2026-05-07-dual-stream-mindmap/specs/mindmap-generation/spec.md new file mode 100644 index 0000000..501a008 --- /dev/null +++ b/openspec/changes/archive/2026-05-07-dual-stream-mindmap/specs/mindmap-generation/spec.md @@ -0,0 +1,42 @@ +## MODIFIED Requirements + +### Requirement: Generate mindmap from conversation history +The system SHALL generate mindmaps through TWO pathways: + +1. **Inline extraction (primary)**: Every chat response carries a knowledge block with structured concepts. These are merged into the mindmap tree algorithmically in real-time. This is the primary generation pathway. +2. **Batch generation (fallback/manual)**: The existing `generateMindmap()` function is retained for manual "从语料重构" and as fallback when knowledge blocks are absent. This uses the full corpus + conversation history as input. + +**Change from previous**: The primary generation pathway shifts from batch LLM call to inline extraction. Batch generation becomes a secondary path. + +#### Scenario: Inline extraction updates mindmap +- **WHEN** chat response completes and knowledge block was parsed +- **THEN** knowledge nodes are applied to mindmap tree immediately (no trigger, no debounce, no second API call) + +#### Scenario: Manual batch rebuild still works +- **WHEN** user clicks "从语料重构" in MindMapPanel +- **THEN** system uses existing `generateMindmap()` with full corpus + `forceFullRebuild: true` + +### Requirement: Incremental update via full regeneration (MODIFIED) +Incremental mode is no longer used for live updates. It is retained only within the manual batch generation path. + +**Change from previous**: Incremental mode scope reduced to manual batch generation only. 
+
+### Requirement: Generation state management (MODIFIED)
+For inline extraction, the generation state is simpler:
+- `idle`: No response in progress
+- `streaming`: Chat response streaming, knowledge may be buffered
+- `complete`: Chat response done, knowledge applied (if any)
+- `error`: Chat response failed, no knowledge applied
+
+The MindMapPanel no longer shows a separate "generating" state for inline extraction. Mindmap updates are silent.
+
+**Change from previous**: Generation state is now unified with chat state. No separate generation progress UI for inline extraction.
+
+### Requirement: Monitored conversation auto-generation (REMOVED)
+**Reason**: Replaced by inline knowledge extraction. Every response inherently updates the mindmap.
+**Migration**: No action needed. Auto-generation logic and 5s debounce are removed.
+
+### Requirement: Auto-sync mode (REMOVED)
+**Reason**: Inline extraction makes auto-sync unnecessary.
+**Migration**: Remove `autoSync` flag and related UI.
+
diff --git a/openspec/changes/archive/2026-05-07-dual-stream-mindmap/tasks.md b/openspec/changes/archive/2026-05-07-dual-stream-mindmap/tasks.md
new file mode 100644
index 0000000..621aa3b
--- /dev/null
+++ b/openspec/changes/archive/2026-05-07-dual-stream-mindmap/tasks.md
@@ -0,0 +1,68 @@
+## 1. Core Types & Removals
+
+- [x] 1.1 Remove CorpusEntry, IncrementalOperation, and obsolete types from src/types/mindmap.ts
+- [x] 1.2 Remove KnowledgeNode type (superseded by full tree output)
+- [x] 1.3 Delete src/lib/knowledge-applier.ts (superseded by LLM full-tree output)
+- [x] 1.4 Simplify MindMap type: remove corpus, generatorProviderId, generatorModelId, forceFullRebuild, lastGeneratedAt, maxDepth
+
+## 2. Full Mindmap Prompt
+
+- [x] 2.1 Add buildFullMindmapPrompt(useJsonMode) — dual-mode prompt (JSON vs marker-based)
+- [x] 2.2 JSON mode prompt: LLM outputs {"answer": "...", "mindmap": {"nodes": [...]}}
+- [x] 2.3 Fallback mode prompt: LLM outputs the answer text followed by the mindmap JSON wrapped in marker delimiters
+- [x] 2.4 Preserve editedByUser nodes via findEditedNodes / mergeEditedNodes
+- [x] 2.5 Remove node count, depth, and breadth limits from prompt
+
+## 3. JSON Mode (response_format)
+
+- [x] 3.1 Add useJsonMode param to chat() in src/lib/llm-client.ts
+- [x] 3.2 Set response_format: { type: "json_object" } when provider supports it
+- [x] 3.3 Parse {"answer": "...", "mindmap": {"nodes": [...]}} in ChatPage.tsx
+- [x] 3.4 Fallback to marker parsing when JSON mode unavailable
+- [x] 3.5 Add trailing comma repair in parseJsonToTree() for robustness
+
+## 4. Stream Handler (Non-Streaming for Debugging)
+
+- [x] 4.1 Switch from streamChatWithRetry to chat() (non-streaming) for reliable JSON parsing
+- [x] 4.2 Detect markers in fallback mode
+- [x] 4.3 Strip knowledge content from displayed chat text
+- [x] 4.4 Apply parsed tree to mindmap store with editedByUser preservation
+- [x] 4.5 Handle abort: discard partial response
+
+## 5. Mindmap-as-Context
+
+- [x] 5.1 Implement mindmapTreeToContext() — serialize tree as Markdown headings
+- [x] 5.2 Mark editedByUser nodes with [用户编辑] in context
+- [x] 5.3 Combine system prompt + mindmap context into single system message
+- [x] 5.4 Include last raw messages for wording nuance
+
+## 6. Remove Legacy Features
+
+- [x] 6.1 Remove 5s debounce timer logic from ChatPage.tsx
+- [x] 6.2 Remove auto-triggered generateMindmap() call
+- [x] 6.3 Remove corpus UI from MindMapPanel.tsx
+- [x] 6.4 Remove "更新图谱" button and handleGenerate()
+- [x] 6.5 Remove "加入语料库" button from MessageBubble.tsx
+- [x] 6.6 Remove generator model settings (generatorProviderId/ModelId)
+- [x] 6.7 Remove maxDepth selector and forceFullRebuild checkbox
+
+## 7.
Mindmap Association UX + +- [x] 7.1 NewConversationDialog: support "不关联 / 已有图谱 / 创建新图谱" radio buttons +- [x] 7.2 ChatPage: link conversation to mindmap via addMonitoredConversation on creation +- [x] 7.3 MindMapPanel: show linked conversations list with add/remove +- [x] 7.4 Fix Base UI SelectValue showing ID instead of title + +## 8. Layout & Polish + +- [x] 8.1 Increase dagre spacing: nodesep 100, ranksep 180, edgesep 30, marginx/y 80 +- [x] 8.2 Update MindMapTree empty state text +- [x] 8.3 Remove debug console.log statements (left for development) + +## 9. Testing + +- [x] 9.1 Rewrite mindmap-generator.test.ts for new functions +- [x] 9.2 Remove corpus operation tests from mindmapStore.test.ts +- [x] 9.3 TypeScript strict check clean (npx tsc --noEmit) +- [x] 9.4 All 73 tests passing +- [x] 9.5 ESLint clean diff --git a/package-lock.json b/package-lock.json index ac8eb54..2556de0 100644 --- a/package-lock.json +++ b/package-lock.json @@ -62,65 +62,6 @@ "dev": true, "license": "MIT" }, - "node_modules/@asamuzakjp/css-color": { - "version": "5.1.11", - "resolved": "https://repo.huaweicloud.com/repository/npm/@asamuzakjp/css-color/-/css-color-5.1.11.tgz", - "integrity": "sha512-KVw6qIiCTUQhByfTd78h2yD1/00waTmm9uy/R7Ck/ctUyAPj+AEDLkQIdJW0T8+qGgj3j5bpNKK7Q3G+LedJWg==", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "@asamuzakjp/generational-cache": "^1.0.1", - "@csstools/css-calc": "^3.2.0", - "@csstools/css-color-parser": "^4.1.0", - "@csstools/css-parser-algorithms": "^4.0.0", - "@csstools/css-tokenizer": "^4.0.0" - }, - "engines": { - "node": "^20.19.0 || ^22.12.0 || >=24.0.0" - } - }, - "node_modules/@asamuzakjp/dom-selector": { - "version": "7.1.1", - "resolved": "https://repo.huaweicloud.com/repository/npm/@asamuzakjp/dom-selector/-/dom-selector-7.1.1.tgz", - "integrity": "sha512-67RZDnYRc8H/8MLDgQCDE//zoqVFwajkepHZgmXrbwybzXOEwOWGPYGmALYl9J2DOLfFPPs6kKCqmbzV895hTQ==", - "dev": true, - "license": "MIT", - 
"optional": true, - "peer": true, - "dependencies": { - "@asamuzakjp/generational-cache": "^1.0.1", - "@asamuzakjp/nwsapi": "^2.3.9", - "bidi-js": "^1.0.3", - "css-tree": "^3.2.1", - "is-potential-custom-element-name": "^1.0.1" - }, - "engines": { - "node": "^20.19.0 || ^22.12.0 || >=24.0.0" - } - }, - "node_modules/@asamuzakjp/generational-cache": { - "version": "1.0.1", - "resolved": "https://repo.huaweicloud.com/repository/npm/@asamuzakjp/generational-cache/-/generational-cache-1.0.1.tgz", - "integrity": "sha512-wajfB8KqzMCN2KGNFdLkReeHncd0AslUSrvHVvvYWuU8ghncRJoA50kT3zP9MVL0+9g4/67H+cdvBskj9THPzg==", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "engines": { - "node": "^20.19.0 || ^22.12.0 || >=24.0.0" - } - }, - "node_modules/@asamuzakjp/nwsapi": { - "version": "2.3.9", - "resolved": "https://repo.huaweicloud.com/repository/npm/@asamuzakjp/nwsapi/-/nwsapi-2.3.9.tgz", - "integrity": "sha512-n8GuYSrI9bF7FFZ/SjhwevlHc8xaVlb/7HmHelnc/PZXBD2ZR49NnN9sMMuDdEGPeeRQ5d0hqlSlEpgCX3Wl0Q==", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true - }, "node_modules/@babel/code-frame": { "version": "7.29.0", "resolved": "https://repo.huaweicloud.com/repository/npm/@babel/code-frame/-/code-frame-7.29.0.tgz", @@ -627,173 +568,6 @@ } } }, - "node_modules/@bramus/specificity": { - "version": "2.4.2", - "resolved": "https://repo.huaweicloud.com/repository/npm/@bramus/specificity/-/specificity-2.4.2.tgz", - "integrity": "sha512-ctxtJ/eA+t+6q2++vj5j7FYX3nRu311q1wfYH3xjlLOsczhlhxAg2FWNUXhpGvAw3BWo1xBcvOV6/YLc2r5FJw==", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "css-tree": "^3.0.0" - }, - "bin": { - "specificity": "bin/cli.js" - } - }, - "node_modules/@csstools/color-helpers": { - "version": "6.0.2", - "resolved": "https://repo.huaweicloud.com/repository/npm/@csstools/color-helpers/-/color-helpers-6.0.2.tgz", - "integrity": 
"sha512-LMGQLS9EuADloEFkcTBR3BwV/CGHV7zyDxVRtVDTwdI2Ca4it0CCVTT9wCkxSgokjE5Ho41hEPgb8OEUwoXr6Q==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "optional": true, - "peer": true, - "engines": { - "node": ">=20.19.0" - } - }, - "node_modules/@csstools/css-calc": { - "version": "3.2.0", - "resolved": "https://repo.huaweicloud.com/repository/npm/@csstools/css-calc/-/css-calc-3.2.0.tgz", - "integrity": "sha512-bR9e6o2BDB12jzN/gIbjHa5wLJ4UjD1CB9pM7ehlc0ddk6EBz+yYS1EV2MF55/HUxrHcB/hehAyt5vhsA3hx7w==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT", - "optional": true, - "peer": true, - "engines": { - "node": ">=20.19.0" - }, - "peerDependencies": { - "@csstools/css-parser-algorithms": "^4.0.0", - "@csstools/css-tokenizer": "^4.0.0" - } - }, - "node_modules/@csstools/css-color-parser": { - "version": "4.1.0", - "resolved": "https://repo.huaweicloud.com/repository/npm/@csstools/css-color-parser/-/css-color-parser-4.1.0.tgz", - "integrity": "sha512-U0KhLYmy2GVj6q4T3WaAe6NPuFYCPQoE3b0dRGxejWDgcPp8TP7S5rVdM5ZrFaqu4N67X8YaPBw14dQSYx3IyQ==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "@csstools/color-helpers": "^6.0.2", - "@csstools/css-calc": "^3.2.0" - }, - "engines": { - "node": ">=20.19.0" - }, - "peerDependencies": { - "@csstools/css-parser-algorithms": "^4.0.0", - "@csstools/css-tokenizer": "^4.0.0" - } - }, - "node_modules/@csstools/css-parser-algorithms": { - "version": "4.0.0", - "resolved": 
"https://repo.huaweicloud.com/repository/npm/@csstools/css-parser-algorithms/-/css-parser-algorithms-4.0.0.tgz", - "integrity": "sha512-+B87qS7fIG3L5h3qwJ/IFbjoVoOe/bpOdh9hAjXbvx0o8ImEmUsGXN0inFOnk2ChCFgqkkGFQ+TpM5rbhkKe4w==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT", - "optional": true, - "peer": true, - "engines": { - "node": ">=20.19.0" - }, - "peerDependencies": { - "@csstools/css-tokenizer": "^4.0.0" - } - }, - "node_modules/@csstools/css-syntax-patches-for-csstree": { - "version": "1.1.3", - "resolved": "https://repo.huaweicloud.com/repository/npm/@csstools/css-syntax-patches-for-csstree/-/css-syntax-patches-for-csstree-1.1.3.tgz", - "integrity": "sha512-SH60bMfrRCJF3morcdk57WklujF4Jr/EsQUzqkarfHXEFcAR1gg7fS/chAE922Sehgzc1/+Tz5H3Ypa1HiEKrg==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "optional": true, - "peer": true, - "peerDependencies": { - "css-tree": "^3.2.1" - }, - "peerDependenciesMeta": { - "css-tree": { - "optional": true - } - } - }, - "node_modules/@csstools/css-tokenizer": { - "version": "4.0.0", - "resolved": "https://repo.huaweicloud.com/repository/npm/@csstools/css-tokenizer/-/css-tokenizer-4.0.0.tgz", - "integrity": "sha512-QxULHAm7cNu72w97JUNCBFODFaXpbDg+dP8b/oWFAZ2MTRppA3U00Y2L1HqaS4J6yBqxwa/Y3nMBaxVKbB/NsA==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT", - "optional": true, - "peer": true, - "engines": { - "node": ">=20.19.0" - } - }, "node_modules/@dagrejs/dagre": { "version": "3.0.0", "resolved": 
"https://repo.huaweicloud.com/repository/npm/@dagrejs/dagre/-/dagre-3.0.0.tgz", @@ -1600,26 +1374,6 @@ "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, - "node_modules/@exodus/bytes": { - "version": "1.15.0", - "resolved": "https://repo.huaweicloud.com/repository/npm/@exodus/bytes/-/bytes-1.15.0.tgz", - "integrity": "sha512-UY0nlA+feH81UGSHv92sLEPLCeZFjXOuHhrIo0HQydScuQc8s0A7kL/UdgwgDq8g8ilksmuoF35YVTNphV2aBQ==", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "engines": { - "node": "^20.19.0 || ^22.12.0 || >=24.0.0" - }, - "peerDependencies": { - "@noble/hashes": "^1.8.0 || ^2.0.0" - }, - "peerDependenciesMeta": { - "@noble/hashes": { - "optional": true - } - } - }, "node_modules/@floating-ui/core": { "version": "1.7.5", "resolved": "https://repo.huaweicloud.com/repository/npm/@floating-ui/core/-/core-1.7.5.tgz", @@ -3657,18 +3411,6 @@ "node": ">=6.0.0" } }, - "node_modules/bidi-js": { - "version": "1.0.3", - "resolved": "https://repo.huaweicloud.com/repository/npm/bidi-js/-/bidi-js-1.0.3.tgz", - "integrity": "sha512-RKshQI1R3YQ+n9YJz2QQ147P66ELpa1FQEg20Dk8oW9t2KgLbpDLLp9aGZ7y8WHSshDknG0bknqGw5/tyCs5tw==", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "require-from-string": "^2.0.2" - } - }, "node_modules/body-parser": { "version": "2.2.2", "resolved": "https://repo.huaweicloud.com/repository/npm/body-parser/-/body-parser-2.2.2.tgz", @@ -4196,22 +3938,6 @@ "node": ">= 8" } }, - "node_modules/css-tree": { - "version": "3.2.1", - "resolved": "https://repo.huaweicloud.com/repository/npm/css-tree/-/css-tree-3.2.1.tgz", - "integrity": "sha512-X7sjQzceUhu1u7Y/ylrRZFU2FS6LRiFVp6rKLPg23y3x3c3DOKAwuXGDp+PAGjh6CSnCjYeAul8pcT8bAl+lSA==", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "mdn-data": "2.27.1", - "source-map-js": "^1.2.1" - }, - "engines": { - "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0" - } - }, "node_modules/css.escape": { "version": "1.5.1", 
"resolved": "https://repo.huaweicloud.com/repository/npm/css.escape/-/css.escape-1.5.1.tgz", @@ -4351,22 +4077,6 @@ "node": ">= 12" } }, - "node_modules/data-urls": { - "version": "7.0.0", - "resolved": "https://repo.huaweicloud.com/repository/npm/data-urls/-/data-urls-7.0.0.tgz", - "integrity": "sha512-23XHcCF+coGYevirZceTVD7NdJOqVn+49IHyxgszm+JIiHLoB2TkmPtsYkNWT1pvRSGkc35L6NHs0yHkN2SumA==", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "whatwg-mimetype": "^5.0.0", - "whatwg-url": "^16.0.0" - }, - "engines": { - "node": "^20.19.0 || ^22.12.0 || >=24.0.0" - } - }, "node_modules/debug": { "version": "4.4.3", "resolved": "https://repo.huaweicloud.com/repository/npm/debug/-/debug-4.4.3.tgz", @@ -4384,15 +4094,6 @@ } } }, - "node_modules/decimal.js": { - "version": "10.6.0", - "resolved": "https://repo.huaweicloud.com/repository/npm/decimal.js/-/decimal.js-10.6.0.tgz", - "integrity": "sha512-YpgQiITW3JXGntzdUmyUR1V812Hn8T1YVXhCu+wO3OpS4eU9l4YdD3qjyiKdV6mvV29zapkMeD390UVEf2lkUg==", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true - }, "node_modules/decode-named-character-reference": { "version": "1.3.0", "resolved": "https://repo.huaweicloud.com/repository/npm/decode-named-character-reference/-/decode-named-character-reference-1.3.0.tgz", @@ -4616,21 +4317,6 @@ "node": ">=10.13.0" } }, - "node_modules/entities": { - "version": "8.0.0", - "resolved": "https://repo.huaweicloud.com/repository/npm/entities/-/entities-8.0.0.tgz", - "integrity": "sha512-zwfzJecQ/Uej6tusMqwAqU/6KL2XaB2VZ2Jg54Je6ahNBGNH6Ek6g3jjNCF0fG9EWQKGZNddNjU5F1ZQn/sBnA==", - "dev": true, - "license": "BSD-2-Clause", - "optional": true, - "peer": true, - "engines": { - "node": ">=20.19.0" - }, - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" - } - }, "node_modules/env-paths": { "version": "2.2.1", "resolved": "https://repo.huaweicloud.com/repository/npm/env-paths/-/env-paths-2.2.1.tgz", @@ -5851,21 +5537,6 @@ "node": 
">=16.9.0" } }, - "node_modules/html-encoding-sniffer": { - "version": "6.0.0", - "resolved": "https://repo.huaweicloud.com/repository/npm/html-encoding-sniffer/-/html-encoding-sniffer-6.0.0.tgz", - "integrity": "sha512-CV9TW3Y3f8/wT0BRFc1/KAVQ3TUHiXmaAb6VW9vtiMFf7SLoMd1PdAc4W3KFOFETBJUb90KatHqlsZMWV+R9Gg==", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "@exodus/bytes": "^1.6.0" - }, - "engines": { - "node": "^20.19.0 || ^22.12.0 || >=24.0.0" - } - }, "node_modules/html-to-image": { "version": "1.11.11", "resolved": "https://repo.huaweicloud.com/repository/npm/html-to-image/-/html-to-image-1.11.11.tgz", @@ -6197,15 +5868,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/is-potential-custom-element-name": { - "version": "1.0.1", - "resolved": "https://repo.huaweicloud.com/repository/npm/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz", - "integrity": "sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true - }, "node_modules/is-promise": { "version": "4.0.0", "resolved": "https://repo.huaweicloud.com/repository/npm/is-promise/-/is-promise-4.0.0.tgz", @@ -6308,61 +5970,6 @@ "js-yaml": "bin/js-yaml.js" } }, - "node_modules/jsdom": { - "version": "29.1.1", - "resolved": "https://repo.huaweicloud.com/repository/npm/jsdom/-/jsdom-29.1.1.tgz", - "integrity": "sha512-ECi4Fi2f7BdJtUKTflYRTiaMxIB0O6zfR1fX0GXpUrf6flp8QIYn1UT20YQqdSOfk2dfkCwS8LAFoJDEppNK5Q==", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "@asamuzakjp/css-color": "^5.1.11", - "@asamuzakjp/dom-selector": "^7.1.1", - "@bramus/specificity": "^2.4.2", - "@csstools/css-syntax-patches-for-csstree": "^1.1.3", - "@exodus/bytes": "^1.15.0", - "css-tree": "^3.2.1", - "data-urls": "^7.0.0", - "decimal.js": "^10.6.0", - "html-encoding-sniffer": "^6.0.0", - 
"is-potential-custom-element-name": "^1.0.1", - "lru-cache": "^11.3.5", - "parse5": "^8.0.1", - "saxes": "^6.0.0", - "symbol-tree": "^3.2.4", - "tough-cookie": "^6.0.1", - "undici": "^7.25.0", - "w3c-xmlserializer": "^5.0.0", - "webidl-conversions": "^8.0.1", - "whatwg-mimetype": "^5.0.0", - "whatwg-url": "^16.0.1", - "xml-name-validator": "^5.0.0" - }, - "engines": { - "node": "^20.19.0 || ^22.13.0 || >=24.0.0" - }, - "peerDependencies": { - "canvas": "^3.0.0" - }, - "peerDependenciesMeta": { - "canvas": { - "optional": true - } - } - }, - "node_modules/jsdom/node_modules/lru-cache": { - "version": "11.3.5", - "resolved": "https://repo.huaweicloud.com/repository/npm/lru-cache/-/lru-cache-11.3.5.tgz", - "integrity": "sha512-NxVFwLAnrd9i7KUBxC4DrUhmgjzOs+1Qm50D3oF1/oL+r1NpZ4gA7xvG0/zJ8evR7zIKn4vLf7qTNduWFtCrRw==", - "dev": true, - "license": "BlueOak-1.0.0", - "optional": true, - "peer": true, - "engines": { - "node": "20 || >=22" - } - }, "node_modules/jsesc": { "version": "3.1.0", "resolved": "https://repo.huaweicloud.com/repository/npm/jsesc/-/jsesc-3.1.0.tgz", @@ -7134,15 +6741,6 @@ "url": "https://opencollective.com/unified" } }, - "node_modules/mdn-data": { - "version": "2.27.1", - "resolved": "https://repo.huaweicloud.com/repository/npm/mdn-data/-/mdn-data-2.27.1.tgz", - "integrity": "sha512-9Yubnt3e8A0OKwxYSXyhLymGW4sCufcLG6VdiDdUGVkPhpqLxlvP5vl1983gQjJl3tqbrM731mjaZaP68AgosQ==", - "dev": true, - "license": "CC0-1.0", - "optional": true, - "peer": true - }, "node_modules/media-typer": { "version": "1.1.0", "resolved": "https://repo.huaweicloud.com/repository/npm/media-typer/-/media-typer-1.1.0.tgz", @@ -8299,21 +7897,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/parse5": { - "version": "8.0.1", - "resolved": "https://repo.huaweicloud.com/repository/npm/parse5/-/parse5-8.0.1.tgz", - "integrity": "sha512-z1e/HMG90obSGeidlli3hj7cbocou0/wa5HacvI3ASx34PecNjNQeaHNo5WIZpWofN9kgkqV1q5YvXe3F0FoPw==", - "dev": true, - "license": "MIT", - 
"optional": true, - "peer": true, - "dependencies": { - "entities": "^8.0.0" - }, - "funding": { - "url": "https://github.com/inikulin/parse5?sponsor=1" - } - }, "node_modules/parseurl": { "version": "1.3.3", "resolved": "https://repo.huaweicloud.com/repository/npm/parseurl/-/parseurl-1.3.3.tgz", @@ -9043,21 +8626,6 @@ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", "license": "MIT" }, - "node_modules/saxes": { - "version": "6.0.0", - "resolved": "https://repo.huaweicloud.com/repository/npm/saxes/-/saxes-6.0.0.tgz", - "integrity": "sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==", - "dev": true, - "license": "ISC", - "optional": true, - "peer": true, - "dependencies": { - "xmlchars": "^2.2.0" - }, - "engines": { - "node": ">=v12.22.7" - } - }, "node_modules/scheduler": { "version": "0.23.2", "resolved": "https://repo.huaweicloud.com/repository/npm/scheduler/-/scheduler-0.23.2.tgz", @@ -9516,15 +9084,6 @@ "node": ">=8" } }, - "node_modules/symbol-tree": { - "version": "3.2.4", - "resolved": "https://repo.huaweicloud.com/repository/npm/symbol-tree/-/symbol-tree-3.2.4.tgz", - "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true - }, "node_modules/tagged-tag": { "version": "1.0.0", "resolved": "https://repo.huaweicloud.com/repository/npm/tagged-tag/-/tagged-tag-1.0.0.tgz", @@ -9666,21 +9225,6 @@ "node": ">=16" } }, - "node_modules/tr46": { - "version": "6.0.0", - "resolved": "https://repo.huaweicloud.com/repository/npm/tr46/-/tr46-6.0.0.tgz", - "integrity": "sha512-bLVMLPtstlZ4iMQHpFHTR7GAGj2jxi8Dg0s2h2MafAE4uSWF98FC/3MomU51iQAMf8/qDUbKWf5GxuvvVcXEhw==", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "punycode": "^2.3.1" - }, - "engines": { - "node": ">=20" - } - }, 
"node_modules/trim-lines": { "version": "3.0.1", "resolved": "https://repo.huaweicloud.com/repository/npm/trim-lines/-/trim-lines-3.0.1.tgz", @@ -9846,18 +9390,6 @@ "node": ">=20" } }, - "node_modules/undici": { - "version": "7.25.0", - "resolved": "https://repo.huaweicloud.com/repository/npm/undici/-/undici-7.25.0.tgz", - "integrity": "sha512-xXnp4kTyor2Zq+J1FfPI6Eq3ew5h6Vl0F/8d9XU5zZQf1tX9s2Su1/3PiMmUANFULpmksxkClamIZcaUqryHsQ==", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "engines": { - "node": ">=20.18.1" - } - }, "node_modules/undici-types": { "version": "7.19.2", "resolved": "https://repo.huaweicloud.com/repository/npm/undici-types/-/undici-types-7.19.2.tgz", @@ -10269,21 +9801,6 @@ } } }, - "node_modules/w3c-xmlserializer": { - "version": "5.0.0", - "resolved": "https://repo.huaweicloud.com/repository/npm/w3c-xmlserializer/-/w3c-xmlserializer-5.0.0.tgz", - "integrity": "sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA==", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "xml-name-validator": "^5.0.0" - }, - "engines": { - "node": ">=18" - } - }, "node_modules/web-streams-polyfill": { "version": "3.3.3", "resolved": "https://repo.huaweicloud.com/repository/npm/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", @@ -10293,47 +9810,6 @@ "node": ">= 8" } }, - "node_modules/webidl-conversions": { - "version": "8.0.1", - "resolved": "https://repo.huaweicloud.com/repository/npm/webidl-conversions/-/webidl-conversions-8.0.1.tgz", - "integrity": "sha512-BMhLD/Sw+GbJC21C/UgyaZX41nPt8bUTg+jWyDeg7e7YN4xOM05YPSIXceACnXVtqyEw/LMClUQMtMZ+PGGpqQ==", - "dev": true, - "license": "BSD-2-Clause", - "optional": true, - "peer": true, - "engines": { - "node": ">=20" - } - }, - "node_modules/whatwg-mimetype": { - "version": "5.0.0", - "resolved": "https://repo.huaweicloud.com/repository/npm/whatwg-mimetype/-/whatwg-mimetype-5.0.0.tgz", - "integrity": 
"sha512-sXcNcHOC51uPGF0P/D4NVtrkjSU2fNsm9iog4ZvZJsL3rjoDAzXZhkm2MWt1y+PUdggKAYVoMAIYcs78wJ51Cw==", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "engines": { - "node": ">=20" - } - }, - "node_modules/whatwg-url": { - "version": "16.0.1", - "resolved": "https://repo.huaweicloud.com/repository/npm/whatwg-url/-/whatwg-url-16.0.1.tgz", - "integrity": "sha512-1to4zXBxmXHV3IiSSEInrreIlu02vUOvrhxJJH5vcxYTBDAx51cqZiKdyTxlecdKNSjj8EcxGBxNf6Vg+945gw==", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "@exodus/bytes": "^1.11.0", - "tr46": "^6.0.0", - "webidl-conversions": "^8.0.1" - }, - "engines": { - "node": "^20.19.0 || ^22.12.0 || >=24.0.0" - } - }, "node_modules/which": { "version": "4.0.0", "resolved": "https://repo.huaweicloud.com/repository/npm/which/-/which-4.0.0.tgz", @@ -10478,27 +9954,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/xml-name-validator": { - "version": "5.0.0", - "resolved": "https://repo.huaweicloud.com/repository/npm/xml-name-validator/-/xml-name-validator-5.0.0.tgz", - "integrity": "sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==", - "dev": true, - "license": "Apache-2.0", - "optional": true, - "peer": true, - "engines": { - "node": ">=18" - } - }, - "node_modules/xmlchars": { - "version": "2.2.0", - "resolved": "https://repo.huaweicloud.com/repository/npm/xmlchars/-/xmlchars-2.2.0.tgz", - "integrity": "sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true - }, "node_modules/y18n": { "version": "5.0.8", "resolved": "https://repo.huaweicloud.com/repository/npm/y18n/-/y18n-5.0.8.tgz", diff --git a/src/features/chat/ChatPage.tsx b/src/features/chat/ChatPage.tsx index 1f3adfa..02ec831 100644 --- a/src/features/chat/ChatPage.tsx +++ b/src/features/chat/ChatPage.tsx @@ -1,5 +1,4 @@ -import { 
useState, useCallback, useRef } from 'react' -import { toast } from 'sonner' +import { useState, useCallback } from 'react' import { MessageSquare, Settings, PanelLeft, X, Network, Archive } from 'lucide-react' import { Button } from '@/components/ui/button' import { Badge } from '@/components/ui/badge' @@ -8,8 +7,10 @@ import { useConversationStore } from '@/stores/conversationStore' import { useProviderStore } from '@/stores/providerStore' import { useChatStore } from '@/stores/chatStore' import { useMindmapStore } from '@/stores/mindmapStore' -import { createClient, streamChatWithRetry, isAbortError } from '@/lib/llm-client' -import { generateMindmap, parseJsonToTree } from '@/lib/mindmap-generator' +import { createClient, chat, isAbortError } from '@/lib/llm-client' +import { buildFullMindmapPrompt, mindmapTreeToContext, parseJsonToTree, findEditedNodes, mergeEditedNodes } from '@/lib/mindmap-generator' +import type { ChatCompletionMessageParam } from 'openai/resources/chat/completions' +import type { MindMapNode } from '@/types/mindmap' import ConversationSidebar, { ConversationSettingsDialog, } from '@/features/conversation/ConversationSidebar' @@ -26,14 +27,28 @@ import { generateId } from '@/lib/id' type View = 'chat' | 'providers' export default function ChatPage() { + const updateMindmapForConversation = useCallback( + (newTree: MindMapNode[], convId: string) => { + if (newTree.length === 0) return + const allMindmaps = useMindmapStore.getState().mindmaps + for (const mm of allMindmaps) { + if (!mm.monitoredConversationIds?.includes(convId)) continue + const editedNodes = findEditedNodes(mm.tree) + const merged = + editedNodes.length > 0 + ? 
mergeEditedNodes(newTree, editedNodes) + : newTree + useMindmapStore.getState().updateMindmapTree(mm.id, merged) + } + }, + [], + ) const [view, setView] = useState('chat') const [settingsOpen, setSettingsOpen] = useState(false) const [sidebarOpen, setSidebarOpen] = useState(true) const [newConvDialogOpen, setNewConvDialogOpen] = useState(false) const [mindmapCollapsed, setMindmapCollapsed] = useState(false) - const genTimersRef = useRef>>(new Map()) - const conversations = useConversationStore((s) => s.conversations) const activeConversationId = useConversationStore((s) => s.activeConversationId) const addConversation = useConversationStore((s) => s.addConversation) @@ -97,113 +112,94 @@ export default function ChatPage() { .concat(userMsg) .map((m) => ({ role: m.role as 'user' | 'assistant' | 'system', content: m.content })) - if (conv.systemPrompt) { - history.unshift({ role: 'system', content: conv.systemPrompt }) - } + const useJsonMode = prov.supportsJsonMode === true - let fullContent = '' + const effectiveSystemPrompt = conv.systemPrompt + ? 
`${conv.systemPrompt}\n\n${buildFullMindmapPrompt(useJsonMode)}` + : buildFullMindmapPrompt(useJsonMode) - for await (const chunk of streamChatWithRetry( - client, - { - model: conv.modelId, - messages: history, - signal: controller.signal, - }, - 1, - )) { - fullContent += chunk - updateMessageInConversation(conversationId, assistantMsg.id, { content: fullContent }) + const monitoredMindmap = useMindmapStore + .getState() + .mindmaps.find( + (m) => m.monitoredConversationIds?.includes(conversationId) && m.tree.length > 0, + ) + + let systemContent = effectiveSystemPrompt + if (monitoredMindmap) { + const treeCtx = mindmapTreeToContext(monitoredMindmap.tree) + if (treeCtx) { + systemContent += '\n\n' + treeCtx + } } - updateMessageInConversation(conversationId, assistantMsg.id, { - content: fullContent, - status: 'complete', - }) + const messages: ChatCompletionMessageParam[] = [ + { role: 'system', content: systemContent }, + ...history as ChatCompletionMessageParam[], + ] - if (controller.signal.aborted) return + let displayContent = '' + let accumulated = '' + + try { + const responseText = await chat(client, { + model: conv.modelId, + messages, + signal: controller.signal, + useJsonMode, + }) - const allMindmaps = useMindmapStore.getState().mindmaps - for (const mm of allMindmaps) { - if (!mm.monitoredConversationIds?.includes(conversationId)) continue + accumulated = responseText - const entry = { - id: generateId(), - messageId: assistantMsg.id, - enabled: true, - addedAt: Date.now(), - } - useMindmapStore.getState().addCorpusEntry(mm.id, entry) - - const timers = genTimersRef.current - const existing = timers.get(mm.id) - if (existing) clearTimeout(existing) - - timers.set( - mm.id, - setTimeout(async () => { - const stillGenerating = useChatStore.getState().isGenerating - if (stillGenerating) return - - const mindmapState = useMindmapStore.getState().mindmaps.find((m) => m.id === mm.id) - if (!mindmapState) return - - const linkedConvs = 
useConversationStore - .getState() - .conversations.filter((c) => mindmapState.monitoredConversationIds?.includes(c.id)) - - const generatorProvId = mindmapState.generatorProviderId ?? linkedConvs[0]?.providerId - const generatorModel = mindmapState.generatorModelId ?? linkedConvs[0]?.modelId - if (!generatorProvId || !generatorModel) return - - const prov = useProviderStore - .getState() - .providers.find((p) => p.id === generatorProvId) - if (!prov) return - - let toastId: string | number | undefined - try { - toastId = toast.loading(`正在为图谱「${mindmapState.title}」自动生成...`) - const client = createClient(prov) - let fullGenContent = '' - - for await (const chunk of generateMindmap( - client, - mindmapState, - mindmapState.corpus, - linkedConvs, - generatorModel, - undefined, - )) { - if (typeof chunk === 'object' && chunk !== null && 'sourceMap' in chunk) continue - fullGenContent += chunk as string - } + if (useJsonMode) { + try { + const parsed = JSON.parse(accumulated) as { answer?: string; mindmap?: { nodes?: unknown[] } } + displayContent = parsed.answer ?? 
accumulated - const tree = parseJsonToTree(fullGenContent) - useMindmapStore.getState().updateMindmapTree(mm.id, tree) - toast.success(`图谱「${mindmapState.title}」已自动更新`, { - id: toastId, - duration: Infinity, - cancel: { label: '✕', onClick: () => {} }, - }) - } catch { - if (toastId !== undefined) { - toast.error(`图谱「${mindmapState.title}」自动生成失败`, { - id: toastId, - duration: Infinity, - cancel: { label: '✕', onClick: () => {} }, - }) - } else { - toast.error(`图谱「${mindmapState.title}」自动生成失败`, { - duration: Infinity, - cancel: { label: '✕', onClick: () => {} }, - }) + if (parsed.mindmap?.nodes && Array.isArray(parsed.mindmap.nodes)) { + const mindmapJson = JSON.stringify({ nodes: parsed.mindmap.nodes }) + const newTree = parseJsonToTree(mindmapJson) + updateMindmapForConversation(newTree, conversationId) + } + } catch (jsonErr) { + console.error('[mindmap] JSON mode parse failed:', jsonErr) + displayContent = accumulated + } + } else { + // Fallback: marker mode + const idx = accumulated.indexOf('') + console.log('[mindmap] marker found:', idx !== -1) + if (idx !== -1) { + const mindmapStart = idx + displayContent = accumulated.slice(0, idx).replace(/```\w*\s*$/, '') + const mEnd = accumulated.indexOf('', mindmapStart + 1) + if (mEnd !== -1) { + const jsonStr = accumulated.slice( + mindmapStart + ''.length, + mEnd, + ) + console.log('[mindmap] jsonStr length:', jsonStr.length) + try { + const newTree = parseJsonToTree(jsonStr) + updateMindmapForConversation(newTree, conversationId) + } catch (err) { + console.error('[mindmap] parse failed:', err) } - // silently fail for auto-generation + displayContent += accumulated.slice(mEnd + ''.length).replace(/^\s*```\w*\s*/gm, '') } - }, 5000), - ) + } else { + displayContent = accumulated + } + } + } finally { + if (displayContent) { + updateMessageInConversation(conversationId, assistantMsg.id, { + content: displayContent, + status: 'complete', + }) + } } + + if (controller.signal.aborted) return } catch (err: unknown) { 
if (isAbortError(err)) { updateMessageInConversation(conversationId, assistantMsg.id, { status: 'complete' }) @@ -266,10 +262,21 @@ export default function ChatPage() { const p = providers[0] if (!p) return const modelId = p.models.find((m) => m.enabled)?.id ?? p.models[0]?.id ?? '' - addConversation({ + const conv = addConversation({ providerId: p.id, modelId, }) + + const mindmapId = _result.mindmapId + if (mindmapId) { + useMindmapStore.getState().addMonitoredConversation(mindmapId, conv.id) + } + + if (_result.newMindmapTitle) { + const mm = useMindmapStore.getState().addMindmap(_result.newMindmapTitle) + useMindmapStore.getState().addMonitoredConversation(mm.id, conv.id) + } + setView('chat') } diff --git a/src/features/chat/MessageBubble.tsx b/src/features/chat/MessageBubble.tsx index 7855cdb..929a891 100644 --- a/src/features/chat/MessageBubble.tsx +++ b/src/features/chat/MessageBubble.tsx @@ -1,11 +1,8 @@ -import { useRef } from 'react' -import { Copy, RefreshCw, BookmarkPlus } from 'lucide-react' +import { Copy, RefreshCw } from 'lucide-react' import Markdown from 'react-markdown' import remarkGfm from 'remark-gfm' import Avatar from '@/components/Avatar' -import { useMindmapStore } from '@/stores/mindmapStore' import type { Message } from '@/types/message' -import type { CorpusEntry } from '@/types/mindmap' interface MessageBubbleProps { message: Message @@ -18,8 +15,6 @@ export default function MessageBubble({ message, onRegenerate }: MessageBubblePr const isStreaming = message.status === 'streaming' const isError = message.status === 'error' - const pendingSelectionRef = useRef(null) - const time = new Date(message.createdAt).toLocaleTimeString('zh-CN', { hour: '2-digit', minute: '2-digit', @@ -29,45 +24,6 @@ export default function MessageBubble({ message, onRegenerate }: MessageBubblePr navigator.clipboard.writeText(message.content) } - const handleCorpusMouseDown = () => { - const selection = window.getSelection() - const text = selection && 
selection.toString().trim() - pendingSelectionRef.current = text && text.length > 0 ? text : null - } - - const handleAddToCorpus = () => { - const activeMindmapId = useMindmapStore.getState().activeMindmapId - if (!activeMindmapId) return - - const selectedText = pendingSelectionRef.current - pendingSelectionRef.current = null - - const mindmap = useMindmapStore.getState().mindmaps.find((m) => m.id === activeMindmapId) - if (!selectedText && mindmap) { - const alreadyAdded = (mindmap.corpus ?? []).some((e) => e.messageId === message.id) - if (alreadyAdded) return - } - - let range: { start: number; end: number } | undefined - if (selectedText) { - const start = message.content.indexOf(selectedText) - if (start !== -1) { - range = { start, end: start + selectedText.length } - } - } - - const entry: CorpusEntry = { - id: crypto.randomUUID(), - messageId: message.id, - selectedText: selectedText || undefined, - range, - enabled: true, - addedAt: Date.now(), - } - - useMindmapStore.getState().addCorpusEntry(activeMindmapId, entry) - } - return (
@@ -112,16 +68,6 @@ export default function MessageBubble({ message, onRegenerate }: MessageBubblePr > - {!isUser && message.status === 'complete' && ( - - )} {!isUser && onRegenerate && ( - + + {countNodes(activeMindmap.tree)} 节点 + - + 导出 @@ -392,13 +153,6 @@ export default function MindMapPanel({ onClose }: MindMapPanelProps) { -
)} @@ -407,422 +161,82 @@ export default function MindMapPanel({ onClose }: MindMapPanelProps) {
- - {corpusOpen && ( -
- {(() => { - const msgConvMap = new Map() - for (const c of conversations) { - for (const m of c.messages) { - msgConvMap.set(m.id, c) + 关联会话 ({linkedConversations.length}) + {activeConvId && ( + - -
- ) - })} -
- )) - })()} - - )} - - )} - - {generating && progressText && ( -
- {progressText} -
- )} - - {generating && reasoningContent && ( -
- )} - AI 思考过程 - {reasoningOpen && ( -
- {reasoningContent} + {linkedOpen && ( +
+ {linkedConversations.length === 0 ? ( +

+ 未关联任何会话,打开一个会话后在右侧面板点击「关联当前」 +

+ ) : ( + linkedConversations.map((c) => ( +
+ + {c.title} + {c.id === activeConvId && ( + (当前) + )} + + +
+ )) + )}
)}
)} handleGenerate()} - /> - - {validationWarnings.length > 0 && !generating && ( -
-

- 质量提醒 ({validationWarnings.length}) -

-
- {validationWarnings.map((w, i) => ( -
- {w.message} -
- ))} -
-
- )} - - { - if (activeMindmapId) { - updateMindmapSettings(activeMindmapId, settings) - } - }} + isGenerating={false} + isStreaming={false} + error={null} /> - - { - if (!open) setCorpusDeleteEntry(null) - }} - > - - - 确认删除 - -

确认删除此条语料?

- - - - -
-
) } -function MindmapSettingsDialog({ - open, - onOpenChange, - mindmap, - onSave, -}: { - open: boolean - onOpenChange: (open: boolean) => void - mindmap: ReturnType['mindmaps'][number] | null - onSave: (settings: { - generatorProviderId?: string - generatorModelId?: string - maxDepth?: number - forceFullRebuild?: boolean - }) => void -}) { - const { providers } = useProviderStore() - const { conversations } = useConversationStore() - const [useCustom, setUseCustom] = useState( - !!mindmap?.generatorProviderId && !!mindmap?.generatorModelId, - ) - const [selectedProviderId, setSelectedProviderId] = useState( - mindmap?.generatorProviderId ?? providers[0]?.id ?? '', - ) - const [selectedModelId, setSelectedModelId] = useState(mindmap?.generatorModelId ?? '') - const [monitoredIds, setMonitoredIds] = useState( - mindmap?.monitoredConversationIds ?? [], - ) - const [maxDepth, setMaxDepth] = useState(mindmap?.maxDepth ?? 3) - const [forceFullRebuild, setForceFullRebuild] = useState(mindmap?.forceFullRebuild ?? false) - - const selectedProvider = providers.find((p) => p.id === selectedProviderId) - const enabledModels = selectedProvider?.models.filter((m) => m.enabled) ?? [] - - const handleSave = () => { - onSave({ - maxDepth, - forceFullRebuild, - ...(useCustom && selectedProviderId && selectedModelId - ? { generatorProviderId: selectedProviderId, generatorModelId: selectedModelId } - : { generatorProviderId: undefined, generatorModelId: undefined }), - }) - - // Sync monitored conversations - if (mindmap) { - const original = mindmap.monitoredConversationIds ?? 
[] - const store = useMindmapStore.getState() - for (const id of original) { - if (!monitoredIds.includes(id)) { - store.removeMonitoredConversation(mindmap.id, id) - } - } - for (const id of monitoredIds) { - if (!original.includes(id)) { - store.addMonitoredConversation(mindmap.id, id) - } - } - } - - onOpenChange(false) +function countNodes(nodes: import('@/types/mindmap').MindMapNode[]): number { + let count = nodes.length + for (const node of nodes) { + count += countNodes(node.children) } - - return ( - - - - 图谱生成设置 - -
- -

不勾选时使用当前对话的模型生成图谱

- - {useCustom && ( -
-
- - -
-
- - -
-
- )} -
-
- - -
-
- -

- 勾选后每次生成都会重新构建完整图谱而非增量更新 -

-
-
- -
- {conversations.length === 0 ? ( -

暂无对话

- ) : ( - conversations.map((c) => ( - - )) - )} -
-
- - - - -
-
- ) + return count } diff --git a/src/features/mindmap/MindMapTree.tsx b/src/features/mindmap/MindMapTree.tsx index 72cb5db..777d575 100644 --- a/src/features/mindmap/MindMapTree.tsx +++ b/src/features/mindmap/MindMapTree.tsx @@ -232,7 +232,7 @@ export default function MindMapTree({

此图谱暂无内容

-

关联对话后点击「更新图谱」从对话中生成内容

+

关联会话后脑图随对话自动生长

) } diff --git a/src/lib/__tests__/mindmap-generator.test.ts b/src/lib/__tests__/mindmap-generator.test.ts index 8c3442b..9151503 100644 --- a/src/lib/__tests__/mindmap-generator.test.ts +++ b/src/lib/__tests__/mindmap-generator.test.ts @@ -1,917 +1,184 @@ import { describe, it, expect } from 'vitest' import { + buildFullMindmapPrompt, parseMarkdownToTree, - treeToMarkdown, - buildMindmapPrompt, - buildSystemPrompt, - countNodes, - maxTreeDepth, - validateTree, - collectCorpusContent, - deriveNodeId, - buildIncrementalPrompt, - parseOperations, - buildEditedNodeIdSet, - applyOperations, + parseJsonToTree, + findEditedNodes, + mergeEditedNodes, + mindmapTreeToContext, + buildHybridContext, } from '../mindmap-generator' -import type { Conversation, Message } from '@/types' -import type { CorpusEntry, MindMapNode, IncrementalOperation } from '@/types/mindmap' - -function makeConversation(title: string, messages: Message[]): Conversation { +import type { MindMapNode } from '@/types/mindmap' + +function makeNode( + id: string, + label: string, + children: MindMapNode[] = [], + edited = false, +): MindMapNode { return { - id: crypto.randomUUID(), - title, - providerId: 'p1', - modelId: 'gpt-4', - systemPrompt: '', - messages, - createdAt: Date.now(), - updatedAt: Date.now(), + id, + label, + summary: '', + children, + sourceConversationIds: [], + sourceExcerpts: {}, + editedByUser: edited, } } -function makeMessage(role: 'user' | 'assistant', content: string): Message { - return { - id: crypto.randomUUID(), - role, - content, - createdAt: Date.now(), - status: 'complete', - } -} +describe('buildFullMindmapPrompt', () => { + it('contains mindmap delimiter markers', () => { + const prompt = buildFullMindmapPrompt() + expect(prompt).toContain('') + expect(prompt).toContain('') + }) + + it('contains JSON schema instructions', () => { + const prompt = buildFullMindmapPrompt() + expect(prompt).toContain('"nodes"') + expect(prompt).toContain('"label"') + }) + + it('mentions 
user-edited nodes preservation', () => { + const prompt = buildFullMindmapPrompt() + expect(prompt).toContain('细化它') + }) +}) describe('parseMarkdownToTree', () => { it('parses a single root node', () => { - const md = `# React\nA JavaScript library for building user interfaces.` + const md = '# React' const tree = parseMarkdownToTree(md) expect(tree).toHaveLength(1) expect(tree[0]!.label).toBe('React') - expect(tree[0]!.children).toEqual([]) }) it('parses multiple levels', () => { - const md = `# React\n\n## useState —— React 中最基础的状态 Hook\n用于函数组件的状态管理\n\n### Lazy Initialization\n传入函数避免重复计算\n\n## useEffect` + const md = '# React\n## Hooks\n### useState' const tree = parseMarkdownToTree(md) expect(tree).toHaveLength(1) - expect(tree[0]!.label).toBe('React') - expect(tree[0]!.children).toHaveLength(2) - - const useStateNode = tree[0]!.children[0]! - expect(useStateNode.label).toBe('useState') - expect(useStateNode.summary).toBe('React 中最基础的状态 Hook') - expect(useStateNode.children).toHaveLength(1) - expect(useStateNode.children[0]!.label).toBe('Lazy Initialization') - }) - - it('parses node label without separator', () => { - const md = `# React\n## Hooks` - const tree = parseMarkdownToTree(md) - expect(tree[0]!.children[0]!.label).toBe('Hooks') - expect(tree[0]!.children[0]!.summary).toBe('') - }) - - it('handles empty markdown', () => { - const tree = parseMarkdownToTree('') - expect(tree).toEqual([]) - }) - - it('handles non-header text gracefully', () => { - const md = `Just some text without headers.\nMore text.` - const tree = parseMarkdownToTree(md) - expect(tree).toEqual([]) - }) - - it('limits depth to 3 levels', () => { - const md = `# A\n## B\n### C\n#### D` - const tree = parseMarkdownToTree(md, undefined, 3) expect(tree[0]!.children).toHaveLength(1) expect(tree[0]!.children[0]!.children).toHaveLength(1) - expect(tree[0]!.children[0]!.children[0]!.children).toEqual([]) }) - it('allows depth 4 when maxDepth=4', () => { - const md = `# A\n## B\n### C\n#### 
D` - const tree = parseMarkdownToTree(md, undefined, 4) - expect(tree[0]!.children[0]!.children[0]!.children).toHaveLength(1) - expect(tree[0]!.children[0]!.children[0]!.children[0]!.label).toBe('D') - }) - - it('defaults to depth 3 when maxDepth not specified', () => { - const md = `# A\n## B\n### C\n#### D` + it('handles separator —— for summary', () => { + const md = '# React —— A UI library\n## Hooks —— Side effects' const tree = parseMarkdownToTree(md) - expect(tree[0]!.children[0]!.children[0]!.children).toEqual([]) + expect(tree[0]!.label).toBe('React') + expect(tree[0]!.summary).toBe('A UI library') + expect(tree[0]!.children[0]!.label).toBe('Hooks') + expect(tree[0]!.children[0]!.summary).toBe('Side effects') }) +}) - it('limits children to 10 per node', () => { - const md = Array.from({ length: 15 }, (_, i) => `# Root\n## Item${i}`).join('\n') - const tree = parseMarkdownToTree(md) - expect(tree[0]!.children.length).toBeLessThanOrEqual(10) +describe('parseJsonToTree', () => { + it('parses valid JSON with nodes', () => { + const json = JSON.stringify({ + nodes: [ + { label: 'React', summary: 'UI library', children: [] }, + ], + }) + const tree = parseJsonToTree(json) + expect(tree).toHaveLength(1) + expect(tree[0]!.label).toBe('React') }) -}) -describe('treeToMarkdown', () => { - it('converts a tree back to markdown', () => { - const node: MindMapNode = { - id: '1', - label: 'React', - summary: '', - children: [ + it('parses nested JSON', () => { + const json = JSON.stringify({ + nodes: [ { - id: '2', - label: 'useState', - summary: 'A basic hook', - children: [], - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, + label: 'React', + summary: '', + children: [ + { label: 'Hooks', summary: '', children: [] }, + ], }, ], - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, - } - const md = treeToMarkdown([node]) - expect(md).toContain('# React') - expect(md).toContain('## useState') - expect(md).toContain('A basic 
hook') - }) - - it('handles empty tree', () => { - const md = treeToMarkdown([]) - expect(md).toBe('') - }) -}) - -describe('buildMindmapPrompt', () => { - it('builds prompt for first generation', () => { - const conv = makeConversation('Test', [ - makeMessage('user', 'What is React?'), - makeMessage('assistant', 'React is a library for building UIs.'), - ]) - const { systemPrompt, userMessage } = buildMindmapPrompt(null, [conv]) - expect(systemPrompt).toContain('知识提取助手') - expect(userMessage).toContain('[src:') - expect(userMessage).toContain('What is React?') - expect(userMessage).toContain('React is a library') - }) - - it('includes existing tree in regeneration prompt', () => { - const existingNode: MindMapNode = { - id: '1', - label: 'React', - summary: '', - children: [], - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, - } - const conv = makeConversation('New', [ - makeMessage('user', 'What are hooks?'), - makeMessage('assistant', 'Hooks let you use state in functions.'), - ]) - const { userMessage } = buildMindmapPrompt([existingNode], [conv]) - expect(userMessage).toContain('现有') - expect(userMessage).toContain('# React') - expect(userMessage).toContain('What are hooks?') - }) -}) - -describe('buildSystemPrompt', () => { - it('includes few-shot example', () => { - const prompt = buildSystemPrompt() - expect(prompt).toContain('示例输出') - expect(prompt).toContain('前端状态管理') - expect(prompt).toContain('本地状态') - expect(prompt).toContain('Zustand') - }) - - it('includes quality guidelines', () => { - const prompt = buildSystemPrompt() - expect(prompt).toContain('优先提取概念性') - expect(prompt).toContain('宁少勿滥') - }) - - it('reflects configured maxDepth=4', () => { - const prompt = buildSystemPrompt(undefined, 4) - expect(prompt).toContain('最大深度为 4 层') - }) - - it('auto mode does not specify hard depth limit', () => { - const prompt = buildSystemPrompt(undefined, 0) - expect(prompt).toContain('深度不做硬性限制') - }) -}) - -describe('countNodes', () => 
{ - it('counts empty tree as 0', () => { - expect(countNodes([])).toBe(0) - }) - - it('counts single node', () => { - const node: MindMapNode = { - id: '1', - label: 'A', - summary: '', - children: [], - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, - } - expect(countNodes([node])).toBe(1) - }) - - it('counts nested tree', () => { - const tree: MindMapNode[] = [ - { - id: '1', - label: 'Root', - summary: '', - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, - children: [ - { - id: '2', - label: 'Child1', - summary: '', - children: [], - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, - }, - { - id: '3', - label: 'Child2', - summary: '', - children: [ - { - id: '4', - label: 'Grandchild', - summary: '', - children: [], - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, - }, - ], - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, - }, - ], - }, - ] - expect(countNodes(tree)).toBe(4) - }) -}) - -describe('maxTreeDepth', () => { - it('returns 0 for empty tree', () => { - expect(maxTreeDepth([])).toBe(0) + }) + const tree = parseJsonToTree(json) + expect(tree[0]!.children).toHaveLength(1) + expect(tree[0]!.children[0]!.label).toBe('Hooks') }) - it('returns 1 for single node', () => { - const node: MindMapNode = { - id: '1', - label: 'A', - summary: '', - children: [], - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, - } - expect(maxTreeDepth([node])).toBe(1) + it('falls back to markdown for invalid JSON', () => { + const tree = parseJsonToTree('# React\n## Hooks') + expect(tree).toHaveLength(1) + expect(tree[0]!.children).toHaveLength(1) }) - it('returns correct depth for 3-level tree', () => { - const tree: MindMapNode[] = [ - { - id: '1', - label: 'L1', - summary: '', - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, - children: [ - { - id: '2', - label: 'L2', - summary: '', - sourceConversationIds: 
[], - sourceExcerpts: {}, - editedByUser: false, - children: [ - { - id: '3', - label: 'L3', - summary: '', - children: [], - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, - }, - ], - }, - ], - }, - ] - expect(maxTreeDepth(tree)).toBe(3) + it('handles empty JSON gracefully', () => { + const tree = parseJsonToTree('{}') + expect(tree).toEqual([]) }) }) -describe('validateTree', () => { - it('validates clean tree with no warnings', () => { - const tree: MindMapNode[] = [ - { - id: '1', - label: 'Root', - summary: '', - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, - children: [ - { - id: '2', - label: 'Child', - summary: '', - children: [], - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, - }, - ], - }, - ] - expect(validateTree(tree)).toEqual([]) - }) - - it('detects duplicate nodes at same depth', () => { - const tree: MindMapNode[] = [ - { - id: '1', - label: 'Root', - summary: '', - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, - children: [ - { - id: '2', - label: 'Same', - summary: '', - children: [], - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, - }, - { - id: '3', - label: 'Same', - summary: '', - children: [], - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, - }, - ], - }, - ] - const warnings = validateTree(tree) - expect(warnings.length).toBeGreaterThan(0) - expect(warnings.some((w) => w.type === 'duplicate')).toBe(true) +describe('findEditedNodes / mergeEditedNodes', () => { + it('findEditedNodes returns empty when none edited', () => { + const tree = [makeNode('n1', 'A')] + expect(findEditedNodes(tree)).toEqual([]) }) - it('detects empty label nodes', () => { - const tree: MindMapNode[] = [ - { - id: '1', - label: 'Root', - summary: '', - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, - children: [ - { - id: '2', - label: '', - summary: '', - children: [], - 
sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, - }, - ], - }, + it('findEditedNodes finds recursively', () => { + const tree = [ + makeNode('n1', 'A', [ + makeNode('n2', 'B', [], true), + ]), ] - const warnings = validateTree(tree) - expect(warnings.some((w) => w.type === 'empty-label')).toBe(true) - }) - - it('detects depth exceeded', () => { - const deepNode: MindMapNode = { - id: '4', - label: 'L4', - summary: '', - children: [], - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, - } - const l3: MindMapNode = { - id: '3', - label: 'L3', - summary: '', - children: [deepNode], - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, - } - const l2: MindMapNode = { - id: '2', - label: 'L2', - summary: '', - children: [l3], - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, - } - const tree: MindMapNode[] = [ - { - id: '1', - label: 'L1', - summary: '', - children: [l2], - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, - }, - ] - const warnings = validateTree(tree) - expect(warnings.some((w) => w.type === 'depth-exceeded')).toBe(true) - }) - - it('detects breadth exceeded', () => { - const children = Array.from({ length: 12 }, (_, i) => ({ - id: `c${i}`, - label: `Child${i}`, - summary: '', - children: [], - sourceConversationIds: [] as string[], - sourceExcerpts: {} as Record, - editedByUser: false, - })) - const tree: MindMapNode[] = [ - { - id: '1', - label: 'Root', - summary: '', - children, - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, - }, - ] - const warnings = validateTree(tree) - expect(warnings.some((w) => w.type === 'breadth-exceeded')).toBe(true) - }) -}) - -describe('buildMindmapPrompt with materialContent', () => { - it('uses materialContent when provided', () => { - const conv = makeConversation('Test', [makeMessage('user', 'What is React?')]) - const matContent = 'Custom material text content' - const { 
userMessage } = buildMindmapPrompt(null, [conv], matContent) - expect(userMessage).toContain('Custom material text content') - expect(userMessage).not.toContain('[src:') - }) - - it('falls back to conversations when no materialContent', () => { - const conv = makeConversation('Test', [makeMessage('user', 'What is React?')]) - const { userMessage } = buildMindmapPrompt(null, [conv]) - expect(userMessage).toContain('What is React?') - expect(userMessage).toContain('[src:') - }) -}) - -describe('collectCorpusContent', () => { - it('empty corpus returns empty content', () => { - const result = collectCorpusContent([], []) - expect(result.content).toBe('') - expect(result.sourceMap.size).toBe(0) - }) - - it('single enabled entry returns its message content', () => { - const msg = makeMessage('assistant', 'Test content for corpus') - const conv = makeConversation('Test', [msg]) - const entry: CorpusEntry = { - id: crypto.randomUUID(), - messageId: msg.id, - enabled: true, - addedAt: Date.now(), - } - const result = collectCorpusContent([entry], [conv]) - expect(result.content).toContain('Test content for corpus') - expect(result.content).toContain('[src:') - expect(result.sourceMap.size).toBe(1) - }) - - it('disabled entries are skipped', () => { - const msg = makeMessage('assistant', 'Should not appear') - const conv = makeConversation('Test', [msg]) - const entry: CorpusEntry = { - id: crypto.randomUUID(), - messageId: msg.id, - enabled: false, - addedAt: Date.now(), - } - const result = collectCorpusContent([entry], [conv]) - expect(result.content).toBe('') - expect(result.sourceMap.size).toBe(0) - }) - - it('entries with selectedText use selectedText instead of full message', () => { - const msg = makeMessage('assistant', 'Full message content that is long') - const conv = makeConversation('Test', [msg]) - const entry: CorpusEntry = { - id: crypto.randomUUID(), - messageId: msg.id, - selectedText: 'Selected text only', - enabled: true, - addedAt: Date.now(), - } - 
const result = collectCorpusContent([entry], [conv]) - expect(result.content).toContain('Selected text only') - expect(result.content).not.toContain('Full message content') - }) - - it('source deleted (message not found) is skipped', () => { - const entry: CorpusEntry = { - id: crypto.randomUUID(), - messageId: 'nonexistent-message-id', - enabled: true, - addedAt: Date.now(), - } - const result = collectCorpusContent([entry], []) - expect(result.content).toBe('') - expect(result.sourceMap.size).toBe(0) - }) -}) - -describe('deriveNodeId', () => { - it('produces deterministic IDs for the same label', () => { - expect(deriveNodeId('React')).toBe(deriveNodeId('React')) - }) - - it('produces different IDs for different labels', () => { - expect(deriveNodeId('React')).not.toBe(deriveNodeId('Vue')) + const found = findEditedNodes(tree) + expect(found).toHaveLength(1) + expect(found[0]!.id).toBe('n2') }) - it('produces different IDs for same label under different parents', () => { - const idA = deriveNodeId('State', ['Root']) - const idB = deriveNodeId('State', ['React']) - expect(idA).not.toBe(idB) + it('mergeEditedNodes preserves user-edited nodes', () => { + const oldTree = [makeNode('n1', 'OldLabel', [], true)] + const newTree = [makeNode('n1', 'NewLabel', [])] + const merged = mergeEditedNodes(newTree, oldTree) + expect(merged[0]!.label).toBe('OldLabel') }) - it('returns a string starting with n followed by alphanumeric chars', () => { - expect(deriveNodeId('test')).toMatch(/^n[a-z0-9]+$/) + it('mergeEditedNodes uses new nodes for non-edited', () => { + const editedNodes = findEditedNodes([makeNode('n1', 'Old', [], false)]) + const newTree = [makeNode('n1', 'New', [])] + const merged = mergeEditedNodes(newTree, editedNodes) + expect(merged[0]!.label).toBe('New') }) }) -describe('buildIncrementalPrompt', () => { - it('includes system prompt for incremental editing', () => { - const { systemPrompt } = buildIncrementalPrompt([], []) - 
expect(systemPrompt).toContain('增量编辑') - }) - - it('includes existing tree in user message', () => { - const node: MindMapNode = { - id: 'n1', - label: 'Root', - summary: '', - children: [], - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, - } - const { userMessage } = buildIncrementalPrompt([node], []) - expect(userMessage).toContain('# Root') - expect(userMessage).toContain('节点ID映射') - expect(userMessage).toContain('n1') - }) - - it('includes output format constraints', () => { - const { userMessage } = buildIncrementalPrompt([], []) - expect(userMessage).toContain('"analysis"') - expect(userMessage).toContain('"operations"') - expect(userMessage).toContain('noop') - }) - - it('uses materialContent when provided', () => { - const { userMessage } = buildIncrementalPrompt([], [], 'Custom incremental text') - expect(userMessage).toContain('Custom incremental text') - }) -}) - -describe('parseOperations', () => { - it('parses valid JSON with operations', () => { - const json = JSON.stringify({ - analysis: 'test', - operations: [ - { op: 'add_child', parent_id: 'p1', node: { label: 'New', summary: 'Desc' } }, - { op: 'update', node_id: 'n1', changes: { label: 'Updated' } }, - { op: 'merge', from_id: 'n2', to_id: 'n3' }, - { op: 'delete_leaf', node_id: 'n4' }, - { op: 'noop' }, - ], - }) - const result = parseOperations(json) - expect(result).toHaveLength(5) - if (result) { - expect(result[0]).toEqual({ - op: 'add_child', - parent_id: 'p1', - node: { label: 'New', summary: 'Desc' }, - }) - expect(result[1]).toEqual({ op: 'update', node_id: 'n1', changes: { label: 'Updated' } }) - expect(result[2]).toEqual({ op: 'merge', from_id: 'n2', to_id: 'n3' }) - expect(result[3]).toEqual({ op: 'delete_leaf', node_id: 'n4' }) - expect(result[4]).toEqual({ op: 'noop' }) - } - }) - - it('returns null for invalid JSON', () => { - expect(parseOperations('not json')).toBeNull() - }) - - it('returns null when operations field is missing', () => { - 
expect(parseOperations('{"analysis":"test"}')).toBeNull() - }) - - it('returns null when operations is not an array', () => { - expect(parseOperations('{"operations":"string"}')).toBeNull() - }) - - it('returns null for add_child missing parent_id', () => { - const json = JSON.stringify({ operations: [{ op: 'add_child', node: { label: 'X' } }] }) - expect(parseOperations(json)).toBeNull() - }) - - it('returns null for add_child missing node.label', () => { - const json = JSON.stringify({ operations: [{ op: 'add_child', parent_id: 'p1', node: {} }] }) - expect(parseOperations(json)).toBeNull() - }) - - it('returns null for update missing changes', () => { - const json = JSON.stringify({ operations: [{ op: 'update', node_id: 'n1' }] }) - expect(parseOperations(json)).toBeNull() - }) - - it('returns null for merge missing from_id', () => { - const json = JSON.stringify({ operations: [{ op: 'merge', to_id: 'n3' }] }) - expect(parseOperations(json)).toBeNull() +describe('mindmapTreeToContext', () => { + it('serializes tree to markdown', () => { + const tree = [makeNode('n1', 'React', [makeNode('n2', 'Hooks')])] + const result = mindmapTreeToContext(tree) + expect(result).toContain('# React') + expect(result).toContain('## Hooks') }) - it('returns null for delete_leaf missing node_id', () => { - const json = JSON.stringify({ operations: [{ op: 'delete_leaf' }] }) - expect(parseOperations(json)).toBeNull() + it('marks editedByUser nodes', () => { + const tree = [makeNode('n1', 'React', [], true)] + const result = mindmapTreeToContext(tree) + expect(result).toContain('[用户编辑]') }) - it('returns null for unknown operation type', () => { - const json = JSON.stringify({ operations: [{ op: 'unknown_op' }] }) - expect(parseOperations(json)).toBeNull() - }) - - it('defaults summary to empty string when missing in add_child', () => { - const json = JSON.stringify({ - operations: [{ op: 'add_child', parent_id: 'p1', node: { label: 'X' } }], - }) - const result = 
parseOperations(json) - expect(result).toHaveLength(1) - if (result) { - expect(result[0]).toEqual({ - op: 'add_child', - parent_id: 'p1', - node: { label: 'X', summary: '' }, - }) - } + it('returns empty string for empty tree', () => { + expect(mindmapTreeToContext([])).toBe('') }) }) -describe('buildEditedNodeIdSet', () => { - it('returns empty set when no nodes are edited', () => { - const tree: MindMapNode[] = [ - { - id: 'n1', - label: 'A', - summary: '', - children: [], - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, - }, - ] - expect(buildEditedNodeIdSet(tree)).toEqual(new Set()) +describe('buildHybridContext', () => { + it('returns original messages when tree is empty', () => { + const messages = [{ role: 'user' as const, content: 'Hello' }] + const result = buildHybridContext(messages, []) + expect(result).toEqual(messages) }) - it('returns ID of edited node', () => { - const tree: MindMapNode[] = [ - { - id: 'n1', - label: 'A', - summary: '', - children: [], - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: true, - }, + it('prepends mindmap context when tree is non-empty', () => { + const tree = [makeNode('n1', 'React')] + const messages = [ + { role: 'user' as const, content: 'Q1' }, + { role: 'assistant' as const, content: 'A1' }, ] - expect(buildEditedNodeIdSet(tree)).toEqual(new Set(['n1'])) - }) - - it('finds edited nodes recursively', () => { - const tree: MindMapNode[] = [ - { - id: 'n1', - label: 'A', - summary: '', - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, - children: [ - { - id: 'n2', - label: 'B', - summary: '', - children: [], - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: true, - }, - ], - }, - ] - expect(buildEditedNodeIdSet(tree)).toEqual(new Set(['n2'])) - }) -}) - -describe('applyOperations', () => { - function makeNode( - id: string, - label: string, - children: MindMapNode[] = [], - edited = false, - ): MindMapNode { - return { - id, - label, 
- summary: '', - children, - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: edited, - } - } - - describe('add_child', () => { - it('adds a child to the specified parent', () => { - const tree = [makeNode('n1', 'Root')] - const ops: IncrementalOperation[] = [ - { op: 'add_child', parent_id: 'n1', node: { label: 'Child', summary: 'A child' } }, - ] - const { newTree, changes } = applyOperations(tree, ops, new Set()) - expect(newTree[0]!.children).toHaveLength(1) - expect(newTree[0]!.children[0]!.label).toBe('Child') - expect(newTree[0]!.children[0]!.summary).toBe('A child') - expect(changes).toHaveLength(1) - expect(changes[0]!.op).toBe('add_child') - }) - - it('skips add_child when parent_id does not exist', () => { - const tree = [makeNode('n1', 'Root')] - const ops: IncrementalOperation[] = [ - { op: 'add_child', parent_id: 'nonexistent', node: { label: 'X', summary: '' } }, - ] - const { newTree, changes } = applyOperations(tree, ops, new Set()) - expect(newTree[0]!.children).toHaveLength(0) - expect(changes).toHaveLength(0) - }) - }) - - describe('update', () => { - it('updates label and summary of a node', () => { - const tree = [makeNode('n1', 'OldLabel', [makeNode('n2', 'Child')])] - const ops: IncrementalOperation[] = [ - { op: 'update', node_id: 'n1', changes: { label: 'NewLabel', summary: 'New summary' } }, - ] - const { newTree, changes } = applyOperations(tree, ops, new Set()) - expect(newTree[0]!.label).toBe('NewLabel') - expect(newTree[0]!.summary).toBe('New summary') - expect(newTree[0]!.children).toHaveLength(1) - expect(changes).toHaveLength(1) - }) - - it('does not update edited nodes', () => { - const tree = [makeNode('n1', 'OldLabel', [], true)] - const ops: IncrementalOperation[] = [ - { op: 'update', node_id: 'n1', changes: { label: 'NewLabel' } }, - ] - const { newTree, changes } = applyOperations(tree, ops, new Set(['n1'])) - expect(newTree[0]!.label).toBe('OldLabel') - expect(changes).toHaveLength(0) - }) - - it('skips 
update when node_id does not exist', () => { - const tree = [makeNode('n1', 'Label')] - const ops: IncrementalOperation[] = [ - { op: 'update', node_id: 'nonexistent', changes: { label: 'X' } }, - ] - const { newTree, changes } = applyOperations(tree, ops, new Set()) - expect(newTree).toHaveLength(1) - expect(newTree[0]!.label).toBe('Label') - expect(changes).toHaveLength(0) - }) - }) - - describe('merge', () => { - it('merges children from from_id to to_id and removes from_id', () => { - const childA = makeNode('c1', 'ChildA') - const childB = makeNode('c2', 'ChildB') - const tree = [makeNode('n1', 'Source', [childA]), makeNode('n2', 'Target', [childB])] - const ops: IncrementalOperation[] = [{ op: 'merge', from_id: 'n1', to_id: 'n2' }] - const { newTree, changes } = applyOperations(tree, ops, new Set()) - expect(newTree).toHaveLength(1) - expect(newTree[0]!.id).toBe('n2') - expect(newTree[0]!.children).toHaveLength(2) - expect(newTree[0]!.children.map((c) => c.label).sort()).toEqual(['ChildA', 'ChildB']) - expect(changes).toHaveLength(1) - }) - - it('skips merge when from_id does not exist', () => { - const tree = [makeNode('n1', 'Target')] - const ops: IncrementalOperation[] = [{ op: 'merge', from_id: 'nonexistent', to_id: 'n1' }] - const { newTree, changes } = applyOperations(tree, ops, new Set()) - expect(newTree).toHaveLength(1) - expect(changes).toHaveLength(0) - }) - }) - - describe('delete_leaf', () => { - it('deletes a leaf node', () => { - const tree = [makeNode('n1', 'Root', [makeNode('n2', 'Leaf')])] - const ops: IncrementalOperation[] = [{ op: 'delete_leaf', node_id: 'n2' }] - const { newTree, changes } = applyOperations(tree, ops, new Set()) - expect(newTree[0]!.children).toHaveLength(0) - expect(changes).toHaveLength(1) - }) - - it('does not delete nodes with children', () => { - const tree = [makeNode('n1', 'Root', [makeNode('n2', 'Child')])] - const ops: IncrementalOperation[] = [{ op: 'delete_leaf', node_id: 'n1' }] - const { newTree, changes } = 
applyOperations(tree, ops, new Set()) - expect(newTree).toHaveLength(1) - expect(changes).toHaveLength(0) - }) - - it('does not delete edited nodes', () => { - const tree = [makeNode('n1', 'Root', [makeNode('n2', 'Leaf', [], true)])] - const ops: IncrementalOperation[] = [{ op: 'delete_leaf', node_id: 'n2' }] - const { newTree, changes } = applyOperations(tree, ops, new Set(['n2'])) - expect(newTree[0]!.children).toHaveLength(1) - expect(changes).toHaveLength(0) - }) - - it('skips delete_leaf when node_id does not exist', () => { - const tree = [makeNode('n1', 'Root')] - const ops: IncrementalOperation[] = [{ op: 'delete_leaf', node_id: 'nonexistent' }] - const { newTree, changes } = applyOperations(tree, ops, new Set()) - expect(newTree).toHaveLength(1) - expect(changes).toHaveLength(0) - }) - }) - - describe('noop', () => { - it('skips noop operations', () => { - const tree = [makeNode('n1', 'Root')] - const ops: IncrementalOperation[] = [{ op: 'noop' }] - const { newTree, changes } = applyOperations(tree, ops, new Set()) - expect(newTree).toHaveLength(1) - expect(changes).toHaveLength(0) - }) - }) - - describe('combinations', () => { - it('processes multiple operations in sequence', () => { - const tree = [makeNode('n1', 'Root'), makeNode('n2', 'OldTopic')] - const ops: IncrementalOperation[] = [ - { op: 'add_child', parent_id: 'n1', node: { label: 'NewChild', summary: '' } }, - { op: 'update', node_id: 'n2', changes: { label: 'UpdatedTopic' } }, - { op: 'noop' }, - ] - const { newTree, changes } = applyOperations(tree, ops, new Set()) - expect(newTree[0]!.children).toHaveLength(1) - expect(newTree[0]!.children[0]!.label).toBe('NewChild') - expect(newTree[1]!.label).toBe('UpdatedTopic') - expect(changes).toHaveLength(2) - }) + const result = buildHybridContext(messages, tree) + expect(result[0]!.role).toBe('system') + expect(result[0]!.content).toContain('# React') }) }) diff --git a/src/lib/llm-client.ts b/src/lib/llm-client.ts index e7815e8..97c76aa 100644 --- 
a/src/lib/llm-client.ts +++ b/src/lib/llm-client.ts @@ -63,6 +63,28 @@ export async function* streamChat( } } +export async function chat( + client: OpenAI, + params: { + model: string + messages: ChatCompletionMessageParam[] + signal?: AbortSignal + useJsonMode?: boolean + }, +): Promise { + const response = await client.chat.completions.create( + { + model: params.model, + messages: params.messages, + stream: false, + ...(params.useJsonMode ? { response_format: { type: 'json_object' as const } } : {}), + } as OpenAI.Chat.Completions.ChatCompletionCreateParams, + { signal: params.signal }, + ) + const result = response as OpenAI.Chat.Completions.ChatCompletion + return result.choices[0]?.message?.content ?? '' +} + export async function* streamChatWithRetry( client: OpenAI, params: { diff --git a/src/lib/mindmap-generator.ts b/src/lib/mindmap-generator.ts index 65bbf0f..aef67c4 100644 --- a/src/lib/mindmap-generator.ts +++ b/src/lib/mindmap-generator.ts @@ -1,201 +1,70 @@ -import OpenAI from 'openai' import type { ChatCompletionMessageParam } from 'openai/resources/chat/completions' -import type { Conversation } from '../types/conversation' -import type { - MindMapNode, - MindMap, - CorpusEntry, - IncrementalOperation, - ChangeRecord, -} from '../types/mindmap' -import { generateId, deriveNodeId } from './id' +import type { MindMapNode } from '../types/mindmap' +import { deriveNodeId } from './id' export { deriveNodeId } -export function buildSystemPrompt(useJsonMode?: boolean, maxDepth = 3): string { - const base = `你是一个知识提取助手。请根据对话内容生成结构化的思维导图。` +// ─── Mindmap Output Prompt ────────────────────────────────────────── - // Chinese ordinal labels 1-5 - const ordinals = ['', '一', '二', '三', '四', '五'] - const levelLabels = ['', '根主题', '子主题', '细节', '细节', '细节'] +export function buildFullMindmapPrompt(useJsonMode = false): string { + if (useJsonMode) { + return `## 思维导图生成指令 - const displayDepth = maxDepth === 0 ? 
Math.min(3, 5) : Math.min(maxDepth, 5) +你必须输出JSON格式,包含两个字段: +{ + "answer": "你对用户问题的Markdown格式回答,完整详细", + "mindmap": {"nodes": [{"label": "概念", "summary": "摘要", "children": [{"label": "子概念", "summary": "...", "children": []}]}]} +} - // Build markdown header level descriptions (e.g. "一级标题 # 表示根主题,二级标题 ## 表示子主题") - const headerParts: string[] = [] - for (let i = 1; i <= displayDepth; i++) { - headerParts.push(`${ordinals[i]}级标题 ${'#'.repeat(i)} 表示${levelLabels[i]}`) +"mindmap" 字段的规则: +- nodes 通常只包含一个根节点,所有内容在它的 children 下面 +- 已有思维导图结构会提供给你——你必须在它的基础上做三件事: + 1. 把本次对话新讨论的具体概念作为子节点加入 + 2. 如果已有分支不够详细,用新知识细化它(增加子节点层级) + 3. 已有概念的摘要如果被新知识补充了,更新它 +- 标记 [用户编辑] 的节点保持原样不要改动` } - const headerDesc = headerParts.join(',') - const headerExParts: string[] = [] - const exampleCount = maxDepth === 0 ? 3 : maxDepth - for (let i = 1; i <= exampleCount; i++) { - headerExParts.push('#'.repeat(i)) - } - const headerExamples = headerExParts.join(' / ') - - let depthLimitStr: string - if (maxDepth === 0) { - depthLimitStr = - '深度不做硬性限制。根据内容的知识密度自行判断:表层概念用较少层级,技术细节可以到 4-5 层。不要让无关紧要的细节占据层级。' - } else if (maxDepth === 1) { - depthLimitStr = `最大深度为 1 层(#)` - } else { - depthLimitStr = `最大深度为 ${maxDepth} 层(${headerExamples}),不超过 ${maxDepth} 层` - } + return `## 思维导图生成指令 - const jsonDepthRule = - maxDepth === 0 - ? '深度不做硬性限制。根据内容的知识密度自行判断:表层概念用较少层级,技术细节可以到 4-5 层。不要让无关紧要的细节占据层级' - : `最大深度为 ${maxDepth} 层` - - const formatRules = useJsonMode - ? 
` -输出格式要求(严格遵循): -- 输出必须是合法的 JSON 对象,包含一个 "nodes" 数组 -- 每个节点包含: label (字符串), summary (字符串), children (节点数组) -- 可选字段: content (Markdown 字符串), contentType ("markdown" 或 "text",默认 "text") -- 当节点需要展示代码块、表格、加粗等格式时,设置 contentType 为 "markdown",在 content 字段写入 Markdown 内容 -- content 支持的 Markdown 语法: **加粗** *斜体* \`行内代码\` ~~删除线~~ \`\`\`代码块\`\`\` |表格| -- ${jsonDepthRule},每个节点下最多 10 个直接子节点 -- 只包含对话中明确讨论过的主题,不要编造内容 -- 标题简洁明了,控制在 20 字以内 -- 优先提取概念性、方法论类知识,而非琐碎的操作细节 -- 每个节点需要标注知识来源,在 label 中包含 [源: convId/msgId] -- 输出格式: { "nodes": [{ "label": "概念 [源: abc/123]", "summary": "描述", "contentType": "markdown", "content": "详细内容...", "children": [...] }] }` - : ` -输出格式要求(严格遵循): -- 必须使用 Markdown 标题语法:${headerDesc},**禁止使用列表符号(如 - 或 * 或数字)作为层级标记** -- 每个标题后可跟一句简短摘要(1-2句话) -- 标题与摘要之间用 —— 分隔。示例:## useState —— React 中最基础的状态 Hook -- ${depthLimitStr} -- 每个节点下最多 10 个直接子节点 -- 只包含对话中明确讨论过的主题,不要编造内容 -- 标题简洁明了,控制在 20 字以内 -- 优先提取概念性、方法论类知识,而非琐碎的操作细节 -- 每个节点需要标注知识来源。在节点标题后用 [源: convId/msgId] 标注` - - const examples = useJsonMode - ? 
` -## 示例输出 - -{ "nodes": [{ "label": "前端状态管理 [源: abc/123]", "summary": "前端应用中管理UI状态的方法和模式", "contentType": "markdown", "content": "**核心方案对比**\\n\\n| 库 | 特点 |\\n|---|---|\\n| useState | 简单值 |\\n| useReducer | 复杂逻辑 |\\n| Zustand | 轻量外部库 |\\n\\n\`\`\`ts\\nconst store = create((set) => ({\\n count: 0,\\n inc: () => set((s) => ({ count: s.count + 1 })),\\n}))\\n\`\`\`", "children": [ - { "label": "本地状态", "summary": "组件内部的状态管理", "contentType": "text", "children": [ - { "label": "useState [源: abc/123, def/456]", "summary": "React中最基础的状态Hook", "children": [] }, - { "label": "useReducer [源: def/456]", "summary": "适用于复杂状态逻辑", "children": [] } - ]} -]}] }` - : ` -## 示例输出 - -以下是一个高质量输出的范例(主题:前端状态管理): - -# 前端状态管理 [源: abc/123] —— 前端应用中管理UI状态的方法和模式 - -## 本地状态 —— 组件内部的状态管理 -### useState [源: abc/123, def/456] —— React中最基础的状态Hook,适用于简单值 -### useReducer [源: def/456] —— 适用于复杂状态逻辑,类似Redux模式 - -## 全局状态 —— 跨组件共享的状态管理方案 -### Context API —— React内置的轻量级状态共享机制 -### Zustand —— 轻量级外部状态库,基于发布订阅模式` - - const guidance: string[] = [] - if (maxDepth === 0 || maxDepth >= 3) { - guidance.push('根节点应概括整体话题') - guidance.push('二级节点按概念维度分类,不是按时间顺序罗列') - guidance.push('三级节点提供具体的技术名称或关键要点') - } else if (maxDepth === 2) { - guidance.push('根节点应概括整体话题') - guidance.push('二级节点按概念维度分类,提供具体的技术名称或关键要点,不是按时间顺序罗列') - } else { - guidance.push('根节点应概括整体话题,包含具体的技术名称或关键要点') - } - guidance.push('如果对话内容较少,宁少勿滥,不必填满深度和广度限制') +你的回答由两部分组成: +1. 正常回答用户问题(Markdown 格式) +2. 
在回答的最末尾,输出更新后的完整思维导图 JSON - return `${base}${formatRules}${examples} +你必须按以下格式输出,不要省略标记,不要用代码块包裹: -注意要点: -${guidance.map((g) => `- ${g}`).join('\n')}` -} + +{"nodes": [{"label": "主概念", "summary": "摘要", "children": [{"label": "子概念", "summary": "...", "children": []}]}]} + -export function buildMindmapPrompt( - existingTree: MindMapNode[] | null, - conversations: Conversation[], - materialContent?: string, - useJsonMode?: boolean, - maxDepth = 3, -): { - systemPrompt: string - userMessage: string - sourceMap: Map -} { - const systemPrompt = buildSystemPrompt(useJsonMode, maxDepth) - const sourceMap = new Map() - - let userMessage = '' - - if (existingTree && existingTree.length > 0) { - userMessage += '以下是现有的思维导图结构:\n\n' - userMessage += treeToMarkdown(existingTree) - userMessage += '\n\n---\n\n' - userMessage += - '请基于以下新的内容更新思维导图。合并新知识点,扩充已有主题,输出完整的更新后 JSON:\n\n' - } else { - userMessage += '请基于以下内容生成思维导图:\n\n' - } +规则: +- nodes 数组通常只包含一个根节点,所有内容都在它的 children 下面 +- 已有思维导图结构会提供给你——你必须在它的基础上做三件事: + 1. 把本次对话新讨论的具体概念,作为子节点加入到已有树的对应位置 + 2. 如果已有概念的分支不够详细,用新学到的知识细化它(增加子节点层级) + 3. 已有概念的摘要如果被新知识补充了,更新它 +- 不要只是维持树的现有粗细粒度——对话中学到的新要点必须体现在树中 +- 标记 [用户编辑] 的节点保持原样不要改动` +} - if (materialContent) { - userMessage += materialContent - } else { - const recentConversations = conversations.slice(-10) - for (const conv of recentConversations) { - userMessage += `### 会话: ${conv.title}\n\n` - for (const msg of conv.messages) { - if (msg.role === 'system') continue - const role = msg.role === 'user' ? '用户' : 'AI' - const srcKey = `${conv.id}/${msg.id.slice(0, 8)}` - const content = msg.content.length > 2000 ? msg.content.slice(0, 2000) + '...' 
: msg.content - userMessage += `[src:${srcKey}] **${role}**: ${content}\n\n` - sourceMap.set(srcKey, { - conversationId: conv.id, - messageId: msg.id, - text: content.slice(0, 200), - }) - } - userMessage += '---\n\n' - } - } +// ─── Markdown → Tree Parsing ────────────────────────────────────── - return { systemPrompt, userMessage, sourceMap } +function stripSourceAnnotations(text: string): string { + return text.replace(/\s*\[源:\s*[^\]]+\]\s*/g, '').trim() } export function parseMarkdownToTree( markdown: string, - sourceMap?: Map, - maxDepth = 3, + maxDepth = 6, ): MindMapNode[] { const lines = markdown.split('\n') const roots: MindMapNode[] = [] const stack: { depth: number; node: MindMapNode }[] = [] const headerRe = new RegExp('^(#{1,' + maxDepth + '})\\s+(.+)') - let pendingSummary = '' - for (const line of lines) { const headerMatch = line.match(headerRe) - if (!headerMatch) { - const trimmed = line.trim() - if (trimmed && !trimmed.startsWith('-') && !trimmed.startsWith('*')) { - if (pendingSummary) { - pendingSummary += ' ' + trimmed - } else { - pendingSummary = trimmed - } - } - continue - } + if (!headerMatch) continue const depth = headerMatch[1]?.length ?? 1 const titleText = headerMatch[2]?.trim() ?? 
'' @@ -209,22 +78,15 @@ export function parseMarkdownToTree( summary = titleText.slice(sepIndex + 2).trim() } - const sourceIds = parseSourceIds(label, sourceMap) - const sourceExcerpts = buildSourceExcerpts(sourceIds, sourceMap) - - pendingSummary = '' - const parentPath = stack.map((s) => s.node.label) const node: MindMapNode = { id: deriveNodeId(label, parentPath), - label: stripSourceAnnotations(label) || label || titleText, + label: stripSourceAnnotations(label) || label, summary: stripSourceAnnotations(summary), - content: undefined, - contentType: undefined, children: [], - sourceConversationIds: sourceIds, - sourceExcerpts, + sourceConversationIds: [], + sourceExcerpts: {}, editedByUser: false, } @@ -238,7 +100,7 @@ export function parseMarkdownToTree( roots.push(node) } else { const parent = stack[stack.length - 1] - if (parent && parent.node.children.length < 10) { + if (parent) { parent.node.children.push(node) } } @@ -251,162 +113,7 @@ export function parseMarkdownToTree( return roots } -function parseSourceIds( - text: string, - sourceMap?: Map, -): string[] { - const sourceMatch = text.match(/\[源:\s*([^\]]+)\]/) - if (!sourceMatch || !sourceMap) { - // Fallback: return first available conversation ID - if (sourceMap && sourceMap.size > 0) { - const firstEntry = sourceMap.values().next().value - return firstEntry ? [firstEntry.conversationId] : [] - } - return [] - } - const keys = sourceMatch[1]?.split(',').map((k) => k.trim()) ?? [] - const ids = new Set() - for (const key of keys) { - const entry = sourceMap.get(key) - if (entry) ids.add(entry.conversationId) - } - return ids.size > 0 ? 
[...ids] : [] -} - -function stripSourceAnnotations(text: string): string { - return text.replace(/\s*\[源:\s*[^\]]+\]\s*/g, '').trim() -} - -function buildSourceExcerpts( - sourceIds: string[], - sourceMap?: Map, -): Record { - if (!sourceMap) return {} - const excerpts: Record = {} - for (const [, entry] of sourceMap) { - if (sourceIds.includes(entry.conversationId)) { - excerpts[entry.conversationId] = entry.text.slice(0, 200) - } - } - return excerpts -} - -export function countNodes(nodes: MindMapNode[]): number { - let count = 0 - for (const node of nodes) { - count++ - count += countNodes(node.children) - } - return count -} - -export function maxTreeDepth(nodes: MindMapNode[], currentDepth = 1): number { - if (nodes.length === 0) return 0 - let max = currentDepth - for (const node of nodes) { - if (node.children.length > 0) { - const childMax = maxTreeDepth(node.children, currentDepth + 1) - if (childMax > max) max = childMax - } - } - return max -} - -export interface ValidationWarning { - type: 'duplicate' | 'empty-label' | 'depth-exceeded' | 'breadth-exceeded' - nodeLabel: string - message: string -} - -export function validateTree(nodes: MindMapNode[], maxDepth = 3): ValidationWarning[] { - const warnings: ValidationWarning[] = [] - const seenLabels = new Set() - - function walk(nodeList: MindMapNode[], depth: number, parentLabel: string) { - for (const node of nodeList) { - if (!node.label || node.label.trim() === '') { - warnings.push({ - type: 'empty-label', - nodeLabel: '(空)', - message: `在"${parentLabel}"下发现空节点,已自动移除`, - }) - continue - } - - const key = `${depth}:${node.label}` - if (seenLabels.has(key)) { - warnings.push({ - type: 'duplicate', - nodeLabel: node.label, - message: `在"${parentLabel}"下发现重复节点"${node.label}"`, - }) - } else { - seenLabels.add(key) - } - - if (depth >= maxDepth && node.children.length > 0) { - warnings.push({ - type: 'depth-exceeded', - nodeLabel: node.label, - message: `节点"${node.label}"超过最大深度${maxDepth}层,子节点已截断`, - }) 
- } - - if (node.children.length > 10) { - warnings.push({ - type: 'breadth-exceeded', - nodeLabel: node.label, - message: `节点"${node.label}"超过最大子节点数10个,已保留前10个`, - }) - } - - if (depth < maxDepth) { - walk(node.children, depth + 1, node.label) - } - } - } - - walk(nodes, 1, '根') - return warnings -} - -export function treeToMarkdown(nodes: MindMapNode[], depth = 0): string { - const lines: string[] = [] - const prefix = '#'.repeat(Math.min(depth + 1, 6)) - - for (const node of nodes) { - let line = `${prefix} ${node.label}` - if (node.summary) { - line += ` —— ${node.summary}` - } - lines.push(line) - - if (node.children.length > 0 && depth < 2) { - lines.push(treeToMarkdown(node.children, depth + 1)) - } - } - - return lines.join('\n') -} - -export function conversationMessagesToHistory( - conversations: Conversation[], -): ChatCompletionMessageParam[] { - const messages: ChatCompletionMessageParam[] = [] - - for (const conv of conversations.slice(-10)) { - for (const msg of conv.messages) { - if (msg.role === 'system') continue - const content = msg.content.length > 2000 ? msg.content.slice(0, 2000) + '...' : msg.content - messages.push({ - role: msg.role as 'user' | 'assistant', - content, - }) - } - } - - return messages -} +// ─── JSON → Tree Parsing ────────────────────────────────────────── interface JsonNode { label?: string @@ -418,27 +125,21 @@ interface JsonNode { function jsonNodeToMindMapNode( item: unknown, - sourceMap?: Map, depth = 0, - maxDepth = 3, + maxDepth = 6, parentLabels: string[] = [], ): MindMapNode { const raw = item as JsonNode const label = (raw.label ?? '未命名').trim() - const summary = (raw.summary ?? '').trim() const cleanLabel = stripSourceAnnotations(label) - const sourceIds = parseSourceIds(label, sourceMap) - const sourceExcerpts = buildSourceExcerpts(sourceIds, sourceMap) + const summary = (raw.summary ?? '').trim() const contentType = raw.contentType === 'markdown' || raw.contentType === 'text' ? 
raw.contentType : undefined const content = typeof raw.content === 'string' ? raw.content : undefined const children = (raw.children ?? []) .slice(0, 10) .map((c: JsonNode) => - jsonNodeToMindMapNode(c, sourceMap, depth + 1, maxDepth, [ - ...parentLabels, - cleanLabel || '未命名', - ]), + jsonNodeToMindMapNode(c, depth + 1, maxDepth, [...parentLabels, cleanLabel || '未命名']), ) return { @@ -447,465 +148,136 @@ function jsonNodeToMindMapNode( summary: stripSourceAnnotations(summary), content, contentType, - children: depth < (maxDepth ?? 3) ? children : [], - sourceConversationIds: sourceIds, - sourceExcerpts, + children: depth < (maxDepth ?? 6) ? children : [], + sourceConversationIds: [], + sourceExcerpts: {}, editedByUser: false, } } -export function parseJsonToTree( - jsonString: string, - sourceMap?: Map, - maxDepth = 3, -): MindMapNode[] { +export function parseJsonToTree(jsonString: string, maxDepth = 6): MindMapNode[] { const text = jsonString.trim() - // Stage 1: Try direct JSON parse let parsed: { nodes?: unknown[] } = {} + let parseStage = 0 + let parseError = '' try { parsed = JSON.parse(text) as { nodes?: unknown[] } - } catch { - // Stage 2: Try extracting from outermost ```json fence - const fenceStripped = text + parseStage = 1 + } catch (e1) { + parseError = String(e1) + // Try repairing trailing comma before } or ] + let repaired = text.replace(/,(\s*[}\]])/g, '$1') + const fenceStripped = repaired .replace(/^```(?:json)?\s*\n?/, '') .replace(/\n?\s*```\s*$/, '') .trim() try { parsed = JSON.parse(fenceStripped) as { nodes?: unknown[] } + parseStage = 2 } catch { - // Stage 3: Find JSON object boundaries (first { to last }) const braceStart = text.indexOf('{') const braceEnd = text.lastIndexOf('}') if (braceStart !== -1 && braceEnd > braceStart) { const extracted = text.slice(braceStart, braceEnd + 1) + // Also try repair on extracted + const extractedRepaired = extracted.replace(/,(\s*[}\]])/g, '$1') try { - parsed = JSON.parse(extracted) as { nodes?: 
unknown[] } + parsed = JSON.parse(extractedRepaired) as { nodes?: unknown[] } + parseStage = 3 } catch { - return parseMarkdownToTree(jsonString, sourceMap, maxDepth) + console.warn('[parseJsonToTree] all parse stages failed, error:', parseError, 'tail:', text.slice(-80), 'len:', text.length) + return parseMarkdownToTree(jsonString, maxDepth) } } else { - return parseMarkdownToTree(jsonString, sourceMap, maxDepth) + console.warn('[parseJsonToTree] no braces found, error:', parseError) + return parseMarkdownToTree(jsonString, maxDepth) } } } if (!Array.isArray(parsed.nodes)) { - return parseMarkdownToTree(jsonString, sourceMap, maxDepth) - } - - return parsed.nodes.map((n) => jsonNodeToMindMapNode(n, sourceMap, 0, maxDepth)) -} - -export function collectCorpusContent( - corpus: CorpusEntry[], - conversations: Conversation[], -): { - content: string - sourceMap: Map -} { - const sourceMap = new Map() - const parts: string[] = [] - - for (const entry of corpus) { - if (!entry.enabled) continue - let msg: Conversation['messages'][number] | undefined - let convId = '' - let conv: Conversation | undefined - for (const c of conversations) { - const found = c.messages.find((m) => m.id === entry.messageId) - if (found) { - msg = found - convId = c.id - conv = c - break - } - } - if (!msg || !conv) continue - - const text = entry.selectedText ?? msg.content - const role = msg.role === 'user' ? '用户' : 'AI' - const srcKey = `${convId}/${entry.messageId.slice(0, 8)}` - - let contextLine = '' - if (msg.role === 'assistant') { - const msgIndex = conv.messages.indexOf(msg) - const prevMsg = msgIndex > 0 ? conv.messages[msgIndex - 1] : null - if (prevMsg && prevMsg.role === 'user') { - const prevText = - prevMsg.content.length > 500 ? prevMsg.content.slice(0, 500) + '...' 
: prevMsg.content - contextLine = `**用户**: ${prevText}\n` - } - } - - parts.push(`${contextLine}[src:${srcKey}] **${role}**: ${text}`) - sourceMap.set(srcKey, { - conversationId: convId, - messageId: entry.messageId, - text: text.slice(0, 200), - }) + console.warn('[parseJsonToTree] parsed.nodes is not an array, type:', typeof parsed.nodes) + return parseMarkdownToTree(jsonString, maxDepth) } - return { content: parts.join('\n\n'), sourceMap } + console.log('[parseJsonToTree] parse stage:', parseStage, 'nodes count:', parsed.nodes.length) + return parsed.nodes.map((n) => jsonNodeToMindMapNode(n, 0, maxDepth)) } -export function buildIncrementalPrompt( - existingTree: MindMapNode[], - conversations: Conversation[], - materialContent?: string, -): { systemPrompt: string; userMessage: string } { - const systemPrompt = - '你是一个知识图谱增量编辑助手。分析现有图谱和新内容之间的差异,输出需要修改的操作。' - - let userMessage = '## 现有图谱\n\n' - userMessage += treeToMarkdown(existingTree) - userMessage += '\n\n### 节点ID映射\n\n' - const allNodes = flattenTree(existingTree) - for (const node of allNodes) { - userMessage += `- "${node.label}" → \`${node.id}\`\n` - } +// ─── Edited Node Preservation ────────────────────────────────────── - userMessage += '\n\n---\n\n' - userMessage += '## 新内容\n\n' - - if (materialContent) { - userMessage += materialContent - } else { - const recentConversations = conversations.slice(-10) - for (const conv of recentConversations) { - userMessage += `### 会话: ${conv.title}\n\n` - for (const msg of conv.messages) { - if (msg.role === 'system') continue - const role = msg.role === 'user' ? '用户' : 'AI' - const content = msg.content.length > 2000 ? msg.content.slice(0, 2000) + '...' 
: msg.content - userMessage += `**${role}**: ${content}\n\n` - } - userMessage += '---\n\n' - } - } - - userMessage += `\n## 输出要求 - -输出 JSON 格式,包含 "analysis" 和 "operations" 字段: - -{ - "analysis": "分析摘要,说明需要修改的原因", - "operations": [ - { "op": "add_child", "parent_id": "节点ID", "node": { "label": "新节点标签", "summary": "新节点摘要", "contentType": "markdown", "content": "详细Markdown内容" } }, - { "op": "update", "node_id": "节点ID", "changes": { "label": "新标签", "summary": "新摘要", "contentType": "markdown", "content": "更新后的Markdown内容" } }, - { "op": "merge", "from_id": "源节点ID", "to_id": "目标节点ID" }, - { "op": "delete_leaf", "node_id": "节点ID" }, - { "op": "noop" } - ] -} - -约束: -- 只输出必要的操作,不要包含多余的操作 -- 如果没有变化,使用 {"op": "noop"} -- node_id、parent_id 必须与现有节点ID完全匹配(见上方节点ID映射) -- 不要覆盖用户编辑过的节点(editedByUser 为 true 的节点) -- update 操作只包含需要修改的字段 -- delete_leaf 只能用于叶子节点(没有子节点的节点) -- merge 操作将 from_id 的子节点合并到 to_id,然后删除 from_id -- node 和 changes 中的 contentType/content 为可选字段,content 支持 **加粗** *斜体* \`行内代码\` ~~删除线~~ \`\`\`代码块\`\`\` |表格|` - - return { systemPrompt, userMessage } -} - -function flattenTree(nodes: MindMapNode[]): MindMapNode[] { +export function findEditedNodes(nodes: MindMapNode[]): MindMapNode[] { const result: MindMapNode[] = [] for (const node of nodes) { - result.push(node) - result.push(...flattenTree(node.children)) + if (node.editedByUser) result.push(node) + result.push(...findEditedNodes(node.children)) } return result } -export function parseOperations(jsonString: string): IncrementalOperation[] | null { - try { - const parsed = JSON.parse(jsonString) as { analysis?: string; operations?: unknown[] } - if (!Array.isArray(parsed.operations)) return null - - const ops: IncrementalOperation[] = [] - for (const item of parsed.operations) { - if (!item || typeof item !== 'object') return null - const op = item as Record - - switch (op.op) { - case 'add_child': { - if (typeof op.parent_id !== 'string') return null - if (!op.node || typeof op.node !== 'object') return null - const 
node = op.node as Record - if (typeof node.label !== 'string') return null - const nodeContent = - typeof node.content === 'string' ? node.content : undefined - const nodeContentType = - node.contentType === 'markdown' || node.contentType === 'text' - ? node.contentType - : undefined - ops.push({ - op: 'add_child', - parent_id: op.parent_id, - node: { - label: node.label, - summary: typeof node.summary === 'string' ? node.summary : '', - content: nodeContent, - contentType: nodeContentType, - }, - }) - break - } - case 'update': { - if (typeof op.node_id !== 'string') return null - if (!op.changes || typeof op.changes !== 'object') return null - const changes = op.changes as Record - if ( - typeof changes.label !== 'string' && - typeof changes.summary !== 'string' && - typeof changes.content !== 'string' && - typeof changes.contentType !== 'string' - ) - return null - const cleanChanges: { - label?: string - summary?: string - content?: string - contentType?: 'text' | 'markdown' - } = {} - if (typeof changes.label === 'string') cleanChanges.label = changes.label - if (typeof changes.summary === 'string') cleanChanges.summary = changes.summary - if (typeof changes.content === 'string') cleanChanges.content = changes.content - if (changes.contentType === 'markdown' || changes.contentType === 'text') - cleanChanges.contentType = changes.contentType - ops.push({ op: 'update', node_id: op.node_id, changes: cleanChanges }) - break - } - case 'merge': { - if (typeof op.from_id !== 'string') return null - if (typeof op.to_id !== 'string') return null - ops.push({ op: 'merge', from_id: op.from_id, to_id: op.to_id }) - break - } - case 'delete_leaf': { - if (typeof op.node_id !== 'string') return null - ops.push({ op: 'delete_leaf', node_id: op.node_id }) - break - } - case 'noop': { - ops.push({ op: 'noop' }) - break - } - default: - return null - } +export function mergeEditedNodes( + newTree: MindMapNode[], + editedNodes: MindMapNode[], +): MindMapNode[] { + const 
editedIds = new Set(editedNodes.map((n) => n.id)) + return newTree.map((node) => { + if (editedIds.has(node.id)) { + const edited = editedNodes.find((n) => n.id === node.id) + return edited ?? node } - - return ops - } catch { - return null - } + return { ...node, children: mergeEditedNodes(node.children, editedNodes) } + }) } -function findNodeById(nodes: MindMapNode[], id: string): MindMapNode | null { - for (const node of nodes) { - if (node.id === id) return node - const found = findNodeById(node.children, id) - if (found) return found - } - return null -} +// ─── Mindmap as Context ──────────────────────────────────────────── -function findParentAndNode( - nodes: MindMapNode[], - id: string, - parent: MindMapNode | null = null, -): { parent: MindMapNode | null; node: MindMapNode | null } { - for (const node of nodes) { - if (node.id === id) return { parent, node } - const found = findParentAndNode(node.children, id, node) - if (found.node) return found - } - return { parent: null, node: null } -} +export function mindmapTreeToContext(tree: MindMapNode[], maxNodes = 200): string { + if (tree.length === 0) return '' -export function buildEditedNodeIdSet(tree: MindMapNode[]): Set { - const ids = new Set() - function walk(nodes: MindMapNode[]) { - for (const node of nodes) { - if (node.editedByUser) ids.add(node.id) - walk(node.children) - } - } - walk(tree) - return ids -} + const lines: string[] = ['## Knowledge Graph Context', ''] -export function applyOperations( - tree: MindMapNode[], - ops: IncrementalOperation[], - editedNodeIds: Set, -): { newTree: MindMapNode[]; changes: ChangeRecord[] } { - const newTree = structuredClone(tree) - const changes: ChangeRecord[] = [] - const now = Date.now() - - for (const op of ops) { - switch (op.op) { - case 'add_child': { - const parent = findNodeById(newTree, op.parent_id) - if (!parent) continue - const newNode: MindMapNode = { - id: generateId(), - label: op.node.label, - summary: op.node.summary, - content: 
op.node.content, - contentType: op.node.contentType, - children: [], - sourceConversationIds: [], - sourceExcerpts: {}, - editedByUser: false, - } - parent.children.push(newNode) - changes.push({ - op: 'add_child', - nodeId: newNode.id, - description: `Added "${op.node.label}" under "${parent.label}"`, - timestamp: now, - }) - break - } - case 'update': { - const node = findNodeById(newTree, op.node_id) - if (!node) continue - if (editedNodeIds.has(op.node_id)) continue - if (op.changes.label !== undefined) node.label = op.changes.label - if (op.changes.summary !== undefined) node.summary = op.changes.summary - if (op.changes.content !== undefined) node.content = op.changes.content - if (op.changes.contentType !== undefined) node.contentType = op.changes.contentType - changes.push({ - op: 'update', - nodeId: op.node_id, - description: `Updated "${node.label}" (${Object.keys(op.changes).join(', ')})`, - timestamp: now, - }) - break + let count = 0 + function walk(nodes: MindMapNode[], depth: number) { + for (const node of nodes) { + if (count >= maxNodes) return + count++ + const prefix = '#'.repeat(Math.min(depth + 1, 6)) + let line = `${prefix} ${node.label}` + if (node.summary) { + line += ` —— ${node.summary}` } - case 'merge': { - const { parent: fromParent, node: fromNode } = findParentAndNode(newTree, op.from_id) - const toNode = findNodeById(newTree, op.to_id) - if (!fromNode || !toNode) continue - toNode.children.push(...fromNode.children) - fromNode.children = [] - if (fromParent) { - fromParent.children = fromParent.children.filter((c) => c.id !== op.from_id) - } else { - const idx = newTree.findIndex((n) => n.id === op.from_id) - if (idx !== -1) newTree.splice(idx, 1) - } - changes.push({ - op: 'merge', - nodeId: op.from_id, - description: `Merged "${fromNode.label}" into "${toNode.label}"`, - timestamp: now, - }) - break + if (node.editedByUser) { + line += ' [用户编辑]' } - case 'delete_leaf': { - const { parent, node } = findParentAndNode(newTree, 
op.node_id) - if (!node) continue - if (node.children.length > 0) continue - if (editedNodeIds.has(op.node_id)) continue - if (parent) { - parent.children = parent.children.filter((c) => c.id !== op.node_id) - } else { - const idx = newTree.findIndex((n) => n.id === op.node_id) - if (idx !== -1) newTree.splice(idx, 1) - } - changes.push({ - op: 'delete_leaf', - nodeId: op.node_id, - description: `Deleted leaf "${node.label}"`, - timestamp: now, - }) - break + lines.push(line) + if (node.children.length > 0) { + walk(node.children, depth + 1) } - case 'noop': - break } } - return { newTree, changes } -} + walk(tree, 0) -export async function* generateMindmap( - client: OpenAI, - mindmap: MindMap, - corpus: CorpusEntry[], - conversations: Conversation[], - modelId: string, - signal?: AbortSignal, - mode: 'full' | 'incremental' = 'full', -): AsyncIterable { - const effectiveDepth = - mindmap.maxDepth == null ? 3 : mindmap.maxDepth === 0 ? 6 : mindmap.maxDepth - const existingTree = mindmap.tree.length > 0 ? mindmap.tree : null - const { content: materialContent, sourceMap } = collectCorpusContent(corpus, conversations) - - yield { sourceMap } as unknown as string - - let systemPrompt: string - let userMessage: string - - if (mode === 'incremental' && existingTree) { - const result = buildIncrementalPrompt(existingTree, conversations, materialContent) - systemPrompt = result.systemPrompt - userMessage = result.userMessage - } else { - const result = buildMindmapPrompt( - existingTree, - conversations, - materialContent, - true, - effectiveDepth, - ) - systemPrompt = result.systemPrompt - userMessage = result.userMessage + if (count >= maxNodes) { + lines.push('', '... 
(truncated)') } - const messages: ChatCompletionMessageParam[] = [ - { role: 'system', content: systemPrompt }, - { role: 'user', content: userMessage }, - ] - - const createParams: OpenAI.Chat.Completions.ChatCompletionCreateParams = { - model: modelId, - messages, - stream: false, - } + return lines.join('\n') +} - createParams.response_format = { - type: 'json_object', - } as OpenAI.Chat.Completions.ChatCompletionCreateParams['response_format'] +export function buildHybridContext( + messages: ChatCompletionMessageParam[], + tree: MindMapNode[], +): ChatCompletionMessageParam[] { + if (tree.length === 0) return messages - let response: OpenAI.Chat.Completions.ChatCompletion - try { - response = await client.chat.completions.create(createParams, { signal }) - } catch (err: unknown) { - if (err instanceof Error && err.message.includes('response_format')) { - delete createParams.response_format - response = await client.chat.completions.create(createParams, { signal }) - } else { - throw err - } - } + const mindmapContext = mindmapTreeToContext(tree) + if (!mindmapContext) return messages - const fullText = response.choices[0]?.message?.content ?? 
'' + const result: ChatCompletionMessageParam[] = [] + result.push({ role: 'system', content: mindmapContext }) + result.push(...messages.slice(-4)) - if (mode === 'incremental' && existingTree) { - yield fullText - const parsed = parseOperations(fullText) - yield { incrementalResult: parsed } as unknown as string - } else { - yield fullText - } + return result } diff --git a/src/lib/mindmap-layout.ts b/src/lib/mindmap-layout.ts index 42f9bbe..4bc3bdf 100644 --- a/src/lib/mindmap-layout.ts +++ b/src/lib/mindmap-layout.ts @@ -60,7 +60,7 @@ export function applyLayout( edges: MindMapFlowEdge[], ): { nodes: MindMapFlowNode[]; edges: MindMapFlowEdge[] } { const g = new dagre.graphlib.Graph().setDefaultEdgeLabel(() => ({})) - g.setGraph({ rankdir: 'LR', nodesep: 60, ranksep: 120, edgesep: 20, marginx: 40, marginy: 40 }) + g.setGraph({ rankdir: 'LR', nodesep: 100, ranksep: 180, edgesep: 30, marginx: 80, marginy: 80 }) for (const n of nodes) { const hasRichContent = n.data?.contentType === 'markdown' && n.data?.content diff --git a/src/stores/__tests__/mindmapStore.test.ts b/src/stores/__tests__/mindmapStore.test.ts index 9b7ffa4..e39cfe5 100644 --- a/src/stores/__tests__/mindmapStore.test.ts +++ b/src/stores/__tests__/mindmapStore.test.ts @@ -105,180 +105,6 @@ describe('mindmapStore', () => { }) }) -describe('corpus operations', () => { - beforeEach(() => { - useMindmapStore.setState({ - mindmaps: [], - activeMindmapId: null, - }) - }) - - it('adds a corpus entry', () => { - const { addMindmap, addCorpusEntry } = useMindmapStore.getState() - const mm = addMindmap('Test') - - addCorpusEntry(mm.id, { - id: 'c1', - messageId: 'msg-1', - selectedText: undefined as string | undefined, - enabled: true, - addedAt: 1000, - }) - - const state = useMindmapStore.getState() - expect(state.mindmaps[0]!.corpus).toHaveLength(1) - expect(state.mindmaps[0]!.corpus[0]!.id).toBe('c1') - expect(state.mindmaps[0]!.corpus[0]!.messageId).toBe('msg-1') - 
expect(state.mindmaps[0]!.corpus[0]!.enabled).toBe(true) - expect(state.mindmaps[0]!.corpus[0]!.addedAt).toBe(1000) - }) - - it('preserves existing corpus entries when adding more', () => { - const { addMindmap, addCorpusEntry } = useMindmapStore.getState() - const mm = addMindmap('Test') - - addCorpusEntry(mm.id, { - id: 'c1', - messageId: 'msg-1', - selectedText: undefined as string | undefined, - enabled: true, - addedAt: 1000, - }) - addCorpusEntry(mm.id, { - id: 'c2', - messageId: 'msg-2', - selectedText: undefined as string | undefined, - enabled: true, - addedAt: 1001, - }) - - const state = useMindmapStore.getState() - expect(state.mindmaps[0]!.corpus).toHaveLength(2) - expect(state.mindmaps[0]!.corpus[0]!.id).toBe('c1') - expect(state.mindmaps[0]!.corpus[1]!.id).toBe('c2') - }) - - it('removes a corpus entry', () => { - const { addMindmap, addCorpusEntry, removeCorpusEntry } = useMindmapStore.getState() - const mm = addMindmap('Test') - - addCorpusEntry(mm.id, { - id: 'c1', - messageId: 'msg-1', - selectedText: undefined as string | undefined, - enabled: true, - addedAt: 1000, - }) - addCorpusEntry(mm.id, { - id: 'c2', - messageId: 'msg-2', - selectedText: undefined as string | undefined, - enabled: true, - addedAt: 1001, - }) - - removeCorpusEntry(mm.id, 'c1') - - const state = useMindmapStore.getState() - expect(state.mindmaps[0]!.corpus).toHaveLength(1) - expect(state.mindmaps[0]!.corpus[0]!.id).toBe('c2') - }) - - it('toggles a corpus entry enabled state', () => { - const { addMindmap, addCorpusEntry, toggleCorpusEntry } = useMindmapStore.getState() - const mm = addMindmap('Test') - - addCorpusEntry(mm.id, { - id: 'c1', - messageId: 'msg-1', - selectedText: undefined as string | undefined, - enabled: true, - addedAt: 1000, - }) - - toggleCorpusEntry(mm.id, 'c1', false) - expect(useMindmapStore.getState().mindmaps[0]!.corpus[0]!.enabled).toBe(false) - - toggleCorpusEntry(mm.id, 'c1', true) - 
expect(useMindmapStore.getState().mindmaps[0]!.corpus[0]!.enabled).toBe(true) - }) - - it('updates a corpus entry note', () => { - const { addMindmap, addCorpusEntry, updateCorpusEntryNote } = useMindmapStore.getState() - const mm = addMindmap('Test') - - addCorpusEntry(mm.id, { - id: 'c1', - messageId: 'msg-1', - selectedText: undefined as string | undefined, - enabled: true, - addedAt: 1000, - }) - - updateCorpusEntryNote(mm.id, 'c1', 'This is a note') - expect(useMindmapStore.getState().mindmaps[0]!.corpus[0]!.note).toBe('This is a note') - }) - - it('clears all corpus entries', () => { - const { addMindmap, addCorpusEntry, clearCorpus } = useMindmapStore.getState() - const mm = addMindmap('Test') - - addCorpusEntry(mm.id, { - id: 'c1', - messageId: 'msg-1', - selectedText: undefined as string | undefined, - enabled: true, - addedAt: 1000, - }) - addCorpusEntry(mm.id, { - id: 'c2', - messageId: 'msg-2', - selectedText: undefined as string | undefined, - enabled: true, - addedAt: 1001, - }) - - clearCorpus(mm.id) - - expect(useMindmapStore.getState().mindmaps[0]!.corpus).toEqual([]) - }) - - it('adds batch corpus entries', () => { - const { addMindmap, addBatchCorpusEntries } = useMindmapStore.getState() - const mm = addMindmap('Test') - - addBatchCorpusEntries(mm.id, [ - { - id: 'c1', - messageId: 'msg-1', - selectedText: undefined as string | undefined, - enabled: true, - addedAt: 1000, - }, - { - id: 'c2', - messageId: 'msg-2', - selectedText: undefined as string | undefined, - enabled: true, - addedAt: 1001, - }, - { - id: 'c3', - messageId: 'msg-3', - selectedText: undefined as string | undefined, - enabled: true, - addedAt: 1002, - }, - ]) - - const state = useMindmapStore.getState() - expect(state.mindmaps[0]!.corpus).toHaveLength(3) - expect(state.mindmaps[0]!.corpus[0]!.id).toBe('c1') - expect(state.mindmaps[0]!.corpus[1]!.id).toBe('c2') - expect(state.mindmaps[0]!.corpus[2]!.id).toBe('c3') - }) -}) - describe('monitored conversations', () => { 
beforeEach(() => { useMindmapStore.setState({ diff --git a/src/stores/mindmapStore.ts b/src/stores/mindmapStore.ts index 4ee906b..f131dcf 100644 --- a/src/stores/mindmapStore.ts +++ b/src/stores/mindmapStore.ts @@ -2,7 +2,7 @@ import { create } from 'zustand' import { persist } from 'zustand/middleware' import { createJSONStorage } from 'zustand/middleware' import { createIndexedDBStorage } from '@/lib/indexeddb-storage-adapter' -import type { CorpusEntry, MindMap, MindMapNode } from '../types/mindmap' +import type { MindMap, MindMapNode } from '../types/mindmap' interface MindMapState { mindmaps: MindMap[] @@ -11,16 +11,6 @@ interface MindMapState { removeMindmap: (id: string) => void updateMindmapTree: (id: string, tree: MindMapNode[]) => void updateMindmapTitle: (id: string, title: string) => void - updateMindmapSettings: ( - id: string, - settings: { - generatorProviderId?: string - generatorModelId?: string - maxDepth?: number - forceFullRebuild?: boolean - lastGeneratedAt?: number - }, - ) => void setActiveMindmapId: (id: string | null) => void getActiveMindmap: () => MindMap | null updateNode: ( @@ -32,14 +22,8 @@ interface MindMapState { deleteNode: (mindmapId: string, nodeId: string) => void moveNode: (mindmapId: string, nodeId: string, direction: 'up' | 'down') => void reparentNode: (mindmapId: string, nodeId: string, newParentId: string) => void - addCorpusEntry: (mindmapId: string, entry: CorpusEntry) => void - removeCorpusEntry: (mindmapId: string, entryId: string) => void - toggleCorpusEntry: (mindmapId: string, entryId: string, enabled: boolean) => void - updateCorpusEntryNote: (mindmapId: string, entryId: string, note: string) => void - clearCorpus: (mindmapId: string) => void addMonitoredConversation: (mindmapId: string, conversationId: string) => void removeMonitoredConversation: (mindmapId: string, conversationId: string) => void - addBatchCorpusEntries: (mindmapId: string, entries: CorpusEntry[]) => void setCollapsedNodeIds: (id: string, 
nodeIds: string[]) => void } @@ -112,7 +96,6 @@ export const useMindmapStore = create()( id: generateId(), title, tree: [], - corpus: [], monitoredConversationIds: [], createdAt: now, updatedAt: now, @@ -151,36 +134,6 @@ export const useMindmapStore = create()( })) }, - updateMindmapSettings: (id, settings) => { - set((state) => ({ - mindmaps: state.mindmaps.map((m) => - m.id === id - ? { - ...m, - generatorProviderId: - settings.generatorProviderId !== undefined - ? settings.generatorProviderId - : m.generatorProviderId, - generatorModelId: - settings.generatorModelId !== undefined - ? settings.generatorModelId - : m.generatorModelId, - maxDepth: settings.maxDepth !== undefined ? settings.maxDepth : m.maxDepth, - forceFullRebuild: - settings.forceFullRebuild !== undefined - ? settings.forceFullRebuild - : m.forceFullRebuild, - lastGeneratedAt: - settings.lastGeneratedAt !== undefined - ? settings.lastGeneratedAt - : m.lastGeneratedAt, - updatedAt: Date.now(), - } - : m, - ), - })) - }, - setActiveMindmapId: (id) => set({ activeMindmapId: id }), getActiveMindmap: () => { @@ -198,9 +151,6 @@ export const useMindmapStore = create()( ...node, ...patch, editedByUser: true, - sourceConversationIds: [], - sourceExcerpts: {}, - updatedAt: Date.now(), })), updatedAt: Date.now(), } @@ -324,70 +274,6 @@ export const useMindmapStore = create()( }) }, - addCorpusEntry: (mindmapId, entry) => { - set((state) => ({ - mindmaps: state.mindmaps.map((m) => - m.id === mindmapId - ? { ...m, corpus: [...(m.corpus ?? []), entry], updatedAt: Date.now() } - : m, - ), - })) - }, - - removeCorpusEntry: (mindmapId, entryId) => { - set((state) => ({ - mindmaps: state.mindmaps.map((m) => - m.id === mindmapId - ? { - ...m, - corpus: (m.corpus ?? 
[]).filter((e) => e.id !== entryId), - lastGeneratedAt: undefined, - updatedAt: Date.now(), - } - : m, - ), - })) - }, - - toggleCorpusEntry: (mindmapId, entryId, enabled) => { - set((state) => ({ - mindmaps: state.mindmaps.map((m) => - m.id === mindmapId - ? { - ...m, - corpus: (m.corpus ?? []).map((e) => (e.id === entryId ? { ...e, enabled } : e)), - lastGeneratedAt: undefined, - updatedAt: Date.now(), - } - : m, - ), - })) - }, - - updateCorpusEntryNote: (mindmapId, entryId, note) => { - set((state) => ({ - mindmaps: state.mindmaps.map((m) => - m.id === mindmapId - ? { - ...m, - corpus: (m.corpus ?? []).map((e) => (e.id === entryId ? { ...e, note } : e)), - updatedAt: Date.now(), - } - : m, - ), - })) - }, - - clearCorpus: (mindmapId) => { - set((state) => ({ - mindmaps: state.mindmaps.map((m) => - m.id === mindmapId - ? { ...m, corpus: [], lastGeneratedAt: undefined, updatedAt: Date.now() } - : m, - ), - })) - }, - addMonitoredConversation: (mindmapId, conversationId) => { set((state) => ({ mindmaps: state.mindmaps.map((m) => @@ -422,16 +308,6 @@ export const useMindmapStore = create()( })) }, - addBatchCorpusEntries: (mindmapId, entries) => { - set((state) => ({ - mindmaps: state.mindmaps.map((m) => - m.id === mindmapId - ? { ...m, corpus: [...(m.corpus ?? 
[]), ...entries], updatedAt: Date.now() } - : m, - ), - })) - }, - setCollapsedNodeIds: (id, nodeIds) => { set((state) => ({ mindmaps: state.mindmaps.map((m) => @@ -442,7 +318,7 @@ export const useMindmapStore = create()( }), { name: 'mindmap-store', - version: 2, + version: 3, storage: createJSONStorage(() => createIndexedDBStorage()), }, ), diff --git a/src/types/index.ts b/src/types/index.ts index 2c18b77..8c1dc5a 100644 --- a/src/types/index.ts +++ b/src/types/index.ts @@ -1,11 +1,4 @@ export type { Provider, Model } from './provider' export type { Message, MessageRole, MessageStatus } from './message' export type { Conversation } from './conversation' -export type { - MindMap, - MindMapNode, - CorpusEntry, - IncrementalOperation, - IncrementalResult, - ChangeRecord, -} from './mindmap' +export type { MindMap, MindMapNode } from './mindmap' diff --git a/src/types/mindmap.ts b/src/types/mindmap.ts index 8adad44..dc471e8 100644 --- a/src/types/mindmap.ts +++ b/src/types/mindmap.ts @@ -10,67 +10,12 @@ export interface MindMapNode { editedByUser: boolean } -export interface CorpusEntry { - id: string - messageId: string - selectedText?: string - range?: { start: number; end: number } - note?: string - enabled: boolean - addedAt: number -} - -export interface OperationNodeContent { - label: string - summary: string - content?: string - contentType?: 'text' | 'markdown' -} - -export interface OperationNodeChanges { - label?: string - summary?: string - content?: string - contentType?: 'text' | 'markdown' -} - -export type IncrementalOperation = - | { op: 'add_child'; parent_id: string; node: OperationNodeContent } - | { op: 'update'; node_id: string; changes: OperationNodeChanges } - | { op: 'merge'; from_id: string; to_id: string } - | { op: 'delete_leaf'; node_id: string } - | { op: 'noop' } - -export interface IncrementalResult { - analysis: string - operations: IncrementalOperation[] -} - -export interface ChangeRecord { - op: IncrementalOperation['op'] - nodeId: 
string - description: string - timestamp: number -} - -export interface ValidationWarning { - type: 'duplicate' | 'empty-label' | 'depth-exceeded' | 'breadth-exceeded' - nodeLabel: string - message: string -} - export interface MindMap { id: string title: string tree: MindMapNode[] - corpus: CorpusEntry[] monitoredConversationIds: string[] collapsedNodeIds?: string[] - maxDepth?: number - generatorProviderId?: string - generatorModelId?: string - forceFullRebuild?: boolean - lastGeneratedAt?: number createdAt: number updatedAt: number }