Skip to content

Commit 4148b5d

Browse files
Shukriclaude
authored and committed
feat: multi-model AI analysis with auto-selection by context
Backend (src/ai.rs, src/routes/analysis.rs): - Add Model enum: Haiku 4.5 / Sonnet 4.6 / Opus 4.6 with api_id, display_name, max_tokens (512 / 1024 / 2048) - Add AnalysisContext struct + select_model() function: - Haiku: vitals events, info-level, < 5 frames, high-frequency (>500 events) - Opus: chained exceptions, > 30 frames, critical error with > 20 frames - Sonnet: everything else (balanced default) - analyse_issue() accepts &Model and adjusts depth instruction per tier - POST /api/issues/:id/analyze accepts optional JSON body { "model": "auto"|"haiku"|"sonnet"|"opus" } - Response includes model, model_auto (bool), model_reason (why auto chose it) - GET /api/issues/:id/analyze returns model_auto + model_reason from cache - Migration: add model_auto + model_reason columns to ai_analyses Frontend (IssueDetails.vue): - Model picker toggle bar: Auto / Haiku / Sonnet / Opus (with tooltips) - Sends chosen model in POST body; defaults to "auto" - Result footer shows auto-selection reason (italic), model badge colour-coded (sky=Haiku, violet=Sonnet, amber=Opus), auto-selected vs manual indicator Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
1 parent 2959c76 commit 4148b5d

15 files changed

Lines changed: 310 additions & 83 deletions

dashboard/src/views/IssueDetails.vue

Lines changed: 84 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -528,26 +528,45 @@
528528

529529
<!-- ── AI Analysis panel ──────────────────────────────────────────── -->
530530
<div class="mb-6">
531-
<div class="flex items-center justify-between mb-3">
531+
<div class="flex items-center justify-between mb-3 gap-3 flex-wrap">
532532
<h2 class="text-[11px] text-gray-500 uppercase tracking-wide font-medium flex items-center gap-1.5">
533533
<span>AI Analysis</span>
534534
<span v-if="ai?.cached" class="text-[9px] px-1.5 py-0.5 rounded bg-gray-700/60 text-gray-500 font-bold tracking-wider">cached</span>
535535
</h2>
536-
<button
537-
@click="runAnalysis"
538-
:disabled="aiLoading"
539-
class="flex items-center gap-1.5 text-[11px] px-3 py-1.5 rounded-lg font-medium transition-all
540-
bg-violet-600/20 text-violet-400 hover:bg-violet-600/30 disabled:opacity-50 disabled:cursor-not-allowed"
541-
>
542-
<svg v-if="aiLoading" class="animate-spin w-3 h-3" viewBox="0 0 24 24" fill="none">
543-
<circle class="opacity-25" cx="12" cy="12" r="10" stroke="currentColor" stroke-width="4"/>
544-
<path class="opacity-75" fill="currentColor" d="M4 12a8 8 0 018-8v8H4z"/>
545-
</svg>
546-
<svg v-else width="12" height="12" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
547-
<path d="M12 2L2 7l10 5 10-5-10-5zM2 17l10 5 10-5M2 12l10 5 10-5"/>
548-
</svg>
549-
{{ aiLoading ? 'Analysing…' : ai ? 'Re-analyse' : 'Analyse with AI' }}
550-
</button>
536+
537+
<div class="flex items-center gap-2 ml-auto">
538+
<!-- Model picker -->
539+
<div class="flex items-center bg-[#111119] border border-white/6 rounded-lg p-0.5 gap-0.5">
540+
<button
541+
v-for="m in MODEL_OPTIONS" :key="m.value"
542+
@click="selectedModel = m.value"
543+
:class="selectedModel === m.value
544+
? 'bg-[#1e1e30] text-white'
545+
: 'text-gray-500 hover:text-gray-300'"
546+
class="px-2.5 py-1 rounded-md text-[10px] font-medium transition-all whitespace-nowrap"
547+
:title="m.description"
548+
>
549+
{{ m.label }}
550+
</button>
551+
</div>
552+
553+
<!-- Analyse button -->
554+
<button
555+
@click="runAnalysis"
556+
:disabled="aiLoading"
557+
class="flex items-center gap-1.5 text-[11px] px-3 py-1.5 rounded-lg font-medium transition-all
558+
bg-violet-600/20 text-violet-400 hover:bg-violet-600/30 disabled:opacity-50 disabled:cursor-not-allowed"
559+
>
560+
<svg v-if="aiLoading" class="animate-spin w-3 h-3" viewBox="0 0 24 24" fill="none">
561+
<circle class="opacity-25" cx="12" cy="12" r="10" stroke="currentColor" stroke-width="4"/>
562+
<path class="opacity-75" fill="currentColor" d="M4 12a8 8 0 018-8v8H4z"/>
563+
</svg>
564+
<svg v-else width="12" height="12" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
565+
<path d="M12 2L2 7l10 5 10-5-10-5zM2 17l10 5 10-5M2 12l10 5 10-5"/>
566+
</svg>
567+
{{ aiLoading ? 'Analysing…' : ai ? 'Re-analyse' : 'Analyse with AI' }}
568+
</button>
569+
</div>
551570
</div>
552571

553572
<div v-if="aiError" class="bg-red-500/10 border border-red-500/20 rounded-xl px-4 py-3 text-sm text-red-400">
@@ -580,13 +599,32 @@
580599
<p class="text-[10px] text-gray-600 uppercase tracking-wide font-medium mb-1.5">Prevention</p>
581600
<p class="text-[13px] text-gray-400 leading-relaxed">{{ ai.prevention }}</p>
582601
</div>
583-
<p class="text-[10px] text-gray-700 text-right">
584-
Powered by {{ ai.model }} · {{ ai.cached ? 'cached result' : 'fresh analysis' }}
585-
</p>
602+
603+
<!-- Model footer -->
604+
<div class="flex items-center justify-between flex-wrap gap-2">
605+
<div v-if="ai.model_auto && ai.model_reason"
606+
class="flex items-center gap-1.5 text-[10px] text-gray-600">
607+
<svg width="10" height="10" viewBox="0 0 16 16" fill="none" stroke="currentColor" stroke-width="1.8">
608+
<circle cx="8" cy="8" r="6"/><line x1="8" y1="5" x2="8" y2="8"/><circle cx="8" cy="11" r="0.5" fill="currentColor"/>
609+
</svg>
610+
<span class="italic">{{ ai.model_reason }}</span>
611+
</div>
612+
<p class="text-[10px] text-gray-700 ml-auto">
613+
<span :class="modelBadgeColor(ai.model)" class="font-semibold">{{ ai.model }}</span>
614+
· {{ ai.model_auto ? 'auto-selected' : 'manual' }}
615+
· {{ ai.cached ? 'cached' : 'fresh' }}
616+
</p>
617+
</div>
586618
</div>
587619

588620
<div v-else class="bg-[#111119] border border-white/6 rounded-xl px-4 py-6 text-center">
589-
<p class="text-[13px] text-gray-500">Click <span class="text-violet-400 font-medium">Analyse with AI</span> to get root cause, fix suggestions, and prevention tips powered by Claude.</p>
621+
<p class="text-[13px] text-gray-500">
622+
Click <span class="text-violet-400 font-medium">Analyse with AI</span> to get root cause,
623+
fix suggestions, and prevention tips.
624+
</p>
625+
<p class="text-[11px] text-gray-700 mt-1">
626+
Model: <span class="text-gray-500">{{ selectedModel === 'auto' ? 'auto-selected based on issue complexity' : selectedModelLabel }}</span>
627+
</p>
590628
</div>
591629
</div>
592630

@@ -608,7 +646,7 @@
608646
</template>
609647

610648
<script setup>
611-
import { ref, onMounted } from 'vue'
649+
import { computed, ref, onMounted } from 'vue'
612650
import { useRoute, useRouter } from 'vue-router'
613651
import axios from 'axios'
614652
@@ -619,6 +657,27 @@ const loading = ref(true)
619657
const ai = ref(null)
620658
const aiLoading = ref(false)
621659
const aiError = ref(null)
660+
661+
// ── AI model selection ────────────────────────────────────────────────────────

// Options for the model picker toggle bar. `value` is what gets POSTed to the
// backend; `description` doubles as the button tooltip.
const MODEL_OPTIONS = [
  { value: 'auto', label: 'Auto', description: 'Automatically pick the best model based on issue complexity' },
  { value: 'haiku', label: 'Haiku', description: 'Claude Haiku 4.5 — fast, great for simple/high-frequency errors' },
  { value: 'sonnet', label: 'Sonnet', description: 'Claude Sonnet 4.6 — balanced accuracy and speed (recommended)' },
  { value: 'opus', label: 'Opus', description: 'Claude Opus 4.6 — most capable, best for deep/complex issues' },
]

// Currently-selected picker value; 'auto' defers the choice to the backend.
const selectedModel = ref('auto')

// Description of the current selection, shown in the empty-state footer.
// Falls back to an empty string for an unknown value.
const selectedModelLabel = computed(() => {
  const current = MODEL_OPTIONS.find((opt) => opt.value === selectedModel.value)
  return current?.description ?? ''
})
672+
673+
// Map a model display name to the Tailwind text-colour class for its badge
// (sky = Haiku, violet = Sonnet, amber = Opus). Empty/absent names get the
// dimmest grey; recognised-but-unmatched names a slightly lighter grey.
const modelBadgeColor = (model) => {
  if (!model) return 'text-gray-500'
  const name = model.toLowerCase()
  const palette = [
    ['haiku', 'text-sky-400'],
    ['sonnet', 'text-violet-400'],
    ['opus', 'text-amber-400'],
  ]
  const match = palette.find(([tier]) => name.includes(tier))
  return match ? match[1] : 'text-gray-400'
}
622681
const showAssignee = ref(false)
623682
const assigneeInput = ref('')
624683
const expandedPlugins = ref(new Set())
@@ -763,7 +822,10 @@ async function runAnalysis() {
763822
aiLoading.value = true
764823
aiError.value = null
765824
try {
766-
const { data } = await axios.post(`/api/issues/${route.params.id}/analyze`)
825+
const { data } = await axios.post(
826+
`/api/issues/${route.params.id}/analyze`,
827+
{ model: selectedModel.value },
828+
)
767829
ai.value = data
768830
} catch (err) {
769831
if (err.response?.status === 429) {
Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
-- Add model selection metadata to ai_analyses
-- model_auto:   TRUE when the backend auto-selected the model (the default,
--               so pre-existing rows are treated as auto-selected);
--               FALSE when the user picked a model explicitly.
-- model_reason: human-readable explanation of why auto-selection chose the
--               model; NULL when the model was chosen manually.
-- IF NOT EXISTS makes the migration safe to re-run.
ALTER TABLE ai_analyses
ADD COLUMN IF NOT EXISTS model_auto BOOLEAN DEFAULT TRUE NOT NULL,
ADD COLUMN IF NOT EXISTS model_reason TEXT DEFAULT NULL;

src/ai.rs

Lines changed: 126 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -4,36 +4,133 @@ use serde::{Deserialize, Serialize};
44
use serde_json::{json, Value};
55

66
const ANTHROPIC_API: &str = "https://api.anthropic.com/v1/messages";
7-
const MODEL: &str = "claude-sonnet-4-6";
7+
8+
// ── Model registry ────────────────────────────────────────────────────────────

/// Claude model tiers available for issue analysis, ordered roughly by
/// capability/cost: `Haiku` (fast, cheap) → `Sonnet` (balanced default) →
/// `Opus` (most capable).
//
// Fieldless enum: `Copy`, `Eq` and `Hash` are free and make the type easier
// to pass around and use as a map key.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Model {
    /// Fast, cheap — simple/common errors, vitals, high-frequency issues.
    Haiku,
    /// Balanced — standard production errors (default).
    Sonnet,
    /// Most capable — deep stacks, exception chains, critical issues.
    Opus,
}

impl Model {
    /// Parse a user-supplied model name, case-insensitively.
    ///
    /// Accepts both the short tier name (`"haiku"`) and the versioned API
    /// id (`"claude-haiku-4-5"`). Returns `None` for anything else.
    ///
    /// NOTE(review): an inherent `from_str` returning `Option` is kept for
    /// caller compatibility; clippy's `should_implement_trait` would prefer
    /// a `std::str::FromStr` impl — consider adding one alongside later.
    pub fn from_str(s: &str) -> Option<Self> {
        match s.to_lowercase().as_str() {
            "haiku" | "claude-haiku-4-5" => Some(Self::Haiku),
            "sonnet" | "claude-sonnet-4-6" => Some(Self::Sonnet),
            "opus" | "claude-opus-4-6" => Some(Self::Opus),
            _ => None,
        }
    }

    /// Model identifier sent in the Anthropic API request body.
    pub fn api_id(&self) -> &'static str {
        match self {
            Self::Haiku => "claude-haiku-4-5-20251001",
            Self::Sonnet => "claude-sonnet-4-6",
            Self::Opus => "claude-opus-4-6",
        }
    }

    /// Human-readable name surfaced in the dashboard / analysis result.
    pub fn display_name(&self) -> &'static str {
        match self {
            Self::Haiku => "Claude Haiku 4.5",
            Self::Sonnet => "Claude Sonnet 4.6",
            Self::Opus => "Claude Opus 4.6",
        }
    }

    /// Response token budget per tier — larger models get room for deeper
    /// analysis (512 / 1024 / 2048).
    pub fn max_tokens(&self) -> u32 {
        match self {
            Self::Haiku => 512,
            Self::Sonnet => 1024,
            Self::Opus => 2048,
        }
    }
}
51+
52+
// ── Auto-selection logic ──────────────────────────────────────────────────────
53+
54+
pub struct AnalysisContext {
55+
pub stacktrace_frames: usize,
56+
pub level: String,
57+
pub event_count: i64,
58+
pub has_exception_chain: bool,
59+
pub is_vitals: bool,
60+
}
61+
62+
/// Pick the most appropriate model based on issue complexity signals.
63+
/// Returns (Model, reason_string).
64+
pub fn select_model(ctx: &AnalysisContext) -> (Model, &'static str) {
65+
// Vitals events are pure metrics — arithmetic, not reasoning
66+
if ctx.is_vitals {
67+
return (Model::Haiku, "Performance vitals — metrics interpretation doesn't need deep reasoning");
68+
}
69+
70+
// Deep complexity signals → Opus
71+
if ctx.has_exception_chain {
72+
return (Model::Opus, "Chained exception detected — multi-layer cause analysis requires the most capable model");
73+
}
74+
if ctx.stacktrace_frames > 30 {
75+
return (Model::Opus, "Large stack trace — deep call-graph analysis benefits from extended reasoning");
76+
}
77+
if ctx.level == "error" && ctx.stacktrace_frames > 20 {
78+
return (Model::Opus, "Critical error with complex stack — thorough root-cause reasoning selected");
79+
}
80+
81+
// Simple signals → Haiku
82+
if ctx.is_vitals || ctx.level == "info" {
83+
return (Model::Haiku, "Info-level event — lightweight analysis is sufficient");
84+
}
85+
if ctx.stacktrace_frames < 5 {
86+
return (Model::Haiku, "Short stack trace — fast model is sufficient for simple errors");
87+
}
88+
if ctx.event_count > 500 {
89+
return (Model::Haiku, "High-frequency issue — well-known error pattern, fast analysis appropriate");
90+
}
91+
92+
// Default: balanced Sonnet for standard production errors
93+
(Model::Sonnet, "Standard production error — balanced model selected for accuracy and speed")
94+
}
95+
96+
// ── Analysis result ───────────────────────────────────────────────────────────
897

998
#[derive(Debug, Serialize, Deserialize)]
1099
pub struct AiAnalysis {
11-
pub root_cause: String,
12-
pub explanation: String,
13-
pub fix_suggestion: String,
14-
pub code_example: Option<String>,
15-
pub severity: String,
16-
pub prevention: Option<String>,
17-
pub model: String,
100+
pub root_cause: String,
101+
pub explanation: String,
102+
pub fix_suggestion: String,
103+
pub code_example: Option<String>,
104+
pub severity: String,
105+
pub prevention: Option<String>,
106+
pub model: String,
107+
pub model_auto: bool,
108+
pub model_reason: Option<String>,
18109
}
19110

20-
/// Analyse an error issue using Claude.
21-
/// `title` — issue title / exception type
22-
/// `stacktrace` — formatted stack trace string
23-
/// `platform` — e.g. "php", "javascript", "rust"
24-
/// `context` — optional extra context JSON
111+
// ── API call ──────────────────────────────────────────────────────────────────
112+
25113
pub async fn analyse_issue(
26-
client: &Client,
27-
api_key: &str,
28-
title: &str,
114+
client: &Client,
115+
api_key: &str,
116+
title: &str,
29117
stacktrace: &str,
30-
platform: &str,
31-
context: Option<&str>,
118+
platform: &str,
119+
context: Option<&str>,
120+
model: &Model,
121+
model_auto: bool,
122+
model_reason: Option<&str>,
32123
) -> Result<AiAnalysis, String> {
33124
let context_block = context
34125
.map(|c| format!("\n\n**Extra context:**\n```json\n{}\n```", c))
35126
.unwrap_or_default();
36127

128+
let depth_instruction = match model {
129+
Model::Haiku => "Be concise. One-sentence answers where possible.",
130+
Model::Sonnet => "Be thorough but focused. 2–4 sentences per field.",
131+
Model::Opus => "Be comprehensive. Trace the full call path, explain all contributing factors, and provide production-ready fix code.",
132+
};
133+
37134
let prompt = format!(
38135
r#"You are a senior software engineer specializing in debugging production errors.
39136
@@ -46,23 +143,25 @@ Analyze this error and respond with a JSON object (no markdown fences, raw JSON
46143
{stacktrace}
47144
```{context_block}
48145
146+
{depth_instruction}
147+
49148
Respond with exactly this JSON structure:
50149
{{
51150
"root_cause": "one-sentence root cause",
52-
"explanation": "2-4 sentences explaining what went wrong and why",
151+
"explanation": "explanation of what went wrong and why",
53152
"fix_suggestion": "concise actionable fix instructions",
54-
"code_example": "optional code snippet showing the fix, or null",
153+
"code_example": "code snippet showing the fix, or null",
55154
"severity": "critical | high | medium | low",
56-
"prevention": "optional tip to prevent this class of error in future, or null"
155+
"prevention": "tip to prevent this class of error in future, or null"
57156
}}
58157
59158
Be specific to the actual stack trace. Do not hallucinate file names or functions not present in the trace."#
60159
);
61160

62161
let body = json!({
63-
"model": MODEL,
64-
"max_tokens": 1024,
65-
"messages": [{ "role": "user", "content": prompt }]
162+
"model": model.api_id(),
163+
"max_tokens": model.max_tokens(),
164+
"messages": [{ "role": "user", "content": prompt }]
66165
});
67166

68167
let resp = client
@@ -89,7 +188,6 @@ Be specific to the actual stack trace. Do not hallucinate file names or function
89188
.and_then(|c| c["text"].as_str())
90189
.ok_or("empty response from Claude")?;
91190

92-
// Strip any accidental markdown code fences
93191
let clean = text
94192
.trim()
95193
.trim_start_matches("```json")
@@ -107,6 +205,8 @@ Be specific to the actual stack trace. Do not hallucinate file names or function
107205
code_example: parsed["code_example"].as_str().map(String::from),
108206
severity: parsed["severity"].as_str().unwrap_or("medium").to_string(),
109207
prevention: parsed["prevention"].as_str().map(String::from),
110-
model: MODEL.to_string(),
208+
model: model.display_name().to_string(),
209+
model_auto,
210+
model_reason: model_reason.map(String::from),
111211
})
112212
}

0 commit comments

Comments
 (0)