-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathkernelweave_gui.py
More file actions
442 lines (371 loc) · 19.4 KB
/
kernelweave_gui.py
File metadata and controls
442 lines (371 loc) · 19.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
"""
KernelWeave Glass-Panel GUI (Rupert OS - Agentic Edition)
=========================================================
A completely rebuilt GUI for KernelWeave, renamed to Rupert.
Features:
- No visualizer (as requested).
- 4 Panels:
1. Prompt Terminal (Left)
2. Active Kernel Display (Right Top)
3. Active Tool Display (Right Middle)
4. Command Terminal (Right Bottom) - Shows commands executing in realtime!
- Strict read-only terminals.
- Concise system prompt to stop wasting tokens.
- **Agentic ReAct Loop**: Automatically executes tools and feeds results back!
- **Model Dropdown**: Lists available local models!
- **Preinstalled Skills**: Auto-installs Playwright and adds browser capability!
"""
import os
import sys
import time
import json
import threading
import queue
import re
import subprocess
from pathlib import Path
import tkinter as tk
from tkinter import scrolledtext, messagebox, ttk
# Ensure kernelweave is importable
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
from kernelweave.kernel import KernelStore
from kernelweave.runtime import ExecutionEngine, KernelRuntime
from kernelweave_ollama import get_ollama_models, tool_web_search, tool_run_command, tool_read_file, tool_write_file, tool_list_dir
def ensure_dependency(package_name: str, import_name: str) -> bool:
    """Ensure *import_name* is importable, pip-installing *package_name* if not.

    Tries the running interpreter's pip first (venv-safe), then falls back to
    a bare ``pip`` on PATH.

    Args:
        package_name: Name to pass to ``pip install``.
        import_name: Module name to test with ``importlib.import_module``.

    Returns:
        True if the module is (or becomes) importable, False if installation
        failed both ways.
    """
    import importlib
    try:
        importlib.import_module(import_name)
        return True
    except ImportError:
        pass  # Not installed yet — fall through to installation below.
    print(f"[Setup] Installing missing dependency: {package_name}...")
    try:
        # Prefer the pip bound to sys.executable so venv installs land in the venv.
        subprocess.run([sys.executable, "-m", "pip", "install", package_name], check=True)
        return True
    except Exception as e:
        # Original swallowed the reason; include it so setup failures are debuggable.
        print(f"[Setup] Failed to install via venv pip ({e}), trying system pip...")
        try:
            subprocess.run(["pip", "install", package_name], check=True)
            return True
        except Exception as e2:
            print(f"[Setup] Failed to install {package_name}: {e2}")
            return False
# Auto-install playwright at import time so tool_browser_browse works on first
# launch. NOTE: this may shell out to pip, so the very first start can be slow.
ensure_dependency("playwright", "playwright")
def tool_browser_browse(url):
    """Open *url* in a visible Chromium window and report its title plus a
    500-character text snippet of the page body.

    Returns a human/model-readable status string; never raises (errors are
    folded into the returned string).
    """
    try:
        from playwright.sync_api import sync_playwright
        with sync_playwright() as pw:
            # Headed mode on purpose: the user gets to watch the browser work.
            try:
                chromium = pw.chromium.launch(headless=False)
            except Exception:
                # Browser binaries are missing — fetch them, then retry once.
                print("[Setup] Installing Chromium binaries...")
                subprocess.run([sys.executable, "-m", "playwright", "install", "chromium"], check=True)
                chromium = pw.chromium.launch(headless=False)
            tab = chromium.new_page()
            tab.goto(url)
            time.sleep(2)  # Give it a second to be seen
            page_title = tab.title()
            snippet = tab.evaluate("() => document.body.innerText")[:500]
            chromium.close()
            return f"Successfully browsed {url}.\nTitle: {page_title}\nContent Snippet: {snippet}..."
    except Exception as e:
        return f"Browser error: {e}"
# Map of tools: registry keyed by the tool name the model emits in its JSON
# tool call ({"tool": ..., "args": {...}}); values are the callables that
# RupertGUI.run_tool dispatches to with **args.
TOOLS = {
    "web_search": tool_web_search,
    "run_command": tool_run_command,
    "read_file": tool_read_file,
    "write_file": tool_write_file,
    "list_dir": tool_list_dir,
    "browser_browse": tool_browser_browse
}
# ── Theme Colors (Electric Obsidian) ───────────────────────────
# Hex palette shared by every widget; change here to re-skin the whole GUI.
BG_COLOR = "#020508"       # Very deep obsidian blue-black (window background)
SURFACE_COLOR = "#07111e"  # Dark slate blue for panels / text areas
TEXT_COLOR = "#a5c4ec"     # Tech blue text (default foreground)
ACCENT_CYAN = "#00f0ff"    # Cyan — labels, user text, primary button
ACCENT_ORANGE = "#ff5500"  # Warning Orange — HALT button
ACCENT_GREEN = "#00ff66"   # Success Green — command terminal output
DIM_COLOR = "#102a45"      # Border / muted system-message color
# System prompt sent verbatim ahead of every user turn. Kept deliberately terse
# to save tokens; instructs the model to emit tool calls as fenced ```json
# blocks, which RupertGUI.async_execute extracts with a regex and dispatches
# via run_tool. (Runtime string — do not reformat.)
SYSTEM_PROMPT = """You are Rupert, an advanced autonomous AI operating system.
You are running on a local neuro-symbolic stack.
CRITICAL: Do not waste tokens writing explanations or bullshit. Be extremely concise.
If you need to use a tool, output the JSON tool call IMMEDIATELY. Do not explain why.
Wait for the tool execution result before continuing.
You must use tools by outputting a JSON object. For example:
```json
{
"tool": "browser_browse",
"args": {"url": "https://www.google.com"}
}
```
Available tools: `browser_browse`, `web_search`, `run_command`, `read_file`, `write_file`, `list_dir`.
"""
class RupertGUI:
    """Four-panel Tkinter front-end for the KernelWeave/Rupert agent.

    Left column: model/endpoint controls, a read-only prompt terminal, and the
    command input. Right column: active-kernel label, active-tool label, and a
    read-only command terminal showing tool executions.

    Threading model: model calls run on a daemon worker thread
    (``async_execute``); the worker never touches widgets directly but pushes
    tuples onto ``msg_queue``, which ``poll_queue`` drains on the Tk main loop
    every 100 ms.
    """

    def __init__(self, root):
        """Configure the root window, initialize state, build the UI, and
        start the queue polling loop.

        Args:
            root: the ``tk.Tk`` root window this GUI attaches to.
        """
        self.root = root
        self.root.title("Rupert OS - KernelWeave")
        self.root.geometry("1400x850")
        self.root.configure(bg=BG_COLOR)
        # Backend state
        self.runtime = None          # KernelRuntime; set in initialize_engine
        self.store = None            # KernelStore; set in initialize_engine
        self.stop_requested = False  # set by force_stop; polled by the worker thread
        self.executing = False       # guards against overlapping prompt submissions
        self.conversation_history = []  # flat "User: ..."/"Assistant: ..." strings
        # Queue for thread communication (worker thread -> Tk main loop)
        self.msg_queue = queue.Queue()
        # Presets shown as endpoint buttons (label -> base URL)
        self.presets = {
            "OLLAMA (Local)": "http://127.0.0.1:11434",
            "OPENAI": "https://api.openai.com/v1",
            "GEMINI": "https://generativelanguage.googleapis.com/v1"
        }
        # Setup UI
        self.create_layout()
        # Load store and scan models
        self.initialize_engine()
        # Start poll loop (re-arms itself every 100 ms inside poll_queue)
        self.root.after(100, self.poll_queue)

    def create_layout(self):
        """Construct every widget: left control/prompt column and right
        status/command column. Called once from __init__."""
        self.main_pane = tk.Frame(self.root, bg=BG_COLOR)
        self.main_pane.pack(fill='both', expand=True, padx=20, pady=20)
        # ── LEFT PANEL (Prompt Terminal & Controls) ───────────────────
        self.left_panel = tk.Frame(self.main_pane, bg=BG_COLOR)
        self.left_panel.pack(side='left', fill='both', expand=True, padx=(0, 10))
        # Model Selector (Dropdown!) — populated later by initialize_engine
        tk.Label(self.left_panel, text="// CORE MODEL", fg=ACCENT_CYAN, bg=BG_COLOR, font=('Courier', 10, 'bold')).pack(anchor='w')
        self.model_var = tk.StringVar()
        self.model_dropdown = ttk.Combobox(self.left_panel, textvariable=self.model_var, font=('Courier', 12))
        self.model_dropdown.pack(fill='x', pady=(2, 10))
        # Endpoint Presets
        tk.Label(self.left_panel, text="// ENDPOINT PRESETS", fg=ACCENT_CYAN, bg=BG_COLOR, font=('Courier', 10, 'bold')).pack(anchor='w')
        preset_frame = tk.Frame(self.left_panel, bg=BG_COLOR)
        preset_frame.pack(fill='x', pady=(2, 10))
        for name, url in self.presets.items():
            # NOTE(review): self.set_endpoint is referenced here but is not
            # defined anywhere in this file — clicking a preset button will
            # raise AttributeError. Confirm whether set_endpoint (presumably
            # it should fill self.url_entry with `u`) was dropped in a rewrite.
            # The lambda default `u=url` deliberately binds the loop value.
            btn = tk.Button(preset_frame, text=name.split()[0], bg=DIM_COLOR, fg=TEXT_COLOR, font=('Courier', 8, 'bold'), borderwidth=0, padx=5, pady=5,
                            command=lambda u=url: self.set_endpoint(u))
            btn.pack(side='left', padx=(0, 5))
        # Endpoint URL (free-form; read by async_execute per request)
        tk.Label(self.left_panel, text="// ENDPOINT URL", fg=ACCENT_CYAN, bg=BG_COLOR, font=('Courier', 10, 'bold')).pack(anchor='w')
        self.url_entry = tk.Entry(self.left_panel, bg=SURFACE_COLOR, fg=TEXT_COLOR, font=('Courier', 12), borderwidth=1, relief='solid', insertbackground=TEXT_COLOR)
        self.url_entry.pack(fill='x', pady=(2, 10), ipady=5)
        self.url_entry.insert(0, "http://127.0.0.1:11434")
        # Prompt Terminal — read-only (state='disabled'; toggled only in append_log)
        tk.Label(self.left_panel, text="// PROMPT TERMINAL", fg=ACCENT_CYAN, bg=BG_COLOR, font=('Courier', 10, 'bold')).pack(anchor='w')
        self.log_area = scrolledtext.ScrolledText(
            self.left_panel,
            bg=SURFACE_COLOR,
            fg=TEXT_COLOR,
            font=('Courier', 10),
            insertbackground=TEXT_COLOR,
            wrap=tk.WORD,
            borderwidth=1,
            relief='solid',
            state='disabled'
        )
        self.log_area.pack(fill='both', expand=True, pady=(2, 10))
        self.log_area.tag_config('user', foreground=ACCENT_CYAN)
        self.log_area.tag_config('bot', foreground=TEXT_COLOR)
        self.log_area.tag_config('system', foreground=DIM_COLOR)
        # Prompt Input — Enter key submits
        tk.Label(self.left_panel, text="// COMMAND INPUT", fg=ACCENT_CYAN, bg=BG_COLOR, font=('Courier', 10, 'bold')).pack(anchor='w')
        self.prompt_entry = tk.Entry(self.left_panel, bg=SURFACE_COLOR, fg=TEXT_COLOR, font=('Courier', 12), borderwidth=1, relief='solid', insertbackground=TEXT_COLOR)
        self.prompt_entry.pack(fill='x', pady=(2, 10), ipady=12)
        self.prompt_entry.bind("<Return>", lambda e: self.send_prompt())
        # Buttons
        btn_frame = tk.Frame(self.left_panel, bg=BG_COLOR)
        btn_frame.pack(fill='x')
        self.btn_send = tk.Button(btn_frame, text="TRANSMIT", bg=ACCENT_CYAN, fg=BG_COLOR, font=('Courier', 10, 'bold'), borderwidth=0, padx=20, pady=10, command=self.send_prompt)
        self.btn_send.pack(side='left', padx=(0, 10))
        self.btn_stop = tk.Button(btn_frame, text="HALT", bg=ACCENT_ORANGE, fg=TEXT_COLOR, font=('Courier', 10, 'bold'), borderwidth=0, padx=20, pady=10, command=self.force_stop)
        self.btn_stop.pack(side='left')
        # ── RIGHT PANEL (Status & Commands) ───────────────────────────
        self.right_panel = tk.Frame(self.main_pane, bg=BG_COLOR)
        self.right_panel.pack(side='right', fill='both', expand=True, padx=(10, 0))
        # Active Kernel Panel — text updated via 'update_kernel' queue messages
        tk.Label(self.right_panel, text="// ACTIVE KERNEL", fg=ACCENT_CYAN, bg=BG_COLOR, font=('Courier', 10, 'bold')).pack(anchor='w')
        self.kernel_label = tk.Label(self.right_panel, text="None", fg=TEXT_COLOR, bg=SURFACE_COLOR, font=('Courier', 12), anchor='w', padx=10, pady=10, relief='solid', borderwidth=1)
        self.kernel_label.pack(fill='x', pady=(2, 15))
        # Active Tool Panel — text updated via 'update_tool' queue messages
        tk.Label(self.right_panel, text="// ACTIVE TOOL", fg=ACCENT_CYAN, bg=BG_COLOR, font=('Courier', 10, 'bold')).pack(anchor='w')
        self.tool_label = tk.Label(self.right_panel, text="None", fg=TEXT_COLOR, bg=SURFACE_COLOR, font=('Courier', 12), anchor='w', padx=10, pady=10, relief='solid', borderwidth=1)
        self.tool_label.pack(fill='x', pady=(2, 15))
        # Command Terminal (shows tool executions in realtime; read-only)
        tk.Label(self.right_panel, text="// COMMAND TERMINAL", fg=ACCENT_CYAN, bg=BG_COLOR, font=('Courier', 10, 'bold')).pack(anchor='w')
        self.cmd_area = scrolledtext.ScrolledText(
            self.right_panel,
            bg=SURFACE_COLOR,
            fg=ACCENT_GREEN,
            font=('Courier', 10),
            insertbackground=TEXT_COLOR,
            wrap=tk.WORD,
            borderwidth=1,
            relief='solid',
            state='disabled'
        )
        self.cmd_area.pack(fill='both', expand=True, pady=(2, 0))

    def initialize_engine(self):
        """Open the KernelStore/KernelRuntime backend and populate the model
        dropdown from the local Ollama instance. Errors are logged to the
        prompt terminal rather than raised."""
        self.append_log("Rupert: Initializing Core Systems...", "system")
        try:
            self.store = KernelStore(Path("store"))
            self.runtime = KernelRuntime(self.store, use_embeddings=True)
            self.append_log(f"Rupert: Store online. {len(self.store.list_kernels())} kernels loaded.", "system")
            # Load models into dropdown; prefer granite4.1:8b when available.
            models = get_ollama_models()
            if models:
                self.model_dropdown['values'] = models
                if "granite4.1:8b" in models:
                    self.model_dropdown.set("granite4.1:8b")
                else:
                    self.model_dropdown.set(models[0])
            else:
                # No local models detected — offer the default name anyway.
                self.model_dropdown['values'] = ["granite4.1:8b"]
                self.model_dropdown.set("granite4.1:8b")
        except Exception as e:
            self.append_log(f"Error initializing core: {e}", "system")

    def send_prompt(self):
        """Validate the input and kick off a daemon worker thread running
        async_execute. No-op if a request is already executing or the prompt
        is empty."""
        selected = self.model_var.get().strip()
        if not selected:
            messagebox.showerror("Error", "Select model.")
            return
        if self.executing: return
        prompt = self.prompt_entry.get().strip()
        if not prompt: return
        self.append_log(f"\nUser: {prompt}", "user")
        self.prompt_entry.delete(0, tk.END)
        self.executing = True
        self.stop_requested = False
        threading.Thread(target=self.async_execute, args=(prompt, selected), daemon=True).start()

    def async_execute(self, prompt, selected):
        """Worker-thread body: route the prompt through the kernel runtime,
        then run an agentic ReAct loop (up to 3 iterations) against the
        Ollama /api/generate streaming endpoint.

        Each iteration streams tokens to the GUI; if the response contains a
        fenced ```json tool call, the tool is executed and its result is
        appended to the prompt for the next iteration. All GUI updates go
        through msg_queue. Always ends by queuing ('done',).

        Args:
            prompt: the user's text.
            selected: model name to pass to Ollama.
        """
        try:
            # 1. Routing through the kernel runtime (picks an active kernel).
            plan = self.runtime.run(prompt)
            mode = plan['mode']  # NOTE(review): currently unused — confirm intent.
            kernel_id = plan.get('kernel_id', 'None')
            self.msg_queue.put(('update_kernel', kernel_id))
            # 2. Execution Loop (Agentic ReAct!)
            self.msg_queue.put(('log', "Rupert > ", "bot"))
            base_url = self.url_entry.get().strip()
            url = f"{base_url}/api/generate"
            # Only the last 4 history lines are replayed to keep the prompt small.
            history_text = "\n".join(self.conversation_history[-4:]) if self.conversation_history else ""
            current_prompt = f"{SYSTEM_PROMPT}\n\nRecent History:\n{history_text}\n\nUser: {prompt}"
            max_iterations = 3
            # NOTE(review): if stop is requested before the first iteration,
            # full_response below is never bound; the NameError at the history
            # append is swallowed by the outer except. Confirm acceptable.
            for i in range(max_iterations):
                if self.stop_requested: break
                import urllib.request  # local import; cached after first iteration
                body = {"model": selected, "prompt": current_prompt, "stream": True}
                req = urllib.request.Request(url, data=json.dumps(body).encode('utf-8'), headers={"content-type": "application/json"})
                full_response = ""
                try:
                    self.msg_queue.put(('update_tool', "Thinking..."))
                    first_token = True
                    # Ollama streams one JSON object per line.
                    with urllib.request.urlopen(req, timeout=30) as response:
                        for line in response:
                            if self.stop_requested: break
                            if line:
                                chunk = json.loads(line.decode('utf-8'))
                                # Support standard response or reasoning/thinking fields
                                token = chunk.get("response", "")
                                reasoning = chunk.get("reasoning_content", "") or chunk.get("thinking", "")
                                # If there is reasoning content, prioritize it
                                if reasoning:
                                    token = reasoning
                                # Clear the "Thinking..." indicator on first real token.
                                if first_token and token.strip():
                                    self.msg_queue.put(('update_tool', "None"))
                                    first_token = False
                                full_response += token
                                self.msg_queue.put(('stream', token))
                    self.msg_queue.put(('stream', "\n"))
                    # Check for tool calls: fenced ```json blocks, or a bare
                    # top-level JSON object as a fallback.
                    blocks = re.findall(r'```json\s*(.*?)\s*```', full_response, re.DOTALL)
                    if not blocks:
                        if full_response.strip().startswith("{") and full_response.strip().endswith("}"):
                            blocks = [full_response.strip()]
                    if blocks:
                        try:
                            data = json.loads(blocks[0])
                            if "tool" in data and "args" in data:
                                tool = data["tool"]
                                args = data["args"]
                                self.msg_queue.put(('update_tool', tool))
                                self.msg_queue.put(('cmd', f"Executing tool: {tool}"))
                                # Execute tool synchronously on this worker thread.
                                result = self.run_tool(tool, args)
                                self.msg_queue.put(('cmd', f"Result: {result}"))
                                # Feed the tool result back for the next ReAct turn.
                                current_prompt += f"\n\nResponse:\n{full_response}\n\nTool Result ({tool}):\n{result}\n\nPlease continue based on this result."
                                self.msg_queue.put(('log', f"Rupert [Continuing loop {i+1}] > ", "bot"))
                                continue
                        except Exception as e:
                            self.msg_queue.put(('cmd', f"Failed to parse or execute tool: {e}"))
                    # No (valid) tool call — the turn is complete.
                    break
                except Exception as e:
                    self.msg_queue.put(('log', f"Ollama error: {e}", "system"))
                    break
            self.conversation_history.append(f"User: {prompt}")
            self.conversation_history.append(f"Assistant: {full_response}")
            self.msg_queue.put(('done',))
        except Exception as e:
            self.msg_queue.put(('log', f"Error: {e}", "system"))
            self.msg_queue.put(('done',))

    def run_tool(self, tool, args):
        """Dispatch a model-requested tool call through the TOOLS registry.

        Applies Windows-specific arg fixups first. Returns the tool's result
        string, or an error string; never raises.

        Args:
            tool: tool name as emitted by the model.
            args: dict of keyword arguments for the tool (mutated in place
                by the fixups below).
        """
        try:
            if tool in TOOLS:
                # Resolve /tmp on windows.
                # NOTE(review): target path "e:/kernelweave/store/" is
                # machine-specific — confirm this shouldn't be derived from
                # the store directory instead of hard-coded.
                for k, v in args.items():
                    if isinstance(v, str) and v.startswith("/tmp/"):
                        args[k] = v.replace("/tmp/", "e:/kernelweave/store/")
                # Fix xdg-open on windows (models often emit Linux commands).
                if tool == "run_command":
                    cmd = args.get("command")
                    if cmd and "xdg-open" in cmd:
                        args["command"] = cmd.replace("xdg-open", "start")
                result = TOOLS[tool](**args)
                return result
            return f"Tool '{tool}' not found in registry."
        except Exception as e:
            return f"Error executing tool: {e}"

    def poll_queue(self):
        """Drain msg_queue on the Tk main loop and apply each message to the
        widgets; re-arms itself every 100 ms.

        Message shapes: ('log', text[, tag]), ('stream', token),
        ('update_kernel', id), ('update_tool', name), ('cmd', text), ('done',).
        """
        try:
            while True:
                msg = self.msg_queue.get_nowait()
                msg_type = msg[0]
                if msg_type == 'log':
                    self.append_log(msg[1], msg[2] if len(msg) > 2 else "bot")
                elif msg_type == 'stream':
                    # Streamed tokens are inserted raw (no trailing newline).
                    self.log_area.config(state='normal')
                    self.log_area.insert(tk.END, msg[1], 'bot')
                    self.log_area.config(state='disabled')
                    self.log_area.see(tk.END)
                elif msg_type == 'update_kernel':
                    self.kernel_label.config(text=msg[1])
                elif msg_type == 'update_tool':
                    self.tool_label.config(text=msg[1])
                elif msg_type == 'cmd':
                    self.cmd_area.config(state='normal')
                    self.cmd_area.insert(tk.END, msg[1] + "\n")
                    self.cmd_area.config(state='disabled')
                    self.cmd_area.see(tk.END)
                elif msg_type == 'done':
                    # Worker finished — allow the next prompt submission.
                    self.executing = False
                    self.tool_label.config(text="None")
                self.msg_queue.task_done()
        except queue.Empty:
            pass
        self.root.after(100, self.poll_queue)

    def append_log(self, text, tag="bot"):
        """Append a line to the read-only prompt terminal using the given
        color tag ('user', 'bot', or 'system') and scroll to the bottom."""
        self.log_area.config(state='normal')
        self.log_area.insert(tk.END, text + "\n", tag)
        self.log_area.config(state='disabled')
        self.log_area.see(tk.END)

    def force_stop(self):
        """Request the worker thread to halt; it checks stop_requested
        between streamed tokens and between ReAct iterations."""
        self.stop_requested = True
        self.append_log("\n[SYSTEM] Emergency Halt requested.", "system")
if __name__ == "__main__":
    # Entry point: create the Tk root, attach the GUI, and block in the
    # Tk event loop until the window is closed.
    root = tk.Tk()
    app = RupertGUI(root)
    root.mainloop()