Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion aider/__init__.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from packaging import version

__version__ = "0.90.4.dev"
__version__ = "0.90.5.dev"
safe_version = __version__

try:
Expand Down
2 changes: 1 addition & 1 deletion aider/args.py
Original file line number Diff line number Diff line change
Expand Up @@ -332,7 +332,7 @@ def get_parser(default_config_files, git_root):
)
group.add_argument(
"--context-compaction-max-tokens",
type=int,
type=float,
default=None,
help=(
"The maximum number of tokens in the conversation before context compaction is"
Expand Down
1 change: 1 addition & 0 deletions aider/coders/agent_prompts.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ class AgentPrompts(CoderPrompts):
- **Act Proactively**: Autonomously use file discovery and context management tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, `View`, `Remove`) to gather information and fulfill the user's request. Chain tool calls across multiple turns to continue exploration.
- **Be Decisive**: Trust that your initial findings are valid. Refrain from asking the same question or searching for the same term in multiple similar ways.
- **Be Concise**: Keep all responses brief and direct (1-3 sentences). Avoid preamble, postamble, and unnecessary explanations. Do not repeat yourself.
- **Be Careful**: Break updates down into smaller, more manageable chunks. Focus on one thing at a time.
</context>

<context name="workflow_and_tool_usage">
Expand Down
10 changes: 2 additions & 8 deletions aider/coders/base_coder.py
Original file line number Diff line number Diff line change
Expand Up @@ -306,7 +306,6 @@ def __init__(
context_compaction_summary_tokens=8192,
map_cache_dir=".",
repomap_in_memory=False,
preserve_todo_list=False,
linear_output=False,
):
# initialize from args.map_cache_dir
Expand All @@ -321,13 +320,6 @@ def __init__(

self.auto_copy_context = auto_copy_context
self.auto_accept_architect = auto_accept_architect
self.preserve_todo_list = preserve_todo_list

if self.preserve_todo_list:
self.io.tool_warning(
"--preserve-todo-list is deprecated; todo lists are now saved and restored with"
" sessions. The flag will be removed in a future release."
)

self.ignore_mentions = ignore_mentions
if not self.ignore_mentions:
Expand Down Expand Up @@ -2344,6 +2336,8 @@ async def send_message(self, inp):
return
except Exception as e:
self.io.tool_error(f"Error processing tool calls: {str(e)}")
self.reflected_message = True
return
# Continue without tool processing

self.num_tool_calls = 0
Expand Down
6 changes: 5 additions & 1 deletion aider/io.py
Original file line number Diff line number Diff line change
Expand Up @@ -1414,7 +1414,11 @@ def replace_json(match):

# Match b'{...}', b"[...]", '{...}', "[...]"
# Handle escaped quotes with (?<!\\)
text = re.sub(r"b?(['\"])([\{\[].*?)(?<!\\)\1", replace_json, text, flags=re.DOTALL)
try:
new_text = re.sub(r"b?(['\"])([\{\[].*?)(?<!\\)\1", replace_json, text, flags=re.DOTALL)
return new_text
except Exception:
pass

return text

Expand Down
12 changes: 9 additions & 3 deletions aider/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -1193,10 +1193,17 @@ def apply_model_overrides(model_name):
else:
map_tokens = args.map_tokens

if args.enable_context_compaction and args.context_compaction_max_tokens is None:
if args.enable_context_compaction and (
args.context_compaction_max_tokens is None or args.context_compaction_max_tokens < 1
):
max_input_tokens = main_model.info.get("max_input_tokens")
ratio = 0.8

if args.context_compaction_max_tokens:
ratio = args.context_compaction_max_tokens

if max_input_tokens:
args.context_compaction_max_tokens = int(max_input_tokens * 0.8)
args.context_compaction_max_tokens = int(max_input_tokens * ratio)

try:
# Load MCP servers from config string or file
Expand Down Expand Up @@ -1249,7 +1256,6 @@ def apply_model_overrides(model_name):
context_compaction_summary_tokens=args.context_compaction_summary_tokens,
map_cache_dir=args.map_cache_dir,
repomap_in_memory=args.map_memory_cache,
preserve_todo_list=args.preserve_todo_list,
linear_output=args.linear_output,
)

Expand Down
2 changes: 1 addition & 1 deletion aider/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,7 @@
class ModelSettings:
# Model class needs to have each of these as well
name: str
edit_format: str = "whole"
edit_format: str = "diff"
weak_model_name: Optional[str] = None
use_repo_map: bool = False
send_undo_reply: bool = False
Expand Down
4 changes: 2 additions & 2 deletions aider/tools/insert_block.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,8 @@ class Tool(BaseTool):
"function": {
"name": "InsertBlock",
"description": (
"Insert a block of content into a file. Only use one of: after_pattern,"
" before_pattern, position."
"Insert a block of content into a file. Mutually Exclusive Parameters:"
" after_pattern, before_pattern, position."
),
"parameters": {
"type": "object",
Expand Down
4 changes: 2 additions & 2 deletions aider/tools/show_numbered_context.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,8 @@ class Tool(BaseTool):
"function": {
"name": "ShowNumberedContext",
"description": (
"Show numbered lines of context around a pattern or line number. Only use one of:"
" pattern, line_number"
"Show numbered lines of context around a pattern or line number. Mutually Exclusive"
" Parameters: pattern, line_number"
),
"parameters": {
"type": "object",
Expand Down
153 changes: 102 additions & 51 deletions aider/tui/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,40 +69,51 @@ def __init__(self, coder_worker, output_queue, input_queue, args):
)

self.bind(
self.tui_config["key_bindings"]["newline"], "noop", description="New Line", show=True
self._encode_keys(self.get_keys_for("newline")),
"noop",
description="New Line",
show=True,
)
self.bind(
self.tui_config["key_bindings"]["submit"], "noop", description="Submit", show=True
self._encode_keys(self.get_keys_for("submit")), "noop", description="Submit", show=True
)
self.bind(
self.tui_config["key_bindings"]["cycle_forward"],
self._encode_keys(self.get_keys_for("cycle_forward")),
"noop",
description="Cycle Forward",
show=True,
)
self.bind(
self.tui_config["key_bindings"]["cycle_backward"],
self._encode_keys(self.get_keys_for("cycle_backward")),
"noop",
description="Cycle Backward",
show=True,
)
self.bind(
self.tui_config["key_bindings"]["cancel"], "noop", description="Cancel", show=True
self._encode_keys(self.get_keys_for("cancel")), "noop", description="Cancel", show=True
)

self.bind(
self.tui_config["key_bindings"]["focus"],
self._encode_keys(self.get_keys_for("focus")),
"focus_input",
description="Focus Input",
show=True,
)
self.bind(
self.tui_config["key_bindings"]["stop"], "interrupt", description="Interrupt", show=True
self._encode_keys(self.get_keys_for("stop")),
"interrupt",
description="Interrupt",
show=True,
)
self.bind(
self.tui_config["key_bindings"]["clear"], "clear_output", description="Clear", show=True
self._encode_keys(self.get_keys_for("clear")),
"clear_output",
description="Clear",
show=True,
)
self.bind(
self._encode_keys(self.get_keys_for("focus")), "quit", description="Quit", show=True
)
self.bind(self.tui_config["key_bindings"]["focus"], "quit", description="Quit", show=True)

self.register_theme(BASE_THEME)
self.theme = "aider"
Expand Down Expand Up @@ -267,10 +278,10 @@ def update_key_hints(self, generating=False):
try:
hints = self.query_one(KeyHints)
if generating:
stop = self.app._decode_keys(self.app.tui_config["key_bindings"]["stop"])
stop = self.app.get_keys_for("stop")
hints.update(f"{stop} to cancel")
else:
submit = self.app._decode_keys(self.app.tui_config["key_bindings"]["submit"])
submit = self.app.get_keys_for("submit")
hints.update(f"{submit} to submit")
except Exception:
pass
Expand Down Expand Up @@ -486,17 +497,26 @@ def action_noop(self):
pass

def _encode_keys(self, key):
if key == "shift+enter":
return "ctrl+j"
key = key.replace("shift+enter", "ctrl+j")

return key

def _decode_keys(self, key):
if key == "ctrl+j":
return "shift+enter"
key = key.replace("ctrl+j", "shift+enter")

return key

def is_key_for(self, type, key):
    """Return True if *key* is one of the configured bindings for *type*.

    A binding entry may be a comma-separated list (e.g. "enter,ctrl+m"),
    so membership is tested against the split list, not the raw string.
    """
    return key in self.tui_config["key_bindings"][type].split(",")

def get_keys_for(self, type):
    """Return the user-facing key string configured for binding *type*."""
    return self._decode_keys(self.tui_config["key_bindings"][type])

def _do_quit(self):
"""Perform the actual quit after UI updates."""
self.worker.stop()
Expand Down Expand Up @@ -644,7 +664,16 @@ def _get_suggestions(self, text: str) -> list[str]:
suggestions = []
commands = self.worker.coder.commands

if text.startswith("/"):
if len(text) and text[-1] == " ":
return

if "@" in text:
# Symbol completion triggered by @
# Find the @ and get the prefix after it
at_index = text.rfind("@")
prefix = text[at_index + 1 :]
suggestions = self._get_symbol_completions(prefix)
elif text.startswith("/"):
# Command completion
parts = text.split(maxsplit=1)
cmd_part = parts[0]
Expand All @@ -661,7 +690,7 @@ def _get_suggestions(self, text: str) -> list[str]:
cmd_name = cmd_part
end_lookup = text.rsplit(maxsplit=1)

arg_prefix = end_lookup[1]
arg_prefix = end_lookup[-1]
arg_prefix_lower = arg_prefix.lower()

# Check if this command needs path-based completion
Expand Down Expand Up @@ -689,12 +718,6 @@ def _get_suggestions(self, text: str) -> list[str]:
suggestions = list(cmd_completions)
except Exception:
pass
elif "@" in text:
# Symbol completion triggered by @
# Find the @ and get the prefix after it
at_index = text.rfind("@")
prefix = text[at_index + 1 :]
suggestions = self._get_symbol_completions(prefix)
else:
# Check if last contiguous, no-space separated string contains a forward slash
# This allows path completions even without a leading slash
Expand All @@ -708,6 +731,34 @@ def _get_suggestions(self, text: str) -> list[str]:

return [str(s) for s in suggestions[:50]]

def _get_completed_text(self, current_text: str, completion: str) -> str:
"""Calculate the new text after applying completion."""
if current_text.startswith("/"):
parts = current_text.rsplit(maxsplit=1)
if len(parts) == 1:
# Replace entire command
# Only add space if command takes arguments
commands = self.worker.coder.commands
has_completions = commands.get_completions(completion) is not None
if has_completions:
return completion + " "
else:
return completion
else:
# Replace argument
return parts[0] + " " + completion
elif "@" in current_text:
# Replace from @ onwards with the symbol
at_index = current_text.rfind("@")
return current_text[:at_index] + completion + " "
else:
# Replace last word with completion
words = current_text.rsplit(maxsplit=1)
if len(words) > 1:
return words[0] + " " + completion
else:
return completion

def on_input_area_completion_requested(self, message: InputArea.CompletionRequested):
"""Handle completion request - show or update completion bar."""
input_area = self.query_one("#input", InputArea)
Expand Down Expand Up @@ -743,6 +794,13 @@ def on_input_area_completion_cycle(self, message: InputArea.CompletionCycle):
try:
completion_bar = self.query_one("#completion-bar", CompletionBar)
completion_bar.cycle_next()
selected = completion_bar.current_selection
if selected:
input_area = self.query_one("#input", InputArea)
# Use completion_prefix as base
base_text = input_area.completion_prefix
new_text = self._get_completed_text(base_text, selected)
input_area.set_completion_preview(new_text)
except Exception:
pass

Expand All @@ -751,6 +809,13 @@ def on_input_area_completion_cycle_previous(self, message: InputArea.CompletionC
try:
completion_bar = self.query_one("#completion-bar", CompletionBar)
completion_bar.cycle_previous()
selected = completion_bar.current_selection
if selected:
input_area = self.query_one("#input", InputArea)
# Use completion_prefix as base
base_text = input_area.completion_prefix
new_text = self._get_completed_text(base_text, selected)
input_area.set_completion_preview(new_text)
except Exception:
pass

Expand All @@ -775,43 +840,29 @@ def on_input_area_completion_dismiss(self, message: InputArea.CompletionDismiss)
def on_completion_bar_selected(self, message: CompletionBar.Selected):
"""Handle completion selection."""
input_area = self.query_one("#input", InputArea)
input_area.completion_active = False

# Insert the completion
current = input_area.value
# Use stored prefix as base for completion
current = input_area.completion_prefix
selected = message.value

if current.startswith("/"):
parts = current.split(maxsplit=1)
if len(parts) == 1:
# Replace entire command
# Only add space if command takes arguments
commands = self.worker.coder.commands
has_completions = commands.get_completions(selected) is not None
if has_completions:
input_area.value = selected + " "
else:
input_area.value = selected
else:
# Replace argument
input_area.value = parts[0] + " " + selected
elif "@" in current:
# Replace from @ onwards with the symbol
at_index = current.rfind("@")
input_area.value = current[:at_index] + selected + " "
else:
# Replace last word with completion
words = current.rsplit(maxsplit=1)
if len(words) > 1:
input_area.value = words[0] + " " + selected
else:
input_area.value = selected
new_text = self._get_completed_text(current, selected)

# Reset cycling state so the new value is registered as the new prefix
input_area._cycling = False
input_area.value = new_text
input_area.completion_active = False

input_area.focus()
input_area.cursor_position = len(input_area.value)

def on_completion_bar_dismissed(self, message: CompletionBar.Dismissed):
    """Handle completion bar dismissal."""
    field = self.query_one("#input", InputArea)

    # A cycling preview was showing; put back what the user had typed.
    if field._cycling:
        field.value = field.completion_prefix
        field._cycling = False

    field.completion_active = False
    field.focus()
Loading
Loading