# NOTE(review): SOURCE arrived as a newline-mangled `git diff`. The non-Python
# hunks it carried are preserved here for reference so no content is lost:
#   .gitignore                    -> add ".envrc"
#   config/gesserit-config.json   -> add "ninja": ""
#   config/perlmutter-config.json -> add "ninja": ""
#   config/zaratan-config.json    -> add "ninja": "spack load ninja"
#   src/translate/codex/__init__.py -> new, empty file
# What follows is the reconstructed src/translate/codex/codex_translator.py
# (a new file in the diff), with concrete bugs fixed (marked BUGFIX).
"""Class that invokes Codex CLI to perform code translation."""
# std imports
import os
import shutil
import subprocess
import json
import time
import atexit
from pathlib import Path
from typing import List, Optional, Dict, Any

# local imports
from translator import Translator
from repo import Repo


class CodexTranslator(Translator):
    """Translator that uses OpenAI Codex CLI to perform code translation.

    Workflow: copy the input repo to a temp dir, git-init it, hand Codex a
    generated task prompt, let Codex edit files in place, then save the
    result (minus CUDA sources and `.git`) plus metadata and a log.
    """

    # Constants
    TEMP_REPO_PATH = "/tmp/temp_codex_repo"
    CONTAINER_REPO_PATH = "/temp_codex_repo"
    TRANSLATION_TASK_FILENAME = "translation_task.md"
    EXPERIMENT_METADATA_FILENAME = "experiment_metadata.json"
    SERVE_CHECK_COOLDOWN = 10
    _MAX_SERVE_CHECK_ATTEMPTS = 100
    VLLM_HOST = "127.0.0.1"
    VLLM_PORT = 8000

    # File extensions to remove from output
    REMOVE_EXTENSIONS = (".cu", ".cuh")

    # Git commands
    GIT_INIT = ["git", "init"]
    GIT_ADD_ALL = ["git", "add", "."]
    GIT_COMMIT_INITIAL = ["git", "commit", "-m", "Initial commit"]

    # Instance variables
    _codex_model_name: Optional[str]
    _vllm_environment: Optional[str]
    _vllm_yaml_config: Optional[str]

    _temp_repo_path: str
    _translation_task_path: str
    _output_path: str
    _vllm_launched_from_python: bool

    def __init__(
        self,
        input_repo: Repo,
        output_repos: List[os.PathLike],
        src_model: str,
        dst_model: str,
        dst_config: Dict[str, Any],
        log_interactions: bool = False,
        dry: bool = False,
        hide_progress: bool = False,
        codex_model_name: Optional[str] = None,
        codex_vllm_environment: Optional[str] = None,
        codex_vllm_yaml_config: Optional[str] = None,
    ) -> None:
        super().__init__(
            input_repo,
            output_repos,
            src_model,
            dst_model,
            dst_config,
            log_interactions=log_interactions,
            dry=dry,
            hide_progress=hide_progress,
        )

        self._codex_model_name = codex_model_name
        self._vllm_environment = codex_vllm_environment
        self._vllm_yaml_config = codex_vllm_yaml_config
        self._vllm_launched_from_python = False

        self._temp_repo_path = self.TEMP_REPO_PATH
        self._translation_task_path = os.path.join(
            self._input_repo.path, self.TRANSLATION_TASK_FILENAME
        )
        self._output_path = os.path.join(self._output_paths[0], "repo")

        if self._vllm_environment:
            self._launch_vllm_server(self._vllm_environment, self._vllm_yaml_config)
            self._vllm_launched_from_python = True
        else:
            print("Warning: --codex-vllm-environment not provided; assuming external vLLM server is running.")

    @staticmethod
    def add_args(parser: Any) -> None:
        """Add command line arguments for Codex configuration."""
        parser.add_argument("--codex-model-name", type=str,
            help="Model name to pass to Codex (e.g. 'openai/gpt-oss-120b').")
        parser.add_argument("--codex-vllm-environment", type=str,
            help="Path to the Python environment that has vLLM installed (e.g. ~/pssg-venv).")
        parser.add_argument("--codex-vllm-yaml-config", type=str,
            help="Path to vLLM YAML config file to pass via --config.")

    @staticmethod
    def parse_args(args: Any) -> Dict[str, Any]:
        """Parse command line arguments for Codex configuration."""
        return {
            "codex_model_name": args.codex_model_name,
            "codex_vllm_environment": args.codex_vllm_environment,
            "codex_vllm_yaml_config": args.codex_vllm_yaml_config,
        }

    def translate(self) -> None:
        """Execute the complete translation process using Codex CLI.

        The process includes:
        1. Generate translation task
        2. Initialize temporary repository
        3. Run Codex and capture in-place file changes
        4. Save translated output
        5. Clean up and write metadata
        """
        try:
            self._execute_translation_workflow()
        finally:
            # Temp repo must go away even if the workflow raised.
            self.cleanup_temp_repo()

    def _execute_translation_workflow(self) -> None:
        """Execute the main translation workflow steps."""
        self.generate_translation_task()
        self.initialize_temp_repo()

        if self.run_codex():
            self._fix_makefile_tabs_and_duplicates()
            print("Saving translated output...")
            self.save_output(self._output_path)
            self.remove_unnecessary_output_files()
            self.write_experiment_metadata()
            self._save_codex_log()
        else:
            print("Translation failed.")

    def generate_translation_task(self) -> None:
        """Generate the translation task file for Codex.

        Raises:
            IOError: if the task file cannot be written.
        """
        print("Generating translation task...")

        translation_task = self._create_translation_task_content()

        try:
            with open(self._translation_task_path, "w", encoding="utf-8") as f:
                f.write(translation_task)
            print(f"Translation task generated: {self._translation_task_path}")
        except IOError as e:
            print(f"Error writing translation task: {e}")
            raise

    def _create_translation_task_content(self) -> str:
        """Create the content for the translation task file."""
        data = self._dst_config

        prompt = (
            f"You are a helpful coding assistant. You are helping a software developer translate a "
            f"codebase from the {self._src_model} execution model to the {self._dst_model} execution "
            f"model.\n\n"
            f"The codebase is called {data['app']}. Its path is {self.TEMP_REPO_PATH}. Given this code "
            f"repository, translate the {data['app']} codebase's {self._src_model}-specific files to "
            f"the {self._dst_model} execution model.\n\n"
            f"The new files should be in {data['filename_desc']} and all old {self._src_model} files "
            f"must be deleted. You may use standard command-line tools (e.g., the `rm` command) to "
            f"remove obsolete {self._src_model}-specific files. A new {data['build_filename']} should "
            f"be made to compile accordingly with the new files.\n\n"
            f"Ensure that the user can compile this code using, for example, `{data['ex_build_cmd']}` "
            f"to build the code for {data['ex_build_desc']}. Ensure also that the command line "
            f"interface after translation still works as expected, so that, for example, "
            f"`{data['ex_run_cmd']}` still works to run the code with {data['ex_run_desc']}."
        )
        return prompt.strip()

    def initialize_temp_repo(self) -> None:
        """Initialize the temporary repository and perform initial Git setup."""
        print("Initializing temporary Git repository...")
        self._prepare_temp_directory()
        self._copy_source_to_temp()
        self._initialize_git_repo()

    def _prepare_temp_directory(self) -> None:
        """Remove existing temp directory if it exists."""
        if os.path.exists(self._temp_repo_path):
            print("The temporary repository exists. Removing the repository...")
            shutil.rmtree(self._temp_repo_path)

    def _copy_source_to_temp(self) -> None:
        """Copy the original repository to the temporary directory."""
        shutil.copytree(self._input_repo.path, self._temp_repo_path, dirs_exist_ok=True)

    def _initialize_git_repo(self) -> None:
        """Initialize Git repository and make initial commit."""
        subprocess.run(self.GIT_INIT, cwd=self._temp_repo_path, check=True)
        subprocess.run(self.GIT_ADD_ALL, cwd=self._temp_repo_path, check=True)
        subprocess.run(self.GIT_COMMIT_INITIAL, cwd=self._temp_repo_path, check=True)

    def run_codex(self) -> bool:
        """Run the Codex CLI command. Codex modifies files in-place.

        Returns:
            True if Codex exited with status 0, False otherwise. The full
            combined stdout/stderr stream is stored in ``self._codex_log``
            in every case.
        """
        try:
            with open(self._translation_task_path, "r", encoding="utf-8") as f:
                prompt = f.read()
        except IOError as e:
            print(f"Error reading translation task: {e}")
            return False

        command = self._build_codex_command(prompt)
        env = self._build_codex_env()
        print(f"Running Codex command: {' '.join(command[:4])} ...")

        # BUGFIX: initialize before the try block. The original only bound
        # log_lines inside the try and then probed `'log_lines' in dir()` in
        # the except handler -- a fragile unbound-variable workaround.
        log_lines: List[str] = []
        try:
            proc = subprocess.Popen(
                command, text=True, cwd=self._temp_repo_path, env=env,
                stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
            )
            # Stream output live while also capturing it for the log file.
            for line in proc.stdout:
                print(line, end="", flush=True)
                log_lines.append(line)
            proc.wait()
            self._codex_log = "".join(log_lines)
            if proc.returncode != 0:
                print(f"Codex exited with return code {proc.returncode}")
                return False
            print("Codex command executed successfully.")
            return True
        except Exception as e:
            self._codex_log = "".join(log_lines)
            print(f"An error occurred running Codex: {e}")
            return False

    def _build_codex_command(self, prompt: str) -> List[str]:
        """Build the Codex CLI command with all required parameters."""
        cmd = ["codex", "exec", "--sandbox", "danger-full-access"]
        if self._codex_model_name:
            cmd.extend(["--model", self._codex_model_name])
        cmd.append(prompt)
        return cmd

    def _build_codex_env(self) -> dict:
        """Build the subprocess environment for the Codex command."""
        env = os.environ.copy()
        if self._vllm_launched_from_python:
            base_url = f"http://{self.VLLM_HOST}:{self.VLLM_PORT}/v1"
            # Set both names: Node.js SDK (Codex) reads OPENAI_BASE_URL;
            # some tools also check OPENAI_API_BASE.
            env["OPENAI_BASE_URL"] = base_url
            env["OPENAI_API_BASE"] = base_url
            if "OPENAI_API_KEY" not in env:
                env["OPENAI_API_KEY"] = "dummy-local-ok"
        # Prevent interactive pagers from blocking Codex when it runs
        # git log, man, or other pager-triggering commands internally.
        env.setdefault("PAGER", "cat")
        env.setdefault("MANPAGER", "cat")
        env.setdefault("GIT_PAGER", "cat")
        env.setdefault("LESS", "-R")
        return env

    def save_output(self, output_dir: str) -> None:
        """Copy the contents of the temporary repository to the final output directory.

        Removes the .git directory to prepare for adding to the results repository.
        """
        try:
            if os.path.exists(output_dir):
                shutil.rmtree(output_dir)
            shutil.copytree(self._temp_repo_path, output_dir, dirs_exist_ok=True)

            # Remove .git directory
            git_dir = os.path.join(output_dir, ".git")
            if os.path.exists(git_dir):
                shutil.rmtree(git_dir)
        except (OSError, shutil.Error) as e:
            print(f"Error saving output: {e}")
            raise

    def remove_unnecessary_output_files(self) -> None:
        """Remove unnecessary files (any .cu or .cuh files) from the output."""
        print(f"Cleaning the output repository: {self._output_path}")

        try:
            self._remove_files_by_extension(self._output_path, self.REMOVE_EXTENSIONS)
            print(f"Finished cleaning the output repository: {self._output_path}")
        except OSError as e:
            print(f"Error cleaning output files: {e}")
            raise

    def _remove_files_by_extension(self, directory: str, extensions: tuple) -> None:
        """Remove files with specified extensions from a directory tree."""
        for root, _, files in os.walk(directory):
            for file in files:
                if file.endswith(extensions):
                    file_path = os.path.join(root, file)
                    os.remove(file_path)

    def _fix_makefile_tabs_and_duplicates(self) -> None:
        """Post-process the generated Makefile.

        1. Remove duplicated non-blank lines (LLMs occasionally emit lines
           twice). BUGFIX: the original deduplicated *every* line, which
           deleted all blank separator lines after the first one and any
           legitimately repeated recipe line, corrupting valid Makefiles.
           Blank lines are now always preserved.
           NOTE(review): identical non-blank recipe lines shared by two
           different rules are still collapsed -- confirm acceptable.
        2. Ensure the line after a rule/conditional header starts with a tab
           (Make requires a hard tab for recipe lines).
        """
        makefile = Path(self._temp_repo_path) / "Makefile"
        if not makefile.exists():
            return

        lines = makefile.read_text(encoding="utf-8", errors="replace").splitlines(True)

        # 1) Remove exact duplicate non-blank lines (preserve order)
        print("Removing duplicate lines in the Makefile...")
        seen = set()
        deduped = []
        for line in lines:
            if not line.strip():
                deduped.append(line)  # keep every blank line
            elif line not in seen:
                seen.add(line)
                deduped.append(line)
        lines = deduped

        # 2) Enforce Makefile tab rules
        print("Fixing Makefile tabs...")
        i = 0
        while i < len(lines) - 1:
            curr = lines[i].lstrip()
            nxt = lines[i + 1]

            is_rule = ":" in curr
            is_conditional = curr.startswith((
                "ifeq",
                "ifneq",
                "ifdef",
                "ifndef",
                "else"
            ))

            if is_rule or is_conditional:
                if nxt.strip() and not nxt.startswith("\t") and not nxt.lstrip().startswith("#"):
                    lines[i + 1] = "\t" + nxt
            i += 1

        makefile.write_text("".join(lines), encoding="utf-8")

    def write_experiment_metadata(self) -> None:
        """Write experiment metadata to a JSON file in the output directory.

        Raises:
            OSError: if the metadata file cannot be written.
            TypeError, ValueError: if the metadata is not JSON-serializable.
        """
        exp_meta_fpath = os.path.join(self._output_path, "..", self.EXPERIMENT_METADATA_FILENAME)

        try:
            os.makedirs(os.path.dirname(exp_meta_fpath), exist_ok=True)

            metadata = self._create_experiment_metadata()

            with open(exp_meta_fpath, "w", encoding="utf-8") as f:
                json.dump(metadata, f, indent=4)

            print(f"Experiment metadata written to {exp_meta_fpath}.")
        # BUGFIX: the original caught `json.JSONEncodeError`, which does not
        # exist (json.dump raises TypeError/ValueError on encode failures).
        # Evaluating the except clause would itself raise AttributeError the
        # moment any error actually occurred.
        except (OSError, TypeError, ValueError) as e:
            print(f"Error writing experiment metadata: {e}")
            raise

    def _create_experiment_metadata(self) -> Dict[str, Any]:
        """Create the experiment metadata dictionary.

        NOTE(review): assumes the output path looks like .../output-NNN/repo
        so that the parent directory name yields the run number after
        stripping a 7-character prefix -- confirm against the output layout.
        """
        output_number = int(self._output_path.split("/")[-2][7:])

        return {
            "app": self._dst_config["app"],
            "prompt_strategy": "Codex",
            "llm_name": self._codex_model_name,
            "source_model": self._src_model,
            "dest_model": self._dst_model,
            "output_number": output_number,
            "path": self._output_path,
        }

    def _save_codex_log(self) -> None:
        """Save the captured Codex CLI output to a log file in the output directory."""
        log_path = os.path.join(self._output_path, "..", "codex_log.txt")
        try:
            with open(log_path, "w", encoding="utf-8") as f:
                f.write(getattr(self, "_codex_log", ""))
            print(f"Codex log saved to {log_path}")
        except OSError as e:
            print(f"Error saving Codex log: {e}")

    def cleanup_temp_repo(self) -> None:
        """Remove the temporary repository."""
        print("Cleaning up temporary repository...")

        try:
            if os.path.exists(self._temp_repo_path):
                shutil.rmtree(self._temp_repo_path)
                print("Temporary repository cleaned up.")
        except OSError as e:
            print(f"Error cleaning up temporary repository: {e}")
            # Don't raise here as this is cleanup code

    def _launch_vllm_server(self, environment_path: str, yaml_config: Optional[str] = None):
        """Launch a vLLM server in the background using the Python environment
        directory provided.

        Returns the Popen handle of the launched server, or None if a server
        already answers the health endpoint.

        Raises:
            RuntimeError: if the server never passes its health check.
        """
        # Early exit if vLLM server is already running
        if subprocess.run(
            ["curl", f"http://{self.VLLM_HOST}:{self.VLLM_PORT}/health"],
            capture_output=True, text=True, check=False
        ).returncode == 0:
            return None
        py_executable = os.path.join(environment_path, "bin", "python")
        vllm_command = [
            py_executable, "-m", "vllm.entrypoints.openai.api_server",
            "--tool-call-parser", "openai",
            "--enable-auto-tool-choice",
            "--reasoning-parser", "openai_gptoss",
            "--host", self.VLLM_HOST,
            "--port", str(self.VLLM_PORT),
        ]
        vllm_api_key = os.getenv("VLLM_API_KEY")
        if self._codex_model_name is not None:
            vllm_command.extend(["--model", self._codex_model_name])
        if vllm_api_key is not None:
            vllm_command.extend(["--api-key", vllm_api_key])
        if yaml_config:
            vllm_command.extend(["--config", yaml_config])
        print("Full vLLM subprocess command:", " ".join(vllm_command))
        vllm_server = subprocess.Popen(vllm_command)
        # Ping the server until it is ready at the health endpoint
        checking, num_attempts = True, 0
        while checking and num_attempts < self._MAX_SERVE_CHECK_ATTEMPTS:
            status = subprocess.run(
                ["curl", f"http://{self.VLLM_HOST}:{self.VLLM_PORT}/health"],
                capture_output=True, text=True, check=False
            )
            if status.returncode == 0:
                checking = False
            else:
                num_attempts += 1
                # BUGFIX: do not sleep after the final failed attempt.
                if num_attempts < self._MAX_SERVE_CHECK_ATTEMPTS:
                    print(f"vLLM server not ready, checking again after {self.SERVE_CHECK_COOLDOWN} seconds...")
                    time.sleep(self.SERVE_CHECK_COOLDOWN)
        if checking:
            # BUGFIX: the original fell through here, printed "ready." and
            # registered the terminate hook even when every health check
            # failed, letting translation proceed against a dead server.
            vllm_server.terminate()
            raise RuntimeError(
                f"vLLM server failed health check after "
                f"{self._MAX_SERVE_CHECK_ATTEMPTS} attempts."
            )
        atexit.register(vllm_server.terminate)
        print("vLLM server ready.")
        return vllm_server
# NOTE(review): reconstructed src/translate/codex/codex_vllm_proxy.py (a new
# file in the newline-mangled diff). In the original diff this file was
# followed by new files src/translate/opencode/__init__.py and
# opencode_translator.py, which are truncated in this view and left as-is.
"""Lightweight proxy that sits between Codex CLI and vLLM.

Rewrites tool types that vLLM's /v1/responses endpoint does not support:
  - "custom" -> "function" (freeform grammar tools like apply_patch)
  - "local_shell" -> "function" (built-in shell tool)
  - strips "web_search" / "image_generation" (unsupported, rarely needed)

On the response path it converts function_call items back to the format
Codex expects (custom_tool_call / local_shell_call).

Usage:
    python codex_vllm_proxy.py [--vllm-url http://127.0.0.1:8008] [--port 9000]

Then point Codex at the proxy:
    export OPENAI_BASE_URL="http://127.0.0.1:9000/v1"
"""

import argparse
import json
import sys
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.request import Request, urlopen
from urllib.error import HTTPError

# ---------------------------------------------------------------------------
# Tool rewriting helpers
# ---------------------------------------------------------------------------

# Track which tools were rewritten so we can fix the response.
# NOTE(review): module-global state -- fine for the single-threaded
# HTTPServer used here, but stale entries from a previous request can leak
# into the next response rewrite if a request-body rewrite fails; confirm
# acceptable before moving to a threading server.
_rewritten_tools = {}  # name -> original type


def rewrite_request_input(body):
    """Sanitize the 'input' conversation history for vLLM compatibility.

    On multi-turn requests Codex sends back the full conversation history
    including item types that vLLM's /v1/responses endpoint cannot parse
    (e.g. reasoning items, custom_tool_call, custom_tool_call_output,
    local_shell_call, local_shell_call_output). We convert or strip
    these so that vLLM only sees types it understands.
    """
    if "input" not in body or not isinstance(body["input"], list):
        return body

    new_input = []
    for item in body["input"]:
        if not isinstance(item, dict):
            new_input.append(item)
            continue

        item_type = item.get("type", "")

        # --- reasoning items: strip entirely (not needed for generation) ---
        if item_type == "reasoning":
            continue

        # --- custom_tool_call -> function_call ---
        if item_type == "custom_tool_call":
            raw_input = item.get("input", "")
            new_input.append({
                "type": "function_call",
                "call_id": item.get("call_id", ""),
                "name": item.get("name", ""),
                "arguments": json.dumps({"input": raw_input}),
            })
            continue

        # --- custom_tool_call_output -> function_call_output ---
        if item_type == "custom_tool_call_output":
            new_input.append({
                "type": "function_call_output",
                "call_id": item.get("call_id", ""),
                "output": item.get("output", ""),
            })
            continue

        # --- local_shell_call -> function_call ---
        if item_type == "local_shell_call":
            action = item.get("action", {})
            args = {}
            if "command" in action:
                args["command"] = action["command"]
            if "workdir" in action:
                args["workdir"] = action["workdir"]
            if "timeout_ms" in action:
                args["timeout_ms"] = action["timeout_ms"]
            new_input.append({
                "type": "function_call",
                "call_id": item.get("call_id", ""),
                "name": "local_shell",
                "arguments": json.dumps(args),
            })
            continue

        # --- local_shell_call_output -> function_call_output ---
        if item_type == "local_shell_call_output":
            new_input.append({
                "type": "function_call_output",
                "call_id": item.get("call_id", ""),
                "output": item.get("output", ""),
            })
            continue

        # --- message items: drop reasoning_text entries from content ---
        # BUGFIX(comment): the original comment claimed encrypted_content was
        # also stripped, but only reasoning_text entries are filtered.
        if item_type == "message" and isinstance(item.get("content"), list):
            cleaned = item.copy()
            cleaned["content"] = [
                c for c in item["content"]
                if not (isinstance(c, dict) and c.get("type") == "reasoning_text")
            ]
            new_input.append(cleaned)
            continue

        # --- pass through everything else unchanged ---
        new_input.append(item)

    body["input"] = new_input
    return body


def rewrite_request_tools(body):
    """Rewrite tools in the request body to types vLLM supports."""
    _rewritten_tools.clear()

    if "tools" not in body:
        return body

    new_tools = []
    for tool in body["tools"]:
        tool_type = tool.get("type", "")

        if tool_type == "custom":
            # Convert freeform/grammar tool to a function tool with a
            # single string "input" parameter. The model should still
            # produce the same grammar-formatted text as the "input" value.
            _rewritten_tools[tool["name"]] = "custom"
            new_tools.append({
                "type": "function",
                "name": tool["name"],
                "description": tool.get("description", ""),
                "parameters": {
                    "type": "object",
                    "properties": {
                        "input": {
                            "type": "string",
                            "description": "The freeform tool input.",
                        }
                    },
                    "required": ["input"],
                    "additionalProperties": False,
                },
            })

        elif tool_type == "local_shell":
            # Convert built-in local_shell to a function tool matching
            # Codex's own "shell" function schema.
            _rewritten_tools["local_shell"] = "local_shell"
            new_tools.append({
                "type": "function",
                "name": "local_shell",
                "description": "Run a shell command and return its output.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "command": {
                            "type": "array",
                            "items": {"type": "string"},
                            "description": "The command to execute.",
                        },
                        "workdir": {
                            "type": "string",
                            "description": "Working directory.",
                        },
                        "timeout_ms": {
                            "type": "number",
                            "description": "Timeout in milliseconds.",
                        },
                    },
                    "required": ["command"],
                    "additionalProperties": False,
                },
            })

        elif tool_type in ("web_search", "image_generation"):
            # Strip unsupported built-in types
            continue

        else:
            # Pass through function tools and anything else unchanged
            new_tools.append(tool)

    body["tools"] = new_tools
    return body


def rewrite_response_output(body):
    """Rewrite function_call items back to custom_tool_call / local_shell_call
    for tools that were originally non-function types."""
    if "output" not in body:
        return body

    new_output = []
    for item in body["output"]:
        # BUGFIX: guard non-dict items, matching rewrite_request_input.
        # The original called item.get() unconditionally and would raise
        # AttributeError on any non-dict entry in the output list.
        if not isinstance(item, dict):
            new_output.append(item)
            continue

        item_type = item.get("type", "")

        if item_type == "function_call":
            name = item.get("name", "")
            original_type = _rewritten_tools.get(name)

            if original_type == "custom":
                # Extract the raw input string from the function arguments
                try:
                    args = json.loads(item.get("arguments", "{}"))
                    raw_input = args.get("input", item.get("arguments", ""))
                except (json.JSONDecodeError, TypeError):
                    raw_input = item.get("arguments", "")

                new_output.append({
                    "type": "custom_tool_call",
                    "call_id": item.get("call_id", item.get("id", "")),
                    "name": name,
                    "input": raw_input,
                })
                continue

            elif original_type == "local_shell":
                try:
                    args = json.loads(item.get("arguments", "{}"))
                except (json.JSONDecodeError, TypeError):
                    args = {}

                action = {"type": "exec"}
                if "command" in args:
                    action["command"] = args["command"]
                if "workdir" in args:
                    action["workdir"] = args["workdir"]
                if "timeout_ms" in args:
                    action["timeout_ms"] = args["timeout_ms"]

                new_output.append({
                    "type": "local_shell_call",
                    "call_id": item.get("call_id", item.get("id", "")),
                    "action": action,
                    "status": item.get("status", "completed"),
                })
                continue

        new_output.append(item)

    body["output"] = new_output
    return body


# ---------------------------------------------------------------------------
# HTTP Proxy
# ---------------------------------------------------------------------------

class CodexVLLMProxy(BaseHTTPRequestHandler):
    """Forwards Codex requests to vLLM, rewriting bodies in both directions."""

    vllm_url = "http://127.0.0.1:8008"

    def _proxy(self, method="GET", body=None):
        """Forward one request to vLLM and relay the (rewritten) response."""
        target_url = self.vllm_url + self.path

        headers = {"Content-Type": "application/json"}
        for key in ("Authorization", "Accept"):
            val = self.headers.get(key)
            if val:
                headers[key] = val

        # Rewrite request body if needed
        if body and self.path.endswith("/responses"):
            try:
                data = json.loads(body)
                data = rewrite_request_input(data)
                data = rewrite_request_tools(data)
                body = json.dumps(data).encode()
            except (json.JSONDecodeError, TypeError):
                # Forward the body unmodified if it isn't valid JSON.
                pass

        req = Request(target_url, data=body, headers=headers, method=method)

        try:
            resp = urlopen(req, timeout=600)
            resp_body = resp.read()
        except HTTPError as e:
            # Relay vLLM's error response verbatim.
            resp_body = e.read()
            self.send_response(e.code)
            self.send_header("Content-Type", "application/json")
            self.end_headers()
            self.wfile.write(resp_body)
            return

        # Rewrite response body if needed
        if self.path.endswith("/responses") and _rewritten_tools:
            try:
                resp_data = json.loads(resp_body)
                resp_data = rewrite_response_output(resp_data)
                resp_body = json.dumps(resp_data).encode()
            except (json.JSONDecodeError, TypeError):
                pass

        self.send_response(resp.status)
        self.send_header("Content-Type", resp.headers.get("Content-Type", "application/json"))
        self.end_headers()
        self.wfile.write(resp_body)

    def do_GET(self):
        self._proxy("GET")

    def do_POST(self):
        length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(length) if length else None
        self._proxy("POST", body)

    def log_message(self, fmt, *args):
        # Prefix log lines for clarity. (Parameter renamed from `format`
        # to avoid shadowing the builtin; base class calls it positionally.)
        sys.stderr.write(f"[codex-proxy] {fmt % args}\n")


def main():
    """Parse CLI flags and run the proxy server until interrupted."""
    parser = argparse.ArgumentParser(description="Codex <-> vLLM proxy")
    parser.add_argument("--vllm-url", default="http://127.0.0.1:8008",
                        help="Base URL of the vLLM server (default: http://127.0.0.1:8008)")
    parser.add_argument("--port", type=int, default=9000,
                        help="Port for the proxy to listen on (default: 9000)")
    parser.add_argument("--host", default="127.0.0.1",
                        help="Host for the proxy to bind to (default: 127.0.0.1)")
    args = parser.parse_args()

    CodexVLLMProxy.vllm_url = args.vllm_url
    server = HTTPServer((args.host, args.port), CodexVLLMProxy)
    print(f"Codex-vLLM proxy listening on http://{args.host}:{args.port}")
    print(f"Forwarding to vLLM at {args.vllm_url}")
    print(f"Set: export OPENAI_BASE_URL=\"http://{args.host}:{args.port}/v1\"")

    try:
        server.serve_forever()
    except KeyboardInterrupt:
        print("\nProxy stopped.")
        server.server_close()


if __name__ == "__main__":
    main()
+""" +# std imports +import os +import shutil +import subprocess +import json +import time +import atexit +from pathlib import Path +from typing import List, Optional, Dict, Any, Tuple + +# local imports +from translator import Translator +from repo import Repo + + +def _parse_provider_model(model_name: str) -> Tuple[str, str]: + """Split 'provider/model/id' into ('provider', 'model/id'). + + The first path segment is the OpenCode provider ID; everything after + the first slash is the model ID passed to that provider. + + Examples: + 'localvllm/openai/gpt-oss-120b' -> ('localvllm', 'openai/gpt-oss-120b') + 'openai/gpt-4o' -> ('openai', 'gpt-4o') + """ + if "/" not in model_name: + raise ValueError( + f"--opencode-model-name must be in 'provider/model' format, got: {model_name!r}" + ) + provider_id, model_id = model_name.split("/", 1) + return provider_id, model_id + + +class OpenCodeTranslator(Translator): + """Translator that uses OpenCode CLI to perform code translation.""" + + # Constants + TEMP_REPO_PATH = "/tmp/temp_opencode_repo" + TRANSLATION_TASK_FILENAME = "translation_task.md" + EXPERIMENT_METADATA_FILENAME = "experiment_metadata.json" + SERVE_CHECK_COOLDOWN = 10 + _MAX_SERVE_CHECK_ATTEMPTS = 100 + VLLM_HOST = "127.0.0.1" + VLLM_PORT = 8000 + + # File extensions to remove from output + REMOVE_EXTENSIONS = (".cu", ".cuh") + + # Git commands + GIT_INIT = ["git", "init"] + GIT_ADD_ALL = ["git", "add", "."] + GIT_COMMIT_INITIAL = ["git", "commit", "-m", "Initial commit"] + + # Instance variables + _opencode_model_name: str + _provider_id: str + _model_id: str + _provider_base_url: str + _xdg_scratch_dir: str + _vllm_environment: Optional[str] + _vllm_yaml_config: Optional[str] + _vllm_launched_from_python: bool + + _temp_repo_path: str + _translation_task_path: str + _output_path: str + + def __init__( + self, + input_repo: Repo, + output_repos: List[os.PathLike], + src_model: str, + dst_model: str, + dst_config: Dict[str, Any], + log_interactions: bool = 
False, + dry: bool = False, + hide_progress: bool = False, + opencode_model_name: str = "localvllm/openai/gpt-oss-120b", + opencode_provider_base_url: str = "http://127.0.0.1:8008/v1", + opencode_xdg_scratch_dir: Optional[str] = None, + opencode_vllm_environment: Optional[str] = None, + opencode_vllm_yaml_config: Optional[str] = None, + ) -> None: + super().__init__( + input_repo, + output_repos, + src_model, + dst_model, + dst_config, + log_interactions=log_interactions, + dry=dry, + hide_progress=hide_progress, + ) + + self._opencode_model_name = opencode_model_name + self._provider_id, self._model_id = _parse_provider_model(opencode_model_name) + self._provider_base_url = opencode_provider_base_url + self._xdg_scratch_dir = opencode_xdg_scratch_dir or os.environ.get("PSCRATCH", "/tmp/opencode_xdg") + self._vllm_environment = opencode_vllm_environment + self._vllm_yaml_config = opencode_vllm_yaml_config + self._vllm_launched_from_python = False + + self._temp_repo_path = self.TEMP_REPO_PATH + self._translation_task_path = os.path.join( + self._input_repo.path, self.TRANSLATION_TASK_FILENAME + ) + self._output_path = os.path.join(self._output_paths[0], "repo") + + if self._vllm_environment: + self._launch_vllm_server(self._vllm_environment, self._vllm_yaml_config) + self._vllm_launched_from_python = True + self._provider_base_url = f"http://{self.VLLM_HOST}:{self.VLLM_PORT}/v1" + else: + print("Warning: --opencode-vllm-environment not provided; assuming external vLLM server is running.") + + @staticmethod + def add_args(parser: Any) -> None: + """Add command line arguments for OpenCode configuration.""" + parser.add_argument("--opencode-model-name", type=str, + default="localvllm/openai/gpt-oss-120b", + help="Model name in provider/model format (e.g. 'localvllm/openai/gpt-oss-120b').") + parser.add_argument("--opencode-provider-base-url", type=str, + default="http://127.0.0.1:8000/v1", + help="Base URL of the OpenAI-compatible vLLM server (e.g. 
'http://127.0.0.1:8008/v1').") + parser.add_argument("--opencode-xdg-scratch-dir", type=str, + help="Base directory for OpenCode XDG data/config. Defaults to $PSCRATCH if set, else /tmp/opencode_xdg.") + parser.add_argument("--opencode-vllm-environment", type=str, + help="Path to the Python environment that has vLLM installed. If provided, launches vLLM automatically.") + parser.add_argument("--opencode-vllm-yaml-config", type=str, + help="Path to vLLM YAML config file to pass via --config.") + + @staticmethod + def parse_args(args: Any) -> Dict[str, Any]: + """Parse command line arguments for OpenCode configuration.""" + return { + "opencode_model_name": args.opencode_model_name, + "opencode_provider_base_url": args.opencode_provider_base_url, + "opencode_xdg_scratch_dir": args.opencode_xdg_scratch_dir, + "opencode_vllm_environment": args.opencode_vllm_environment, + "opencode_vllm_yaml_config": args.opencode_vllm_yaml_config, + } + + def translate(self) -> None: + """Execute the complete translation process using OpenCode CLI.""" + try: + self._execute_translation_workflow() + finally: + self.cleanup_temp_repo() + + def _execute_translation_workflow(self) -> None: + """Execute the main translation workflow steps.""" + self.generate_translation_task() + self.initialize_temp_repo() + + if self.run_opencode(): + self._fix_makefile_tabs_and_duplicates() + print("Saving translated output...") + self.save_output(self._output_path) + self.remove_unnecessary_output_files() + self.write_experiment_metadata() + else: + print("Translation failed.") + + def generate_translation_task(self) -> None: + """Generate the translation task file for OpenCode.""" + print("Generating translation task...") + + translation_task = self._create_translation_task_content() + + try: + with open(self._translation_task_path, "w", encoding="utf-8") as f: + f.write(translation_task) + print(f"Translation task generated: {self._translation_task_path}") + except IOError as e: + print(f"Error 
writing translation task: {e}") + raise + + def _create_translation_task_content(self) -> str: + """Create the content for the translation task file.""" + data = self._dst_config + + prompt = ( + f"You are a helpful coding assistant. You are helping a software developer translate a " + f"codebase from the {self._src_model} execution model to the {self._dst_model} execution " + f"model.\n\n" + f"The codebase is called {data['app']}. Its path is {self.TEMP_REPO_PATH}. Given this code " + f"repository, translate the {data['app']} codebase's {self._src_model}-specific files to " + f"the {self._dst_model} execution model.\n\n" + f"The new files should be in {data['filename_desc']} and all old {self._src_model} files " + f"must be deleted. You may use standard command-line tools (e.g., the `rm` command) to " + f"remove obsolete {self._src_model}-specific files. A new {data['build_filename']} should " + f"be made to compile accordingly with the new files.\n\n" + f"Ensure that the user can compile this code using, for example, `{data['ex_build_cmd']}` " + f"to build the code for {data['ex_build_desc']}. Ensure also that the command line " + f"interface after translation still works as expected, so that, for example, " + f"`{data['ex_run_cmd']}` still works to run the code with {data['ex_run_desc']}." + ) + return prompt.strip() + + def initialize_temp_repo(self) -> None: + """Initialize the temporary repository and perform initial Git setup.""" + print("Initializing temporary Git repository...") + self._prepare_temp_directory() + self._copy_source_to_temp() + self._initialize_git_repo() + + def _prepare_temp_directory(self) -> None: + """Remove existing temp directory if it exists.""" + if os.path.exists(self._temp_repo_path): + print("The temporary repository exists. 
def run_opencode(self) -> bool:
    """Invoke the OpenCode CLI on the temporary repo; return True on success.

    Reads the previously generated task file as the prompt and runs
    `opencode run` inside the temp repository (OpenCode edits files
    in-place). Stdout is streamed to the console as it arrives and,
    when interaction logging is enabled, saved afterwards.
    """
    try:
        with open(self._translation_task_path, "r", encoding="utf-8") as task_file:
            prompt = task_file.read()
    except IOError as e:
        print(f"Error reading translation task: {e}")
        return False

    command = self._build_opencode_command(prompt)
    env = self._build_opencode_env()
    print(f"Running OpenCode command: {' '.join(command[:4])} ...")

    try:
        process = subprocess.Popen(
            command,
            text=True,
            cwd=self._temp_repo_path,
            env=env,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            stdin=subprocess.DEVNULL,
        )
        captured = []
        # Echo output live so long-running agent sessions stay visible.
        for chunk in process.stdout:
            print(chunk, end="", flush=True)
            captured.append(chunk)
        process.wait()
        if process.returncode != 0:
            print(f"OpenCode exited with non-zero status: {process.returncode}")
            return False
        print("OpenCode command executed successfully.")
        if self._log_interactions:
            self._write_interaction_log("".join(captured))
        return True
    except Exception as e:
        # Boundary handler: any failure while driving the CLI maps to False.
        print(f"An error occurred running OpenCode: {e}")
        return False
f: + f.write(stdout) + except OSError as e: + print(f"Warning: could not write interaction log: {e}") + + def _build_opencode_command(self, prompt: str) -> List[str]: + """Build the OpenCode CLI command.""" + return ["opencode", "run", "--format", "json", "--model", self._opencode_model_name, prompt] + + def _build_opencode_env(self) -> dict: + """Build the subprocess environment and write opencode.json config.""" + env = os.environ.copy() + + xdg_data = os.path.join(self._xdg_scratch_dir, "xdg-data") + xdg_config = os.path.join(self._xdg_scratch_dir, "xdg-config") + os.makedirs(os.path.join(xdg_data, "opencode"), exist_ok=True) + os.makedirs(os.path.join(xdg_config, "opencode"), exist_ok=True) + + env["XDG_DATA_HOME"] = xdg_data + env["XDG_CONFIG_HOME"] = xdg_config + + if "OPENAI_API_KEY" not in env: + env["OPENAI_API_KEY"] = "dummy-local-key" + + self._write_opencode_config(xdg_config) + return env + + def _write_opencode_config(self, xdg_config: str) -> None: + """Write opencode.json with the local vLLM provider configuration.""" + config = { + "$schema": "https://opencode.ai/config.json", + "tools": {"*": True}, + "provider": { + self._provider_id: { + "npm": "@ai-sdk/openai-compatible", + "name": "Local vLLM", + "options": { + "baseURL": self._provider_base_url, + "apiKey": "{env:OPENAI_API_KEY}", + }, + "models": { + self._model_id: { + "id": self._model_id, + "name": f"{self._model_id} via vLLM", + "reasoning": True, + "tool_call": True, + "tools": True, + "capabilities": {"temperature": True}, + } + }, + } + }, + } + + config_path = os.path.join(xdg_config, "opencode", "opencode.json") + with open(config_path, "w", encoding="utf-8") as f: + json.dump(config, f, indent=2) + + def save_output(self, output_dir: str) -> None: + """Copy the contents of the temporary repository to the final output directory.""" + try: + if os.path.exists(output_dir): + shutil.rmtree(output_dir) + shutil.copytree(self._temp_repo_path, output_dir, dirs_exist_ok=True) + + git_dir = 
os.path.join(output_dir, ".git") + if os.path.exists(git_dir): + shutil.rmtree(git_dir) + except (OSError, shutil.Error) as e: + print(f"Error saving output: {e}") + raise + + def remove_unnecessary_output_files(self) -> None: + """Remove unnecessary files (any .cu or .cuh files) from the output.""" + print(f"Cleaning the output repository: {self._output_path}") + + try: + self._remove_files_by_extension(self._output_path, self.REMOVE_EXTENSIONS) + print(f"Finished cleaning the output repository: {self._output_path}") + except OSError as e: + print(f"Error cleaning output files: {e}") + raise + + def _remove_files_by_extension(self, directory: str, extensions: tuple) -> None: + """Remove files with specified extensions from a directory tree.""" + for root, _, files in os.walk(directory): + for file in files: + if file.endswith(extensions): + os.remove(os.path.join(root, file)) + + def _fix_makefile_tabs_and_duplicates(self) -> None: + makefile = Path(self._temp_repo_path) / "Makefile" + if not makefile.exists(): + return + + lines = makefile.read_text(encoding="utf-8", errors="replace").splitlines(True) + + # Remove exact duplicate lines (preserve order) + print("Removing duplicate lines in the Makefile...") + seen = set() + deduped = [] + for line in lines: + if line not in seen: + seen.add(line) + deduped.append(line) + lines = deduped + + # Enforce Makefile tab rules + print("Fixing Makefile tabs...") + i = 0 + while i < len(lines) - 1: + curr = lines[i].lstrip() + nxt = lines[i + 1] + is_rule = ":" in curr + is_conditional = curr.startswith(("ifeq", "ifneq", "ifdef", "ifndef", "else")) + if is_rule or is_conditional: + if nxt.strip() and not nxt.startswith("\t") and not nxt.lstrip().startswith("#"): + lines[i + 1] = "\t" + nxt + i += 1 + + makefile.write_text("".join(lines), encoding="utf-8") + + def write_experiment_metadata(self) -> None: + """Write experiment metadata to a JSON file in the output directory.""" + exp_meta_fpath = 
def write_experiment_metadata(self) -> None:
    """Write experiment metadata to a JSON file next to the output repo.

    The file is placed one level above ``self._output_path`` (alongside
    the ``repo`` directory), creating parent directories as needed.

    Raises:
        OSError: if the file or its directory cannot be created.
        TypeError / ValueError: if the metadata is not JSON-serializable.
    """
    exp_meta_fpath = os.path.join(self._output_path, "..", self.EXPERIMENT_METADATA_FILENAME)

    try:
        os.makedirs(os.path.dirname(exp_meta_fpath), exist_ok=True)

        metadata = self._create_experiment_metadata()

        with open(exp_meta_fpath, "w", encoding="utf-8") as f:
            json.dump(metadata, f, indent=4)

        print(f"Experiment metadata written to {exp_meta_fpath}.")
    # json.dump raises TypeError for unserializable objects and ValueError
    # for circular references. The previously caught json.JSONEncodeError
    # does not exist (only JSONDecodeError does), so evaluating the except
    # clause would itself have raised AttributeError.
    except (OSError, TypeError, ValueError) as e:
        print(f"Error writing experiment metadata: {e}")
        raise


def _create_experiment_metadata(self) -> Dict[str, Any]:
    """Create the experiment metadata dictionary."""
    # Assumes the output path looks like ".../<7-char-prefix><N>/repo" so
    # the second-to-last component minus its first 7 characters is the
    # integer output ID — TODO confirm against the output-path layout.
    output_number = int(self._output_path.split("/")[-2][7:])

    return {
        "app": self._dst_config["app"],
        "prompt_strategy": "OpenCode",
        "llm_name": self._opencode_model_name,
        "source_model": self._src_model,
        "dest_model": self._dst_model,
        "output_number": output_number,
        "path": self._output_path,
    }


def cleanup_temp_repo(self) -> None:
    """Remove the temporary repository; errors are reported, not raised."""
    print("Cleaning up temporary repository...")

    try:
        if os.path.exists(self._temp_repo_path):
            shutil.rmtree(self._temp_repo_path)
            print("Temporary repository cleaned up.")
    except OSError as e:
        # Best-effort cleanup: don't mask an earlier failure by raising here.
        print(f"Error cleaning up temporary repository: {e}")
vllm_command.extend(["--api-key", vllm_api_key]) + if yaml_config: + vllm_command.extend(["--config", yaml_config]) + + print("Full vLLM subprocess command:", " ".join(vllm_command)) + vllm_server = subprocess.Popen(vllm_command) + + checking, num_attempts = True, 0 + while checking and num_attempts < self._MAX_SERVE_CHECK_ATTEMPTS: + status = subprocess.run( + ["curl", f"http://{self.VLLM_HOST}:{self.VLLM_PORT}/health"], + capture_output=True, text=True, check=False + ) + if status.returncode == 0: + checking = False + else: + print(f"vLLM server not ready, checking again after {self.SERVE_CHECK_COOLDOWN} seconds...") + time.sleep(self.SERVE_CHECK_COOLDOWN) + num_attempts += 1 + + atexit.register(vllm_server.terminate) + print("vLLM server ready.") + return vllm_server diff --git a/src/translate/swe_agent/swe_agent_translator.py b/src/translate/swe_agent/swe_agent_translator.py index 9cb1839d..0acb49a9 100644 --- a/src/translate/swe_agent/swe_agent_translator.py +++ b/src/translate/swe_agent/swe_agent_translator.py @@ -5,7 +5,10 @@ import shutil import subprocess import json -from typing import List, Optional, Dict, Any +import time +import atexit +from pathlib import Path +from typing import List, Optional, Dict, Any, Union # local imports from translator import Translator @@ -16,10 +19,13 @@ class SWEAgentTranslator(Translator): # Constants TEMP_REPO_PATH = "/tmp/temp_sweagent_repo" + CONTAINER_REPO_PATH = "/temp_sweagent_repo" TRANSLATION_TASK_FILENAME = "translation_task.md" TRAJECTORIES_DIR = "trajectories" PATCH_FILENAME = "temp.patch" EXPERIMENT_METADATA_FILENAME = "experiment_metadata.json" + SERVE_CHECK_COOLDOWN = 10 + _MAX_SERVE_CHECK_ATTEMPTS = 100 # File extensions to remove from output REMOVE_EXTENSIONS = (".cu", ".cuh") @@ -32,6 +38,10 @@ class SWEAgentTranslator(Translator): # Instance variables _swe_agent_model_name: str _swe_agent_per_instance_cost_limit: float + _swe_agent_config: Optional[List[str]] + _swe_agent_parser: Optional[str] + 
_swe_agent_max_input_token: Optional[int] + _temp_repo_path: str _translation_task_path: str _output_path: str @@ -47,7 +57,12 @@ def __init__( dry: bool = False, hide_progress: bool = False, swe_agent_model_name: Optional[str] = None, - swe_agent_per_instance_cost_limit: float = 0.06 + swe_agent_per_instance_cost_limit: float = 0.06, + swe_agent_config: Optional[Union[str, List[str]]] = None, + swe_agent_parser: Optional[str] = None, + swe_agent_max_input_token: Optional[int] = None, + vllm_environment: Optional[str] = None, + vllm_yaml_config: Optional[str] = None, ) -> None: super().__init__( input_repo, @@ -62,31 +77,101 @@ def __init__( self._swe_agent_model_name = swe_agent_model_name self._swe_agent_per_instance_cost_limit = swe_agent_per_instance_cost_limit + self._swe_agent_parser = swe_agent_parser + self._swe_agent_max_input_token = swe_agent_max_input_token + + # Handle a single-config file or multi-config files + if isinstance(swe_agent_config, str): + self._swe_agent_config = [swe_agent_config] + else: + self._swe_agent_config = swe_agent_config + self._temp_repo_path = self.TEMP_REPO_PATH self._translation_task_path = os.path.join( self._input_repo.path, self.TRANSLATION_TASK_FILENAME ) self._output_path = os.path.join(self._output_paths[0], "repo") + self._vllm_environment = vllm_environment + self._vllm_yaml_config = vllm_yaml_config + + if self._is_ollama_model(self._swe_agent_model_name): + if self._swe_agent_parser is None: + self._swe_agent_parser = "thought_action" + if self._swe_agent_max_input_token is None: + self._swe_agent_max_input_token = 4096 + self._launch_ollama_server() + + else: + if self._vllm_environment: + self._launch_vllm_server(self._vllm_environment, self._vllm_yaml_config) + else: + print("Warning: vLLM environment not provided; assuming external vLLM server is running.") + + @staticmethod + def _is_ollama_model(name: str) -> bool: + name = (name or "").lower() + return name.startswith("ollama/") + + + def 
def _launch_ollama_server(self) -> None:
    """Launch an Ollama server in the background and wait until it answers.

    Raises:
        ValueError: if the ``ollama`` binary is not on PATH.
        RuntimeError: if the server never becomes ready within
            ``_MAX_SERVE_CHECK_ATTEMPTS`` checks (previously this waited
            forever; capped for consistency with ``_launch_vllm_server``).
    """
    # Check that ollama is installed.
    if not shutil.which("ollama"):
        raise ValueError("Ollama is not in the path. Please install Ollama and add it to the path.")
    # Early exit if an ollama server is already answering.
    if subprocess.run(["ollama", "list"], capture_output=True, text=True).returncode == 0:
        return
    # Detach fully so the server survives and doesn't share our terminal.
    subprocess.Popen(["ollama", "serve"],
                     stdout=subprocess.DEVNULL,
                     stderr=subprocess.STDOUT,
                     stdin=subprocess.DEVNULL,
                     start_new_session=True)
    # Poll `ollama list` until the server responds, up to the shared cap.
    for _ in range(self._MAX_SERVE_CHECK_ATTEMPTS):
        status = subprocess.run(["ollama", "list"], capture_output=True, text=True)
        if status.returncode == 0:
            print("Ollama server ready.")
            return
        print(f"Ollama server not ready, checking again after {self.SERVE_CHECK_COOLDOWN} seconds...")
        time.sleep(self.SERVE_CHECK_COOLDOWN)
    raise RuntimeError(
        f"Ollama server failed to become ready after {self._MAX_SERVE_CHECK_ATTEMPTS} attempts."
    )
Use 'thought_action' for local/Ollama models.") + parser.add_argument("--swe-agent-max-input-token", type=int, + help="Override max input tokens to avoid local-model warnings.") + parser.add_argument("--vllm-environment", type=str, + help="Path to the Python environment that has vLLM installed (e.g. ~/pssg-venv).") + parser.add_argument("--vllm-yaml-config", type=str, + help="Path to vLLM YAML config file to pass via --config.") @staticmethod def parse_args(args: Any) -> Dict[str, Any]: """Parse command line arguments for SWE-agent configuration.""" return { "swe_agent_model_name": args.swe_agent_model_name, - "swe_agent_per_instance_cost_limit": args.swe_agent_per_instance_cost_limit + "swe_agent_per_instance_cost_limit": args.swe_agent_per_instance_cost_limit, + "swe_agent_config": args.swe_agent_config, + "swe_agent_parser": args.swe_agent_parser, + "swe_agent_max_input_token": args.swe_agent_max_input_token, + "vllm_environment": args.vllm_environment, + "vllm_yaml_config": args.vllm_yaml_config, } - def translate(self) -> None: """Execute the complete translation process using SWE-agent. @@ -108,6 +193,7 @@ def _execute_translation_workflow(self) -> None: self.initialize_temp_repo() if self.run_swe_agent(): + self._fix_makefile_tabs_and_duplicates() print("Saving translated output...") self.save_output(self._output_path) self.remove_unnecessary_output_files() @@ -138,12 +224,13 @@ def _create_translation_task_content(self) -> str: f"You are a helpful coding assistant. You are helping a software developer translate a " f"codebase from the {self._src_model} execution model to the {self._dst_model} execution " f"model.\n\n" - f"The codebase is called {data['app']}. Its path is {data['path']}. Given this code " + f"The codebase is called {data['app']}. Its path is {self.CONTAINER_REPO_PATH}. 
Given this code " f"repository, translate the {data['app']} codebase's {self._src_model}-specific files to " f"the {self._dst_model} execution model.\n\n" f"The new files should be in {data['filename_desc']} and all old {self._src_model} files " - f"must be deleted. A new {data['build_filename']} should be made to compile accordingly " - f"with the new files.\n\n" + f"must be deleted. You may use standard command-line tools (e.g., the `rm` command) to " + f"remove obsolete {self._src_model}-specific files. A new {data['build_filename']} should " + f"be made to compile accordingly with the new files.\n\n" f"Ensure that the user can compile this code using, for example, `{data['ex_build_cmd']}` " f"to build the code for {data['ex_build_desc']}. Ensure also that the command line " f"interface after translation still works as expected, so that, for example, " @@ -175,7 +262,6 @@ def _initialize_git_repo(self) -> None: subprocess.run(self.GIT_ADD_ALL, cwd=self._temp_repo_path, check=True) subprocess.run(self.GIT_COMMIT_INITIAL, cwd=self._temp_repo_path, check=True) - def run_swe_agent(self) -> bool: """Run the SWE-agent command and apply the resulting patch.""" command = self._build_swe_agent_command() @@ -197,15 +283,26 @@ def run_swe_agent(self) -> bool: def _build_swe_agent_command(self) -> List[str]: """Build the SWE-agent command with all required parameters.""" - return [ + cmd = [ "sweagent", "run", - f"--agent.model.name={self._swe_agent_model_name}", - f"--agent.model.per_instance_cost_limit={self._swe_agent_per_instance_cost_limit}", f"--env.repo.path={self._temp_repo_path}", - "--env.deployment.image=python", f"--problem_statement.path={self._translation_task_path}", ] + if self._swe_agent_model_name: + cmd.append(f"--agent.model.name={self._swe_agent_model_name}") + if self._swe_agent_per_instance_cost_limit: + cmd.append(f"--agent.model.per_instance_cost_limit={self._swe_agent_per_instance_cost_limit}") + if self._swe_agent_parser: + 
cmd.append(f"--agent.tools.parse_function.type={self._swe_agent_parser}") + if self._swe_agent_max_input_token: + cmd.append(f"--agent.model.max_input_tokens={self._swe_agent_max_input_token}") + if self._swe_agent_config: + for cfg in self._swe_agent_config: + cmd.extend(["--config", cfg]) + + return cmd + def _apply_swe_agent_patch(self) -> bool: """Find and apply the patch file generated by SWE-agent.""" print("Applying patch...") @@ -283,6 +380,45 @@ def _remove_files_by_extension(self, directory: str, extensions: tuple) -> None: file_path = os.path.join(root, file) os.remove(file_path) + def _fix_makefile_tabs_and_duplicates(self) -> None: + makefile = Path(self._temp_repo_path) / "Makefile" + if not makefile.exists(): + return + + lines = makefile.read_text(encoding="utf-8", errors="replace").splitlines(True) + + # 1) Remove exact duplicate lines (preserve order) + print("Removing duplicate lines in the Makefile...") + seen = set() + duplicates = [] + for line in lines: + if line not in seen: + seen.add(line) + duplicates.append(line) + lines = duplicates + + # 2) Enforce Makefile tab rules + print("Fixing Makefile tabs...") + i = 0 + while i < len(lines) - 1: + curr = lines[i].lstrip() + nxt = lines[i + 1] + + is_rule = ":" in curr + is_conditional = curr.startswith(( + "ifeq", + "ifneq", + "ifdef", + "ifndef", + "else" + )) + + if is_rule or is_conditional: + if nxt.strip() and not nxt.startswith("\t") and not nxt.lstrip().startswith("#"): + lines[i + 1] = "\t" + nxt + i += 1 + + makefile.write_text("".join(lines), encoding="utf-8") def write_experiment_metadata(self) -> None: """Write experiment metadata to a JSON file in the output directory.""" @@ -327,3 +463,44 @@ def cleanup_temp_repo(self) -> None: except OSError as e: print(f"Error cleaning up temporary repository: {e}") # Don't raise here as this is cleanup code + + def _launch_vllm_server(self, environment_path: str, yaml_config: Optional[str] = None): + """Launch a vLLM server in the background 
def _launch_vllm_server(self, environment_path: str, yaml_config: Optional[str] = None):
    """Launch a vLLM OpenAI-compatible server using the given Python env.

    Args:
        environment_path: root of a Python environment with vLLM installed.
        yaml_config: optional vLLM YAML config passed via ``--config``.

    Returns:
        The server's ``Popen`` handle, or ``None`` if a server already
        answers the health endpoint.

    Raises:
        RuntimeError: if the server never reports healthy within
            ``_MAX_SERVE_CHECK_ATTEMPTS`` checks (previously this printed
            "ready" even when every health check had failed).
    """
    health_url = "http://127.0.0.1:8000/health"

    # Early exit if a vLLM server is already running.
    if subprocess.run(["curl", health_url], capture_output=True,
                      text=True, check=False).returncode == 0:
        return None

    py_executable = os.path.join(environment_path, "bin", "python")
    vllm_command = [
        py_executable, "-m", "vllm.entrypoints.openai.api_server",
        "--tool-call-parser", "openai",
        "--enable-auto-tool-choice",
        "--reasoning-parser", "openai_gptoss",
        "--host", "127.0.0.1",
        "--port", "8000",
    ]
    if self._swe_agent_model_name is not None:
        vllm_command.extend(["--model", self._swe_agent_model_name])
    vllm_api_key = os.getenv("VLLM_API_KEY")
    if vllm_api_key is not None:
        vllm_command.extend(["--api-key", vllm_api_key])
    if yaml_config:
        vllm_command.extend(["--config", yaml_config])

    print("Full vLLM subprocess command:", " ".join(vllm_command))
    vllm_server = subprocess.Popen(vllm_command)
    # Register cleanup before waiting so the server is terminated even if
    # the readiness wait is interrupted.
    atexit.register(vllm_server.terminate)

    # Ping the health endpoint until the server is ready, up to the cap.
    for _ in range(self._MAX_SERVE_CHECK_ATTEMPTS):
        status = subprocess.run(["curl", health_url], capture_output=True,
                                text=True, check=False)
        if status.returncode == 0:
            print("VLLM server ready.")
            return vllm_server
        print(f"VLLM server not ready, checking again after {self.SERVE_CHECK_COOLDOWN} seconds...")
        time.sleep(self.SERVE_CHECK_COOLDOWN)

    vllm_server.terminate()
    raise RuntimeError(
        f"vLLM server did not become healthy after {self._MAX_SERVE_CHECK_ATTEMPTS} attempts."
    )
import OpenCodeTranslator def get_args(): parser = ArgumentParser(description=__doc__) @@ -25,7 +27,7 @@ def get_args(): parser.add_argument("-o", "--output", type=str, required=True, help="Path to the output source code repository.") parser.add_argument("-c", "--config", type=str, required=True, help="Path to translation destination model configuration file containing prompt fill-ins.") parser.add_argument("-f", "--force-overwrite", action="store_true", help="Force overwrite of existing output directory.") - parser.add_argument("--method", choices=["naive", "top-down-agentic", "swe-agent"], required=True, help="The translation method to use.") + parser.add_argument("--method", choices=["naive", "top-down-agentic", "swe-agent", "codex", "opencode"], required=True, help="The translation method to use.") parser.add_argument("--src-model", type=str, required=True, help="The source execution model.") parser.add_argument("--dst-model", type=str, required=True, help="The destination execution model.") parser.add_argument("--output-id", type=int, required=True, help="The integer ID of the output, used to count repeat instances of the same translation configuration.") @@ -48,6 +50,14 @@ def get_args(): swe_agent_args = parser.add_argument_group("SWE-agent translation") SWEAgentTranslator.add_args(swe_agent_args) + # subgroup for Codex translation method + codex_args = parser.add_argument_group("Codex translation") + CodexTranslator.add_args(codex_args) + + # subgroup for OpenCode translation method + opencode_args = parser.add_argument_group("OpenCode translation") + OpenCodeTranslator.add_args(opencode_args) + return parser.parse_args() def get_translator_cls(method: str): @@ -57,6 +67,10 @@ def get_translator_cls(method: str): return TopDownAgenticTranslator if method == "swe-agent": return SWEAgentTranslator + if method == "codex": + return CodexTranslator + if method == "opencode": + return OpenCodeTranslator raise ValueError(f"Translation method {method} not 
recognized.") diff --git a/targets/XSBench/kokkos/target.json b/targets/XSBench/kokkos/target.json index bbade92e..446358ca 100644 --- a/targets/XSBench/kokkos/target.json +++ b/targets/XSBench/kokkos/target.json @@ -2,7 +2,7 @@ "app": "xsbench", "model": "kokkos", "path": "targets/XSBench/kokkos/repo", - "dependencies": ["gnu", "cuda", "kokkos"], + "dependencies": ["gnu", "cuda", "kokkos", "ninja"], "build_commands_debug": "cmake -DKOKKOS_BACKEND=CUDA -DCMAKE_CXX_COMPILER=g++ -GNinja -Bbuild . && cmake --build build/", "build_commands_perf": "cmake -DKOKKOS_BACKEND=CUDA -DCMAKE_CXX_COMPILER=g++ -GNinja -Bbuild . && cmake --build build/", "build_timeout": 120, diff --git a/targets/llm.c/kokkos/target.json b/targets/llm.c/kokkos/target.json index f92feb8f..f6ef533f 100644 --- a/targets/llm.c/kokkos/target.json +++ b/targets/llm.c/kokkos/target.json @@ -2,7 +2,7 @@ "app": "llm.c", "model": "kokkos", "path": "targets/llm.c/kokkos/repo", - "dependencies": ["gnu", "cuda", "kokkos"], + "dependencies": ["gnu", "cuda", "kokkos", "ninja"], "setup_commands": ["cp $SCRATCH/llmc_inputs/*.bin ."], "build_commands_debug": "cmake -DKOKKOS_BACKEND=CUDA -DCMAKE_CXX_COMPILER=g++ -GNinja -Bbuild . && cmake --build build/", "build_commands_perf": "cmake -DKOKKOS_BACKEND=CUDA -DCMAKE_CXX_COMPILER=g++ -GNinja -Bbuild . && cmake --build build/", diff --git a/targets/microXOR/cuda/repo/translation_task.md b/targets/microXOR/cuda/repo/translation_task.md deleted file mode 100644 index ac9db36c..00000000 --- a/targets/microXOR/cuda/repo/translation_task.md +++ /dev/null @@ -1,7 +0,0 @@ -You are a helpful coding assistant. You are helping a software developer translate a codebase from the cuda execution model to the openmp-offload execution model. - -The codebase is called microxor. Its path is targets/microXOR/openmp-offload/repo. Given this code repository, translate the microxor codebase's cuda-specific files to the openmp-offload execution model. 
- -The new files should be in C++ and all old cuda files must be deleted. A new Makefile should be made to compile accordingly with the new files. - -Ensure that the user can compile this code using, for example, `make SM_VERSION=sm_80 CXX_COMPILER=clang++` to build the code for a system with an NVIDIA GPU with compute capability 80 compiled with clang++. Ensure also that the command line interface after translation still works as expected, so that, for example, `./microXOR.exe 1024 32` still works to run the code with a 1024 by 1024 input matrix and a kernel with 32 times 32 threads per block. \ No newline at end of file diff --git a/targets/microXOR/kokkos/target.json b/targets/microXOR/kokkos/target.json index bdd37b2e..f4f072a8 100644 --- a/targets/microXOR/kokkos/target.json +++ b/targets/microXOR/kokkos/target.json @@ -2,7 +2,7 @@ "app": "microxor", "model": "kokkos", "path": "targets/microXOR/kokkos/repo", - "dependencies": ["gnu", "cuda", "kokkos"], + "dependencies": ["gnu", "cuda", "kokkos", "ninja"], "build_commands_debug": "cmake -DKOKKOS_BACKEND=CUDA -DCMAKE_CXX_COMPILER=g++ -GNinja -Bbuild . && cmake --build build/", "build_commands_perf": "cmake -DKOKKOS_BACKEND=CUDA -DCMAKE_CXX_COMPILER=g++ -GNinja -Bbuild . && cmake --build build/", "build_timeout": 120, diff --git a/targets/microXORh/kokkos/target.json b/targets/microXORh/kokkos/target.json index bc2f65d4..d4fd0875 100644 --- a/targets/microXORh/kokkos/target.json +++ b/targets/microXORh/kokkos/target.json @@ -2,7 +2,7 @@ "app": "microxorh", "model": "kokkos", "path": "targets/microXORh/kokkos/repo", - "dependencies": ["gnu", "cuda", "kokkos"], + "dependencies": ["gnu", "cuda", "kokkos", "ninja"], "build_commands_debug": "cmake -DKOKKOS_BACKEND=CUDA -DCMAKE_CXX_COMPILER=g++ -GNinja -Bbuild . && cmake --build build/", "build_commands_perf": "cmake -DKOKKOS_BACKEND=CUDA -DCMAKE_CXX_COMPILER=g++ -GNinja -Bbuild . 
&& cmake --build build/", "build_timeout": 120, diff --git a/targets/nanoXOR/kokkos/target.json b/targets/nanoXOR/kokkos/target.json index ab421183..62decf17 100644 --- a/targets/nanoXOR/kokkos/target.json +++ b/targets/nanoXOR/kokkos/target.json @@ -2,7 +2,7 @@ "app": "nanoxor", "model": "kokkos", "path": "targets/nanoXOR/kokkos/repo", - "dependencies": ["gnu", "cuda", "kokkos"], + "dependencies": ["gnu", "cuda", "kokkos", "ninja"], "build_commands_debug": "cmake -DKOKKOS_BACKEND=CUDA -DCMAKE_CXX_COMPILER=g++ -GNinja -Bbuild . && cmake --build build/", "build_commands_perf": "cmake -DKOKKOS_BACKEND=CUDA -DCMAKE_CXX_COMPILER=g++ -GNinja -Bbuild . && cmake --build build/", "build_timeout": 120,