Skip to content
Merged
3 changes: 1 addition & 2 deletions .github/workflows/main.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,6 @@ name: main

on:
push:
pull_request:
branches: [main]

concurrency:
group: release-${{ github.ref }}
Expand All @@ -14,6 +12,7 @@ jobs:
runs-on: ubuntu-latest
env:
API_KEY_OPENAI: ${{ secrets.API_KEY_OPENAI }}
API_KEY_CLAUDE: ${{ secrets.API_KEY_CLAUDE }}
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
Expand Down
18 changes: 16 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
[![semantic-release: angular](https://img.shields.io/badge/semantic--release-angular-e10079?logo=semantic-release)](https://github.com/semantic-release/semantic-release)

**Model Mux** is a TypeScript library inspired by Python's LiteLLM, designed for the Google ADK ecosystem. It provides a unified interface to interact with multiple LLM providers _(currently OpenAI and Anthropic)_.

## Installation

Expand Down Expand Up @@ -39,7 +39,21 @@ const agent = new LlmAgent({
Example using **Anthropic** models:

```typescript
import { LlmAgent } from '@google/adk';
import { ModelMux } from 'model-mux';

const model = 'claude-opus-4-6';
const baseUrl = 'https://api.anthropic.com';
const apiKey = process.env.ANTHROPIC_API_KEY;

const modelMux = new ModelMux({ model, baseUrl, apiKey });

const agent = new LlmAgent({
name: 'anthropic-agent',
description: 'An agent for Anthropic Claude',
instruction: 'Be helpful and concise.',
model: modelMux,
});
```

> **Note:** <br/>
Expand Down
2 changes: 1 addition & 1 deletion eslint.config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ import globals from 'globals';
import tseslint from 'typescript-eslint';

export default defineConfig([
  { ignores: ['coverage/', 'dist/'] },
{
files: ['**/*.{js,mjs,cjs,ts,mts,cts}'],
plugins: { js },
Expand Down
53 changes: 51 additions & 2 deletions package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,7 @@
"vitest": "^4.0.18"
},
"dependencies": {
"@anthropic-ai/sdk": "0.79.0",
"@google/adk": "^0.4.0",
"openai": "^6.25.0"
}
Expand Down
4 changes: 2 additions & 2 deletions src/adapter-factory.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import type { BaseAdapter } from './adapter.js';
import { AnthropicAdapter } from './anthropic/anthropic-adapter.js';
import { OpenAIAdapter } from './openai/openai-adapter.js';

export abstract class AdapterFactory {
Expand All @@ -9,8 +10,7 @@ export abstract class AdapterFactory {
apiKey: string,
): BaseAdapter {
if (model.startsWith('gpt-')) return new OpenAIAdapter(model, baseUrl, headers, apiKey);
// if (model.startsWith("claude-"))
// return new AnthropicAdapter(model, baseUrl, headers, apiKey);
if (model.startsWith('claude-')) return new AnthropicAdapter(model, baseUrl, headers, apiKey);
throw new Error(`Unsupported model/provider: ${model}`);
}
}
79 changes: 73 additions & 6 deletions src/anthropic/anthropic-adapter.ts
Original file line number Diff line number Diff line change
@@ -1,20 +1,87 @@
import Anthropic from '@anthropic-ai/sdk';
import type { BaseLlmConnection, LlmRequest, LlmResponse } from '@google/adk';
import { BaseAdapter } from '../adapter.js';

/* eslint-disable */
export class AnthropicAdapter extends BaseAdapter {
private readonly client: Anthropic;

/**
 * Creates an adapter bound to a single Anthropic model endpoint.
 * All arguments are forwarded to the base adapter; a single SDK client
 * is constructed once and reused for every generate/stream call.
 */
constructor(model: string, baseUrl: string, headers: Record<string, string>, apiKey: string) {
  super(model, baseUrl, headers, apiKey);
  this.client = new Anthropic({ apiKey, baseURL: baseUrl, defaultHeaders: headers });
}

/**
 * Streams a response from the Anthropic Messages API as incremental
 * LlmResponse chunks.
 *
 * Fix: removes the stale "not implemented" placeholder lines that were left
 * above the real implementation, which made the method throw unconditionally
 * and the rest of the body unreachable.
 *
 * @param llmRequest ADK request; `maxOutputTokens` (default 1024 — Anthropic
 *   requires max_tokens) and `temperature` are read from its config.
 * @yields Partial text chunks (`partial: true`), then one empty
 *   `partial: false` chunk as the end-of-stream marker for the ADK runner.
 * @throws Error wrapping any SDK/API failure during streaming.
 */
async *stream(llmRequest: LlmRequest): AsyncGenerator<LlmResponse, void> {
  const stream = this.client.messages.stream({
    model: this.model,
    max_tokens: llmRequest.config?.maxOutputTokens ?? 1024,
    messages: [{ role: 'user', content: this.mapInput(llmRequest) }],
    temperature: llmRequest.config?.temperature ?? undefined,
  });

  try {
    for await (const event of stream) {
      // Forward only text deltas; other event types (message_start,
      // content_block_start, …) carry no user-visible text.
      if (event.type === 'content_block_delta' && event.delta.type === 'text_delta') {
        yield {
          content: {
            role: 'model',
            parts: [{ text: event.delta.text }],
          },
          partial: true,
        } satisfies LlmResponse;
      }
    }

    // Terminal marker: empty non-partial chunk tells the consumer the
    // stream is complete.
    yield {
      content: {
        role: 'model',
        parts: [{ text: '' }],
      },
      partial: false,
    } satisfies LlmResponse;
  } catch (error) {
    throw new Error(
      `Anthropic stream creation failed: ${error instanceof Error ? error.message : String(error)}`,
    );
  }
}

/**
 * One-shot (non-streaming) generation via the Anthropic Messages API.
 *
 * Fix: removes the stale "not implemented" placeholder lines that were left
 * above the real implementation, which made the method throw unconditionally
 * and the rest of the body unreachable.
 *
 * @param llmRequest ADK request; `maxOutputTokens` (default 1024 — Anthropic
 *   requires max_tokens) and `temperature` are read from its config.
 * @returns A single LlmResponse whose text is the concatenation of all text
 *   blocks in the Anthropic response (non-text blocks, e.g. tool_use, are
 *   dropped).
 * @throws Error wrapping any SDK/API failure.
 */
async generate(llmRequest: LlmRequest): Promise<LlmResponse> {
  let response;
  try {
    response = await this.client.messages.create({
      model: this.model,
      max_tokens: llmRequest.config?.maxOutputTokens ?? 1024,
      messages: [{ role: 'user', content: this.mapInput(llmRequest) }],
      temperature: llmRequest.config?.temperature ?? undefined,
    });
  } catch (error) {
    throw new Error(
      `Anthropic response creation failed: ${error instanceof Error ? error.message : String(error)}`,
    );
  }

  return {
    content: {
      role: 'model',
      parts: [
        {
          text: response.content
            .filter((block) => block.type === 'text')
            .map((block) => block.text)
            .join(''),
        },
      ],
    },
  } satisfies LlmResponse;
}

/**
 * Live bidirectional connection is not supported for Anthropic.
 *
 * Fix: removes the duplicated stale throw line left next to the current one,
 * keeping a single unconditional throw.
 *
 * @throws Always — ADK live-connection mode is unimplemented for this provider.
 */
// eslint-disable-next-line @typescript-eslint/no-unused-vars
async connect(llmRequest: LlmRequest): Promise<BaseLlmConnection> {
  throw new Error('Not implemented');
}
}
106 changes: 106 additions & 0 deletions test/integration/anthropic/anthropic.it.spec.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,106 @@
import {
InMemorySessionService,
LlmAgent,
Runner,
StreamingMode,
isFinalResponse,
stringifyContent,
} from '@google/adk';
import dotenv from 'dotenv';
import { ModelMux } from '../../../src/index.js';

// Live integration tests: exercise ModelMux end-to-end against the real
// Anthropic API for each listed Claude model, via the Google ADK Runner.
// Requires API_KEY_CLAUDE in the environment (loaded from .env via dotenv);
// with an empty key the API calls will fail — TODO confirm desired behavior
// when the secret is absent (skip vs. fail).
const models = ['claude-opus-4-6', 'claude-sonnet-4-5-20250929', 'claude-haiku-4-5'];

describe.each(models)('anthropic integration: %s', (model) => {
  const baseUrl = 'https://api.anthropic.com';
  const appName = 'test-app';
  const userId = 'test-user';

  beforeAll(() => {
    // Load API_KEY_CLAUDE (and any other vars) from a local .env file.
    dotenv.config();
  });

  it('should generate an answer to a one-shot ad hoc request', async () => {
    // Arrange: fresh in-memory session, unique per model to avoid cross-test state.
    const apiKey = process.env.API_KEY_CLAUDE || '';
    const sessionId = `test-session-${model}`;

    const sessionService = new InMemorySessionService();
    await sessionService.createSession({ appName, userId, sessionId });

    // Act 1: wire the mux to the Anthropic endpoint for this model.
    const modelMux = new ModelMux({ model, baseUrl, apiKey });

    // Act 2: minimal agent whose instruction constrains output to yes/no.
    const agent = new LlmAgent({
      name: 'test-agent',
      description: 'An agent for testing purposes',
      instruction: 'Return "yes" or "no", nothing else.',
      model: modelMux,
    });

    // Act 3: run one user turn through the ADK runner (default, non-streaming mode).
    const runner = new Runner({ appName, agent, sessionService });
    const events = await runner.runAsync({
      userId,
      sessionId,
      newMessage: {
        role: 'user',
        parts: [{ text: 'Is this thing working?' }],
      },
    });

    // Act 4: drain the event stream, capturing only the final response text.
    let answer;
    for await (const event of events) {
      if (isFinalResponse(event)) answer = stringifyContent(event).trim();
    }

    // Assert: the model produced some non-empty final answer
    // (content is model-dependent, so only truthiness is checked).
    expect(answer).toBeTruthy();
  });

  it('should stream an answer in multiple chunks to an ad hoc request', async () => {
    // Arrange: separate session id so this test never shares history with the one-shot test.
    const apiKey = process.env.API_KEY_CLAUDE || '';
    const sessionId = `test-session-stream-${model}`;

    const sessionService = new InMemorySessionService();
    await sessionService.createSession({ appName, userId, sessionId });

    // Act 1: wire the mux to the Anthropic endpoint for this model.
    const modelMux = new ModelMux({ model, baseUrl, apiKey });

    // Act 2: same minimal yes/no agent as the one-shot test.
    const agent = new LlmAgent({
      name: 'test-agent',
      description: 'An agent for testing purposes',
      instruction: 'Return "yes" or "no", nothing else.',
      model: modelMux,
    });

    // Act 3: identical turn, but with SSE streaming enabled so the adapter's
    // stream() path is exercised instead of generate().
    const runner = new Runner({ appName, agent, sessionService });
    const events = await runner.runAsync({
      userId,
      sessionId,
      newMessage: {
        role: 'user',
        parts: [{ text: 'Is this thing working?' }],
      },
      runConfig: { streamingMode: StreamingMode.SSE },
    });

    // Act 4: collect every non-empty text chunk emitted during streaming.
    const chunks: string[] = [];
    for await (const event of events) {
      const text = stringifyContent(event).trim();
      if (text) chunks.push(text);
    }
    const answer = chunks.join('');

    // Assert: at least one chunk arrived and the concatenation is non-empty.
    // NOTE(review): a one-word answer may legitimately arrive as a single
    // chunk, so "multiple chunks" is not asserted literally — confirm intent.
    expect(chunks.length).toBeGreaterThan(0);
    expect(answer).toBeTruthy();
  });
});
Loading
Loading