Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,7 @@ public class GeminiChatModel extends ChatModelBase {
* Creates a new Gemini chat model instance.
*
* @param apiKey the API key for authentication (for Gemini API)
* @param baseUrl the custom base URL for Gemini API (null for default)
* @param modelName the model name to use (e.g., "gemini-2.0-flash",
* "gemini-1.5-pro")
* @param streamEnabled whether streaming should be enabled
Expand All @@ -90,6 +91,7 @@ public class GeminiChatModel extends ChatModelBase {
*/
public GeminiChatModel(
String apiKey,
String baseUrl,
String modelName,
boolean streamEnabled,
String project,
Expand All @@ -106,7 +108,12 @@ public GeminiChatModel(
this.project = project;
this.location = location;
this.vertexAI = vertexAI;
this.httpOptions = httpOptions;
this.httpOptions =
baseUrl == null
? httpOptions
: (httpOptions == null
? HttpOptions.builder().baseUrl(baseUrl).build()
: httpOptions.toBuilder().baseUrl(baseUrl).build());
this.credentials = credentials;
this.clientOptions = clientOptions;
this.defaultOptions =
Expand Down Expand Up @@ -136,8 +143,8 @@ public GeminiChatModel(
}

// Configure HTTP and client options
if (httpOptions != null) {
clientBuilder.httpOptions(httpOptions);
if (this.httpOptions != null) {
clientBuilder.httpOptions(this.httpOptions);
}
if (clientOptions != null) {
clientBuilder.clientOptions(clientOptions);
Expand All @@ -146,6 +153,51 @@ public GeminiChatModel(
this.client = clientBuilder.build();
}

/**
 * Creates a new Gemini chat model instance with the default Gemini API endpoint.
 *
 * <p>Convenience overload: delegates to the full constructor with a {@code null}
 * baseUrl, so the endpoint comes from {@code httpOptions} (if it carries one) or
 * the SDK default.
 *
 * @param apiKey the API key for authentication (for Gemini API)
 * @param modelName the model name to use (e.g., "gemini-2.0-flash",
 *     "gemini-1.5-pro")
 * @param streamEnabled whether streaming should be enabled
 * @param project the Google Cloud project ID (for Vertex AI)
 * @param location the Google Cloud location (for Vertex AI, e.g.,
 *     "us-central1")
 * @param vertexAI whether to use Vertex AI APIs (null for auto-detection)
 * @param httpOptions HTTP options for the client
 * @param credentials Google credentials (for Vertex AI)
 * @param clientOptions client options for the API client
 * @param defaultOptions default generation options
 * @param formatter the message formatter to use (null for default Gemini
 *     formatter)
 */
public GeminiChatModel(
        String apiKey,
        String modelName,
        boolean streamEnabled,
        String project,
        String location,
        Boolean vertexAI,
        HttpOptions httpOptions,
        GoogleCredentials credentials,
        ClientOptions clientOptions,
        GenerateOptions defaultOptions,
        Formatter<Content, GenerateContentResponse, GenerateContentConfig.Builder> formatter) {
    this(
            apiKey,
            null, // baseUrl: keep the default Gemini endpoint (no override)
            modelName,
            streamEnabled,
            project,
            location,
            vertexAI,
            httpOptions,
            credentials,
            clientOptions,
            defaultOptions,
            formatter);
}

/**
* Stream chat completion responses from Gemini's API.
*
Expand Down Expand Up @@ -281,6 +333,7 @@ public static Builder builder() {
*/
public static class Builder {
private String apiKey;
private String baseUrl;
private String modelName = "gemini-2.5-flash";
private boolean streamEnabled = true;
private String project;
Expand All @@ -304,6 +357,17 @@ public Builder apiKey(String apiKey) {
return this;
}

/**
 * Sets the custom base URL (for Gemini API).
 *
 * <p>When non-null, this value takes precedence over any base URL carried by the
 * {@code HttpOptions} supplied via {@link #httpOptions}; other HTTP settings
 * (API version, timeout, ...) are preserved. Leave unset (null) to use the
 * default endpoint.
 *
 * @param baseUrl the custom Gemini API base URL
 * @return this builder
 */
public Builder baseUrl(String baseUrl) {
    this.baseUrl = baseUrl;
    return this;
}

/**
* Sets the model name.
*
Expand Down Expand Up @@ -424,6 +488,7 @@ public Builder formatter(
public GeminiChatModel build() {
return new GeminiChatModel(
apiKey,
baseUrl,
modelName,
streamEnabled,
project,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
import io.agentscope.core.formatter.gemini.GeminiChatFormatter;
import io.agentscope.core.formatter.gemini.GeminiMultiAgentFormatter;
import io.agentscope.core.model.test.ModelTestUtils;
import java.lang.reflect.Field;
import java.util.List;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.DisplayName;
Expand Down Expand Up @@ -300,6 +301,49 @@ void testHttpOptionsConfiguration() {
assertNotNull(modelWithHttpOptions);
}

@Test
@DisplayName("Should configure custom base URL")
void testBaseUrlConfiguration() throws Exception {
    // A builder-level baseUrl alone should surface in the model's effective HttpOptions.
    String customEndpoint = "https://custom-gemini-endpoint.example";

    GeminiChatModel model =
            GeminiChatModel.builder()
                    .apiKey(mockApiKey)
                    .baseUrl(customEndpoint)
                    .modelName("gemini-2.0-flash")
                    .build();

    assertNotNull(model);
    assertEquals(customEndpoint, getHttpOptions(model).baseUrl().orElse(null));
}

@Test
@DisplayName("Should override HTTP options base URL while preserving other settings")
void testBaseUrlOverridesHttpOptionsBaseUrl() throws Exception {
    String overrideUrl = "https://override-gemini-endpoint.example";

    // Original HttpOptions carry their own base URL plus unrelated settings.
    HttpOptions originalOptions =
            HttpOptions.builder()
                    .baseUrl("https://original-gemini-endpoint.example")
                    .apiVersion("v1beta")
                    .timeout(3210)
                    .build();

    GeminiChatModel model =
            GeminiChatModel.builder()
                    .apiKey(mockApiKey)
                    .modelName("gemini-2.0-flash")
                    .httpOptions(originalOptions)
                    .baseUrl(overrideUrl)
                    .build();

    // The builder-level baseUrl wins, while apiVersion and timeout are kept intact.
    HttpOptions effective = getHttpOptions(model);
    assertNotNull(effective);
    assertEquals(overrideUrl, effective.baseUrl().orElse(null));
    assertEquals("v1beta", effective.apiVersion().orElse(null));
    assertEquals(3210, effective.timeout().orElse(null));
}

@Test
@DisplayName("Should handle all generation options")
void testAllGenerateOptions() {
Expand Down Expand Up @@ -454,4 +498,10 @@ void testNullFormatter() {
assertNotNull(model);
// Should use default GeminiChatFormatter
}

/** Reads the model's private {@code httpOptions} field via reflection for assertions. */
private static HttpOptions getHttpOptions(GeminiChatModel model) throws Exception {
    Field field = GeminiChatModel.class.getDeclaredField("httpOptions");
    field.setAccessible(true);
    return (HttpOptions) field.get(model);
}
}
6 changes: 5 additions & 1 deletion docs/en/task/model.md
Original file line number Diff line number Diff line change
Expand Up @@ -136,6 +136,7 @@ Google's Gemini series models, supporting both Gemini API and Vertex AI.
GeminiChatModel model = GeminiChatModel.builder()
.apiKey(System.getenv("GEMINI_API_KEY"))
.modelName("gemini-2.5-flash") // Default
.baseUrl("https://your-gateway.example") // Optional
.build();
```

Expand All @@ -156,13 +157,16 @@ GeminiChatModel model = GeminiChatModel.builder()
| Option | Description |
|--------|-------------|
| `apiKey` | Gemini API key |
| `baseUrl` | Custom Gemini API endpoint (optional) |
| `modelName` | Model name, default `gemini-2.5-flash` |
| `project` | GCP project ID (Vertex AI) |
| `location` | GCP region (Vertex AI) |
| `vertexAI` | Whether to use Vertex AI |
| `credentials` | GCP credentials (Vertex AI) |
| `streamEnabled` | Enable streaming, default `true` |

For endpoint override, use `baseUrl(...)`. For more advanced transport or proxy setup, continue to use `httpOptions(...)` or `clientOptions(...)`.

## Ollama

Self-hosted open-source LLM platform supporting various models.
Expand Down Expand Up @@ -419,4 +423,4 @@ DashScopeChatModel model = DashScopeChatModel.builder()
| Single-agent conversation | `ChatFormatter` (default) |
| Pipeline sequential execution | `MultiAgentFormatter` |
| MsgHub group chat | `MultiAgentFormatter` |
| Multi-agent debate | `MultiAgentFormatter` |
| Multi-agent debate | `MultiAgentFormatter` |
6 changes: 5 additions & 1 deletion docs/zh/task/model.md
Original file line number Diff line number Diff line change
Expand Up @@ -133,6 +133,7 @@ Google 的 Gemini 系列模型,支持 Gemini API 和 Vertex AI。
GeminiChatModel model = GeminiChatModel.builder()
.apiKey(System.getenv("GEMINI_API_KEY"))
.modelName("gemini-2.5-flash") // 默认值
.baseUrl("https://your-gateway.example") // 可选
.build();
```

Expand All @@ -153,13 +154,16 @@ GeminiChatModel model = GeminiChatModel.builder()
| 配置项 | 说明 |
|--------|------|
| `apiKey` | Gemini API 密钥 |
| `baseUrl` | 自定义 Gemini API 端点(可选) |
| `modelName` | 模型名称,默认 `gemini-2.5-flash` |
| `project` | GCP 项目 ID(Vertex AI) |
| `location` | GCP 区域(Vertex AI) |
| `vertexAI` | 是否使用 Vertex AI |
| `credentials` | GCP 凭证(Vertex AI) |
| `streamEnabled` | 是否启用流式输出,默认 `true` |

如需覆盖请求端点,可使用 `baseUrl(...)`。更高级的传输层或代理配置,仍建议通过 `httpOptions(...)` 或 `clientOptions(...)` 处理。

## Ollama

自托管开源 LLM 平台,支持多种模型。
Expand Down Expand Up @@ -415,4 +419,4 @@ DashScopeChatModel model = DashScopeChatModel.builder()
| 单智能体对话 | `ChatFormatter`(默认) |
| Pipeline 顺序执行 | `MultiAgentFormatter` |
| MsgHub 群聊 | `MultiAgentFormatter` |
| 多智能体辩论 | `MultiAgentFormatter` |
| 多智能体辩论 | `MultiAgentFormatter` |
Loading