Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
44 changes: 44 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,50 @@ export NEO4J_USERNAME= your NEO4J_USERNAME

export NEO4J_PASSWORD= your NEO4J_PASSWORD

#### Using MiniMax as an alternative LLM provider

Medical-Graph-RAG supports [MiniMax](https://www.minimaxi.com/) as an alternative LLM provider via their OpenAI-compatible API. MiniMax-M2.7 offers a 204K context window, which is well-suited for processing long medical documents.

To use MiniMax instead of OpenAI:

```bash
export MINIMAX_API_KEY=your_minimax_api_key
# Optional: explicitly set the provider (auto-detected from MINIMAX_API_KEY)
export LLM_PROVIDER=minimax
# Optional: override the default model
export LLM_MODEL=MiniMax-M2.7-highspeed
```

Available MiniMax models: `MiniMax-M2.7` (default, 204K context), `MiniMax-M2.7-highspeed` (faster, 204K context).

> **Note:** MiniMax does not provide a public embedding API. When using MiniMax as the LLM provider, embeddings will still use OpenAI's `text-embedding-3-small` model. Ensure `OPENAI_API_KEY` is set for embedding operations.

For the `nano_graphrag` pipeline, pass the MiniMax completion function:

```python
from nano_graphrag import GraphRAG
from nano_graphrag._llm import minimax_m27_complete

graph_func = GraphRAG(
working_dir="./nanotest",
best_model_func=minimax_m27_complete,
cheap_model_func=minimax_m27_complete,
)
```

For the CAMEL agent framework, use the MiniMax model type:

```python
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

model = ModelFactory.create(
model_platform=ModelPlatformType.MINIMAX,
model_type=ModelType.MINIMAX_M27,
model_config_dict={"temperature": 0.2},
)
```

### 2. Construct the graph (use "mimic_ex" dataset as an example)
1. Download mimic_ex [here](https://huggingface.co/datasets/Morson/mimic_ex), put that under your data path, like ./dataset/mimic_ex

Expand Down
3 changes: 3 additions & 0 deletions camel/configs/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
from .gemini_config import Gemini_API_PARAMS, GeminiConfig
from .groq_config import GROQ_API_PARAMS, GroqConfig
from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
from .minimax_config import MINIMAX_API_PARAMS, MiniMaxConfig
from .mistral_config import MISTRAL_API_PARAMS, MistralConfig
from .ollama_config import OLLAMA_API_PARAMS, OllamaConfig
from .openai_config import OPENAI_API_PARAMS, ChatGPTConfig, OpenSourceConfig
Expand All @@ -33,6 +34,8 @@
'OpenSourceConfig',
'LiteLLMConfig',
'LITELLM_API_PARAMS',
'MiniMaxConfig',
'MINIMAX_API_PARAMS',
'OllamaConfig',
'OLLAMA_API_PARAMS',
'ZhipuAIConfig',
Expand Down
50 changes: 50 additions & 0 deletions camel/configs/minimax_config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
from __future__ import annotations

from typing import Optional, Sequence, Union

from openai._types import NOT_GIVEN, NotGiven

from camel.configs.base_config import BaseConfig


class MiniMaxConfig(BaseConfig):
    r"""Defines the parameters for generating chat completions using the
    MiniMax API (OpenAI-compatible).

    MiniMax requires temperature in (0.0, 1.0]. The config stores the value
    as given; clamping into that range is performed by the MiniMax model
    backend right before the request is sent.

    Args:
        temperature (float, optional): Sampling temperature, clamped to
            (0.0, 1.0] for MiniMax. (default: :obj:`0.2`)
        top_p (float, optional): Nucleus sampling parameter.
            (default: :obj:`1.0`)
        max_tokens (int, optional): Maximum number of tokens to generate.
            (default: :obj:`NOT_GIVEN`)
        stream (bool, optional): Whether to stream partial results.
            (default: :obj:`False`)
        stop (str or list, optional): Stop sequences.
            (default: :obj:`NOT_GIVEN`)
    """

    temperature: float = 0.2
    top_p: float = 1.0
    stream: bool = False
    stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
    max_tokens: Union[int, NotGiven] = NOT_GIVEN


# Field names accepted by the MiniMax backend; consumed by
# MiniMaxModel.check_model_config to reject unexpected kwargs.
# set(...) over the keys view replaces the redundant set comprehension.
MINIMAX_API_PARAMS = set(MiniMaxConfig.model_fields.keys())
2 changes: 2 additions & 0 deletions camel/models/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
from .gemini_model import GeminiModel
from .groq_model import GroqModel
from .litellm_model import LiteLLMModel
from .minimax_model import MiniMaxModel
from .mistral_model import MistralModel
from .model_factory import ModelFactory
from .nemotron_model import NemotronModel
Expand All @@ -33,6 +34,7 @@
'OpenAIModel',
'AzureOpenAIModel',
'AnthropicModel',
'MiniMaxModel',
'MistralModel',
'GroqModel',
'StubModel',
Expand Down
111 changes: 111 additions & 0 deletions camel/models/minimax_model.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,111 @@
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
import os
from typing import Any, Dict, List, Optional, Union

from openai import OpenAI, Stream

from camel.configs import MINIMAX_API_PARAMS
from camel.messages import OpenAIMessage
from camel.models import BaseModelBackend
from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
from camel.utils import (
BaseTokenCounter,
OpenAITokenCounter,
api_keys_required,
)


class MiniMaxModel(BaseModelBackend):
    r"""MiniMax API in a unified BaseModelBackend interface.

    Uses MiniMax's OpenAI-compatible endpoint at https://api.minimax.io/v1.
    Supports MiniMax-M2.7 and MiniMax-M2.7-highspeed models (204K context).
    Temperature is automatically clamped to (0.0, 1.0].
    """

    def __init__(
        self,
        model_type: ModelType,
        model_config_dict: Dict[str, Any],
        api_key: Optional[str] = None,
        url: Optional[str] = None,
        token_counter: Optional[BaseTokenCounter] = None,
    ) -> None:
        super().__init__(
            model_type, model_config_dict, api_key, url, token_counter
        )
        # Explicit arguments win; otherwise fall back to the environment.
        env_url = os.environ.get(
            "MINIMAX_API_BASE_URL", "https://api.minimax.io/v1"
        )
        env_key = os.environ.get("MINIMAX_API_KEY")
        self._url = url if url else env_url
        self._api_key = api_key if api_key else env_key
        self._client = OpenAI(
            timeout=60,
            max_retries=3,
            base_url=self._url,
            api_key=self._api_key,
        )

    @property
    def token_counter(self) -> BaseTokenCounter:
        r"""Token counter used by this backend.

        NOTE(review): MiniMax token counts are approximated with the GPT-4O
        tokenizer here — confirm this is close enough for budget checks.
        """
        if not self._token_counter:
            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O)
        return self._token_counter

    @api_keys_required("MINIMAX_API_KEY")
    def run(
        self,
        messages: List[OpenAIMessage],
    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
        r"""Runs inference of MiniMax chat completion.

        Args:
            messages (List[OpenAIMessage]): Message list with the chat history
                in OpenAI API format.

        Returns:
            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                `ChatCompletion` in the non-stream mode, or
                `Stream[ChatCompletionChunk]` in the stream mode.
        """
        request_config = dict(self.model_config_dict)
        if "temperature" in request_config:
            # MiniMax only accepts temperature in (0.0, 1.0]; clamp it.
            request_config["temperature"] = min(
                max(request_config["temperature"], 0.01), 1.0
            )
        return self._client.chat.completions.create(
            messages=messages,
            model=self.model_type.value,
            **request_config,
        )

    def check_model_config(self):
        r"""Check whether the model configuration contains any
        unexpected arguments to MiniMax API.

        Raises:
            ValueError: If the model configuration dictionary contains any
                unexpected arguments.
        """
        unexpected = [
            param
            for param in self.model_config_dict
            if param not in MINIMAX_API_PARAMS
        ]
        if unexpected:
            raise ValueError(
                f"Unexpected argument `{unexpected[0]}` is "
                "input into MiniMax model backend."
            )

    @property
    def stream(self) -> bool:
        r"""Whether streaming mode was requested in the config."""
        return self.model_config_dict.get('stream', False)
3 changes: 3 additions & 0 deletions camel/models/model_factory.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
from camel.models.gemini_model import GeminiModel
from camel.models.groq_model import GroqModel
from camel.models.litellm_model import LiteLLMModel
from camel.models.minimax_model import MiniMaxModel
from camel.models.mistral_model import MistralModel
from camel.models.ollama_model import OllamaModel
from camel.models.open_source_model import OpenSourceModel
Expand Down Expand Up @@ -90,6 +91,8 @@ def create(
model_class = GeminiModel
elif model_platform.is_mistral and model_type.is_mistral:
model_class = MistralModel
elif model_platform.is_minimax and model_type.is_minimax:
model_class = MiniMaxModel
elif model_type == ModelType.STUB:
model_class = StubModel
else:
Expand Down
23 changes: 23 additions & 0 deletions camel/types/enums.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,10 @@ class ModelType(Enum):
MISTRAL_MIXTRAL_8x22B = "open-mixtral-8x22b"
MISTRAL_CODESTRAL_MAMBA = "open-codestral-mamba"

# MiniMax AI Models
MINIMAX_M27 = "MiniMax-M2.7"
MINIMAX_M27_HIGHSPEED = "MiniMax-M2.7-highspeed"

@property
def value_for_tiktoken(self) -> str:
return (
Expand Down Expand Up @@ -191,6 +195,14 @@ def is_nvidia(self) -> bool:
ModelType.NEMOTRON_4_REWARD,
}

@property
def is_minimax(self) -> bool:
    r"""Whether this model type is one of the MiniMax models."""
    return self in (
        ModelType.MINIMAX_M27,
        ModelType.MINIMAX_M27_HIGHSPEED,
    )

@property
def is_gemini(self) -> bool:
return self in {ModelType.GEMINI_1_5_FLASH, ModelType.GEMINI_1_5_PRO}
Expand Down Expand Up @@ -265,6 +277,11 @@ def token_limit(self) -> int:
ModelType.CLAUDE_3_5_SONNET,
}:
return 200_000
elif self in {
ModelType.MINIMAX_M27,
ModelType.MINIMAX_M27_HIGHSPEED,
}:
return 204_000
elif self in {
ModelType.MISTRAL_CODESTRAL_MAMBA,
}:
Expand Down Expand Up @@ -448,6 +465,7 @@ class ModelPlatformType(Enum):
GEMINI = "gemini"
VLLM = "vllm"
MISTRAL = "mistral"
MINIMAX = "minimax"

@property
def is_openai(self) -> bool:
Expand Down Expand Up @@ -504,6 +522,11 @@ def is_gemini(self) -> bool:
r"""Returns whether this platform is Gemini."""
return self is ModelPlatformType.GEMINI

@property
def is_minimax(self) -> bool:
    r"""Returns whether this platform is MiniMax (OpenAI-compatible API)."""
    return self is ModelPlatformType.MINIMAX


class AudioModelType(Enum):
TTS_1 = "tts-1"
Expand Down
61 changes: 61 additions & 0 deletions nano_graphrag/_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,41 @@ async def openai_complete_if_cache(
return response.choices[0].message.content


async def minimax_complete_if_cache(
    model, prompt, system_prompt=None, history_messages=[], **kwargs
) -> str:
    """MiniMax completion via OpenAI-compatible API with temperature clamping."""
    client = AsyncOpenAI(
        api_key=os.getenv("MINIMAX_API_KEY"),
        base_url=os.getenv("MINIMAX_API_BASE_URL", "https://api.minimax.io/v1"),
    )
    hashing_kv: BaseKVStorage = kwargs.pop("hashing_kv", None)

    # Build the message list: optional system prompt, prior turns, then the
    # current user prompt.
    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    messages.extend(history_messages)
    messages.append({"role": "user", "content": prompt})

    # Serve from cache when the same (model, messages) pair was seen before.
    if hashing_kv is not None:
        args_hash = compute_args_hash(model, messages)
        cached = await hashing_kv.get_by_id(args_hash)
        if cached is not None:
            return cached["return"]

    # Clamp temperature for MiniMax: must be in (0.0, 1.0]
    if "temperature" in kwargs:
        kwargs["temperature"] = min(max(kwargs["temperature"], 0.01), 1.0)

    response = await client.chat.completions.create(
        model=model, messages=messages, **kwargs
    )
    answer = response.choices[0].message.content

    if hashing_kv is not None:
        await hashing_kv.upsert(
            {args_hash: {"return": answer, "model": model}}
        )
    return answer


async def gpt_4o_complete(
prompt, system_prompt=None, history_messages=[], **kwargs
) -> str:
Expand All @@ -60,6 +95,32 @@ async def gpt_4o_mini_complete(
)


async def minimax_m27_complete(
    prompt, system_prompt=None, history_messages=[], **kwargs
) -> str:
    """Complete *prompt* with the MiniMax-M2.7 model (204K context)."""
    model_name = "MiniMax-M2.7"
    return await minimax_complete_if_cache(
        model_name,
        prompt,
        system_prompt=system_prompt,
        history_messages=history_messages,
        **kwargs,
    )


async def minimax_m27_highspeed_complete(
    prompt, system_prompt=None, history_messages=[], **kwargs
) -> str:
    """Complete *prompt* with MiniMax-M2.7-highspeed (204K context, faster)."""
    model_name = "MiniMax-M2.7-highspeed"
    return await minimax_complete_if_cache(
        model_name,
        prompt,
        system_prompt=system_prompt,
        history_messages=history_messages,
        **kwargs,
    )


@wrap_embedding_func_with_attrs(embedding_dim=1536, max_token_size=8192)
async def openai_embedding(texts: list[str]) -> np.ndarray:
openai_async_client = AsyncOpenAI(
Expand Down
Loading