diff --git a/README.md b/README.md
index c4af1d0..fb93986 100644
--- a/README.md
+++ b/README.md
@@ -181,6 +181,33 @@ python -u app.py \
Notice that `-e` flag can save a lot of memory.
+### Using MiniMax as LLM Provider
+
+InternGPT supports [MiniMax](https://www.minimax.io/) as an alternative LLM backend. MiniMax provides powerful language models (M2.7, M2.5) via an OpenAI-compatible API.
+
+**Option 1: Environment variables**
+
+Set the following environment variables before starting the app:
+```shell
+export LLM_PROVIDER=minimax
+export MINIMAX_API_KEY=your_minimax_api_key
+```
+
+If only `MINIMAX_API_KEY` is set (without `OPENAI_API_KEY`), MiniMax is auto-detected.
+
+**Option 2: UI selector**
+
+Select "minimax" from the provider dropdown on the login page and enter your MiniMax API key.
+
+**Available MiniMax models:**
+
+| Model | Description |
+|---|---|
+| `MiniMax-M2.7` | Latest flagship model (default) |
+| `MiniMax-M2.7-highspeed` | Fast variant of M2.7 |
+| `MiniMax-M2.5` | Previous generation, 204K context |
+| `MiniMax-M2.5-highspeed` | Fast variant of M2.5 |
+
### Selectively Loading Features
When you only want to try DragGAN, you just need to load StyleGAN and open the tab "DragGAN":
```shell
diff --git a/app.py b/app.py
index a0f4e54..1c7d196 100644
--- a/app.py
+++ b/app.py
@@ -28,6 +28,7 @@
# from iGPT.models import *
from iGPT.controllers import ConversationBot
+from iGPT.controllers.llm_provider import create_llm, detect_provider, MiniMaxLLM
import openai
from langchain.llms.openai import OpenAI
@@ -146,7 +147,7 @@ def cut_dialogue_history(history_memory, keep_last_n_words=500):
return '\n' + '\n'.join(paragraphs)
-def login_with_key(bot, debug, api_key):
+def login_with_key(bot, debug, api_key, provider="openai"):
# Just for debug
print('===>logging in')
user_state = [{}]
@@ -157,14 +158,18 @@ def login_with_key(bot, debug, api_key):
else:
if api_key and len(api_key) > 30:
print(api_key)
- os.environ["OPENAI_API_KEY"] = api_key
- openai.api_key = api_key
try:
- llm = OpenAI(temperature=0)
+ if provider == "minimax":
+ os.environ["MINIMAX_API_KEY"] = api_key
+ llm = create_llm(provider="minimax", api_key=api_key)
+ else:
+ os.environ["OPENAI_API_KEY"] = api_key
+ openai.api_key = api_key
+ llm = create_llm(provider="openai", api_key=api_key)
llm('Hi!')
response = 'Success!'
is_error = False
- user_state = bot.init_agent()
+ user_state = bot.init_agent(provider=provider, api_key=api_key)
except Exception as err:
# gr.update(visible=True)
print(err)
@@ -173,7 +178,7 @@ def login_with_key(bot, debug, api_key):
else:
is_error = True
response = 'Incorrect key, please input again'
-
+
return gr.update(visible=not is_error), gr.update(visible=is_error), gr.update(visible=is_error, value=response), user_state
@@ -240,15 +245,21 @@ def change_max_iter(max_iters):
)
with gr.Row(visible=True, elem_id='login') as login:
- with gr.Column(scale=0.6, min_width=0) :
+ with gr.Column(scale=0.4, min_width=0) :
openai_api_key_text = gr.Textbox(
- placeholder="Input openAI API key",
+ placeholder="Input API key (OpenAI or MiniMax)",
show_label=False,
- label="OpenAI API Key",
+ label="API Key",
lines=1,
type="password").style(container=False)
+ with gr.Column(scale=0.2, min_width=0):
+ provider_selector = gr.Dropdown(
+ choices=["openai", "minimax"],
+ value="openai",
+ label="LLM Provider",
+ show_label=False).style(container=False)
with gr.Column(scale=0.4, min_width=0):
- key_submit_button = gr.Button(value="Please log in with your OpenAI API Key", interactive=True, variant='primary').style(container=False)
+ key_submit_button = gr.Button(value="Log in with your API Key", interactive=True, variant='primary').style(container=False)
with gr.Row(visible=False) as user_interface:
with gr.Column(scale=0.5, elem_id="text_input") as chat_part:
@@ -375,8 +386,8 @@ def change_max_iter(max_iters):
login_func = partial(login_with_key, bot, args.debug)
- openai_api_key_text.submit(login_func, [openai_api_key_text], [user_interface, openai_api_key_text, key_submit_button, user_state])
- key_submit_button.click(login_func, [openai_api_key_text, ], [user_interface, openai_api_key_text, key_submit_button, user_state])
+ openai_api_key_text.submit(login_func, [openai_api_key_text, provider_selector], [user_interface, openai_api_key_text, key_submit_button, user_state])
+ key_submit_button.click(login_func, [openai_api_key_text, provider_selector], [user_interface, openai_api_key_text, key_submit_button, user_state])
txt.submit(
lambda: gr.update(interactive=False), [], [send_btn]).then(
diff --git a/iGPT/controllers/ConversationBot.py b/iGPT/controllers/ConversationBot.py
index 7371d4a..d16bd5a 100644
--- a/iGPT/controllers/ConversationBot.py
+++ b/iGPT/controllers/ConversationBot.py
@@ -16,7 +16,8 @@
from langchain.agents.initialize import initialize_agent
from langchain.agents.tools import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
-from langchain.llms.openai import OpenAI
+
+from .llm_provider import create_llm
from ..models import *
from iGPT.models.utils import (gen_new_name, to_image,
@@ -159,9 +160,9 @@ def __init__(self, load_dict, e_mode=False, chat_disabled=False):
self.tools.append(Tool(name=func.name, description=func.description, func=func))
- def init_agent(self):
+ def init_agent(self, provider=None, api_key=None):
memory = ConversationBufferMemory(memory_key="chat_history", output_key='output')
- llm = OpenAI(temperature=0)
+ llm = create_llm(provider=provider, api_key=api_key, temperature=0)
agent = initialize_agent(
self.tools,
llm,
diff --git a/iGPT/controllers/__init__.py b/iGPT/controllers/__init__.py
index e2229c0..44c85cb 100644
--- a/iGPT/controllers/__init__.py
+++ b/iGPT/controllers/__init__.py
@@ -1,6 +1,10 @@
from .ConversationBot import ConversationBot
+from .llm_provider import MiniMaxLLM, create_llm, detect_provider
__all__ = [
'ConversationBot',
+ 'MiniMaxLLM',
+ 'create_llm',
+ 'detect_provider',
]
diff --git a/iGPT/controllers/llm_provider.py b/iGPT/controllers/llm_provider.py
new file mode 100644
index 0000000..f3ee60f
--- /dev/null
+++ b/iGPT/controllers/llm_provider.py
@@ -0,0 +1,203 @@
+"""
+LLM provider factory for InternGPT.
+
+Supports OpenAI (default) and MiniMax as LLM backends.
+MiniMax models use the OpenAI-compatible chat completion API
+(https://api.minimax.io/v1).
+
+Usage:
+ # Auto-detect from environment variables
+ llm = create_llm()
+
+ # Explicit provider selection
+ llm = create_llm(provider="minimax")
+"""
+
+import os
+import re
+from typing import Any, List, Mapping, Optional
+
+import openai
+
+# Handle both old (0.x) and new (1.x+) openai SDK
+_OPENAI_V1 = hasattr(openai, "__version__") and int(openai.__version__.split(".")[0]) >= 1
+
+try:
+ from langchain.llms.base import LLM
+except ImportError:
+ from langchain_core.language_models.llms import LLM
+
+try:
+ from langchain.llms.openai import OpenAI
+except ImportError:
+ from langchain_openai import OpenAI
+
+
+# ---------------------------------------------------------------------------
+# MiniMax-specific constants
+# ---------------------------------------------------------------------------
+MINIMAX_API_BASE = "https://api.minimax.io/v1"
+MINIMAX_DEFAULT_MODEL = "MiniMax-M2.7"
+MINIMAX_MODELS = [
+ "MiniMax-M2.7",
+ "MiniMax-M2.7-highspeed",
+ "MiniMax-M2.5",
+ "MiniMax-M2.5-highspeed",
+]
+
+
+def _chat_completion(model, messages, temperature, stop, api_key, api_base):
+ """Call chat completion API, compatible with both old and new openai SDK."""
+ if _OPENAI_V1:
+ client = openai.OpenAI(api_key=api_key, base_url=api_base)
+ kwargs = dict(model=model, messages=messages, temperature=temperature)
+ if stop:
+ kwargs["stop"] = stop
+ response = client.chat.completions.create(**kwargs)
+ return response.choices[0].message.content or ""
+ else:
+ response = openai.ChatCompletion.create(
+ model=model,
+ messages=messages,
+ temperature=temperature,
+ stop=stop,
+ api_key=api_key,
+ api_base=api_base,
+ )
+ return response.choices[0].message.content or ""
+
+
+class MiniMaxLLM(LLM):
+ """LangChain LLM wrapper for MiniMax via OpenAI-compatible API.
+
+ MiniMax exposes an OpenAI-compatible ``/v1/chat/completions`` endpoint.
+ This class translates a plain-text prompt into a single-user-message chat
+ request so that it can be used with LangChain agents that expect a
+ ``BaseLLM`` interface.
+
+ Parameters
+ ----------
+ model_name : str
+ MiniMax model identifier (default ``MiniMax-M2.7``).
+ temperature : float
+ Sampling temperature. MiniMax requires ``temperature`` in (0, 1],
+ so values <= 0 are clamped to 0.01.
+ api_key : str
+ MiniMax API key. Falls back to the ``MINIMAX_API_KEY`` env var.
+ api_base : str
+ Base URL for the MiniMax API (default ``https://api.minimax.io/v1``).
+ """
+
+ model_name: str = MINIMAX_DEFAULT_MODEL
+ temperature: float = 0.01
+ api_key: str = ""
+ api_base: str = MINIMAX_API_BASE
+
+ def __init__(self, **kwargs: Any):
+ super().__init__(**kwargs)
+ if not self.api_key:
+ self.api_key = os.environ.get("MINIMAX_API_KEY", "")
+ # Clamp temperature to MiniMax's valid range (0, 1]
+ if self.temperature <= 0:
+ self.temperature = 0.01
+
+ @property
+ def _llm_type(self) -> str:
+ return "minimax"
+
+ @property
+ def _identifying_params(self) -> Mapping[str, Any]:
+ return {
+ "model_name": self.model_name,
+ "temperature": self.temperature,
+ "api_base": self.api_base,
+ }
+
+ def _strip_think_tags(self, text: str) -> str:
+        """Remove <think>...</think> blocks from model output."""
+        return re.sub(r"<think>.*?</think>", "", text, flags=re.DOTALL).strip()
+
+ def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs) -> str:
+ """Call MiniMax chat completion API with a single user message."""
+ content = _chat_completion(
+ model=self.model_name,
+ messages=[{"role": "user", "content": prompt}],
+ temperature=self.temperature,
+ stop=stop,
+ api_key=self.api_key,
+ api_base=self.api_base,
+ )
+ return self._strip_think_tags(content)
+
+
+# ---------------------------------------------------------------------------
+# Provider detection helpers
+# ---------------------------------------------------------------------------
+SUPPORTED_PROVIDERS = ("openai", "minimax")
+
+
+def detect_provider() -> str:
+ """Detect the LLM provider from environment variables.
+
+ Priority:
+ 1. ``LLM_PROVIDER`` env var (explicit).
+ 2. If ``MINIMAX_API_KEY`` is set and ``OPENAI_API_KEY`` is not → minimax.
+ 3. Default → openai.
+ """
+ explicit = os.environ.get("LLM_PROVIDER", "").lower().strip()
+ if explicit in SUPPORTED_PROVIDERS:
+ return explicit
+
+ has_minimax = bool(os.environ.get("MINIMAX_API_KEY"))
+ has_openai = bool(os.environ.get("OPENAI_API_KEY"))
+
+ if has_minimax and not has_openai:
+ return "minimax"
+
+ return "openai"
+
+
+def create_llm(
+ provider: Optional[str] = None,
+ api_key: Optional[str] = None,
+ model_name: Optional[str] = None,
+ temperature: float = 0,
+) -> LLM:
+ """Create an LLM instance for the given provider.
+
+ Parameters
+ ----------
+ provider : str, optional
+ ``"openai"`` or ``"minimax"``. Auto-detected if *None*.
+ api_key : str, optional
+ API key override. Otherwise read from the environment.
+ model_name : str, optional
+ Model name override.
+ temperature : float
+ Sampling temperature (default 0).
+
+ Returns
+ -------
+ langchain.llms.base.LLM
+ A LangChain-compatible LLM instance.
+ """
+ if provider is None:
+ provider = detect_provider()
+
+ provider = provider.lower().strip()
+
+ if provider == "minimax":
+ kwargs: dict = {"temperature": temperature}
+ if api_key:
+ kwargs["api_key"] = api_key
+ if model_name:
+ kwargs["model_name"] = model_name
+ return MiniMaxLLM(**kwargs)
+
+ # Default: OpenAI
+ kwargs_openai: dict = {"temperature": temperature}
+ if api_key:
+ kwargs_openai["openai_api_key"] = api_key
+ if model_name:
+ kwargs_openai["model_name"] = model_name
+ return OpenAI(**kwargs_openai)
diff --git a/tests/test_llm_provider.py b/tests/test_llm_provider.py
new file mode 100644
index 0000000..e24a83a
--- /dev/null
+++ b/tests/test_llm_provider.py
@@ -0,0 +1,229 @@
+"""Unit tests for the InternGPT LLM provider module."""
+
+import importlib.util
+import os
+import sys
+import unittest
+from unittest.mock import MagicMock, patch
+
+# Load llm_provider directly from file to avoid iGPT.__init__ model imports
+_mod_path = os.path.join(
+ os.path.dirname(__file__), "..", "iGPT", "controllers", "llm_provider.py"
+)
+_spec = importlib.util.spec_from_file_location("llm_provider", _mod_path)
+llm_mod = importlib.util.module_from_spec(_spec)
+_spec.loader.exec_module(llm_mod)
+
+MiniMaxLLM = llm_mod.MiniMaxLLM
+create_llm = llm_mod.create_llm
+detect_provider = llm_mod.detect_provider
+MINIMAX_API_BASE = llm_mod.MINIMAX_API_BASE
+MINIMAX_DEFAULT_MODEL = llm_mod.MINIMAX_DEFAULT_MODEL
+MINIMAX_MODELS = llm_mod.MINIMAX_MODELS
+SUPPORTED_PROVIDERS = llm_mod.SUPPORTED_PROVIDERS
+_chat_completion = llm_mod._chat_completion
+
+
+class TestMiniMaxLLM(unittest.TestCase):
+ """Tests for the MiniMaxLLM class."""
+
+ def test_default_parameters(self):
+ llm = MiniMaxLLM(api_key="test-key")
+ self.assertEqual(llm.model_name, MINIMAX_DEFAULT_MODEL)
+ self.assertEqual(llm.api_base, MINIMAX_API_BASE)
+ self.assertEqual(llm.api_key, "test-key")
+ self.assertEqual(llm._llm_type, "minimax")
+
+ def test_temperature_clamping_zero(self):
+ llm = MiniMaxLLM(api_key="test-key", temperature=0)
+ self.assertEqual(llm.temperature, 0.01)
+
+ def test_temperature_clamping_negative(self):
+ llm = MiniMaxLLM(api_key="test-key", temperature=-1)
+ self.assertEqual(llm.temperature, 0.01)
+
+ def test_temperature_valid_preserved(self):
+ llm = MiniMaxLLM(api_key="test-key", temperature=0.5)
+ self.assertEqual(llm.temperature, 0.5)
+
+ def test_temperature_one_preserved(self):
+ llm = MiniMaxLLM(api_key="test-key", temperature=1.0)
+ self.assertEqual(llm.temperature, 1.0)
+
+ def test_custom_model_name(self):
+ llm = MiniMaxLLM(api_key="key", model_name="MiniMax-M2.7-highspeed")
+ self.assertEqual(llm.model_name, "MiniMax-M2.7-highspeed")
+
+ def test_custom_api_base(self):
+ llm = MiniMaxLLM(api_key="key", api_base="https://custom.api/v1")
+ self.assertEqual(llm.api_base, "https://custom.api/v1")
+
+ @patch.dict(os.environ, {"MINIMAX_API_KEY": "env-key"}, clear=False)
+ def test_api_key_from_env(self):
+ llm = MiniMaxLLM()
+ self.assertEqual(llm.api_key, "env-key")
+
+ def test_identifying_params(self):
+ llm = MiniMaxLLM(api_key="key")
+ params = llm._identifying_params
+ self.assertIn("model_name", params)
+ self.assertIn("temperature", params)
+ self.assertIn("api_base", params)
+
+ def test_strip_think_tags_basic(self):
+ llm = MiniMaxLLM(api_key="key")
+        text = "<think>internal reasoning</think>Hello world"
+ self.assertEqual(llm._strip_think_tags(text), "Hello world")
+
+ def test_strip_think_tags_multiline(self):
+ llm = MiniMaxLLM(api_key="key")
+        text = "<think>\nstep 1\nstep 2\n</think>\nResult: 42"
+ self.assertEqual(llm._strip_think_tags(text), "Result: 42")
+
+ def test_strip_think_tags_no_tags(self):
+ llm = MiniMaxLLM(api_key="key")
+ text = "No thinking here"
+ self.assertEqual(llm._strip_think_tags(text), "No thinking here")
+
+ def test_strip_think_tags_multiple(self):
+ llm = MiniMaxLLM(api_key="key")
+        text = "<think>a</think>Hello <think>b</think>World"
+ self.assertEqual(llm._strip_think_tags(text), "Hello World")
+
+ @patch.object(llm_mod, "_chat_completion")
+ def test_call_invokes_chat_completion(self, mock_cc):
+ mock_cc.return_value = "response text"
+ llm = MiniMaxLLM(api_key="test-key", model_name="MiniMax-M2.7")
+ result = llm._call("Hello")
+
+ mock_cc.assert_called_once_with(
+ model="MiniMax-M2.7",
+ messages=[{"role": "user", "content": "Hello"}],
+ temperature=0.01,
+ stop=None,
+ api_key="test-key",
+ api_base=MINIMAX_API_BASE,
+ )
+ self.assertEqual(result, "response text")
+
+ @patch.object(llm_mod, "_chat_completion")
+ def test_call_strips_think_tags_from_response(self, mock_cc):
+        mock_cc.return_value = "<think>reasoning</think>Final answer"
+ llm = MiniMaxLLM(api_key="test-key")
+ result = llm._call("Question")
+ self.assertEqual(result, "Final answer")
+
+ @patch.object(llm_mod, "_chat_completion")
+ def test_call_with_stop_sequences(self, mock_cc):
+ mock_cc.return_value = "answer"
+ llm = MiniMaxLLM(api_key="test-key")
+ llm._call("prompt", stop=["\n"])
+ call_kwargs = mock_cc.call_args[1]
+ self.assertEqual(call_kwargs["stop"], ["\n"])
+
+ @patch.object(llm_mod, "_chat_completion")
+ def test_call_handles_empty_response(self, mock_cc):
+ mock_cc.return_value = ""
+ llm = MiniMaxLLM(api_key="test-key")
+ result = llm._call("prompt")
+ self.assertEqual(result, "")
+
+
+class TestDetectProvider(unittest.TestCase):
+ """Tests for the detect_provider function."""
+
+ @patch.dict(os.environ, {"LLM_PROVIDER": "minimax"}, clear=False)
+ def test_explicit_minimax(self):
+ self.assertEqual(detect_provider(), "minimax")
+
+ @patch.dict(os.environ, {"LLM_PROVIDER": "openai"}, clear=False)
+ def test_explicit_openai(self):
+ self.assertEqual(detect_provider(), "openai")
+
+ @patch.dict(os.environ, {"LLM_PROVIDER": "MINIMAX"}, clear=False)
+ def test_case_insensitive(self):
+ self.assertEqual(detect_provider(), "minimax")
+
+ def test_auto_detect_minimax(self):
+ env = os.environ.copy()
+ env.pop("OPENAI_API_KEY", None)
+ env.pop("LLM_PROVIDER", None)
+ env["MINIMAX_API_KEY"] = "key123"
+ with patch.dict(os.environ, env, clear=True):
+ self.assertEqual(detect_provider(), "minimax")
+
+ def test_default_openai(self):
+ env = os.environ.copy()
+ env.pop("MINIMAX_API_KEY", None)
+ env.pop("LLM_PROVIDER", None)
+ env.pop("OPENAI_API_KEY", None)
+ with patch.dict(os.environ, env, clear=True):
+ self.assertEqual(detect_provider(), "openai")
+
+ def test_both_keys_defaults_openai(self):
+ env = os.environ.copy()
+ env.pop("LLM_PROVIDER", None)
+ env["MINIMAX_API_KEY"] = "mm_key"
+ env["OPENAI_API_KEY"] = "oai_key"
+ with patch.dict(os.environ, env, clear=True):
+ self.assertEqual(detect_provider(), "openai")
+
+
+class TestCreateLLM(unittest.TestCase):
+ """Tests for the create_llm factory function."""
+
+ def test_create_minimax_explicit(self):
+ llm = create_llm(provider="minimax", api_key="test-key")
+ self.assertIsInstance(llm, MiniMaxLLM)
+ self.assertEqual(llm.api_key, "test-key")
+
+ def test_create_minimax_with_model(self):
+ llm = create_llm(
+ provider="minimax",
+ api_key="key",
+ model_name="MiniMax-M2.7-highspeed",
+ )
+ self.assertIsInstance(llm, MiniMaxLLM)
+ self.assertEqual(llm.model_name, "MiniMax-M2.7-highspeed")
+
+ def test_create_minimax_temperature(self):
+ llm = create_llm(provider="minimax", api_key="key", temperature=0.8)
+ self.assertEqual(llm.temperature, 0.8)
+
+ def test_create_minimax_zero_temperature_clamped(self):
+ llm = create_llm(provider="minimax", api_key="key", temperature=0)
+ self.assertEqual(llm.temperature, 0.01)
+
+ @patch.object(llm_mod, "detect_provider", return_value="minimax")
+ @patch.dict(os.environ, {"MINIMAX_API_KEY": "env-key"}, clear=False)
+ def test_auto_detect_minimax(self, mock_detect):
+ llm = create_llm()
+ self.assertIsInstance(llm, MiniMaxLLM)
+
+ def test_provider_case_insensitive(self):
+ llm = create_llm(provider="MiniMax", api_key="key")
+ self.assertIsInstance(llm, MiniMaxLLM)
+
+
+class TestConstants(unittest.TestCase):
+ """Tests for module-level constants."""
+
+ def test_minimax_api_base(self):
+ self.assertEqual(MINIMAX_API_BASE, "https://api.minimax.io/v1")
+
+ def test_minimax_default_model(self):
+ self.assertEqual(MINIMAX_DEFAULT_MODEL, "MiniMax-M2.7")
+
+ def test_minimax_models_list(self):
+ self.assertIn("MiniMax-M2.7", MINIMAX_MODELS)
+ self.assertIn("MiniMax-M2.7-highspeed", MINIMAX_MODELS)
+ self.assertIn("MiniMax-M2.5", MINIMAX_MODELS)
+ self.assertIn("MiniMax-M2.5-highspeed", MINIMAX_MODELS)
+
+ def test_supported_providers(self):
+ self.assertIn("openai", SUPPORTED_PROVIDERS)
+ self.assertIn("minimax", SUPPORTED_PROVIDERS)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/test_llm_provider_integration.py b/tests/test_llm_provider_integration.py
new file mode 100644
index 0000000..7b128c9
--- /dev/null
+++ b/tests/test_llm_provider_integration.py
@@ -0,0 +1,57 @@
+"""Integration tests for MiniMax LLM provider.
+
+These tests require a valid MINIMAX_API_KEY environment variable.
+Skip automatically when the key is not available.
+"""
+
+import importlib.util
+import os
+import sys
+import unittest
+
+# Load llm_provider directly from file to avoid iGPT.__init__ model imports
+_mod_path = os.path.join(
+ os.path.dirname(__file__), "..", "iGPT", "controllers", "llm_provider.py"
+)
+_spec = importlib.util.spec_from_file_location("llm_provider", _mod_path)
+llm_mod = importlib.util.module_from_spec(_spec)
+_spec.loader.exec_module(llm_mod)
+
+MiniMaxLLM = llm_mod.MiniMaxLLM
+create_llm = llm_mod.create_llm
+
+# Skip the entire module if no API key is available
+MINIMAX_API_KEY = os.environ.get("MINIMAX_API_KEY", "")
+SKIP_REASON = "MINIMAX_API_KEY not set"
+
+
+@unittest.skipUnless(MINIMAX_API_KEY, SKIP_REASON)
+class TestMiniMaxIntegration(unittest.TestCase):
+ """Live integration tests against the MiniMax API."""
+
+ def test_basic_completion(self):
+ llm = MiniMaxLLM(api_key=MINIMAX_API_KEY, temperature=0.01)
+ result = llm._call("Say hello in one word.")
+ self.assertIsInstance(result, str)
+ self.assertTrue(len(result) > 0)
+
+ def test_highspeed_model(self):
+ llm = MiniMaxLLM(
+ api_key=MINIMAX_API_KEY,
+ model_name="MiniMax-M2.7-highspeed",
+ temperature=0.01,
+ )
+ result = llm._call("What is 2+2? Answer with just the number.")
+ self.assertIsInstance(result, str)
+ self.assertIn("4", result)
+
+ def test_create_llm_factory(self):
+ llm = create_llm(provider="minimax", api_key=MINIMAX_API_KEY)
+ self.assertIsInstance(llm, MiniMaxLLM)
+ result = llm._call("Reply with the word 'pong'.")
+ self.assertIsInstance(result, str)
+ self.assertTrue(len(result) > 0)
+
+
+if __name__ == "__main__":
+ unittest.main()