Skip to content

Commit 87b894b

Browse files
committed
feat(iflow): add support for glm-5, minimax-m2.5, qwen3-32b, tstars2.0, iflow-rome-30ba3b
Add five new models to the iFlow provider to maintain feature parity with the
competitor repository (router-for-me/CLIProxyAPI):

- glm-5: added to HARDCODED_MODELS, ENABLE_THINKING_MODELS, and GLM_MODELS
  (thinking support with GLM-style clear_thinking handling)
- minimax-m2.5: added to HARDCODED_MODELS and REASONING_SPLIT_MODELS
  (thinking support via the reasoning_split boolean)
- qwen3-32b: added to HARDCODED_MODELS and ENABLE_THINKING_MODELS
  (thinking support)
- tstars2.0: added to HARDCODED_MODELS (multimodal assistant)
- iflow-rome-30ba3b: added to HARDCODED_MODELS (iFlow Rome model)

Also update REASONING_PRESERVATION_MODELS_PREFIXES to include the "glm-5" and
"tstars" prefixes, and alphabetically sort the HARDCODED_MODELS list for
maintainability.

Closes: #129
1 parent 66d8f68 commit 87b894b

File tree

1 file changed

+18
-13
lines changed

1 file changed

+18
-13
lines changed

src/rotator_library/providers/iflow_provider.py

Lines changed: 18 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -31,25 +31,30 @@
 HARDCODED_MODELS = [
     "glm-4.6",
     "glm-4.7",
-    "minimax-m2",
-    "minimax-m2.1",
-    "qwen3-coder-plus",
+    "glm-5",
+    "iflow-rome-30ba3b",
     "kimi-k2",
     "kimi-k2-0905",
     "kimi-k2-thinking",  # Seems to not work, but should
     "kimi-k2.5",  # Seems to not work, but should
+    "minimax-m2",
+    "minimax-m2.1",
+    "minimax-m2.5",
+    "qwen3-32b",
+    "qwen3-235b",
+    "qwen3-235b-a22b-instruct",
+    "qwen3-235b-a22b-thinking-2507",
+    "qwen3-coder-plus",
     "qwen3-max",
     "qwen3-max-preview",
-    "qwen3-235b-a22b-thinking-2507",
+    "qwen3-vl-plus",
     "deepseek-v3.2-reasoner",
     "deepseek-v3.2-chat",
     "deepseek-v3.2",  # seems to not work, but should. Use above variants instead
     "deepseek-v3.1",
     "deepseek-v3",
     "deepseek-r1",
-    "qwen3-vl-plus",
-    "qwen3-235b-a22b-instruct",
-    "qwen3-235b",
+    "tstars2.0",
 ]
5459

5560
# OpenAI-compatible parameters supported by iFlow API
@@ -83,20 +88,22 @@
 ENABLE_THINKING_MODELS = {
     "glm-4.6",
     "glm-4.7",
+    "glm-5",
     "qwen3-max-preview",
+    "qwen3-32b",
     "deepseek-v3.2",
     "deepseek-v3.1",
 }

 # GLM models need additional clear_thinking=false when thinking is enabled
-GLM_MODELS = {"glm-4.6", "glm-4.7"}
+GLM_MODELS = {"glm-4.6", "glm-4.7", "glm-5"}

 # Models using reasoning_split (boolean) instead of enable_thinking
-REASONING_SPLIT_MODELS = {"minimax-m2", "minimax-m2.1"}
+REASONING_SPLIT_MODELS = {"minimax-m2", "minimax-m2.1", "minimax-m2.5"}

 # Models that benefit from reasoning_content preservation in message history
 # (for multi-turn conversations)
-REASONING_PRESERVATION_MODELS_PREFIXES = ("glm-4", "minimax-m2")
+REASONING_PRESERVATION_MODELS_PREFIXES = ("glm-4", "glm-5", "minimax-m2", "tstars")

 # Cache file path for reasoning content preservation
 _REASONING_CACHE_FILE = (
@@ -1022,9 +1029,7 @@ async def stream_handler(response_stream, attempt=1):
             else:
                 if not error_text:
                     content_type = response.headers.get("content-type", "")
-                    error_text = (
-                        f"(empty response body, content-type={content_type})"
-                    )
+                    error_text = f"(empty response body, content-type={content_type})"
                 error_msg = (
                     f"iFlow HTTP {response.status_code} error: {error_text}"
                 )

0 commit comments

Comments (0)