From 7d77f520f287de52cf04b6a443aa3cf8cb8b2168 Mon Sep 17 00:00:00 2001 From: alcholiclg <2609599189@qq.com> Date: Sat, 7 Feb 2026 04:08:16 +0800 Subject: [PATCH 1/2] fix webui for windows; fix config loading in deep research webui --- README.md | 6 + README_ZH.md | 6 + projects/deep_research/v2/reporter.yaml | 2 +- projects/deep_research/v2/researcher.yaml | 2 +- projects/deep_research/v2/searcher.yaml | 2 +- requirements/research.txt | 1 + webui/backend/api.py | 38 +++- webui/backend/config_manager.py | 35 +++ webui/backend/deep_research_worker.py | 129 ++++++++--- webui/backend/deep_research_worker_manager.py | 18 +- webui/backend/websocket_handler.py | 1 + .../src/components/SettingsDialog.tsx | 203 +++++++++++++++++- webui/scripts/start-webui.ps1 | 8 + 13 files changed, 416 insertions(+), 35 deletions(-) create mode 100644 webui/scripts/start-webui.ps1 diff --git a/README.md b/README.md index 338c38e35..ae03ea8c3 100644 --- a/README.md +++ b/README.md @@ -558,6 +558,12 @@ MS-Agent provides a modern web interface for interacting with agents. 
Built with ms-agent ui ``` +**Windows tip:** If the console shows garbled text, use the PowerShell helper: + +```powershell +webui/scripts/start-webui.ps1 +``` + The browser will automatically open at http://localhost:7860 **Command Options:** diff --git a/README_ZH.md b/README_ZH.md index d28fb32e0..1e0161d50 100644 --- a/README_ZH.md +++ b/README_ZH.md @@ -568,6 +568,12 @@ MS-Agent提供了一个简洁轻量的Web界面,用于与智能体进行交互 ms-agent ui ``` +**Windows 提示:** 若控制台出现乱码,建议使用 PowerShell 启动脚本: + +```powershell +webui/scripts/start-webui.ps1 +``` + 浏览器打开: http://localhost:7860 **命令参数** diff --git a/projects/deep_research/v2/reporter.yaml b/projects/deep_research/v2/reporter.yaml index 0aeee39d1..7bec68a32 100644 --- a/projects/deep_research/v2/reporter.yaml +++ b/projects/deep_research/v2/reporter.yaml @@ -2,7 +2,7 @@ llm: service: openai model: qwen-plus openai_api_key: - openai_base_url: https://dashscope.aliyuncs.com/compatible-mode/v1 + openai_base_url: generation_config: diff --git a/projects/deep_research/v2/researcher.yaml b/projects/deep_research/v2/researcher.yaml index dca93f796..a5e428ab0 100644 --- a/projects/deep_research/v2/researcher.yaml +++ b/projects/deep_research/v2/researcher.yaml @@ -2,7 +2,7 @@ llm: service: openai model: qwen3-max openai_api_key: - openai_base_url: https://dashscope.aliyuncs.com/compatible-mode/v1 + openai_base_url: generation_config: diff --git a/projects/deep_research/v2/searcher.yaml b/projects/deep_research/v2/searcher.yaml index 6882df894..c37db5ebf 100644 --- a/projects/deep_research/v2/searcher.yaml +++ b/projects/deep_research/v2/searcher.yaml @@ -2,7 +2,7 @@ llm: service: openai model: qwen-plus openai_api_key: - openai_base_url: https://dashscope.aliyuncs.com/compatible-mode/v1 + openai_base_url: generation_config: diff --git a/requirements/research.txt b/requirements/research.txt index 1cf31b0e2..67ce2d3fd 100644 --- a/requirements/research.txt +++ b/requirements/research.txt @@ -13,3 +13,4 @@ pandas Pillow python-dotenv requests +rich 
diff --git a/webui/backend/api.py b/webui/backend/api.py index c0d3f4225..126ac4c02 100644 --- a/webui/backend/api.py +++ b/webui/backend/api.py @@ -9,7 +9,7 @@ from fastapi import APIRouter, HTTPException, Query from fastapi.responses import FileResponse -from pydantic import BaseModel +from pydantic import BaseModel, Field # Import shared instances from shared import config_manager, project_discovery, session_manager @@ -86,6 +86,29 @@ class SearchKeysConfig(BaseModel): serpapi_api_key: Optional[str] = None +class DeepResearchAgentConfig(BaseModel): + model: Optional[str] = '' + api_key: Optional[str] = '' + base_url: Optional[str] = '' + + +class DeepResearchSearchConfig(BaseModel): + summarizer_model: Optional[str] = '' + summarizer_api_key: Optional[str] = '' + summarizer_base_url: Optional[str] = '' + + +class DeepResearchConfig(BaseModel): + researcher: DeepResearchAgentConfig = Field( + default_factory=DeepResearchAgentConfig) + searcher: DeepResearchAgentConfig = Field( + default_factory=DeepResearchAgentConfig) + reporter: DeepResearchAgentConfig = Field( + default_factory=DeepResearchAgentConfig) + search: DeepResearchSearchConfig = Field( + default_factory=DeepResearchSearchConfig) + + class MCPServer(BaseModel): name: str type: str # 'stdio' or 'sse' @@ -330,6 +353,19 @@ async def update_search_keys_config(config: SearchKeysConfig): return {'status': 'updated'} +@router.get('/config/deep_research') +async def get_deep_research_config(): + """Get deep research configuration""" + return config_manager.get_deep_research_config() + + +@router.put('/config/deep_research') +async def update_deep_research_config(config: DeepResearchConfig): + """Update deep research configuration""" + config_manager.update_deep_research_config(config.model_dump()) + return {'status': 'updated'} + + @router.post('/config/mcp/servers') async def add_mcp_server(server: MCPServer): """Add a new MCP server""" diff --git a/webui/backend/config_manager.py 
b/webui/backend/config_manager.py index 949fe624f..8a43b0d22 100644 --- a/webui/backend/config_manager.py +++ b/webui/backend/config_manager.py @@ -23,6 +23,28 @@ class ConfigManager: 'temperature_enabled': False, 'max_tokens': None }, + 'deep_research': { + 'researcher': { + 'model': '', + 'api_key': '', + 'base_url': '' + }, + 'searcher': { + 'model': '', + 'api_key': '', + 'base_url': '' + }, + 'reporter': { + 'model': '', + 'api_key': '', + 'base_url': '' + }, + 'search': { + 'summarizer_model': '', + 'summarizer_api_key': '', + 'summarizer_base_url': '' + } + }, 'edit_file_config': { 'api_key': '', 'base_url': 'https://api.morphllm.com/v1', @@ -169,6 +191,19 @@ def update_search_keys(self, search_keys: Dict[str, Any]): self._config['search_keys'] = search_keys self._save_config() + def get_deep_research_config(self) -> Dict[str, Any]: + """Get deep research configuration""" + config = self._load_config() + return config.get('deep_research', + self.DEFAULT_CONFIG['deep_research']) + + def update_deep_research_config(self, deep_research_config: Dict[str, + Any]): + """Update deep research configuration""" + self._load_config() + self._config['deep_research'] = deep_research_config + self._save_config() + def add_mcp_server(self, name: str, server_config: Dict[str, Any]): """Add a new MCP server""" self._load_config() diff --git a/webui/backend/deep_research_worker.py b/webui/backend/deep_research_worker.py index c87a7879b..617990f90 100644 --- a/webui/backend/deep_research_worker.py +++ b/webui/backend/deep_research_worker.py @@ -52,44 +52,70 @@ def _load_llm_config() -> Dict[str, Any]: return {} +def _load_deep_research_config() -> Dict[str, Any]: + raw = os.environ.get('MS_AGENT_DEEP_RESEARCH_CONFIG') + if not raw: + return {} + try: + return json.loads(raw) + except Exception: + return {} + + +def _normalize_agent_override(raw: Optional[Dict[str, Any]]) -> Dict[str, str]: + raw = raw or {} + return { + 'model': str(raw.get('model') or ''), + 'api_key': 
str(raw.get('api_key') or ''), + 'base_url': str(raw.get('base_url') or ''), + } + + +def _resolve_agent_llm_config(role: str, llm_config: Dict[str, Any], + dr_config: Dict[str, Any]) -> Dict[str, str]: + overrides = _normalize_agent_override((dr_config or {}).get(role)) + return { + 'model': overrides.get('model') or str(llm_config.get('model') or ''), + 'api_key': overrides.get('api_key') or str(llm_config.get('api_key') + or ''), + 'base_url': overrides.get('base_url') or str( + llm_config.get('base_url') or ''), + } + + +def _normalize_search_override(raw: Optional[Dict[str, Any]]) -> Dict[str, str]: + raw = raw or {} + return { + 'summarizer_model': str(raw.get('summarizer_model') or ''), + 'summarizer_api_key': str(raw.get('summarizer_api_key') or ''), + 'summarizer_base_url': str(raw.get('summarizer_base_url') or ''), + } + + def _build_config_override(llm_config: Dict[str, Any], - output_dir: str) -> Optional[Dict[str, Any]]: + output_dir: str, + dr_config: Dict[str, Any]) -> Optional[Dict[str, + Any]]: override: Dict[str, Any] = {} if output_dir: override['output_dir'] = output_dir llm_override: Dict[str, Any] = {} - provider = (llm_config.get('provider') or '').strip() - model = llm_config.get('model') - api_key = llm_config.get('api_key') - base_url = llm_config.get('base_url') + resolved = _resolve_agent_llm_config('researcher', llm_config, dr_config) + model = resolved.get('model') + api_key = resolved.get('api_key') + base_url = resolved.get('base_url') temperature = llm_config.get('temperature') temperature_enabled = bool(llm_config.get('temperature_enabled', False)) max_tokens = llm_config.get('max_tokens') - if provider in {'modelscope', 'openai', 'anthropic', 'dashscope'}: - llm_override['service'] = provider - else: - llm_override['service'] = 'openai' - if model: llm_override['model'] = model - if llm_override['service'] == 'modelscope': - if api_key: - llm_override['modelscope_api_key'] = api_key - if base_url: - 
llm_override['modelscope_base_url'] = base_url - elif llm_override['service'] == 'anthropic': - if api_key: - llm_override['anthropic_api_key'] = api_key - if base_url: - llm_override['anthropic_base_url'] = base_url - else: - if api_key: - llm_override['openai_api_key'] = api_key - if base_url: - llm_override['openai_base_url'] = base_url + if api_key: + llm_override['openai_api_key'] = api_key + if base_url: + llm_override['openai_base_url'] = base_url if llm_override: override['llm'] = llm_override @@ -187,7 +213,9 @@ async def consume_subagent_events(): eventizer.process(history) llm_config = _load_llm_config() - config_override = _build_config_override(llm_config, args.output_dir) + dr_config = _load_deep_research_config() + config_override = _build_config_override(llm_config, args.output_dir, + dr_config) config_override = OmegaConf.create( config_override) if config_override else None @@ -215,6 +243,55 @@ async def prepare_tools_with_callback(): updated['output_dir'] = args.output_dir spec.inline_config = updated + tool_name = str(spec.tool_name or '') + if 'searcher' in tool_name: + resolved = _resolve_agent_llm_config( + 'searcher', llm_config, dr_config) + search_override = _normalize_search_override( + (dr_config or {}).get('search')) + elif 'reporter' in tool_name: + resolved = _resolve_agent_llm_config( + 'reporter', llm_config, dr_config) + search_override = {} + else: + resolved = {} + search_override = {} + + if resolved: + updated = dict(spec.inline_config or {}) + llm_cfg = dict(updated.get('llm') or {}) + if resolved.get('model'): + llm_cfg['model'] = resolved['model'] + if resolved.get('api_key'): + llm_cfg['openai_api_key'] = resolved['api_key'] + if resolved.get('base_url'): + llm_cfg['openai_base_url'] = resolved['base_url'] + if llm_cfg: + updated['llm'] = llm_cfg + if search_override: + tools_cfg = dict(updated.get('tools') or {}) + web_cfg = dict(tools_cfg.get('web_search') or {}) + if search_override.get('summarizer_model'): + 
web_cfg['summarizer_model'] = search_override[ + 'summarizer_model'] + if search_override.get('summarizer_api_key'): + web_cfg['summarizer_api_key'] = search_override[ + 'summarizer_api_key'] + if search_override.get('summarizer_base_url'): + web_cfg['summarizer_base_url'] = search_override[ + 'summarizer_base_url'] + if web_cfg: + tools_cfg['web_search'] = web_cfg + updated['tools'] = tools_cfg + spec.inline_config = updated + + env_cfg = dict(spec.env or {}) + if resolved.get('api_key'): + env_cfg['OPENAI_API_KEY'] = resolved['api_key'] + if resolved.get('base_url'): + env_cfg['OPENAI_BASE_URL'] = resolved['base_url'] + spec.env = env_cfg + agent.prepare_tools = prepare_tools_with_callback artifact_task = asyncio.create_task( diff --git a/webui/backend/deep_research_worker_manager.py b/webui/backend/deep_research_worker_manager.py index 4847f0061..e30baee5c 100644 --- a/webui/backend/deep_research_worker_manager.py +++ b/webui/backend/deep_research_worker_manager.py @@ -26,13 +26,24 @@ def _get_worker_path(self) -> Path: return Path(__file__).resolve().parent / 'deep_research_worker.py' def _build_env(self, env_vars: Optional[Dict[str, str]], - llm_config: Optional[Dict[str, Any]]) -> Dict[str, str]: + llm_config: Optional[Dict[str, Any]], + deep_research_config: Optional[Dict[str, Any]]) -> Dict[str, str]: env = os.environ.copy() if env_vars: env.update({k: v for k, v in env_vars.items() if v}) if llm_config: env['MS_AGENT_LLM_CONFIG'] = json.dumps( llm_config, ensure_ascii=False) + if deep_research_config: + env['MS_AGENT_DEEP_RESEARCH_CONFIG'] = json.dumps( + deep_research_config, ensure_ascii=False) + + api_key = (llm_config or {}).get('api_key') + base_url = (llm_config or {}).get('base_url') + if api_key and not env.get('OPENAI_API_KEY'): + env['OPENAI_API_KEY'] = api_key + if base_url and not env.get('OPENAI_BASE_URL'): + env['OPENAI_BASE_URL'] = base_url env['PYTHONUNBUFFERED'] = '1' repo_root = str(self._get_repo_root()) existing_path = 
env.get('PYTHONPATH', '') @@ -48,7 +59,8 @@ async def start(self, config_path: str, output_dir: str, env_vars: Optional[Dict[str, str]] = None, - llm_config: Optional[Dict[str, Any]] = None) -> None: + llm_config: Optional[Dict[str, Any]] = None, + deep_research_config: Optional[Dict[str, Any]] = None) -> None: if session_id in self._processes: await self.stop(session_id) @@ -69,7 +81,7 @@ async def start(self, str(output_dir_path), ] - env = self._build_env(env_vars, llm_config) + env = self._build_env(env_vars, llm_config, deep_research_config) process = await asyncio.create_subprocess_exec( *cmd, diff --git a/webui/backend/websocket_handler.py b/webui/backend/websocket_handler.py index 7d338ef37..7b2830b17 100644 --- a/webui/backend/websocket_handler.py +++ b/webui/backend/websocket_handler.py @@ -266,6 +266,7 @@ async def start_agent(session_id: str, data: Dict[str, Any], output_dir=str(output_dir), env_vars=config_manager.get_env_vars(), llm_config=config_manager.get_llm_config(), + deep_research_config=config_manager.get_deep_research_config(), ) session_manager.update_session(session_id, {'status': 'running'}) await connection_manager.send_to_session(session_id, { diff --git a/webui/frontend/src/components/SettingsDialog.tsx b/webui/frontend/src/components/SettingsDialog.tsx index 1e9a87b32..8c4d49c0f 100644 --- a/webui/frontend/src/components/SettingsDialog.tsx +++ b/webui/frontend/src/components/SettingsDialog.tsx @@ -64,6 +64,25 @@ interface SearchKeysConfig { serpapi_api_key: string; } +interface DeepResearchAgentConfig { + model: string; + api_key: string; + base_url: string; +} + +interface DeepResearchSearchConfig { + summarizer_model: string; + summarizer_api_key: string; + summarizer_base_url: string; +} + +interface DeepResearchConfig { + researcher: DeepResearchAgentConfig; + searcher: DeepResearchAgentConfig; + reporter: DeepResearchAgentConfig; + search: DeepResearchSearchConfig; +} + interface MCPServer { type: 'stdio' | 'sse'; command?: 
string; @@ -110,10 +129,32 @@ const SettingsDialog: React.FC = ({ open, onClose }) => { exa_api_key: '', serpapi_api_key: '', }); + const [deepResearchConfig, setDeepResearchConfig] = useState({ + researcher: { model: '', api_key: '', base_url: '' }, + searcher: { model: '', api_key: '', base_url: '' }, + reporter: { model: '', api_key: '', base_url: '' }, + search: { summarizer_model: '', summarizer_api_key: '', summarizer_base_url: '' }, + }); const [mcpServers, setMcpServers] = useState>({}); const [newServerName, setNewServerName] = useState(''); const [saveStatus, setSaveStatus] = useState<'idle' | 'saving' | 'saved' | 'error'>('idle'); + const normalizeDeepResearchConfig = (data: Partial | null | undefined): DeepResearchConfig => { + const base: DeepResearchConfig = { + researcher: { model: '', api_key: '', base_url: '' }, + searcher: { model: '', api_key: '', base_url: '' }, + reporter: { model: '', api_key: '', base_url: '' }, + search: { summarizer_model: '', summarizer_api_key: '', summarizer_base_url: '' }, + }; + if (!data) return base; + return { + researcher: { ...base.researcher, ...(data.researcher || {}) }, + searcher: { ...base.searcher, ...(data.searcher || {}) }, + reporter: { ...base.reporter, ...(data.reporter || {}) }, + search: { ...base.search, ...(data.search || {}) }, + }; + }; + // Load config on mount useEffect(() => { if (open) { @@ -123,12 +164,13 @@ const SettingsDialog: React.FC = ({ open, onClose }) => { const loadConfig = async () => { try { - const [llmRes, mcpRes, editFileRes, edgeOnePagesRes, searchKeysRes] = await Promise.all([ + const [llmRes, mcpRes, editFileRes, edgeOnePagesRes, searchKeysRes, deepResearchRes] = await Promise.all([ fetch('/api/config/llm'), fetch('/api/config/mcp'), fetch('/api/config/edit_file'), fetch('/api/config/edgeone_pages'), fetch('/api/config/search_keys'), + fetch('/api/config/deep_research'), ]); if (llmRes.ok) { @@ -161,6 +203,11 @@ const SettingsDialog: React.FC = ({ open, onClose }) => { const 
data = await searchKeysRes.json(); setSearchKeysConfig(data); } + + if (deepResearchRes.ok) { + const data = await deepResearchRes.json(); + setDeepResearchConfig(normalizeDeepResearchConfig(data)); + } } catch (error) { console.error('Failed to load config:', error); } @@ -206,7 +253,13 @@ const SettingsDialog: React.FC = ({ open, onClose }) => { body: JSON.stringify(searchKeysConfig), }); - if (llmRes.ok && mcpRes.ok && editFileRes.ok && edgeOnePagesRes.ok && searchKeysRes.ok) { + const deepResearchRes = await fetch('/api/config/deep_research', { + method: 'PUT', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(deepResearchConfig), + }); + + if (llmRes.ok && mcpRes.ok && editFileRes.ok && edgeOnePagesRes.ok && searchKeysRes.ok && deepResearchRes.ok) { setSaveStatus('saved'); setTimeout(() => setSaveStatus('idle'), 2000); } else { @@ -289,6 +342,7 @@ const SettingsDialog: React.FC = ({ open, onClose }) => { + {/* LLM Configuration Tab */} @@ -627,6 +681,151 @@ const SettingsDialog: React.FC = ({ open, onClose }) => { )} + + {/* Deep Research Tab */} + + + + Configure per-agent overrides for Deep Research. Leave fields blank to fall back to the global LLM settings. 
+ + + + Researcher Agent + setDeepResearchConfig((prev) => ({ + ...prev, + researcher: { ...prev.researcher, model: e.target.value }, + }))} + /> + setDeepResearchConfig((prev) => ({ + ...prev, + researcher: { ...prev.researcher, api_key: e.target.value }, + }))} + /> + setDeepResearchConfig((prev) => ({ + ...prev, + researcher: { ...prev.researcher, base_url: e.target.value }, + }))} + /> + + + + + + Searcher Agent + setDeepResearchConfig((prev) => ({ + ...prev, + searcher: { ...prev.searcher, model: e.target.value }, + }))} + /> + setDeepResearchConfig((prev) => ({ + ...prev, + searcher: { ...prev.searcher, api_key: e.target.value }, + }))} + /> + setDeepResearchConfig((prev) => ({ + ...prev, + searcher: { ...prev.searcher, base_url: e.target.value }, + }))} + /> + + + + + + Reporter Agent + setDeepResearchConfig((prev) => ({ + ...prev, + reporter: { ...prev.reporter, model: e.target.value }, + }))} + /> + setDeepResearchConfig((prev) => ({ + ...prev, + reporter: { ...prev.reporter, api_key: e.target.value }, + }))} + /> + setDeepResearchConfig((prev) => ({ + ...prev, + reporter: { ...prev.reporter, base_url: e.target.value }, + }))} + /> + + + + + + Search Summarizer + setDeepResearchConfig((prev) => ({ + ...prev, + search: { ...prev.search, summarizer_model: e.target.value }, + }))} + placeholder="qwen-flash" + helperText="Recommended: low-cost model (default qwen-flash) to reduce token usage." + /> + setDeepResearchConfig((prev) => ({ + ...prev, + search: { ...prev.search, summarizer_api_key: e.target.value }, + }))} + /> + setDeepResearchConfig((prev) => ({ + ...prev, + search: { ...prev.search, summarizer_base_url: e.target.value }, + }))} + /> + + + diff --git a/webui/scripts/start-webui.ps1 b/webui/scripts/start-webui.ps1 new file mode 100644 index 000000000..b7dbeb4a1 --- /dev/null +++ b/webui/scripts/start-webui.ps1 @@ -0,0 +1,8 @@ +$ErrorActionPreference = "Stop" + +# Force UTF-8 output for this session to avoid garbled text in Windows consoles. 
+$OutputEncoding = [Console]::OutputEncoding = [Text.UTF8Encoding]::UTF8 +$env:PYTHONUTF8 = "1" +chcp 65001 | Out-Null + +ms-agent ui @args From 77925b924aed2ac1973204be7fecae10e5af2654 Mon Sep 17 00:00:00 2001 From: alcholiclg Date: Sat, 7 Feb 2026 04:26:42 +0800 Subject: [PATCH 2/2] fix lint --- webui/backend/config_manager.py | 2 +- webui/backend/deep_research_worker.py | 31 ++++++++++--------- webui/backend/deep_research_worker_manager.py | 26 +++++++++------- 3 files changed, 32 insertions(+), 27 deletions(-) diff --git a/webui/backend/config_manager.py b/webui/backend/config_manager.py index 8a43b0d22..eddf94915 100644 --- a/webui/backend/config_manager.py +++ b/webui/backend/config_manager.py @@ -198,7 +198,7 @@ def get_deep_research_config(self) -> Dict[str, Any]: self.DEFAULT_CONFIG['deep_research']) def update_deep_research_config(self, deep_research_config: Dict[str, - Any]): + Any]): """Update deep research configuration""" self._load_config() self._config['deep_research'] = deep_research_config diff --git a/webui/backend/deep_research_worker.py b/webui/backend/deep_research_worker.py index 617990f90..e8a3480a3 100644 --- a/webui/backend/deep_research_worker.py +++ b/webui/backend/deep_research_worker.py @@ -75,15 +75,17 @@ def _resolve_agent_llm_config(role: str, llm_config: Dict[str, Any], dr_config: Dict[str, Any]) -> Dict[str, str]: overrides = _normalize_agent_override((dr_config or {}).get(role)) return { - 'model': overrides.get('model') or str(llm_config.get('model') or ''), - 'api_key': overrides.get('api_key') or str(llm_config.get('api_key') - or ''), - 'base_url': overrides.get('base_url') or str( - llm_config.get('base_url') or ''), + 'model': + overrides.get('model') or str(llm_config.get('model') or ''), + 'api_key': + overrides.get('api_key') or str(llm_config.get('api_key') or ''), + 'base_url': + overrides.get('base_url') or str(llm_config.get('base_url') or ''), } -def _normalize_search_override(raw: Optional[Dict[str, Any]]) -> 
Dict[str, str]: +def _normalize_search_override( + raw: Optional[Dict[str, Any]]) -> Dict[str, str]: raw = raw or {} return { 'summarizer_model': str(raw.get('summarizer_model') or ''), @@ -92,10 +94,9 @@ def _normalize_search_override(raw: Optional[Dict[str, Any]]) -> Dict[str, str]: } -def _build_config_override(llm_config: Dict[str, Any], - output_dir: str, - dr_config: Dict[str, Any]) -> Optional[Dict[str, - Any]]: +def _build_config_override( + llm_config: Dict[str, Any], output_dir: str, + dr_config: Dict[str, Any]) -> Optional[Dict[str, Any]]: override: Dict[str, Any] = {} if output_dir: override['output_dir'] = output_dir @@ -275,11 +276,13 @@ async def prepare_tools_with_callback(): web_cfg['summarizer_model'] = search_override[ 'summarizer_model'] if search_override.get('summarizer_api_key'): - web_cfg['summarizer_api_key'] = search_override[ - 'summarizer_api_key'] + web_cfg[ + 'summarizer_api_key'] = search_override[ + 'summarizer_api_key'] if search_override.get('summarizer_base_url'): - web_cfg['summarizer_base_url'] = search_override[ - 'summarizer_base_url'] + web_cfg[ + 'summarizer_base_url'] = search_override[ + 'summarizer_base_url'] if web_cfg: tools_cfg['web_search'] = web_cfg updated['tools'] = tools_cfg diff --git a/webui/backend/deep_research_worker_manager.py b/webui/backend/deep_research_worker_manager.py index e30baee5c..a5eb29d05 100644 --- a/webui/backend/deep_research_worker_manager.py +++ b/webui/backend/deep_research_worker_manager.py @@ -25,9 +25,10 @@ def _get_repo_root(self) -> Path: def _get_worker_path(self) -> Path: return Path(__file__).resolve().parent / 'deep_research_worker.py' - def _build_env(self, env_vars: Optional[Dict[str, str]], - llm_config: Optional[Dict[str, Any]], - deep_research_config: Optional[Dict[str, Any]]) -> Dict[str, str]: + def _build_env( + self, env_vars: Optional[Dict[str, str]], + llm_config: Optional[Dict[str, Any]], + deep_research_config: Optional[Dict[str, Any]]) -> Dict[str, str]: env = 
os.environ.copy() if env_vars: env.update({k: v for k, v in env_vars.items() if v}) @@ -52,15 +53,16 @@ def _build_env(self, env_vars: Optional[Dict[str, str]], os.pathsep + existing_path if existing_path else '') return env - async def start(self, - session_id: str, - *, - query: str, - config_path: str, - output_dir: str, - env_vars: Optional[Dict[str, str]] = None, - llm_config: Optional[Dict[str, Any]] = None, - deep_research_config: Optional[Dict[str, Any]] = None) -> None: + async def start( + self, + session_id: str, + *, + query: str, + config_path: str, + output_dir: str, + env_vars: Optional[Dict[str, str]] = None, + llm_config: Optional[Dict[str, Any]] = None, + deep_research_config: Optional[Dict[str, Any]] = None) -> None: if session_id in self._processes: await self.stop(session_id)