OpenClaw自定义任意云端模型的配置安装脚本

阅读时长:约 8 分钟

本篇文章主要介绍OpenClaw自定义任意云端模型的配置安装脚本及使用方法,主要作用为设置本地模型或自定义模型信息。 作者:任聪聪 日期:2026年3月9日

代码实例

#!/usr/bin/env python3
"""
Cloud Model Configuration Assistant for OpenClaw
自动配置任意云端大模型 (OpenAI, Anthropic, Gemini, DeepSeek, etc.) 到 OpenClaw。
"""

import json
import os
import sys
import urllib.request
import urllib.error
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple

# --- 配置常量 ---
# --- Configuration constants ---
DEFAULT_CONTEXT_WINDOW = 32768   # fallback context size when the user presses Enter
DEFAULT_MAX_TOKENS = 8192        # upper bound on per-response tokens
OPENCLAW_MIN_CONTEXT = 16000     # below this, OpenClaw agents may misbehave; warn the user

# Predefined provider templates.
#
# Each entry describes one cloud provider:
#   name        - human-readable label shown in menus
#   base_url    - default OpenAI-compatible API endpoint ("" means user must type one)
#   api_key_env - environment variable checked before prompting for a key
#   api_type    - protocol identifier written into the OpenClaw config
#   models_hint - example model ids shown to the user
PROVIDER_TEMPLATES = {
    "openai": {
        "name": "OpenAI",
        "base_url": "https://api.openai.com/v1",
        "api_key_env": "OPENAI_API_KEY",
        "api_type": "openai-completions",
        "models_hint": "gpt-4o, gpt-4-turbo, gpt-3.5-turbo"
    },
    "anthropic": {
        "name": "Anthropic (Claude)",
        # This script targets OpenAI-compatible interfaces; Anthropic traffic is
        # assumed to flow through an OpenAI-compatible gateway/adapter. If
        # OpenClaw gains native Anthropic protocol support, switch api_type there.
        "base_url": "https://api.anthropic.com/v1",
        # Bug fix: this key was missing, so get_api_key() could never pick the
        # key up from the environment for Anthropic (all other providers have it).
        "api_key_env": "ANTHROPIC_API_KEY",
        "api_type": "openai-completions",
        "models_hint": "claude-3-5-sonnet-20241022, claude-3-opus-20240229"
    },
    "google": {
        "name": "Google Gemini",
        # Gemini exposes an OpenAI-compatible surface under /openai.
        "base_url": "https://generativelanguage.googleapis.com/v1beta/openai",
        "api_key_env": "GEMINI_API_KEY",
        "api_type": "openai-completions",
        "models_hint": "gemini-1.5-pro, gemini-1.5-flash"
    },
    "deepseek": {
        "name": "DeepSeek",
        "base_url": "https://api.deepseek.com/v1",
        "api_key_env": "DEEPSEEK_API_KEY",
        "api_type": "openai-completions",
        "models_hint": "deepseek-chat, deepseek-coder"
    },
    "moonshot": {
        "name": "Moonshot (Kimi)",
        "base_url": "https://api.moonshot.cn/v1",
        "api_key_env": "MOONSHOT_API_KEY",
        "api_type": "openai-completions",
        "models_hint": "moonshot-v1-8k, moonshot-v1-32k, moonshot-v1-128k"
    },
    "siliconflow": {
        "name": "SiliconFlow (硅基流动)",
        "base_url": "https://api.siliconflow.cn/v1",
        "api_key_env": "SILICONFLOW_API_KEY",
        "api_type": "openai-completions",
        "models_hint": "Qwen/Qwen2.5-72B-Instruct, meta-llama/Llama-3.1-405B-Instruct"
    },
    "custom": {
        "name": "Custom OpenAI-Compatible (vLLM, OneAPI, etc.)",
        "base_url": "",       # supplied interactively by the user
        "api_key_env": "",
        "api_type": "openai-completions",
        "models_hint": "Any model served by your endpoint"
    }
}

def print_banner():
    """Print the tool's startup banner to stdout."""
    bar = "=" * 70
    print(bar)
    print("☁️  OpenClaw 云端模型配置助手")
    print(bar)
    print()

def get_config_path() -> str:
    """Resolve the OpenClaw config file path.

    Precedence: the OPENCLAW_CONFIG_PATH environment variable, then the
    first known location that already exists, then the primary default
    location (even if it does not exist yet).
    """
    env_override = os.environ.get("OPENCLAW_CONFIG_PATH")
    if env_override:
        return env_override

    candidates = [
        os.path.expanduser("~/.openclaw/openclaw.json"),
        os.path.expanduser("~/.moltbot/moltbot.json"),
    ]
    existing = next((p for p in candidates if os.path.exists(p)), None)
    return existing if existing is not None else candidates[0]

def load_config(path: str) -> Dict[str, Any]:
    """Load the JSON config at *path*.

    Returns a fresh config skeleton when the file does not exist; exits
    the process with status 1 when the file exists but cannot be parsed.
    """
    if not os.path.exists(path):
        print(f"⚠️  配置文件不存在: {path},将创建新配置...")
        return {"models": {"providers": {}}, "agents": {"defaults": {}}}
    try:
        with open(path, "r", encoding="utf-8") as fh:
            parsed = json.load(fh)
    except Exception as exc:
        print(f"❌ 读取失败: {exc}")
        sys.exit(1)
    return parsed

def backup_config(path: str) -> str:
    """Copy *path* to a timestamped sibling file before it is rewritten.

    Returns the backup file's path, or "" when *path* does not exist
    (nothing to back up).
    """
    if not os.path.exists(path):
        return ""
    # Local import: shutil is only needed on this occasional path.
    import shutil
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    dest = f"{path}.backup.{stamp}"
    shutil.copy2(path, dest)
    print(f"✅ 已创建备份: {dest}")
    return dest

def save_config(config: Dict, path: str) -> bool:
    """Serialize *config* as pretty-printed UTF-8 JSON to *path*.

    Creates missing parent directories. Returns True on success; prints
    the error and returns False on failure.
    """
    try:
        parent = os.path.dirname(path)
        # Bug fix: os.makedirs("") raises FileNotFoundError, so saving to a
        # bare filename such as "config.json" always failed. Only create
        # directories when a parent component is actually present.
        if parent:
            os.makedirs(parent, exist_ok=True)
        with open(path, "w", encoding="utf-8") as f:
            json.dump(config, f, indent=2, ensure_ascii=False)
        return True
    except Exception as e:
        print(f"❌ 保存失败: {e}")
        return False

def fetch_models_openai_compat(base_url: str, api_key: str) -> Tuple[List[Dict], Optional[str]]:
    """Query ``<base_url>/models`` (OpenAI-compatible) for available models.

    Returns ``(models, error)``: on success, *models* is a list of
    ``{"id": ...}`` dicts with embedding-style models filtered out and
    *error* is None; on failure, *models* is empty and *error* is a
    human-readable message.
    """
    endpoint = f"{base_url.rstrip('/')}/models"
    req = urllib.request.Request(endpoint, method="GET")
    req.add_header("Authorization", f"Bearer {api_key}")

    try:
        with urllib.request.urlopen(req, timeout=15) as resp:
            payload = json.loads(resp.read().decode("utf-8"))
            entries = payload.get("data", []) if isinstance(payload, dict) else payload
            # Skip obvious embedding models; this tool only wires up LLMs.
            skip_markers = ("embed", "bge", "m3e")
            chat_models = [
                {"id": entry["id"]}
                for entry in entries
                if isinstance(entry, dict)
                and "id" in entry
                and not any(marker in entry["id"].lower() for marker in skip_markers)
            ]
            return chat_models, None
    except urllib.error.HTTPError as err:
        return [], f"HTTP 错误 {err.code}: {err.reason} (检查 API Key 或 URL)"
    except urllib.error.URLError as err:
        return [], f"网络错误: {err.reason}"
    except Exception as err:
        return [], str(err)

def get_api_key(provider_key: str, template: Dict) -> str:
    """Obtain an API key for the provider described by *template*.

    Checks the provider's environment variable (``api_key_env``) first;
    otherwise prompts interactively until a non-empty key is entered.

    Args:
        provider_key: Provider identifier (kept for interface stability;
            not used in the lookup itself).
        template: Provider template dict; may contain "api_key_env" and
            must contain "name".
    """
    env_var = template.get("api_key_env")
    if env_var and env_var in os.environ:
        key = os.environ[env_var]
        if key:
            print(f"🔑 从环境变量 {env_var} 自动获取 API Key")
            return key

    prompt_msg = f"请输入 {template['name']} 的 API Key: "
    if env_var:
        prompt_msg += f"(或设置环境变量 {env_var})\n> "
    else:
        prompt_msg += "\n> "

    # Bug fix: the original recursed on empty input, growing the call stack
    # with every empty entry; a loop gives the identical prompt/error
    # sequence without unbounded recursion.
    while True:
        key = input(prompt_msg).strip()
        if key:
            return key
        print("❌ API Key 不能为空。")

def select_provider() -> Tuple[str, Dict]:
    """Interactively choose a provider from PROVIDER_TEMPLATES.

    Accepts either a numeric menu index or a provider key name; re-prompts
    on invalid input. Returns ``(provider_key, template)``.
    """
    print("📋 选择云服务提供商:")
    ordered_keys = list(PROVIDER_TEMPLATES.keys())
    for idx, key in enumerate(ordered_keys):
        print(f"   [{idx}] {PROVIDER_TEMPLATES[key]['name']}")

    while True:
        raw = input("\n您的选择 (输入数字或名称): ").strip().lower()
        if raw.isdigit() and 0 <= int(raw) < len(ordered_keys):
            chosen = ordered_keys[int(raw)]
            return chosen, PROVIDER_TEMPLATES[chosen]
        if raw in PROVIDER_TEMPLATES:
            return raw, PROVIDER_TEMPLATES[raw]
        print("⚠️  无效输入,请重试。")

def build_model_config(model_id: str, context_window: int, cost_input: float, cost_output: float) -> Dict:
    """Build an OpenClaw model-config dict for a single model.

    Heuristics based on the model id: ids containing "vl"/"vision"/"gemini"
    also accept image input, and ids containing "o1"/"deepseek-r" are
    flagged as reasoning models. maxTokens is capped at a quarter of the
    context window (and at DEFAULT_MAX_TOKENS).
    """
    lowered = model_id.lower()
    modalities = ["text"]
    if any(tag in lowered for tag in ("vl", "vision", "gemini")):
        modalities.append("image")

    return {
        "id": model_id,
        "name": f"{model_id} (Cloud)",
        "reasoning": "o1" in lowered or "deepseek-r" in lowered,
        "input": modalities,
        "cost": {
            "input": cost_input,
            "output": cost_output,
            "cacheRead": 0,
            "cacheWrite": 0
        },
        "contextWindow": context_window,
        "maxTokens": min(DEFAULT_MAX_TOKENS, context_window // 4),
        "compat": {"supportsDeveloperRole": True}
    }

def main() -> None:
    """Interactive wizard that wires a cloud LLM provider into OpenClaw.

    Flow: pick a provider template, resolve endpoint + API key, discover
    or manually enter model ids, set default context/cost parameters,
    choose a primary model, then back up and rewrite the OpenClaw JSON
    config (``models.providers`` and ``agents.defaults``) in place.
    """
    print_banner()

    # 1. Choose a provider template.
    provider_key, template = select_provider()
    provider_id = f"cloud-{provider_key}"
    
    print(f"\n⚙️  配置提供商: {template['name']}")

    # 2. Resolve the API base URL: custom providers must type one in;
    # known providers may optionally override the template default.
    base_url = template["base_url"]
    if provider_key == "custom":
        base_url = input(f"请输入自定义 API Base URL (例如 http://localhost:8000/v1): ").strip()
        if not base_url:
            print("❌ URL 不能为空")
            sys.exit(1)
    else:
        print(f"   默认 API 端点: {base_url}")
        override = input("   是否覆盖默认端点?(y/N): ").strip().lower()
        if override == 'y':
            base_url = input("   输入新的 Base URL: ").strip()

    # 3. Obtain the API key (environment variable or interactive prompt).
    api_key = get_api_key(provider_key, template)

    # 4. Fetch the model list (only for OpenAI-compatible endpoints).
    available_models = []
    error = None
    
    # Attempt auto-discovery via the provider's /models endpoint;
    # fall back to manual entry on any failure or empty result.
    if template["api_type"] == "openai-completions":
        print(f"\n🔄 正在从 {base_url} 拉取模型列表...")
        available_models, error = fetch_models_openai_compat(base_url, api_key)
        
        if error:
            print(f"⚠️  自动探测失败: {error}")
            print("   将进入手动输入模式。")
        elif not available_models:
            print("⚠️  未检测到模型列表 (可能接口不支持 /models 或返回空)。")
            print("   将进入手动输入模式。")
    
    # 5. Determine which models to configure.
    final_models = [] # list of model-id strings selected for configuration
    
    if available_models:
        print(f"✅ 发现 {len(available_models)} 个模型。")
        print("\n📋 请选择要启用的模型 (可多选,用逗号分隔索引,或直接回车全选):")
        for i, m in enumerate(available_models):
            print(f"   [{i}] {m['id']}")
        
        selection = input("\n选择 (例如 0,2,5 或 回车全选): ").strip()
        indices = []
        if not selection:
            # Empty input selects everything.
            indices = range(len(available_models))
        else:
            try:
                indices = [int(x.strip()) for x in selection.split(',')]
            except ValueError:
                print("⚠️  输入格式错误,默认全选。")
                indices = range(len(available_models))
        
        # Out-of-range indices are silently dropped.
        for i in indices:
            if 0 <= i < len(available_models):
                final_models.append(available_models[i]["id"])
    else:
        # Manual entry: comma-separated model ids.
        print("\n✍️  手动输入模型 ID (例如 gpt-4o, claude-3-5-sonnet-20241022):")
        models_input = input("模型 ID (多个用逗号分隔): ").strip()
        if not models_input:
            print("❌ 至少需要配置一个模型。")
            sys.exit(1)
        final_models = [m.strip() for m in models_input.split(',')]

    # 6. Global defaults (context window & per-token cost).
    print(f"\n🔧 全局参数配置 (可对单个模型微调,此处设默认值):")
    
    ctx_input = input(f"默认上下文窗口 (回车默认 {DEFAULT_CONTEXT_WINDOW}): ").strip()
    context_window = int(ctx_input) if ctx_input else DEFAULT_CONTEXT_WINDOW
    
    # Warn (but allow) context windows below the recommended minimum.
    if context_window < OPENCLAW_MIN_CONTEXT:
        confirm = input(f"⚠️  警告: {context_window} < {OPENCLAW_MIN_CONTEXT}。继续?(y/N): ").strip().lower()
        if confirm != 'y':
            sys.exit(0)

    cost_in = input("默认输入成本 ($/1M tokens, 回车默认 0): ").strip()
    cost_out = input("默认输出成本 ($/1M tokens, 回车默认 0): ").strip()
    
    try:
        c_in = float(cost_in) if cost_in else 0.0
        c_out = float(cost_out) if cost_out else 0.0
    except ValueError:
        print("⚠️  成本输入无效,设为 0。")
        c_in, c_out = 0.0, 0.0

    # Build one model-config object per selected model.
    model_configs = []
    print("\n🏗️  正在构建模型配置...")
    
    # Defaults are applied in bulk here; a more advanced flow could prompt
    # for per-model context/cost overrides inside this loop.
    for mid in final_models:
        # Simple heuristics bump the context window for well-known
        # long-context model ids.
        m_ctx = context_window
        m_c_in, m_c_out = c_in, c_out
        
        if "128k" in mid.lower() or "gemini-1.5" in mid.lower():
            m_ctx = 131072
        elif "32k" in mid.lower():
            m_ctx = 32768
            
        config_item = build_model_config(mid, m_ctx, m_c_in, m_c_out)
        model_configs.append(config_item)
        print(f"   - {mid} (Context: {m_ctx})")

    # 7. Choose the primary (default) model.
    if not model_configs:
        print("❌ 没有模型被配置。")
        sys.exit(1)
    
    print(f"\n📋 选择默认主模型:")
    for i, mc in enumerate(model_configs):
        marker = " [推荐]" if i == 0 else ""
        print(f"   [{i}] {mc['id']}{marker}")
    
    choice = input("您的选择 (回车默认第一个): ").strip()
    try:
        idx = int(choice) if choice else 0
        selected_model_id = model_configs[idx]["id"]
    except (ValueError, IndexError):
        # Any invalid choice falls back to the first model.
        selected_model_id = model_configs[0]["id"]
    
    print(f"✅ 主模型设定为: {selected_model_id}")

    # 8. Write the configuration (after backing up the existing file).
    config_path = get_config_path()
    print(f"\n💾 正在保存配置到 {config_path} ...")
    
    config = load_config(config_path)
    backup_config(config_path)

    # Provider block written under models.providers.<provider_id>.
    provider_block = {
        "baseUrl": base_url,
        "apiKey": api_key,
        "api": template["api_type"],
        "models": model_configs
    }

    # Ensure the models.providers structure exists, then upsert.
    if "models" not in config:
        config["models"] = {}
    if "providers" not in config["models"]:
        config["models"]["providers"] = {}
    
    config["models"]["providers"][provider_id] = provider_block

    # Update agent defaults to point at the new primary model.
    if "agents" not in config:
        config["agents"] = {}
    if "defaults" not in config["agents"]:
        config["agents"]["defaults"] = {}
    
    defaults = config["agents"]["defaults"]
    defaults["model"] = {"primary": f"{provider_id}/{selected_model_id}"}
    
    # Rebuild this provider's model mappings from scratch.
    if "models" not in defaults:
        defaults["models"] = {}
    
    prefix = f"{provider_id}/"
    # Drop stale mappings from a previous run for the same provider.
    keys_to_del = [k for k in defaults["models"].keys() if k.startswith(prefix)]
    for k in keys_to_del:
        del defaults["models"][k]
    
    # Add fresh mappings for each configured model.
    for mc in model_configs:
        key = f"{prefix}{mc['id']}"
        # The primary model gets an alias marker (optional; depends on how
        # OpenClaw interprets this field — TODO confirm against OpenClaw docs).
        defaults["models"][key] = {"alias": provider_id} if mc["id"] == selected_model_id else {}

    if save_config(config, config_path):
        print(f"\n✅ 配置成功!")
    else:
        print("\n❌ 保存失败。")
        sys.exit(1)

    # 9. Summary.
    print("\n" + "=" * 70)
    print("📋 配置摘要")
    print("=" * 70)
    print(f"✓ 提供商 ID: {provider_id}")
    print(f"✓ 端点: {base_url}")
    print(f"✓ 已配置模型数: {len(model_configs)}")
    print(f"✓ 主模型: {provider_id}/{selected_model_id}")
    print(f"\n🚀 下一步操作:")
    print(f"   1. 验证配置: openclaw gateway --check (如果支持)")
    print(f"   2. 启动网关: openclaw gateway")
    print(f"   3. 测试对话: openclaw agent --agent main -m \"Hello from cloud\" --new")
    print("=" * 70)

if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C: exit with a cancellation notice instead of a traceback.
        print("\n\n❌ 用户取消操作")
        sys.exit(1)
    except Exception as e:
        # Top-level boundary: report and show the full traceback for debugging.
        print(f"\n❌ 发生未捕获错误: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)

运行方式

chmod +x setup_cloud_models.py
./setup_cloud_models.py