本篇文章主要讲解,本地部署安装OpenClaw的详细操作方法,从0-1完整安装到自己的本地电脑中。 日期:2026年3月9日 作者:任聪聪
准备材料
三方模型接口
说明:可以选择官方模型的api服务,也可以像聪哥一样选择较为简单的聚合平台进行部署。
这里聪哥使用的是x-aio聚合大模型api平台,比官方的稍微要方便和低价一些。
注册地址:dashboard.x-aio.com/zh/register…
一台笔记本
配置几乎没有特别要求,最低 2 核 CPU、2GB 内存、16GB 存储即可完成部署并流畅运行。
安装node.js 22版本或nvm进行安装
node.js地址:nodejs.org/
nvm下载地址【推荐】:github.com/coreybutler…
备注:安装都是默认下一步即可,无需其他操作。
安装python
通过官网下载win系统安装包,默认下一步即可完成安装。
python官网:www.python.org/
安装OpenClaw
步骤一、通过nvm安装node指定版本
nvm install 22.12
步骤二、使用npm进行安装
npm install -g openclaw
步骤三、创建openclaw的工作区
mkdir D:\AppData\openclaw
然后切换到这个目录:
cd /d D:\AppData\openclaw
步骤四、进入到工作区后初始化openclaw
openclaw onboard 启动向导
第一和第二个选项选择如下:
备注:按左右按键可切换选项,按enter确认。
进入到模型选择选项后,按上下按键找到如下的跳过选项。
选择跳过后进入到下一项,选择第一个如下图:
选择后下一项,选择如下:
选择后,进入如下界面后继续选择跳过:
继续下一个,选择跳过:
注意:到这里选择yes,使用skill安装方式:
继续下一个,继续跳过:
紧接着几项,全选NO:
进入钩子部分选择如下内容:
步骤五、安装x-aio聚合模型服务
下载部署脚本
curl -o openclaw_setup.py https://cdn.x-aio.com/X-AIO/tools/openclaw_setup.py
注意:需要将下载的脚本放在我们的工作区目录内。
脚本内容(备份,如果无法下载情况请本地复制创建此文件内容执行):
#!/usr/bin/env python3
"""
Moltbot Provider 配置助手 - X-AIO
支持动态模型列表、自动验证、Embedding 配置
"""
import json
import os
import shutil
import subprocess
import sys
import urllib.request
import urllib.error
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple
# Root of the X-AIO code API; all service endpoints below derive from it.
BASE_URL = "https://code-api.x-aio.com"
# OpenAI-compatible chat-completions endpoint (".../v1").
OPENAI_LLM_BASE_URL = f"{BASE_URL}/v1"
# Anthropic-compatible endpoint (declared but not used below in this script).
ANTHROPIC_LLM_BASE_URL = f"{BASE_URL}/anthropic"
# Embeddings are served from the same OpenAI-compatible path.
EMBEDDING_BASE_URL = f"{BASE_URL}/v1"
# Dashboard endpoint returning per-model metadata (context size, tags, ratios).
DASHBOARD_API_URL = "https://dashboard.x-aio.com/api/index_view/code_plan_model_list"
# User-Agent header sent with every HTTP request made by this script.
X_AIO_USER_AGENT = "X-AIO-OpenClaw-Setup/0.0.1"
def print_banner():
    """Print the startup banner for the setup helper."""
    bar = "=" * 70
    for line in (bar, "🔧 Moltbot Provider 配置助手", bar, ""):
        print(line)
def get_config_path() -> str:
    """Resolve the OpenClaw config file path.

    Priority: the OPENCLAW_CONFIG_PATH environment variable, then the
    first existing known location, then the default
    (~/.openclaw/openclaw.json) even when it does not exist yet.
    """
    candidates = [
        os.path.expanduser(p)
        for p in (
            "~/.openclaw/openclaw.json",
            "~/.moltbot/moltbot.json",
            "~/.clawdbot/clawdbot.json",
        )
    ]
    override = os.environ.get("OPENCLAW_CONFIG_PATH")
    if override:
        return override
    existing = next((c for c in candidates if os.path.exists(c)), None)
    return existing if existing is not None else candidates[0]
def load_config(path: str) -> Dict[str, Any]:
    """Load the JSON config at *path*; return {} when the file is missing.

    Exits the process (status 1) on malformed JSON or any read failure.
    """
    if not os.path.exists(path):
        print(f"⚠️ 配置文件不存在: {path}")
        print("将创建新配置...")
        return {}
    try:
        with open(path, "r", encoding="utf-8") as fh:
            return json.load(fh)
    except json.JSONDecodeError as exc:
        print(f"❌ 配置文件 JSON 格式错误: {exc}")
        sys.exit(1)
    except Exception as exc:
        print(f"❌ 读取配置文件失败: {exc}")
        sys.exit(1)
def backup_config(path: str) -> str:
    """Copy *path* to a timestamped ".backup.*" sibling and return its path.

    When *path* does not exist nothing is copied, but the would-be backup
    path is still returned (callers may ignore it).
    """
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    target = f"{path}.backup.{stamp}"
    if os.path.exists(path):
        shutil.copy2(path, target)
        print(f"✅ 已创建备份: {target}")
    return target
def save_config(config: Dict, path: str) -> bool:
    """Serialize *config* as pretty-printed UTF-8 JSON to *path*.

    Creates missing parent directories. Returns True on success, False
    (after printing the error) on any failure.
    """
    try:
        parent = os.path.dirname(path)
        # Bug fix: os.makedirs("") raises FileNotFoundError, so only create
        # the parent when the path actually has a directory component.
        if parent:
            os.makedirs(parent, exist_ok=True)
        with open(path, "w", encoding="utf-8") as f:
            json.dump(config, f, indent=2, ensure_ascii=False)
        return True
    except Exception as e:
        print(f"❌ 保存配置失败: {e}")
        return False
def input_with_default(prompt: str, default: str = "") -> str:
    """Prompt the user; return the stripped reply, or *default* when empty."""
    if not default:
        return input(f"{prompt}: ").strip()
    reply = input(f"{prompt} [{default}]: ").strip()
    return reply or default
def input_required(prompt: str) -> str:
    """Keep prompting until the user enters a non-empty value."""
    while not (answer := input(f"{prompt} (必填): ").strip()):
        print("⚠️ 此项不能为空")
    return answer
def confirm(prompt: str, default: bool = True) -> bool:
    """Ask a yes/no question on stdin; an empty reply returns *default*."""
    hint = "Y/n" if default else "y/N"
    answers = {"y": True, "yes": True, "n": False, "no": False}
    while True:
        reply = input(f"{prompt} [{hint}]: ").strip().lower()
        if not reply:
            return default
        if reply in answers:
            return answers[reply]
        print("⚠️ 请输入 y 或 n")
def fetch_model_details(api_key: str) -> Tuple[List[Dict], Optional[str]]:
    """Fetch per-model metadata (context size, tags) from the Dashboard API.

    Returns (models, error): on success *models* is a list of normalized
    model dicts sorted by id and *error* is None; on failure *models* is
    empty and *error* is a human-readable message.
    """
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
        "User-Agent": X_AIO_USER_AGENT,
    }
    request = urllib.request.Request(DASHBOARD_API_URL, headers=headers, method="GET")
    try:
        with urllib.request.urlopen(request, timeout=30) as response:
            data = json.loads(response.read().decode("utf-8"))
        if not isinstance(data, dict):
            return [], "返回格式不符合预期(应为 JSON 对象)"
        # NOTE: this API encodes the status code as a *string*, not an int.
        if data.get("code") != "200":
            return [], f"API 返回错误: {data.get('message', '未知错误')}"
        models_data = data.get("data", [])
        if not isinstance(models_data, list):
            return [], "返回格式不符合预期(data 字段应为数组)"
        valid_models = []
        for model in models_data:
            if not (isinstance(model, dict) and model.get("real_model_name")):
                continue
            model_id = model["real_model_name"]
            # "tags" may be null or a list; `or []` normalizes both.
            # (A subsequent `if tags is None` branch in the original was
            # unreachable after this and has been removed.)
            tags = model.get("tags") or []
            # The Dashboard reports context in thousands of tokens.
            context = model.get("context", 128)
            try:
                context_window = int(float(context)) * 1000
            except (ValueError, TypeError):
                context_window = 128000
            valid_models.append(
                {
                    "id": model_id,
                    "contextWindow": context_window,
                    # Tag "视觉" marks vision support; "推理"/"交替推理"
                    # mark reasoning support.
                    "supportsVision": "视觉" in tags,
                    "supportsReasoning": "推理" in tags or "交替推理" in tags,
                    "tags": tags,
                    "upstreamType": model.get("upstream_type", "openai"),
                    "isAvailable": model.get("is_available", "true"),
                    "ratio": model.get("code_plan_calc_call_count_ratio", "1.0"),
                }
            )
        # Deterministic ordering for the interactive picker.
        valid_models.sort(key=lambda x: x["id"])
        return valid_models, None
    except urllib.error.HTTPError as e:
        if e.code == 401:
            return [], "API Key 无效或已过期"
        if e.code == 404:
            return [], "API 端点不存在"
        return [], f"HTTP 错误 {e.code}: {e.reason}"
    except urllib.error.URLError as e:
        return [], f"连接失败: {e.reason}"
    except Exception as e:
        return [], f"请求失败: {str(e)}"
def fetch_models(base_url: str, api_key: str) -> Tuple[List[Dict], Optional[str]]:
    """List models from the OpenAI-compatible `{base_url}/models` endpoint.

    Accepts both the OpenAI envelope ({"data": [...]}) and a bare list.
    Returns (models, error): sorted model dicts with error None on
    success; empty list plus a message on failure.
    """
    models_url = f"{base_url.rstrip('/')}/models"
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
        "User-Agent": X_AIO_USER_AGENT,
    }
    request = urllib.request.Request(models_url, headers=headers, method="GET")
    try:
        with urllib.request.urlopen(request, timeout=30) as response:
            data = json.loads(response.read().decode("utf-8"))
        if isinstance(data, dict) and "data" in data:
            models = data["data"]
        elif isinstance(data, list):
            models = data
        else:
            return [], "返回格式不符合预期"
        valid_models = [
            {
                "id": model["id"],
                "object": model.get("object", "model"),
                "created": model.get("created"),
                "owned_by": model.get("owned_by", "unknown"),
            }
            for model in models
            if isinstance(model, dict) and "id" in model
        ]
        valid_models.sort(key=lambda x: x["id"])
        return valid_models, None
    except urllib.error.HTTPError as e:
        if e.code == 401:
            return [], "API Key 无效或已过期"
        if e.code == 404:
            # Fix: was an f-string with no placeholders.
            return [], "API 端点不存在"
        return [], f"HTTP 错误 {e.code}: {e.reason}"
    except urllib.error.URLError as e:
        return [], f"连接失败: {e.reason}"
    except Exception as e:
        return [], f"请求失败: {str(e)}"
def select_from_list(
    items: List[Dict], title: str, id_key: str = "id"
) -> Optional[Dict]:
    """Interactive paginated picker.

    Prints *items* 20 per page and reads choices from stdin until the
    user enters a valid index (that item is returned) or "q" (returns
    None). "n"/"p" page forward/back. Returns None for an empty list.
    """
    if not items:
        return None
    print(f"\n📋 {title} ({len(items)} 个选项):\n")
    page_size = 20
    total_pages = (len(items) + page_size - 1) // page_size  # ceiling division
    current_page = 0
    while True:
        start = current_page * page_size
        end = min(start + page_size, len(items))
        print(f"--- 第 {current_page + 1}/{total_pages} 页 ---")
        for i, item in enumerate(items[start:end], start=start):
            item_id = item.get(id_key, "unknown")
            owned_by = item.get("owned_by", "")
            # Only show the owner hint when it carries real information.
            hint = f" by: {owned_by}" if owned_by and owned_by != "unknown" else ""
            print(f" [{i}] {item_id}{hint}")
        print()
        if total_pages > 1:
            prompt = f"选择 (0-{len(items) - 1}, n=下一页, p=上一页, q=退出): "
        else:
            prompt = f"选择 (0-{len(items) - 1}, q=退出): "
        choice = input(prompt).strip().lower()
        if choice == "q":
            return None
        elif choice == "n" and current_page < total_pages - 1:
            current_page += 1
            continue
        elif choice == "p" and current_page > 0:
            current_page -= 1
            continue
        try:
            index = int(choice)
            if 0 <= index < len(items):
                return items[index]
            else:
                print(f"⚠️ 请输入 0-{len(items) - 1} 之间的数字")
        except ValueError:
            print("⚠️ 无效输入")
def select_from_list_with_default(
    items: List[Dict], title: str, default_id: str, id_key: str = "id"
) -> Optional[Dict]:
    """Paginated interactive picker with an Enter-for-default shortcut.

    Empty input returns the default item; "q" returns None; "n"/"p" page
    forward/back; a number picks that index. When *default_id* is not in
    *items*, the first item becomes the default. Returns None for an
    empty list.
    """
    if not items:
        return None
    # Locate the index of the requested default item.
    default_index = -1
    for i, item in enumerate(items):
        if item.get(id_key) == default_id:
            default_index = i
            break
    if default_index < 0:
        # Requested default is absent: fall back to the first item.
        default_index = 0
        default_id = items[0].get(id_key, "unknown")
    print(f"\n📋 {title} ({len(items)} 个选项, 默认: {default_id}):")
    print(f"提示: 直接回车选择默认值,或输入数字选择其他选项\n")
    page_size = 20
    total_pages = (len(items) + page_size - 1) // page_size  # ceiling division
    # Open on the page that contains the default item.
    current_page = default_index // page_size
    while True:
        start = current_page * page_size
        end = min(start + page_size, len(items))
        print(f"--- 第 {current_page + 1}/{total_pages} 页 ---")
        for i, item in enumerate(items[start:end], start=start):
            item_id = item.get(id_key, "unknown")
            marker = " [默认]" if item_id == default_id else ""
            print(f" [{i}] {item_id}{marker}")
        print()
        if total_pages > 1:
            prompt = (
                f"选择 (0-{len(items) - 1}, n=下一页, p=上一页, 回车=默认, q=退出): "
            )
        else:
            prompt = f"选择 (0-{len(items) - 1}, 回车=默认, q=退出): "
        choice = input(prompt).strip().lower()
        if choice == "":
            # Plain Enter selects the default item.
            return items[default_index]
        elif choice == "q":
            return None
        elif choice == "n" and current_page < total_pages - 1:
            current_page += 1
            continue
        elif choice == "p" and current_page > 0:
            current_page -= 1
            continue
        try:
            index = int(choice)
            if 0 <= index < len(items):
                return items[index]
            else:
                print(f"⚠️ 请输入 0-{len(items) - 1} 之间的数字")
        except ValueError:
            print("⚠️ 无效输入")
def configure_model_details(model_id: str, model_details: Dict) -> Dict:
    """Build the per-model parameter dict for *model_id*.

    Context window / vision / reasoning come from the Dashboard metadata
    in *model_details*; max output tokens comes from the
    X_AIO_MAX_OUTPUT_TOKENS env var when set to a digit string, otherwise
    the user is prompted (default 16384). Cost fields are zeroed.
    """
    print(f"\n⚙️ 配置模型 '{model_id}' 参数:\n")
    context_window = model_details.get("contextWindow", 128000)
    supports_vision = model_details.get("supportsVision", False)
    supports_reasoning = model_details.get("supportsReasoning", False)
    # Max output tokens: environment variable wins over the prompt.
    env_max_tokens = os.environ.get("X_AIO_MAX_OUTPUT_TOKENS")
    if env_max_tokens and env_max_tokens.isdigit():
        max_tokens = int(env_max_tokens)
        auto_set = True
    else:
        max_tokens = 16384
        auto_set = False
    print("📊 从 Dashboard API 自动获取的模型配置:")
    print(f" 上下文窗口: {context_window:,} tokens")
    print(f" 视觉支持: {'是' if supports_vision else '否'}")
    print(f" 推理支持: {'是' if supports_reasoning else '否'}")
    tags = model_details.get("tags", [])
    if tags:
        print(f" 标签: {', '.join(tags)}")
    print()
    if auto_set:
        print(
            f"✅ 已从环境变量 X_AIO_MAX_OUTPUT_TOKENS 设置最大输出 tokens: {max_tokens}"
        )
    else:
        # Interactive fallback; non-numeric input keeps the default.
        max_tok = input(f"最大输出 tokens [{max_tokens}]: ").strip()
        max_tokens = int(max_tok) if max_tok.isdigit() else max_tokens
    input_types = ["text"]
    if supports_vision:
        input_types.append("image")
    return {
        "contextWindow": context_window,
        "maxTokens": max_tokens,
        "input": input_types,
        "reasoning": supports_reasoning,
        "costInput": 0,
        "costOutput": 0,
    }
def build_model_config(model: Dict, max_tokens: int = 16384) -> Dict:
    """Translate one normalized model dict into an OpenClaw model entry."""
    model_id = model["id"]
    # Input modalities: text always, image only for vision-capable models.
    modalities = ["text"] + (["image"] if model.get("supportsVision", False) else [])
    entry = {
        "id": model_id,
        "name": model_id,
        "reasoning": model.get("supportsReasoning", False),
        "input": modalities,
        "cost": {
            "input": 0,
            "output": 0,
            "cacheRead": 0,
            "cacheWrite": 0,
        },
        "contextWindow": model.get("contextWindow", 128000),
        "maxTokens": max_tokens,
    }
    # Models whose id starts with "XAIO-O" speak the OpenAI Responses API;
    # every other model needs the developer-role compatibility shim disabled.
    if model_id.startswith("XAIO-O"):
        entry["api"] = "openai-responses"
    else:
        entry["compat"] = {"supportsDeveloperRole": False}
    return entry
def add_provider_to_config(
    config: Dict, provider_info: Dict, all_models: List[Dict], max_tokens: int = 16384
) -> Dict:
    """Insert (or overwrite) the provider entry, with every model, in *config*.

    When the provider already exists: overwrite silently if the
    X_AIO_OPENCLAW_CONF_OVERRIDE env var is set (and not "0"), otherwise
    ask the user; a refusal returns *config* untouched.
    """
    providers = config.setdefault("models", {}).setdefault("providers", {})
    provider_id = provider_info["id"]
    if provider_id in providers:
        print(f"\n⚠️ Provider '{provider_id}' 已存在")
        # Non-interactive override via environment variable.
        env_override = os.environ.get("X_AIO_OPENCLAW_CONF_OVERRIDE")
        if env_override and env_override != "0":
            print(f"✅ 环境变量 X_AIO_OPENCLAW_CONF_OVERRIDE 已设置,直接覆盖现有配置")
        elif not confirm("是否覆盖现有配置?", default=False):
            print("❌ 操作已取消")
            return config
    provider_config = {
        "baseUrl": OPENAI_LLM_BASE_URL,
        "apiKey": provider_info["apiKey"],
        "api": "openai-completions",
        "models": [build_model_config(m, max_tokens) for m in all_models],
    }
    if provider_info.get("headers"):
        provider_config["headers"] = provider_info["headers"]
    providers[provider_id] = provider_config
    return config
def set_default_model(
    config: Dict, provider_id: str, model_id: str, all_models: List[Dict]
) -> Dict:
    """Make provider_id/model_id the default agent model.

    Sets agents.defaults.model.primary (no fallbacks), then rebuilds the
    agents.defaults.models map for this provider: every previous entry
    under "{provider_id}/" is dropped and replaced by the non-embedding
    models from *all_models*. The primary entry gets an alias; the rest
    get empty dicts.
    """
    defaults = config.setdefault("agents", {}).setdefault("defaults", {})
    primary_key = f"{provider_id}/{model_id}"
    defaults["model"] = {"primary": primary_key}
    model_map = defaults.setdefault("models", {})
    # Full overwrite: purge every stale entry belonging to this provider.
    prefix = f"{provider_id}/"
    for stale in [k for k in model_map if k.startswith(prefix)]:
        del model_map[stale]
    # Only LLM models are listed here; embedding models are configured
    # separately (memorySearch).
    for entry in all_models:
        if is_embedding_model(entry["id"]):
            continue
        key = f"{provider_id}/{entry['id']}"
        model_map[key] = {"alias": provider_id.lower()} if key == primary_key else {}
    return config
def is_embedding_model(model_id: str) -> bool:
    """Heuristically decide whether *model_id* names an embedding model.

    Case-insensitive substring match against well-known embedding-family
    keywords (OpenAI, BGE, M3E, GTE, E5, Nomic, Voyage, Cohere, ...).
    """
    needle = model_id.lower()
    keywords = (
        "embed",
        "embedding",
        "bge",
        "m3e",
        "text-embedding",
        "gte-",
        "e5-",
        "nomic-embed",
        "instructor",
        "minilm",
        "sentence-transformers",
        "baai",
        "voyage",
        "cohere-embed",
    )
    for keyword in keywords:
        if keyword in needle:
            return True
    return False
def configure_embedding(config: Dict, api_key: str, all_models: List[Dict]) -> Dict:
    """Select an embedding model and wire it into agents.defaults.memorySearch.

    Selection order: the X_AIO_EMBEDDING_MODEL env var if it names an
    available model, otherwise an interactive picker defaulting to
    "Qwen3-VL-Embedding-8B". Selection is mandatory: the picker repeats
    until something is chosen. Batch mode is always disabled.
    """
    print("\n📚 从模型列表中选择 Embedding 模型:")
    embedding_models = [m for m in all_models if is_embedding_model(m["id"])]
    if not embedding_models:
        # No dedicated embedding model detected: fall back to the full list.
        print("⚠️ 未检测到专用 Embedding 模型,显示所有模型:")
        embedding_models = all_models
    else:
        print(f"✅ 已筛选出 {len(embedding_models)} 个 Embedding 模型")
    # Environment-variable override, with interactive fallback when unusable.
    env_embedding_model = os.environ.get("X_AIO_EMBEDDING_MODEL")
    default_embedding_model = "Qwen3-VL-Embedding-8B"
    if env_embedding_model:
        selected = next(
            (m for m in embedding_models if m["id"] == env_embedding_model), None
        )
        if selected:
            print(
                f"\n✅ 已从环境变量 X_AIO_EMBEDDING_MODEL 设置 Embedding 模型: {selected['id']}"
            )
        else:
            print(
                f"\n⚠️ 环境变量 X_AIO_EMBEDDING_MODEL 指定的模型 '{env_embedding_model}' 不可用"
            )
            print(f"📋 选择 Embedding 模型 (直接回车默认: {default_embedding_model}):")
            selected = select_from_list_with_default(
                embedding_models, "选择 Embedding 模型", default_embedding_model
            )
    else:
        print(f"\n📋 选择 Embedding 模型 (直接回车默认: {default_embedding_model}):")
        selected = select_from_list_with_default(
            embedding_models, "选择 Embedding 模型", default_embedding_model
        )
    # Embedding is mandatory: keep asking until something is chosen.
    while not selected:
        print("⚠️ Embedding 模型为必选项,请选择一个模型")
        selected = select_from_list_with_default(
            embedding_models, "选择 Embedding 模型", default_embedding_model
        )
    memory_config: Dict[str, Any] = {
        "enabled": True,
        "provider": "openai",
        "model": selected["id"],
        "remote": {
            "baseUrl": EMBEDDING_BASE_URL,
            "apiKey": api_key,
            # Batch mode is disabled to avoid known incompatibilities.
            "batch": {"enabled": False},
        },
    }
    print(f"✅ Embedding 模型已设置为: {selected['id']}")
    print("✅ Batch 模式已自动禁用(避免不兼容问题)")
    if "agents" not in config:
        config["agents"] = {}
    if "defaults" not in config["agents"]:
        config["agents"]["defaults"] = {}
    config["agents"]["defaults"]["memorySearch"] = memory_config
    return config
def add_auth_profile(config: Dict, provider_id: str) -> Dict:
    """Ensure an api_key auth profile "{provider_id}:default" exists in *config*."""
    profiles = config.setdefault("auth", {}).setdefault("profiles", {})
    profile_id = f"{provider_id}:default"
    if profile_id not in profiles:
        profiles[profile_id] = {
            "provider": provider_id,
            "mode": "api_key",
        }
        print(f"✅ 已添加 auth profile: {profile_id}")
    return config
def create_auth_profiles_json(agent_dir: str, provider_id: str, api_key: str) -> bool:
    """Write <agent_dir>/auth-profiles.json holding the provider's API key.

    The file is chmod'ed to 0o600 because it contains a secret. Returns
    True on success, False (after printing a warning) on any failure.
    """
    try:
        os.makedirs(agent_dir, exist_ok=True)
        auth_file = os.path.join(agent_dir, "auth-profiles.json")
        payload = {
            "version": 1,
            "profiles": {
                f"{provider_id}:default": {
                    "type": "api_key",
                    "provider": provider_id,
                    "key": api_key,
                    # Millisecond epoch timestamp, matching OpenClaw's format.
                    "createdAt": int(datetime.now().timestamp() * 1000),
                }
            },
            "order": {},
            "lastGood": {},
            "usageStats": {},
        }
        with open(auth_file, "w", encoding="utf-8") as fh:
            json.dump(payload, fh, indent=2)
        # Owner-only permissions: the file holds the API key.
        os.chmod(auth_file, 0o600)
        return True
    except Exception as exc:
        print(f"⚠️ 创建 auth-profiles.json 失败: {exc}")
        return False
def verify_config(config_path: str) -> bool:
    """Sanity-check the saved configuration file.

    Prints errors and warnings; returns True when there are no errors
    (warnings alone still pass), False on errors or unreadable JSON.
    """
    print("\n🔍 验证配置...")
    try:
        with open(config_path, "r", encoding="utf-8") as fh:
            config = json.load(fh)
        errors: List[str] = []
        warnings: List[str] = []
        # Required top-level sections.
        if "providers" not in config.get("models", {}):
            errors.append("缺少 models.providers 配置")
        if "defaults" not in config.get("agents", {}):
            errors.append("缺少 agents.defaults 配置")
        if "profiles" not in config.get("auth", {}):
            errors.append("缺少 auth.profiles 配置")
        # Every provider should have at least one matching auth profile.
        providers = config.get("models", {}).get("providers", {})
        auth_profiles = config.get("auth", {}).get("profiles", {})
        for provider_id in providers:
            if not any(p.startswith(f"{provider_id}:") for p in auth_profiles):
                warnings.append(f"Provider '{provider_id}' 没有对应的 auth profile")
        # The default model (if any) must reference a known provider.
        default_model = (
            config.get("agents", {})
            .get("defaults", {})
            .get("model", {})
            .get("primary", "")
        )
        if not default_model:
            warnings.append("没有设置默认模型")
        else:
            provider_part = default_model.split("/")[0] if "/" in default_model else ""
            if provider_part and provider_part not in providers:
                errors.append(f"默认模型引用了不存在的 provider: {provider_part}")
        if errors:
            print("❌ 配置错误:")
            for error in errors:
                print(f" - {error}")
            return False
        if warnings:
            print("⚠️ 配置警告:")
            for warning in warnings:
                print(f" - {warning}")
        else:
            print("✅ 配置验证通过")
        return True
    except json.JSONDecodeError as exc:
        print(f"❌ JSON 格式错误: {exc}")
        return False
    except Exception as exc:
        print(f"❌ 验证失败: {exc}")
        return False
def test_api_connection(base_url: str, api_key: str, model_id: str) -> bool:
    """Fire a tiny chat completion at the API to prove key and model work.

    Returns True on HTTP 200, False otherwise (failures are printed).
    """
    print(f"\n🧪 测试 API 连接...")
    try:
        endpoint = f"{base_url.rstrip('/')}/v1/chat/completions"
        body = json.dumps(
            {
                "model": model_id,
                "messages": [{"role": "user", "content": "Hi"}],
                "max_tokens": 10,
            }
        ).encode("utf-8")
        req = urllib.request.Request(
            endpoint,
            data=body,
            headers={
                "Authorization": f"Bearer {api_key}",
                "Content-Type": "application/json",
                "User-Agent": X_AIO_USER_AGENT,
            },
            method="POST",
        )
        with urllib.request.urlopen(req, timeout=30) as response:
            status = response.status
        if status == 200:
            print("✅ API 连接测试通过")
            return True
        print(f"⚠️ API 返回状态码: {status}")
        return False
    except urllib.error.HTTPError as e:
        print(f"❌ API 测试失败: HTTP {e.code} - {e.reason}")
        return False
    except Exception as e:
        print(f"❌ API 测试失败: {e}")
        return False
def print_summary(config: Dict, provider_info: Dict):
    """Print a human-readable recap of what was configured."""
    bar = "=" * 70
    print("\n" + bar)
    print("📋 配置摘要")
    print(bar)
    provider_id = provider_info["id"]
    print(f"\n✓ Provider: {provider_id}")
    print(f"✓ Base URL: {OPENAI_LLM_BASE_URL}")
    print(f"✓ API 格式: openai-completions")
    print(f"✓ 主模型: {provider_id}/{provider_info['modelId']}")
    print(f"✓ 上下文窗口: {provider_info.get('contextWindow', 128000)} tokens")
    print(f"✓ 最大输出: {provider_info.get('maxTokens', 16384)} tokens")
    memory = config.get("agents", {}).get("defaults", {}).get("memorySearch")
    if memory and memory.get("enabled"):
        print(f"✓ 记忆搜索: 已启用 ({memory.get('model', '未设置')})")
    profiles = config.get("auth", {}).get("profiles", {})
    if profiles:
        print(f"✓ Auth Profiles: {', '.join(profiles.keys())}")
    print("\n" + bar)
def main():
    """Interactive entry point: configure the x-aio provider end to end.

    Flow: locate + load + back up the config, obtain the API key, fetch
    the model list (with a manual fallback), pick a main model, write
    provider / default-model / auth / embedding sections, save, verify,
    and smoke-test the API.
    """
    try:
        print_banner()
        # Resolve the config file location.
        config_path = get_config_path()
        print(f"📁 配置文件路径: {config_path}\n")
        # Load the existing configuration (empty dict if absent).
        config = load_config(config_path)
        # Create a timestamped backup before touching anything.
        # NOTE(review): backup_path is never used afterwards.
        backup_path = backup_config(config_path)
        print(f"🔗 Provider: x-aio")
        print(f"🔗 LLM 端点: {OPENAI_LLM_BASE_URL} (OpenAI API)")
        print(f"🔗 Embedding 端点: {EMBEDDING_BASE_URL} (OpenAI API)\n")
        # API key: environment variable takes precedence over the prompt.
        env_api_key = os.environ.get("X_AIO_API_KEY")
        if env_api_key:
            api_key = env_api_key
            print(f"✅ 已从环境变量 X_AIO_API_KEY 读取 API Key")
        else:
            api_key = input_required("API Key")
        print(f"\n🔄 正在获取可用模型列表...")
        # 1. Ask /v1/models which models this key can actually use.
        available_models, error1 = fetch_models(OPENAI_LLM_BASE_URL, api_key)
        if error1:
            print(f"❌ 获取可用模型列表失败: {error1}")
            print("\n将切换到手动配置模式...")
            model_id = input_required("模型 ID (如: gpt-4, MiniMax-M2.1)")
            model_name = input(f"模型显示名称 [{model_id}]: ").strip() or model_id
            selected_model = {"id": model_id, "name": model_name, "owned_by": "manual"}
            # Manual mode falls back to default parameters.
            model_detail_info = {
                "contextWindow": 128000,
                "supportsVision": False,
                "supportsReasoning": False,
                "tags": [],
            }
            models = []
            llm_models = []
            embedding_models_list = []
        else:
            print(f"✅ 从 API 获取到 {len(available_models)} 个可用模型")
            # 2. Fetch detailed per-model metadata from the Dashboard API.
            print(f"🔄 正在从 Dashboard API 获取模型详细参数...")
            dashboard_models, error2 = fetch_model_details(api_key)
            if error2:
                print(f"⚠️ 获取模型详细参数失败: {error2}")
                print(" 将使用 API 返回的基础模型信息")
                # Fall back to basic info with default capabilities.
                models = [
                    {
                        "id": m["id"],
                        "contextWindow": 128000,
                        "supportsVision": False,
                        "supportsReasoning": False,
                        "tags": [],
                    }
                    for m in available_models
                ]
            else:
                # 3. Cross-match: keep only usable models, enriched with details.
                available_ids = {m["id"] for m in available_models}
                models = [m for m in dashboard_models if m["id"] in available_ids]
                print(
                    f"✅ 成功匹配 {len(models)} 个模型(Dashboard 共 {len(dashboard_models)} 个)"
                )
            # Split LLM models from embedding models.
            llm_models = [m for m in models if not is_embedding_model(m["id"])]
            embedding_models_list = [m for m in models if is_embedding_model(m["id"])]
            print(f" LLM 模型: {len(llm_models)} 个")
            print(f" Embedding 模型: {len(embedding_models_list)} 个")
            # Main model: environment variable, else interactive with default.
            env_main_model = os.environ.get("X_AIO_MAIN_MODEL")
            default_main_model = "Kimi-K2.5"
            if env_main_model:
                selected_model = next(
                    (m for m in llm_models if m["id"] == env_main_model), None
                )
                if selected_model:
                    print(
                        f"\n✅ 已从环境变量 X_AIO_MAIN_MODEL 设置主模型: {selected_model['id']}"
                    )
                else:
                    print(
                        f"\n⚠️ 环境变量 X_AIO_MAIN_MODEL 指定的模型 '{env_main_model}' 不可用"
                    )
                    selected_model = select_from_list(llm_models, "选择主模型")
            else:
                # Interactive selection with a default option shown.
                print(f"\n📋 选择主模型 (直接回车默认: {default_main_model}):")
                selected_model = select_from_list_with_default(
                    llm_models, "选择主模型", default_main_model
                )
            if not selected_model:
                print("❌ 未选择模型,退出")
                return
            print(f"\n✅ 已选择主模型: {selected_model['id']}")
            model_detail_info = selected_model
        provider_id = "x-aio"
        print(f"\n✓ Provider ID: {provider_id}")
        # Derive per-model parameters (context window, max tokens, ...).
        model_details = configure_model_details(selected_model["id"], model_detail_info)
        provider_info = {
            "id": provider_id,
            "apiKey": api_key,
            "modelId": selected_model["id"],
            "modelName": selected_model.get("name", selected_model["id"]),
            **model_details,
        }
        # Register the provider with its full model list.
        print(f"\n📝 正在添加 Provider: {provider_id}")
        print(f" 主模型: {selected_model['id']}")
        print(f" 共包含 {len(models) if models else 1} 个模型")
        config = add_provider_to_config(
            config,
            provider_info,
            models if models else [selected_model],
            model_details.get("maxTokens", 16384),
        )
        print(f"✅ 已添加 {len(models) if models else 1} 个模型到配置")
        # Make the chosen model the agent default.
        config = set_default_model(
            config,
            provider_id,
            selected_model["id"],
            models if models else [selected_model],
        )
        print(f"✅ 已设置默认模型: {provider_id}/{selected_model['id']}")
        print(f"✅ 已添加 {len(models) if models else 1} 个模型到 fallbacks 和 models")
        # Register the auth profile reference in the config.
        config = add_auth_profile(config, provider_id)
        # Embedding configuration is mandatory.
        config = configure_embedding(
            config, api_key, embedding_models_list if embedding_models_list else []
        )
        # Persist everything.
        if not save_config(config, config_path):
            return
        print(f"\n✅ 配置已保存到: {config_path}")
        # Write the secret-bearing auth-profiles.json next to the agent.
        agent_dir = os.path.expanduser("~/.openclaw/agents/main/agent")
        if create_auth_profiles_json(agent_dir, provider_id, api_key):
            print(f"✅ Auth profiles 已创建: {agent_dir}/auth-profiles.json")
        # Validate what was written.
        verify_config(config_path)
        # Smoke-test a real completion (test_api_connection appends /v1 itself).
        test_api_connection(BASE_URL, api_key, selected_model["id"])
        # Final recap.
        print_summary(config, provider_info)
        print("\n" + "=" * 70)
        print("✨ 配置完成!")
        # NOTE(review): numbering starts at 3 — steps 1/2 appear to have
        # been dropped from this help text at some point.
        print("3. 验证配置:")
        print(" openclaw doctor")
        print(" openclaw models list")
        print("\n4. 启动对话:")
        print(' openclaw agent --agent main -m "hello"')
        print("=" * 70)
    except KeyboardInterrupt:
        print("\n\n❌ 操作已取消")
        sys.exit(1)
    except Exception as e:
        print(f"\n❌ 错误: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)
# Run the interactive setup only when executed as a script.
if __name__ == "__main__":
    main()
运行部署脚本
说明:需要安装python环境,如果没有请退回材料准备阶段。
python3 openclaw_setup.py
API Key 获取方式如下:
填写信息及有效期,点击创建即可。
创建完毕后,复制我们的apikey并输入到cmd终端中。
输入密钥:
完成后按下enter进入下一步骤,选择模型及配置参数。
测试是否成功
openclaw agent --agent main -m "你好,世界!"
备注:如果有回复信息,则说明接入成功。
常用命令
# 1. 启动网关 (在新窗口或后台)
openclaw gateway
# 2. 进入聊天界面 (在当前窗口)
openclaw tui
# 3. 检查健康状态
openclaw doctor
# 4. 强制重建记忆索引
openclaw memory reindex
# 5. 确认钩子状态
openclaw hooks list
# (如果 session-memory 没开启,需运行 openclaw onboard 重新勾选)
# 6. 添加渠道
openclaw channels add telegram
# 7. 验证连接
openclaw channels list