diff --git a/agent-runtime/lib/clawhubReconciliation.js b/agent-runtime/lib/clawhubReconciliation.js
new file mode 100644
index 0000000..a8d0b6d
--- /dev/null
+++ b/agent-runtime/lib/clawhubReconciliation.js
@@ -0,0 +1,50 @@
+function normalizeSavedSkillEntry(slug, entry = {}) { // Normalize one saved-skill record into the canonical ClawHub shape; returns null when unusable.
+  const installSlug = String(entry?.installSlug || slug || "").trim(); // Prefer the entry's own installSlug, fall back to the slug argument.
+  if (!installSlug) return null; // No usable slug -> entry cannot be reconciled.
+
+  const author = String(entry?.author || "").trim();
+  const pagePath =
+    String(entry?.pagePath || "").trim() || (author ? `${author}/${installSlug}` : installSlug); // Derive "author/slug" page path when none is stored.
+  const installedAtRaw = String(entry?.installedAt || "").trim();
+  const installedAt =
+    installedAtRaw && !Number.isNaN(new Date(installedAtRaw).getTime())
+      ? new Date(installedAtRaw).toISOString() // Re-serialize a valid timestamp to canonical ISO form.
+      : new Date().toISOString(); // Missing/invalid timestamp -> treat as installed "now".
+
+  return {
+    source: "clawhub", // Tag so callers can distinguish ClawHub-sourced skills.
+    installSlug,
+    author,
+    pagePath,
+    installedAt,
+  };
+}
+
+function normalizeSavedSkillEntries(entries = []) { // Normalize a saved-skill list, dropping unusable entries and duplicates.
+  const deduped = new Map();
+  for (const entry of Array.isArray(entries) ? entries : []) { // Tolerate non-array input by iterating nothing.
+    const normalized = normalizeSavedSkillEntry(entry?.installSlug || entry?.slug, entry);
+    if (!normalized) continue; // Skip entries with no resolvable slug.
+    const key = `${normalized.author}:${normalized.installSlug}`; // Dedupe per (author, installSlug) pair.
+    if (!deduped.has(key)) {
+      deduped.set(key, normalized); // First occurrence wins; later duplicates are dropped.
+    }
+  }
+  return [...deduped.values()];
+}
+
+function computeMissingSavedSkills(savedSkills = [], installedSkills = []) { // Saved skills that are not present in the installed set.
+  const normalizedSaved = normalizeSavedSkillEntries(savedSkills);
+  const installedSlugs = new Set(
+    (Array.isArray(installedSkills) ? installedSkills : []) // Tolerate non-array input.
+      .map((entry) => String(entry?.slug || "").trim())
+      .filter(Boolean) // Drop blank slugs so they never match.
+  );
+  return normalizedSaved.filter((entry) => !installedSlugs.has(entry.installSlug)); // NOTE(review): matches on installSlug only, ignoring author (dedupe above keys on author:slug) — confirm slugs are globally unique.
+}
+
+module.exports = { // Public reconciliation helpers for ClawHub saved skills.
+  computeMissingSavedSkills,
+  normalizeSavedSkillEntries,
+  normalizeSavedSkillEntry,
+};
diff --git a/backend-api/__tests__/agents.test.ts b/backend-api/__tests__/agents.test.ts
index 9e5a814..bad045b 100644
--- a/backend-api/__tests__/agents.test.ts
+++ b/backend-api/__tests__/agents.test.ts
@@ -60,8 +60,14 @@ const mockGetDeploymentDefaults = jest.fn().mockResolvedValue({
disk_gb: 10,
});
jest.mock("../db", () => mockDb);
-jest.mock("../redisQueue", () => ({ addDeploymentJob: mockAddDeploymentJob, getDLQJobs: jest.fn(), retryDLQJob: jest.fn() }));
-jest.mock("../scheduler", () => ({ selectNode: jest.fn().mockResolvedValue({ name: "worker-01" }) }));
+jest.mock("../redisQueue", () => ({
+ addDeploymentJob: mockAddDeploymentJob,
+ getDLQJobs: jest.fn(),
+ retryDLQJob: jest.fn(),
+}));
+jest.mock("../scheduler", () => ({
+ selectNode: jest.fn().mockResolvedValue({ name: "worker-01" }),
+}));
jest.mock("../containerManager", () => ({
start: jest.fn().mockResolvedValue({}),
stop: jest.fn().mockResolvedValue({}),
@@ -214,12 +220,14 @@ jest.mock("../agentFiles", () => ({
const app = require("../server");
-const userToken = jwt.sign({ id: "user-1", email: "user@nora.test", role: "user" }, JWT_SECRET, { expiresIn: "1h" });
+const userToken = jwt.sign({ id: "user-1", email: "user@nora.test", role: "user" }, JWT_SECRET, {
+ expiresIn: "1h",
+});
const auth = (req) => req.set("Authorization", `Bearer ${userToken}`);
function createMockFetchResponse({ ok = true, status = 200, body = {}, headers = {} } = {}) {
const normalizedHeaders = Object.fromEntries(
- Object.entries(headers).map(([key, value]) => [key.toLowerCase(), value])
+ Object.entries(headers).map(([key, value]) => [key.toLowerCase(), value]),
);
const rawBody = typeof body === "string" ? body : JSON.stringify(body);
@@ -344,13 +352,15 @@ describe("GET /agents", () => {
describe("GET /agents/:id", () => {
it("preserves warning status when the container is still live", async () => {
mockDb.query.mockResolvedValueOnce({
- rows: [{
- id: "a-warning",
- name: "Warning Agent",
- status: "warning",
- user_id: "user-1",
- container_id: "container-1",
- }],
+ rows: [
+ {
+ id: "a-warning",
+ name: "Warning Agent",
+ status: "warning",
+ user_id: "user-1",
+ container_id: "container-1",
+ },
+ ],
});
const res = await auth(request(app).get("/agents/a-warning"));
@@ -364,19 +374,23 @@ describe("GET /agents/:id", () => {
containerManager.status.mockResolvedValueOnce({ running: false });
mockDb.query
.mockResolvedValueOnce({
- rows: [{
- id: "a-warning-down",
- name: "Warning Down Agent",
- status: "warning",
- user_id: "user-1",
- container_id: "container-warning-down",
- }],
+ rows: [
+ {
+ id: "a-warning-down",
+ name: "Warning Down Agent",
+ status: "warning",
+ user_id: "user-1",
+ container_id: "container-warning-down",
+ },
+ ],
})
.mockResolvedValueOnce({
- rows: [{
- id: "a-warning-down",
- status: "stopped",
- }],
+ rows: [
+ {
+ id: "a-warning-down",
+ status: "stopped",
+ },
+ ],
});
const res = await auth(request(app).get("/agents/a-warning-down"));
@@ -388,19 +402,23 @@ describe("GET /agents/:id", () => {
it("reconciles stopped agents back to running when the container is live", async () => {
mockDb.query
.mockResolvedValueOnce({
- rows: [{
- id: "a-stopped",
- name: "Stopped Agent",
- status: "stopped",
- user_id: "user-1",
- container_id: "container-2",
- }],
+ rows: [
+ {
+ id: "a-stopped",
+ name: "Stopped Agent",
+ status: "stopped",
+ user_id: "user-1",
+ container_id: "container-2",
+ },
+ ],
})
.mockResolvedValueOnce({
- rows: [{
- id: "a-stopped",
- status: "running",
- }],
+ rows: [
+ {
+ id: "a-stopped",
+ status: "running",
+ },
+ ],
});
const res = await auth(request(app).get("/agents/a-stopped"));
@@ -414,14 +432,16 @@ describe("GET /agents/:id/gateway-url", () => {
it("uses GATEWAY_HOST when returning a published gateway url", async () => {
process.env.GATEWAY_HOST = "gateway.external";
mockDb.query.mockResolvedValueOnce({
- rows: [{
- id: "a-gateway",
- container_id: "container-gateway",
- gateway_token: "gateway-token",
- gateway_host_port: 19123,
- user_id: "user-1",
- status: "running",
- }],
+ rows: [
+ {
+ id: "a-gateway",
+ container_id: "container-gateway",
+ gateway_token: "gateway-token",
+ gateway_host_port: 19123,
+ user_id: "user-1",
+ status: "running",
+ },
+ ],
});
const res = await auth(request(app).get("/agents/a-gateway/gateway-url"));
@@ -436,45 +456,46 @@ describe("GET /agents/:id/gateway-url", () => {
});
it("allows gateway url lookups for warning agents so degraded control-plane recovery still works", async () => {
+ process.env.NEXTAUTH_URL = "http://app.nora.test:8080";
mockDb.query.mockResolvedValueOnce({
- rows: [{
- id: "a-warning-gateway",
- container_id: "container-warning-gateway",
- gateway_host_port: 19123,
- user_id: "user-1",
- status: "warning",
- }],
+ rows: [
+ {
+ id: "a-warning-gateway",
+ container_id: "container-warning-gateway",
+ gateway_host_port: 19123,
+ user_id: "user-1",
+ status: "warning",
+ },
+ ],
});
- const res = await auth(
- request(app)
- .get("/agents/a-warning-gateway/gateway-url")
- .set("Host", "app.nora.test:8080")
- );
+ const res = await auth(request(app).get("/agents/a-warning-gateway/gateway-url"));
expect(res.status).toBe(200);
expect(res.body).toEqual({
url: "http://app.nora.test:19123",
port: 19123,
});
+
+ delete process.env.NEXTAUTH_URL;
});
it("uses the forwarded request protocol for published gateway urls when the control plane is behind https", async () => {
+ process.env.NEXTAUTH_URL = "https://app.nora.test";
mockDb.query.mockResolvedValueOnce({
- rows: [{
- id: "a-https-gateway",
- container_id: "container-https-gateway",
- gateway_host_port: 19123,
- user_id: "user-1",
- status: "running",
- }],
+ rows: [
+ {
+ id: "a-https-gateway",
+ container_id: "container-https-gateway",
+ gateway_host_port: 19123,
+ user_id: "user-1",
+ status: "running",
+ },
+ ],
});
const res = await auth(
- request(app)
- .get("/agents/a-https-gateway/gateway-url")
- .set("Host", "app.nora.test")
- .set("X-Forwarded-Proto", "https")
+ request(app).get("/agents/a-https-gateway/gateway-url").set("X-Forwarded-Proto", "https"),
);
expect(res.status).toBe(200);
@@ -482,21 +503,25 @@ describe("GET /agents/:id/gateway-url", () => {
url: "https://app.nora.test:19123",
port: 19123,
});
+
+ delete process.env.NEXTAUTH_URL;
});
it("uses explicit gateway host and port when the backend records them", async () => {
mockDb.query.mockResolvedValueOnce({
- rows: [{
- id: "a-k8s-gateway",
- host: "oclaw-agent-a-k8s.openclaw-agents.svc.cluster.local",
- container_id: "oclaw-agent-a-k8s",
- backend_type: "k8s",
- gateway_host_port: null,
- gateway_host: "nora-kind-control-plane",
- gateway_port: 31879,
- user_id: "user-1",
- status: "running",
- }],
+ rows: [
+ {
+ id: "a-k8s-gateway",
+ host: "oclaw-agent-a-k8s.openclaw-agents.svc.cluster.local",
+ container_id: "oclaw-agent-a-k8s",
+ backend_type: "k8s",
+ gateway_host_port: null,
+ gateway_host: "nora-kind-control-plane",
+ gateway_port: 31879,
+ user_id: "user-1",
+ status: "running",
+ },
+ ],
});
const res = await auth(request(app).get("/agents/a-k8s-gateway/gateway-url"));
@@ -510,13 +535,15 @@ describe("GET /agents/:id/gateway-url", () => {
it("rejects gateway url lookups for stopped agents so stale ports are not exposed", async () => {
mockDb.query.mockResolvedValueOnce({
- rows: [{
- id: "a-stopped-gateway",
- container_id: "container-gateway",
- gateway_host_port: 19123,
- user_id: "user-1",
- status: "stopped",
- }],
+ rows: [
+ {
+ id: "a-stopped-gateway",
+ container_id: "container-gateway",
+ gateway_host_port: 19123,
+ user_id: "user-1",
+ status: "stopped",
+ },
+ ],
});
const res = await auth(request(app).get("/agents/a-stopped-gateway/gateway-url"));
@@ -527,13 +554,15 @@ describe("GET /agents/:id/gateway-url", () => {
it("rejects gateway url lookups for error agents so failed control-plane state stays closed", async () => {
mockDb.query.mockResolvedValueOnce({
- rows: [{
- id: "a-error-gateway",
- container_id: "container-error-gateway",
- gateway_host_port: 19123,
- user_id: "user-1",
- status: "error",
- }],
+ rows: [
+ {
+ id: "a-error-gateway",
+ container_id: "container-error-gateway",
+ gateway_host_port: 19123,
+ user_id: "user-1",
+ status: "error",
+ },
+ ],
});
const res = await auth(request(app).get("/agents/a-error-gateway/gateway-url"));
@@ -565,24 +594,26 @@ describe("Hermes WebUI routes", () => {
},
});
mockDb.query.mockResolvedValueOnce({
- rows: [{
- id: "a-hermes-ui",
- user_id: "user-1",
- status: "running",
- runtime_family: "hermes",
- backend_type: "hermes",
- container_id: "hermes-container",
- runtime_host: "10.0.0.40",
- runtime_port: 8642,
- gateway_token: "hermes-token",
- }],
+ rows: [
+ {
+ id: "a-hermes-ui",
+ user_id: "user-1",
+ status: "running",
+ runtime_family: "hermes",
+ backend_type: "hermes",
+ container_id: "hermes-container",
+ runtime_host: "10.0.0.40",
+ runtime_port: 8642,
+ gateway_token: "hermes-token",
+ },
+ ],
});
global.fetch = jest
.fn()
.mockResolvedValueOnce(
createMockFetchResponse({
body: { status: "ok", platform: "hermes-agent" },
- })
+ }),
)
.mockResolvedValueOnce(
createMockFetchResponse({
@@ -590,7 +621,7 @@ describe("Hermes WebUI routes", () => {
object: "list",
data: [{ id: "desk-bot", object: "model" }],
},
- })
+ }),
)
.mockResolvedValueOnce(
createMockFetchResponse({
@@ -600,7 +631,7 @@ describe("Hermes WebUI routes", () => {
gateway_state: "running",
active_sessions: 4,
},
- })
+ }),
);
const res = await auth(request(app).get("/agents/a-hermes-ui/hermes-ui"));
@@ -628,7 +659,7 @@ describe("Hermes WebUI routes", () => {
configuredModel: "gpt-5.4",
configuredProvider: "custom",
configuredBaseUrl: "https://api.openai.com/v1",
- })
+ }),
);
expect(global.fetch).toHaveBeenNthCalledWith(
1,
@@ -638,7 +669,7 @@ describe("Hermes WebUI routes", () => {
headers: expect.objectContaining({
Authorization: "Bearer hermes-token",
}),
- })
+ }),
);
expect(global.fetch).toHaveBeenNthCalledWith(
2,
@@ -648,7 +679,7 @@ describe("Hermes WebUI routes", () => {
headers: expect.objectContaining({
Authorization: "Bearer hermes-token",
}),
- })
+ }),
);
expect(global.fetch).toHaveBeenNthCalledWith(
3,
@@ -658,14 +689,14 @@ describe("Hermes WebUI routes", () => {
headers: expect.objectContaining({
Accept: "application/json",
}),
- })
+ }),
);
expect(res.body.gateway).toEqual(
expect.objectContaining({
state: "running",
activeAgents: 1,
jobsCount: 0,
- })
+ }),
);
});
@@ -690,24 +721,26 @@ describe("Hermes WebUI routes", () => {
},
});
mockDb.query.mockResolvedValueOnce({
- rows: [{
- id: "a-hermes-ui-old-image",
- user_id: "user-1",
- status: "running",
- runtime_family: "hermes",
- backend_type: "hermes",
- container_id: "hermes-container",
- runtime_host: "10.0.0.41",
- runtime_port: 8642,
- gateway_token: "hermes-token",
- }],
+ rows: [
+ {
+ id: "a-hermes-ui-old-image",
+ user_id: "user-1",
+ status: "running",
+ runtime_family: "hermes",
+ backend_type: "hermes",
+ container_id: "hermes-container",
+ runtime_host: "10.0.0.41",
+ runtime_port: 8642,
+ gateway_token: "hermes-token",
+ },
+ ],
});
global.fetch = jest
.fn()
.mockResolvedValueOnce(
createMockFetchResponse({
body: { status: "ok", platform: "hermes-agent" },
- })
+ }),
)
.mockResolvedValueOnce(
createMockFetchResponse({
@@ -715,21 +748,15 @@ describe("Hermes WebUI routes", () => {
object: "list",
data: [{ id: "desk-bot", object: "model" }],
},
- })
+ }),
)
.mockRejectedValueOnce(new TypeError("fetch failed"));
mockRunContainerCommand.mockResolvedValueOnce({
exitCode: 0,
- output: [
- "STATUS=missing-dashboard",
- "VERSION=Hermes Agent v0.8.0 (2026.4.8)",
- "",
- ].join("\n"),
+ output: ["STATUS=missing-dashboard", "VERSION=Hermes Agent v0.8.0 (2026.4.8)", ""].join("\n"),
});
- const res = await auth(
- request(app).get("/agents/a-hermes-ui-old-image/hermes-ui")
- );
+ const res = await auth(request(app).get("/agents/a-hermes-ui-old-image/hermes-ui"));
expect(res.status).toBe(200);
expect(res.body.dashboard).toEqual({
@@ -742,25 +769,25 @@ describe("Hermes WebUI routes", () => {
"This Hermes image (Hermes Agent v0.8.0 (2026.4.8)) does not include the official dashboard yet. Pull a current Hermes image and redeploy this agent.",
});
expect(mockRunContainerCommand).toHaveBeenCalledTimes(1);
- expect(mockRunContainerCommand.mock.calls[0][1]).toContain(
- ">> /proc/1/fd/1 2>> /proc/1/fd/2"
- );
+ expect(mockRunContainerCommand.mock.calls[0][1]).toContain(">> /proc/1/fd/1 2>> /proc/1/fd/2");
expect(mockRunContainerCommand.mock.calls[0][1]).not.toContain("dashboard.log");
});
it("proxies Hermes chat requests through the runtime API", async () => {
mockDb.query.mockResolvedValueOnce({
- rows: [{
- id: "a-hermes-chat",
- user_id: "user-1",
- status: "running",
- runtime_family: "hermes",
- backend_type: "hermes",
- container_id: "hermes-container",
- runtime_host: "10.0.0.41",
- runtime_port: 8642,
- gateway_token: "hermes-token",
- }],
+ rows: [
+ {
+ id: "a-hermes-chat",
+ user_id: "user-1",
+ status: "running",
+ runtime_family: "hermes",
+ backend_type: "hermes",
+ container_id: "hermes-container",
+ runtime_host: "10.0.0.41",
+ runtime_port: 8642,
+ gateway_token: "hermes-token",
+ },
+ ],
});
global.fetch = jest.fn().mockResolvedValueOnce(
createMockFetchResponse({
@@ -780,13 +807,15 @@ describe("Hermes WebUI routes", () => {
headers: {
"x-hermes-session-id": "sess-123",
},
- })
+ }),
);
const res = await auth(
- request(app).post("/agents/a-hermes-chat/hermes-ui/chat").send({
- messages: [{ role: "user", content: "Inspect the workspace" }],
- })
+ request(app)
+ .post("/agents/a-hermes-chat/hermes-ui/chat")
+ .send({
+ messages: [{ role: "user", content: "Inspect the workspace" }],
+ }),
);
expect(res.status).toBe(200);
@@ -796,7 +825,7 @@ describe("Hermes WebUI routes", () => {
model: "desk-bot",
sessionId: "sess-123",
usage: expect.objectContaining({ total_tokens: 42 }),
- })
+ }),
);
const [targetUrl, requestOptions] = global.fetch.mock.calls[0];
@@ -808,7 +837,7 @@ describe("Hermes WebUI routes", () => {
Authorization: "Bearer hermes-token",
"Content-Type": "application/json",
}),
- })
+ }),
);
expect(JSON.parse(requestOptions.body)).toEqual({
stream: false,
@@ -818,12 +847,14 @@ describe("Hermes WebUI routes", () => {
it("rejects Hermes cron routes for non-Hermes agents", async () => {
mockDb.query.mockResolvedValueOnce({
- rows: [{
- id: "a-openclaw-hermes-ui",
- user_id: "user-1",
- status: "running",
- runtime_family: "openclaw",
- }],
+ rows: [
+ {
+ id: "a-openclaw-hermes-ui",
+ user_id: "user-1",
+ status: "running",
+ runtime_family: "openclaw",
+ },
+ ],
});
const res = await auth(request(app).get("/agents/a-openclaw-hermes-ui/hermes-ui/cron"));
@@ -834,12 +865,14 @@ describe("Hermes WebUI routes", () => {
it("rejects Hermes channel routes when the runtime is not running", async () => {
mockDb.query.mockResolvedValueOnce({
- rows: [{
- id: "a-hermes-ui-stopped",
- user_id: "user-1",
- status: "stopped",
- runtime_family: "hermes",
- }],
+ rows: [
+ {
+ id: "a-hermes-ui-stopped",
+ user_id: "user-1",
+ status: "stopped",
+ runtime_family: "hermes",
+ },
+ ],
});
const res = await auth(request(app).get("/agents/a-hermes-ui-stopped/hermes-ui/channels"));
@@ -850,24 +883,26 @@ describe("Hermes WebUI routes", () => {
it("proxies Hermes cron list requests", async () => {
mockDb.query.mockResolvedValueOnce({
- rows: [{
- id: "a-hermes-cron-list",
- user_id: "user-1",
- status: "running",
- runtime_family: "hermes",
- backend_type: "hermes",
- container_id: "hermes-container",
- runtime_host: "10.0.0.42",
- runtime_port: 8642,
- gateway_token: "hermes-token",
- }],
+ rows: [
+ {
+ id: "a-hermes-cron-list",
+ user_id: "user-1",
+ status: "running",
+ runtime_family: "hermes",
+ backend_type: "hermes",
+ container_id: "hermes-container",
+ runtime_host: "10.0.0.42",
+ runtime_port: 8642,
+ gateway_token: "hermes-token",
+ },
+ ],
});
global.fetch = jest.fn().mockResolvedValueOnce(
createMockFetchResponse({
body: {
jobs: [{ id: "job-1", name: "Daily summary" }],
},
- })
+ }),
);
const res = await auth(request(app).get("/agents/a-hermes-cron-list/hermes-ui/cron"));
@@ -881,30 +916,32 @@ describe("Hermes WebUI routes", () => {
headers: expect.objectContaining({
Authorization: "Bearer hermes-token",
}),
- })
+ }),
);
});
it("maps Nora cron create payloads to Hermes prompt payloads", async () => {
mockDb.query.mockResolvedValueOnce({
- rows: [{
- id: "a-hermes-cron-create",
- user_id: "user-1",
- status: "running",
- runtime_family: "hermes",
- backend_type: "hermes",
- container_id: "hermes-container",
- runtime_host: "10.0.0.43",
- runtime_port: 8642,
- gateway_token: "hermes-token",
- }],
+ rows: [
+ {
+ id: "a-hermes-cron-create",
+ user_id: "user-1",
+ status: "running",
+ runtime_family: "hermes",
+ backend_type: "hermes",
+ container_id: "hermes-container",
+ runtime_host: "10.0.0.43",
+ runtime_port: 8642,
+ gateway_token: "hermes-token",
+ },
+ ],
});
global.fetch = jest.fn().mockResolvedValueOnce(
createMockFetchResponse({
body: {
job: { id: "job-2", name: "Daily summary" },
},
- })
+ }),
);
const res = await auth(
@@ -912,7 +949,7 @@ describe("Hermes WebUI routes", () => {
name: "Daily summary",
schedule: "0 9 * * *",
message: "Summarize the last 24 hours",
- })
+ }),
);
expect(res.status).toBe(200);
@@ -925,7 +962,7 @@ describe("Hermes WebUI routes", () => {
Authorization: "Bearer hermes-token",
"Content-Type": "application/json",
}),
- })
+ }),
);
expect(JSON.parse(requestOptions.body)).toEqual({
name: "Daily summary",
@@ -936,28 +973,30 @@ describe("Hermes WebUI routes", () => {
it("proxies Hermes cron deletions", async () => {
mockDb.query.mockResolvedValueOnce({
- rows: [{
- id: "a-hermes-cron-delete",
- user_id: "user-1",
- status: "running",
- runtime_family: "hermes",
- backend_type: "hermes",
- container_id: "hermes-container",
- runtime_host: "10.0.0.44",
- runtime_port: 8642,
- gateway_token: "hermes-token",
- }],
+ rows: [
+ {
+ id: "a-hermes-cron-delete",
+ user_id: "user-1",
+ status: "running",
+ runtime_family: "hermes",
+ backend_type: "hermes",
+ container_id: "hermes-container",
+ runtime_host: "10.0.0.44",
+ runtime_port: 8642,
+ gateway_token: "hermes-token",
+ },
+ ],
});
global.fetch = jest.fn().mockResolvedValueOnce(
createMockFetchResponse({
body: {
deleted: true,
},
- })
+ }),
);
const res = await auth(
- request(app).delete("/agents/a-hermes-cron-delete/hermes-ui/cron/job-9")
+ request(app).delete("/agents/a-hermes-cron-delete/hermes-ui/cron/job-9"),
);
expect(res.status).toBe(200);
@@ -969,18 +1008,20 @@ describe("Hermes WebUI routes", () => {
headers: expect.objectContaining({
Authorization: "Bearer hermes-token",
}),
- })
+ }),
);
});
it("lists Hermes channels through the helper", async () => {
mockDb.query.mockResolvedValueOnce({
- rows: [{
- id: "a-hermes-channel-list",
- user_id: "user-1",
- status: "running",
- runtime_family: "hermes",
- }],
+ rows: [
+ {
+ id: "a-hermes-channel-list",
+ user_id: "user-1",
+ status: "running",
+ runtime_family: "hermes",
+ },
+ ],
});
mockListHermesChannels.mockResolvedValueOnce({
channels: [{ type: "telegram", name: "Telegram" }],
@@ -989,14 +1030,12 @@ describe("Hermes WebUI routes", () => {
directoryUpdatedAt: "2026-04-12T12:00:00.000Z",
});
- const res = await auth(
- request(app).get("/agents/a-hermes-channel-list/hermes-ui/channels")
- );
+ const res = await auth(request(app).get("/agents/a-hermes-channel-list/hermes-ui/channels"));
expect(res.status).toBe(200);
expect(res.body.channels).toEqual([{ type: "telegram", name: "Telegram" }]);
expect(mockListHermesChannels).toHaveBeenCalledWith(
- expect.objectContaining({ id: "a-hermes-channel-list" })
+ expect.objectContaining({ id: "a-hermes-channel-list" }),
);
});
@@ -1007,9 +1046,7 @@ describe("Hermes WebUI routes", () => {
status: "running",
runtime_family: "hermes",
};
- mockDb.query
- .mockResolvedValueOnce({ rows: [agent] })
- .mockResolvedValueOnce({ rows: [agent] });
+ mockDb.query.mockResolvedValueOnce({ rows: [agent] }).mockResolvedValueOnce({ rows: [agent] });
mockSaveHermesChannel
.mockResolvedValueOnce({
payload: { channels: [{ type: "telegram" }] },
@@ -1021,17 +1058,19 @@ describe("Hermes WebUI routes", () => {
});
const createRes = await auth(
- request(app).post("/agents/a-hermes-channel-save/hermes-ui/channels").send({
- type: "Telegram",
- config: { TELEGRAM_BOT_TOKEN: "secret-token" },
- })
+ request(app)
+ .post("/agents/a-hermes-channel-save/hermes-ui/channels")
+ .send({
+ type: "Telegram",
+ config: { TELEGRAM_BOT_TOKEN: "secret-token" },
+ }),
);
const updateRes = await auth(
request(app)
.patch("/agents/a-hermes-channel-save/hermes-ui/channels/telegram")
.send({
config: { TELEGRAM_BOT_TOKEN: "[REDACTED]" },
- })
+ }),
);
expect(createRes.status).toBe(200);
@@ -1041,13 +1080,13 @@ describe("Hermes WebUI routes", () => {
expect.objectContaining({ id: "a-hermes-channel-save" }),
"telegram",
{ TELEGRAM_BOT_TOKEN: "secret-token" },
- { create: true }
+ { create: true },
);
expect(mockSaveHermesChannel).toHaveBeenNthCalledWith(
2,
expect.objectContaining({ id: "a-hermes-channel-save" }),
"telegram",
- { TELEGRAM_BOT_TOKEN: "[REDACTED]" }
+ { TELEGRAM_BOT_TOKEN: "[REDACTED]" },
);
});
@@ -1058,9 +1097,7 @@ describe("Hermes WebUI routes", () => {
status: "running",
runtime_family: "hermes",
};
- mockDb.query
- .mockResolvedValueOnce({ rows: [agent] })
- .mockResolvedValueOnce({ rows: [agent] });
+ mockDb.query.mockResolvedValueOnce({ rows: [agent] }).mockResolvedValueOnce({ rows: [agent] });
mockDeleteHermesChannel.mockResolvedValueOnce({
channels: [],
availableTypes: [{ type: "telegram", label: "Telegram" }],
@@ -1074,23 +1111,21 @@ describe("Hermes WebUI routes", () => {
});
const deleteRes = await auth(
- request(app)
- .delete("/agents/a-hermes-channel-actions/hermes-ui/channels/telegram")
+ request(app).delete("/agents/a-hermes-channel-actions/hermes-ui/channels/telegram"),
);
const testRes = await auth(
- request(app)
- .post("/agents/a-hermes-channel-actions/hermes-ui/channels/telegram/test")
+ request(app).post("/agents/a-hermes-channel-actions/hermes-ui/channels/telegram/test"),
);
expect(deleteRes.status).toBe(200);
expect(testRes.status).toBe(200);
expect(mockDeleteHermesChannel).toHaveBeenCalledWith(
expect.objectContaining({ id: "a-hermes-channel-actions" }),
- "telegram"
+ "telegram",
);
expect(mockTestHermesChannel).toHaveBeenCalledWith(
expect.objectContaining({ id: "a-hermes-channel-actions" }),
- "telegram"
+ "telegram",
);
});
});
@@ -1108,43 +1143,46 @@ describe("Hermes integration sync routes", () => {
mockDb.query
.mockResolvedValueOnce({
- rows: [{
- id: "a-hermes-integration",
- user_id: "user-1",
- name: "Hermes Integration Agent",
- status: "running",
- host: "runtime-host",
- }],
+ rows: [
+ {
+ id: "a-hermes-integration",
+ user_id: "user-1",
+ name: "Hermes Integration Agent",
+ status: "running",
+ host: "runtime-host",
+ },
+ ],
})
.mockResolvedValueOnce({
- rows: [{
- id: "a-hermes-integration",
- user_id: "user-1",
- status: "running",
- runtime_family: "hermes",
- }],
+ rows: [
+ {
+ id: "a-hermes-integration",
+ user_id: "user-1",
+ status: "running",
+ runtime_family: "hermes",
+ },
+ ],
})
.mockResolvedValueOnce({
- rows: [{
- id: "a-hermes-integration",
- user_id: "user-1",
- status: "running",
- runtime_family: "hermes",
- }],
+ rows: [
+ {
+ id: "a-hermes-integration",
+ user_id: "user-1",
+ status: "running",
+ runtime_family: "hermes",
+ },
+ ],
});
const res = await auth(
request(app).post("/agents/a-hermes-integration/integrations").send({
provider: "slack",
token: "xoxb-secret",
- })
+ }),
);
expect(res.status).toBe(200);
- expect(mockSyncAuthToUserAgents).toHaveBeenCalledWith(
- "user-1",
- "a-hermes-integration"
- );
+ expect(mockSyncAuthToUserAgents).toHaveBeenCalledWith("user-1", "a-hermes-integration");
});
it("returns a 502 when Hermes integration sync fails after disconnect", async () => {
@@ -1160,35 +1198,39 @@ describe("Hermes integration sync routes", () => {
mockDb.query
.mockResolvedValueOnce({
- rows: [{
- id: "a-hermes-integration-failed",
- user_id: "user-1",
- name: "Hermes Integration Agent",
- status: "running",
- host: "runtime-host",
- }],
+ rows: [
+ {
+ id: "a-hermes-integration-failed",
+ user_id: "user-1",
+ name: "Hermes Integration Agent",
+ status: "running",
+ host: "runtime-host",
+ },
+ ],
})
.mockResolvedValueOnce({
- rows: [{
- id: "a-hermes-integration-failed",
- user_id: "user-1",
- status: "running",
- runtime_family: "hermes",
- }],
- })
+ rows: [
+ {
+ id: "a-hermes-integration-failed",
+ user_id: "user-1",
+ status: "running",
+ runtime_family: "hermes",
+ },
+ ],
+ })
.mockResolvedValueOnce({
- rows: [{
- id: "a-hermes-integration-failed",
- user_id: "user-1",
- status: "running",
- runtime_family: "hermes",
- }],
+ rows: [
+ {
+ id: "a-hermes-integration-failed",
+ user_id: "user-1",
+ status: "running",
+ runtime_family: "hermes",
+ },
+ ],
});
const res = await auth(
- request(app).delete(
- "/agents/a-hermes-integration-failed/integrations/int-hermes-1"
- )
+ request(app).delete("/agents/a-hermes-integration-failed/integrations/int-hermes-1"),
);
expect(res.status).toBe(502);
@@ -1225,11 +1267,9 @@ describe("agent audit logging", () => {
const res = await auth(request(app).post("/agents/agent-start-1/start"));
expect(res.status).toBe(200);
- expect(mockSyncAuthToUserAgents).toHaveBeenCalledWith(
- "user-1",
- "agent-start-1",
- { onlyIfAuthPresent: true }
- );
+ expect(mockSyncAuthToUserAgents).toHaveBeenCalledWith("user-1", "agent-start-1", {
+ onlyIfAuthPresent: true,
+ });
expect(monitoringModule.logEvent).toHaveBeenCalledWith(
"agent_started",
expect.stringContaining("Start Agent"),
@@ -1252,7 +1292,7 @@ describe("agent audit logging", () => {
id: "agent-start-1",
ownerEmail: "user@nora.test",
}),
- })
+ }),
);
});
});
@@ -1261,32 +1301,36 @@ describe("GET /agents/:id/stats", () => {
it("returns normalized live stats with derived rate fields", async () => {
mockDb.query
.mockResolvedValueOnce({
- rows: [{
- id: "a-metrics",
- user_id: "user-1",
- container_id: "container-metrics",
- backend_type: "docker",
- sandbox_type: "standard",
- status: "running",
- }],
+ rows: [
+ {
+ id: "a-metrics",
+ user_id: "user-1",
+ container_id: "container-metrics",
+ backend_type: "docker",
+ sandbox_type: "standard",
+ status: "running",
+ },
+ ],
})
.mockResolvedValueOnce({
- rows: [{
- cpu_percent: 8,
- memory_usage_mb: 500,
- memory_limit_mb: 2048,
- memory_percent: 24.41,
- network_rx_mb: 5,
- network_tx_mb: 15,
- disk_read_mb: 25,
- disk_write_mb: 35,
- network_rx_rate_mbps: 0.5,
- network_tx_rate_mbps: 1.5,
- disk_read_rate_mbps: 2.5,
- disk_write_rate_mbps: 3.5,
- pids: 4,
- recorded_at: "2026-04-08T00:00:00.000Z",
- }],
+ rows: [
+ {
+ cpu_percent: 8,
+ memory_usage_mb: 500,
+ memory_limit_mb: 2048,
+ memory_percent: 24.41,
+ network_rx_mb: 5,
+ network_tx_mb: 15,
+ disk_read_mb: 25,
+ disk_write_mb: 35,
+ network_rx_rate_mbps: 0.5,
+ network_tx_rate_mbps: 1.5,
+ disk_read_rate_mbps: 2.5,
+ disk_write_rate_mbps: 3.5,
+ pids: 4,
+ recorded_at: "2026-04-08T00:00:00.000Z",
+ },
+ ],
});
const res = await auth(request(app).get("/agents/a-metrics/stats"));
@@ -1362,17 +1406,19 @@ describe("GET /agents/:id/stats", () => {
mockDb.query
.mockResolvedValueOnce({
- rows: [{
- id: "a-nemo",
- user_id: "user-1",
- container_id: "container-nemo",
- backend_type: "nemoclaw",
- sandbox_type: "nemoclaw",
- status: "running",
- host: "127.0.0.1",
- runtime_host: "127.0.0.1",
- runtime_port: 9090,
- }],
+ rows: [
+ {
+ id: "a-nemo",
+ user_id: "user-1",
+ container_id: "container-nemo",
+ backend_type: "nemoclaw",
+ sandbox_type: "nemoclaw",
+ status: "running",
+ host: "127.0.0.1",
+ runtime_host: "127.0.0.1",
+ runtime_port: 9090,
+ },
+ ],
})
.mockResolvedValueOnce({ rows: [] });
@@ -1387,7 +1433,7 @@ describe("GET /agents/:id/stats", () => {
policyActive: true,
policyRuleCount: 2,
pendingApprovalsCount: 1,
- })
+ }),
);
expect(global.fetch).toHaveBeenCalledTimes(3);
});
@@ -1415,50 +1461,56 @@ describe("GET /agents/:id/stats/history", () => {
mockDb.query
.mockResolvedValueOnce({
- rows: [{
- id: "a-proxmox",
- user_id: "user-1",
- container_id: "vm-101",
- backend_type: "proxmox",
- sandbox_type: "standard",
- status: "running",
- }],
+ rows: [
+ {
+ id: "a-proxmox",
+ user_id: "user-1",
+ container_id: "vm-101",
+ backend_type: "proxmox",
+ sandbox_type: "standard",
+ status: "running",
+ },
+ ],
})
.mockResolvedValueOnce({
- rows: [{
- cpu_percent: 15,
- memory_usage_mb: 1024,
- memory_limit_mb: 4096,
- memory_percent: 25,
- network_rx_mb: 50,
- network_tx_mb: 10,
- disk_read_mb: 25,
- disk_write_mb: 5,
- network_rx_rate_mbps: 1.5,
- network_tx_rate_mbps: 0.5,
- disk_read_rate_mbps: 0.25,
- disk_write_rate_mbps: 0.1,
- pids: 99,
- recorded_at: "2026-04-08T00:00:05.000Z",
- }],
+ rows: [
+ {
+ cpu_percent: 15,
+ memory_usage_mb: 1024,
+ memory_limit_mb: 4096,
+ memory_percent: 25,
+ network_rx_mb: 50,
+ network_tx_mb: 10,
+ disk_read_mb: 25,
+ disk_write_mb: 5,
+ network_rx_rate_mbps: 1.5,
+ network_tx_rate_mbps: 0.5,
+ disk_read_rate_mbps: 0.25,
+ disk_write_rate_mbps: 0.1,
+ pids: 99,
+ recorded_at: "2026-04-08T00:00:05.000Z",
+ },
+ ],
})
.mockResolvedValueOnce({
- rows: [{
- cpu_percent: 15,
- memory_usage_mb: 1024,
- memory_limit_mb: 4096,
- memory_percent: 25,
- network_rx_mb: 50,
- network_tx_mb: 10,
- disk_read_mb: 25,
- disk_write_mb: 5,
- network_rx_rate_mbps: 1.5,
- network_tx_rate_mbps: 0.5,
- disk_read_rate_mbps: 0.25,
- disk_write_rate_mbps: 0.1,
- pids: 99,
- recorded_at: "2026-04-08T00:00:05.000Z",
- }],
+ rows: [
+ {
+ cpu_percent: 15,
+ memory_usage_mb: 1024,
+ memory_limit_mb: 4096,
+ memory_percent: 25,
+ network_rx_mb: 50,
+ network_tx_mb: 10,
+ disk_read_mb: 25,
+ disk_write_mb: 5,
+ network_rx_rate_mbps: 1.5,
+ network_tx_rate_mbps: 0.5,
+ disk_read_rate_mbps: 0.25,
+ disk_write_rate_mbps: 0.1,
+ pids: 99,
+ recorded_at: "2026-04-08T00:00:05.000Z",
+ },
+ ],
});
const res = await auth(request(app).get("/agents/a-proxmox/stats/history?range=15m"));
@@ -1478,21 +1530,23 @@ describe("GET /agents/:id/stats/history", () => {
cpu_percent: 15,
network_rx_rate_mbps: 1.5,
pids: null,
- })
+ }),
);
});
it("uses a 7-day window and returns the live sample when stored history is empty", async () => {
mockDb.query
.mockResolvedValueOnce({
- rows: [{
- id: "a-empty",
- user_id: "user-1",
- container_id: "container-empty",
- backend_type: "docker",
- sandbox_type: "standard",
- status: "running",
- }],
+ rows: [
+ {
+ id: "a-empty",
+ user_id: "user-1",
+ container_id: "container-empty",
+ backend_type: "docker",
+ sandbox_type: "standard",
+ status: "running",
+ },
+ ],
})
.mockResolvedValueOnce({ rows: [] })
.mockResolvedValueOnce({ rows: [] });
@@ -1505,7 +1559,7 @@ describe("GET /agents/:id/stats/history", () => {
expect.objectContaining({
cpu_percent: 12.34,
memory_usage_mb: 512,
- })
+ }),
);
const historyQueryParams = mockDb.query.mock.calls[2][1];
@@ -1526,9 +1580,7 @@ describe("POST /agents/deploy", () => {
it("rejects agent name over 100 chars", async () => {
const longName = "A".repeat(101);
- const res = await auth(
- request(app).post("/agents/deploy").send({ name: longName })
- );
+ const res = await auth(request(app).post("/agents/deploy").send({ name: longName }));
expect(res.status).toBe(400);
expect(res.body.error).toMatch(/100/);
});
@@ -1541,20 +1593,20 @@ describe("POST /agents/deploy", () => {
})
.mockResolvedValueOnce({ rows: [] });
- const res = await auth(
- request(app).post("/agents/deploy").send({ name: "TestAgent" })
- );
+ const res = await auth(request(app).post("/agents/deploy").send({ name: "TestAgent" }));
expect(res.status).toBe(200);
expect(res.body).toHaveProperty("id");
expect(res.body).toHaveProperty("status", "queued");
- expect(mockAddDeploymentJob).toHaveBeenCalledWith(expect.objectContaining({
- id: "a-new",
- name: "TestAgent",
- userId: "user-1",
- backend: "docker",
- specs: { vcpu: 1, ram_mb: 1024, disk_gb: 10 },
- sandbox: "standard",
- }));
+ expect(mockAddDeploymentJob).toHaveBeenCalledWith(
+ expect.objectContaining({
+ id: "a-new",
+ name: "TestAgent",
+ userId: "user-1",
+ backend: "docker",
+ specs: { vcpu: 1, ram_mb: 1024, disk_gb: 10 },
+ sandbox: "standard",
+ }),
+ );
});
it("deploys from a migration draft and attaches the draft to the new agent", async () => {
@@ -1580,15 +1632,17 @@ describe("POST /agents/deploy", () => {
});
mockDb.query
.mockResolvedValueOnce({
- rows: [{
- id: "a-migrated",
- name: "Imported Support Agent",
- status: "queued",
- user_id: "user-1",
- runtime_family: "openclaw",
- deploy_target: "docker",
- sandbox_profile: "standard",
- }],
+ rows: [
+ {
+ id: "a-migrated",
+ name: "Imported Support Agent",
+ status: "queued",
+ user_id: "user-1",
+ runtime_family: "openclaw",
+ deploy_target: "docker",
+ sandbox_profile: "standard",
+ },
+ ],
})
.mockResolvedValueOnce({ rows: [] });
@@ -1596,36 +1650,30 @@ describe("POST /agents/deploy", () => {
request(app).post("/agents/deploy").send({
migration_draft_id: "draft-openclaw-1",
deploy_target: "docker",
- })
+ }),
);
expect(res.status).toBe(200);
- expect(mockGetOwnedMigrationDraft).toHaveBeenCalledWith(
- "draft-openclaw-1",
- "user-1"
- );
+ expect(mockGetOwnedMigrationDraft).toHaveBeenCalledWith("draft-openclaw-1", "user-1");
expect(mockMaterializeManagedMigrationState).toHaveBeenCalledWith(
"user-1",
"a-migrated",
expect.objectContaining({
runtimeFamily: "openclaw",
- })
- );
- expect(mockAttachDraftToAgent).toHaveBeenCalledWith(
- "draft-openclaw-1",
- "a-migrated"
+ }),
);
+ expect(mockAttachDraftToAgent).toHaveBeenCalledWith("draft-openclaw-1", "a-migrated");
expect(JSON.parse(mockDb.query.mock.calls[0][1][10])).toEqual(
expect.objectContaining({
files: [{ path: "README.md", contentBase64: "" }],
- })
+ }),
);
expect(mockAddDeploymentJob).toHaveBeenCalledWith(
expect.objectContaining({
id: "a-migrated",
migration_draft_id: "draft-openclaw-1",
backend: "docker",
- })
+ }),
);
});
@@ -1643,7 +1691,7 @@ describe("POST /agents/deploy", () => {
name: "Mismatch",
runtime_family: "openclaw",
migration_draft_id: "draft-hermes-1",
- })
+ }),
);
expect(res.status).toBe(400);
@@ -1657,16 +1705,18 @@ describe("POST /agents/deploy", () => {
mockDb.query
.mockResolvedValueOnce({
- rows: [{
- id: "a-hermes-deploy",
- name: "Desk Bot",
- status: "queued",
- user_id: "user-1",
- runtime_family: "hermes",
- backend_type: "hermes",
- deploy_target: "docker",
- sandbox_profile: "standard",
- }],
+ rows: [
+ {
+ id: "a-hermes-deploy",
+ name: "Desk Bot",
+ status: "queued",
+ user_id: "user-1",
+ runtime_family: "hermes",
+ backend_type: "hermes",
+ deploy_target: "docker",
+ sandbox_profile: "standard",
+ },
+ ],
})
.mockResolvedValueOnce({ rows: [] });
@@ -1674,7 +1724,7 @@ describe("POST /agents/deploy", () => {
request(app).post("/agents/deploy").send({
name: "Desk Bot",
runtime_family: "hermes",
- })
+ }),
);
expect(res.status).toBe(200);
@@ -1685,7 +1735,7 @@ describe("POST /agents/deploy", () => {
id: "a-hermes-deploy",
backend: "hermes",
container_name: expect.stringMatching(/^hermes-agent-desk-bot-/),
- })
+ }),
);
});
@@ -1700,7 +1750,7 @@ describe("POST /agents/deploy", () => {
.mockResolvedValueOnce({ rows: [] });
const res = await auth(
- request(app).post("/agents/deploy").send({ name: "K8sAgent", backend: "k8s" })
+ request(app).post("/agents/deploy").send({ name: "K8sAgent", backend: "k8s" }),
);
expect(res.status).toBe(200);
@@ -1709,7 +1759,7 @@ describe("POST /agents/deploy", () => {
id: "a-k8s",
backend: "k8s",
sandbox: "standard",
- })
+ }),
);
});
@@ -1719,14 +1769,16 @@ describe("POST /agents/deploy", () => {
mockDb.query
.mockResolvedValueOnce({
- rows: [{
- id: "a-target",
- name: "TargetAgent",
- status: "queued",
- user_id: "user-1",
- backend_type: "k8s",
- sandbox_type: "standard",
- }],
+ rows: [
+ {
+ id: "a-target",
+ name: "TargetAgent",
+ status: "queued",
+ user_id: "user-1",
+ backend_type: "k8s",
+ sandbox_type: "standard",
+ },
+ ],
})
.mockResolvedValueOnce({ rows: [] });
@@ -1735,7 +1787,7 @@ describe("POST /agents/deploy", () => {
name: "TargetAgent",
runtime_family: "openclaw",
deploy_target: "k8s",
- })
+ }),
);
expect(res.status).toBe(200);
@@ -1745,23 +1797,17 @@ describe("POST /agents/deploy", () => {
deploy_target: "k8s",
sandbox_profile: "standard",
backend_type: "k8s",
- })
- );
- expect(mockDb.query.mock.calls[0][0]).toEqual(
- expect.stringContaining("runtime_family")
- );
- expect(mockDb.query.mock.calls[0][0]).toEqual(
- expect.stringContaining("deploy_target")
- );
- expect(mockDb.query.mock.calls[0][0]).toEqual(
- expect.stringContaining("sandbox_profile")
+ }),
);
+ expect(mockDb.query.mock.calls[0][0]).toEqual(expect.stringContaining("runtime_family"));
+ expect(mockDb.query.mock.calls[0][0]).toEqual(expect.stringContaining("deploy_target"));
+ expect(mockDb.query.mock.calls[0][0]).toEqual(expect.stringContaining("sandbox_profile"));
expect(mockAddDeploymentJob).toHaveBeenCalledWith(
expect.objectContaining({
id: "a-target",
backend: "k8s",
sandbox: "standard",
- })
+ }),
);
});
@@ -1770,14 +1816,16 @@ describe("POST /agents/deploy", () => {
mockDb.query
.mockResolvedValueOnce({
- rows: [{
- id: "a-nemo-target",
- name: "Nemo Target Agent",
- status: "queued",
- user_id: "user-1",
- backend_type: "nemoclaw",
- sandbox_type: "nemoclaw",
- }],
+ rows: [
+ {
+ id: "a-nemo-target",
+ name: "Nemo Target Agent",
+ status: "queued",
+ user_id: "user-1",
+ backend_type: "nemoclaw",
+ sandbox_type: "nemoclaw",
+ },
+ ],
})
.mockResolvedValueOnce({ rows: [] });
@@ -1786,7 +1834,7 @@ describe("POST /agents/deploy", () => {
name: "Nemo Target Agent",
deploy_target: "docker",
sandbox_profile: "nemoclaw",
- })
+ }),
);
expect(res.status).toBe(200);
@@ -1796,18 +1844,16 @@ describe("POST /agents/deploy", () => {
deploy_target: "docker",
sandbox_profile: "nemoclaw",
backend_type: "nemoclaw",
- })
+ }),
);
const insertParams = mockDb.query.mock.calls[0][1];
- expect(insertParams[9]).toBe(
- "ghcr.io/nvidia/openshell-community/sandboxes/openclaw"
- );
+ expect(insertParams[9]).toBe("ghcr.io/nvidia/openshell-community/sandboxes/openclaw");
expect(mockAddDeploymentJob).toHaveBeenCalledWith(
expect.objectContaining({
id: "a-nemo-target",
backend: "nemoclaw",
sandbox: "nemoclaw",
- })
+ }),
);
});
@@ -1820,7 +1866,7 @@ describe("POST /agents/deploy", () => {
name: "BadSelection",
deploy_target: "k8s",
sandbox_profile: "nemoclaw",
- })
+ }),
);
expect(res.status).toBe(400);
@@ -1834,7 +1880,7 @@ describe("POST /agents/deploy", () => {
request(app).post("/agents/deploy").send({
name: "BadRuntime",
runtime_family: "custom-runtime",
- })
+ }),
);
expect(res.status).toBe(400);
@@ -1856,7 +1902,7 @@ describe("POST /agents/deploy", () => {
vcpu: 999,
ram_mb: 999999,
disk_gb: 999999,
- })
+ }),
);
expect(res.status).toBe(200);
@@ -1872,14 +1918,16 @@ describe("POST /agents/deploy", () => {
16,
32768,
500,
- ])
+ ]),
+ );
+ expect(mockAddDeploymentJob).toHaveBeenCalledWith(
+ expect.objectContaining({
+ id: "a-sanitized",
+ name: "BadName",
+ backend: "docker",
+ specs: { vcpu: 16, ram_mb: 32768, disk_gb: 500 },
+ }),
);
- expect(mockAddDeploymentJob).toHaveBeenCalledWith(expect.objectContaining({
- id: "a-sanitized",
- name: "BadName",
- backend: "docker",
- specs: { vcpu: 16, ram_mb: 32768, disk_gb: 500 },
- }));
});
it("stores the default prebaked image and blank template payload when deploying", async () => {
@@ -1889,19 +1937,108 @@ describe("POST /agents/deploy", () => {
})
.mockResolvedValueOnce({ rows: [] });
- const res = await auth(
- request(app).post("/agents/deploy").send({ name: "Image Agent" })
- );
+ const res = await auth(request(app).post("/agents/deploy").send({ name: "Image Agent" }));
expect(res.status).toBe(200);
const insertParams = mockDb.query.mock.calls[0][1];
expect(insertParams[9]).toBe("nora-openclaw-agent:local");
expect(JSON.parse(insertParams[10])).toEqual(
expect.objectContaining({
- files: [],
+ files: expect.arrayContaining([
+ expect.objectContaining({ path: "AGENTS.md" }),
+ expect.objectContaining({ path: "SOUL.md" }),
+ expect.objectContaining({ path: "TOOLS.md" }),
+ expect.objectContaining({ path: "IDENTITY.md" }),
+ expect.objectContaining({ path: "USER.md" }),
+ expect.objectContaining({ path: "HEARTBEAT.md" }),
+ expect.objectContaining({ path: "MEMORY.md" }),
+ expect.objectContaining({ path: "BOOTSTRAP.md" }),
+ ]),
memoryFiles: [],
metadata: expect.objectContaining({ source: "blank-deploy" }),
+ }),
+ );
+ });
+
+ it("persists normalized clawhub skills during deploy without changing the response shape", async () => {
+ mockDb.query
+ .mockResolvedValueOnce({
+ rows: [
+ {
+ id: "a-clawhub",
+ name: "ClawHub Agent",
+ status: "queued",
+ user_id: "user-1",
+ clawhub_skills: [
+ {
+ source: "clawhub",
+ installSlug: "github",
+ author: "steipete",
+ pagePath: "steipete/github",
+ installedAt: "2026-04-19T12:00:00.000Z",
+ },
+ ],
+ },
+ ],
})
+ .mockResolvedValueOnce({ rows: [] });
+
+ const res = await auth(
+ request(app)
+ .post("/agents/deploy")
+ .send({
+ name: "ClawHub Agent",
+ clawhub_skills: [
+ {
+ source: "clawhub",
+ installSlug: "github",
+ author: "steipete",
+ pagePath: "steipete/github",
+ installedAt: "2026-04-19T12:00:00Z",
+ description: "Should not persist",
+ },
+ {
+ source: "clawhub",
+ installSlug: "github",
+ author: "steipete",
+ pagePath: "steipete/github",
+ installedAt: "2026-04-19T12:05:00Z",
+ },
+ ],
+ }),
+ );
+
+ expect(res.status).toBe(200);
+ expect(res.body).toEqual(
+ expect.objectContaining({
+ id: "a-clawhub",
+ name: "ClawHub Agent",
+ status: "queued",
+ }),
+ );
+
+ const insertParams = mockDb.query.mock.calls[0][1];
+ expect(JSON.parse(insertParams[11])).toEqual([
+ {
+ source: "clawhub",
+ installSlug: "github",
+ author: "steipete",
+ pagePath: "steipete/github",
+ installedAt: "2026-04-19T12:00:00.000Z",
+ },
+ ]);
+
+ expect(mockAddDeploymentJob).toHaveBeenCalledWith(
+ expect.objectContaining({
+ id: "a-clawhub",
+ clawhub_skills: [
+ expect.objectContaining({
+ installSlug: "github",
+ author: "steipete",
+ pagePath: "steipete/github",
+ }),
+ ],
+ }),
);
});
@@ -1926,9 +2063,7 @@ describe("POST /agents/deploy", () => {
mockDb.query
.mockResolvedValueOnce({
- rows: [
- { id: "a-paas", name: "PaaS Agent", status: "queued", user_id: "user-1" },
- ],
+ rows: [{ id: "a-paas", name: "PaaS Agent", status: "queued", user_id: "user-1" }],
})
.mockResolvedValueOnce({ rows: [] });
@@ -1938,7 +2073,7 @@ describe("POST /agents/deploy", () => {
vcpu: 12,
ram_mb: 12288,
disk_gb: 200,
- })
+ }),
);
expect(res.status).toBe(200);
@@ -1946,7 +2081,7 @@ describe("POST /agents/deploy", () => {
expect.objectContaining({
id: "a-paas",
specs: { vcpu: 4, ram_mb: 4096, disk_gb: 50 },
- })
+ }),
);
billing.IS_PAAS = false;
@@ -1962,14 +2097,16 @@ describe("Agent file and export routes", () => {
function mockOwnedAgent(overrides = {}) {
mockDb.query.mockResolvedValueOnce({
- rows: [{
- id: "agent-files-1",
- user_id: "user-1",
- name: "Files Agent",
- runtime_family: "openclaw",
- status: "running",
- ...overrides,
- }],
+ rows: [
+ {
+ id: "agent-files-1",
+ user_id: "user-1",
+ name: "Files Agent",
+ runtime_family: "openclaw",
+ status: "running",
+ ...overrides,
+ },
+ ],
});
}
@@ -1996,7 +2133,7 @@ describe("Agent file and export routes", () => {
],
});
expect(mockRootsForAgent).toHaveBeenCalledWith(
- expect.objectContaining({ id: "agent-files-1" })
+ expect.objectContaining({ id: "agent-files-1" }),
);
});
@@ -2005,13 +2142,11 @@ describe("Agent file and export routes", () => {
mockListFiles.mockResolvedValueOnce({
root: { id: "workspace", label: "Workspace", access: "rw" },
path: "project",
- entries: [
- { name: "index.js", path: "project/index.js", type: "file", size: 42 },
- ],
+ entries: [{ name: "index.js", path: "project/index.js", type: "file", size: 42 }],
});
const res = await auth(
- request(app).get("/agents/agent-files-1/files/tree?root=workspace&path=project")
+ request(app).get("/agents/agent-files-1/files/tree?root=workspace&path=project"),
);
expect(res.status).toBe(200);
@@ -2019,12 +2154,12 @@ describe("Agent file and export routes", () => {
expect.objectContaining({
path: "project",
entries: [expect.objectContaining({ path: "project/index.js" })],
- })
+ }),
);
expect(mockListFiles).toHaveBeenCalledWith(
expect.objectContaining({ id: "agent-files-1" }),
"workspace",
- "project"
+ "project",
);
});
@@ -2040,7 +2175,7 @@ describe("Agent file and export routes", () => {
});
const res = await auth(
- request(app).get("/agents/agent-files-1/files/content?root=workspace&path=project/index.js")
+ request(app).get("/agents/agent-files-1/files/content?root=workspace&path=project/index.js"),
);
expect(res.status).toBe(200);
@@ -2048,12 +2183,12 @@ describe("Agent file and export routes", () => {
expect.objectContaining({
path: "project/index.js",
writable: true,
- })
+ }),
);
expect(mockReadFile).toHaveBeenCalledWith(
expect.objectContaining({ id: "agent-files-1" }),
"workspace",
- "project/index.js"
+ "project/index.js",
);
});
@@ -2068,7 +2203,7 @@ describe("Agent file and export routes", () => {
root: "workspace",
path: "project/index.js",
contentBase64: Buffer.from("hello").toString("base64"),
- })
+ }),
);
expect(res.status).toBe(200);
@@ -2078,7 +2213,7 @@ describe("Agent file and export routes", () => {
"workspace",
"project/index.js",
Buffer.from("hello").toString("base64"),
- 0o644
+ 0o644,
);
});
@@ -2095,7 +2230,7 @@ describe("Agent file and export routes", () => {
request(app)
.get("/agents/agent-files-1/files/download?root=workspace&path=notes.txt")
.buffer(true)
- .parse(binaryParser)
+ .parse(binaryParser),
);
expect(res.status).toBe(200);
@@ -2111,20 +2246,17 @@ describe("Agent file and export routes", () => {
mockPackMigrationBundle.mockResolvedValueOnce(Buffer.from("bundle-data"));
const res = await auth(
- request(app)
- .get("/agents/agent-files-1/export")
- .buffer(true)
- .parse(binaryParser)
+ request(app).get("/agents/agent-files-1/export").buffer(true).parse(binaryParser),
);
expect(res.status).toBe(200);
expect(res.headers["content-type"]).toMatch(/application\/gzip/);
expect(res.headers["content-disposition"]).toContain(
- 'filename="files-agent.nora-migration.tgz"'
+ 'filename="files-agent.nora-migration.tgz"',
);
expect(mockBuildMigrationManifestFromAgent).toHaveBeenCalledWith(
expect.objectContaining({ id: "agent-files-1" }),
- { userId: "user-1" }
+ { userId: "user-1" },
);
expect(mockPackMigrationBundle).toHaveBeenCalledWith(manifest);
expect(Buffer.from(res.body)).toEqual(Buffer.from("bundle-data"));
@@ -2141,16 +2273,14 @@ describe("PATCH /agents/:id", () => {
rows: [{ id: "a-rename", name: "New Name", user_id: "user-1" }],
});
- const res = await auth(
- request(app).patch("/agents/a-rename").send({ name: "New Name" })
- );
+ const res = await auth(request(app).patch("/agents/a-rename").send({ name: "New Name" }));
expect(res.status).toBe(200);
expect(res.body).toHaveProperty("name", "New Name");
expect(mockDb.query).toHaveBeenNthCalledWith(
2,
"UPDATE agents SET name = $1 WHERE id = $2 RETURNING *",
- ["New Name", "a-rename"]
+ ["New Name", "a-rename"],
);
});
});
@@ -2159,42 +2289,50 @@ describe("POST /agents/:id/duplicate", () => {
it("duplicates an agent using stored payload fallback and full clone wiring", async () => {
mockDb.query
.mockResolvedValueOnce({
- rows: [{
- id: "a-source",
- name: "Source Agent",
- user_id: "user-1",
- status: "stopped",
- sandbox_type: "standard",
- vcpu: 4,
- ram_mb: 4096,
- disk_gb: 50,
- image: "custom/image:latest",
- template_payload: JSON.stringify({
- files: [{ path: "AGENT.md", content: "hello" }],
- memoryFiles: [{ path: "workspace/note.txt", content: "memory" }],
- metadata: { source: "template" },
- }),
- }],
+ rows: [
+ {
+ id: "a-source",
+ name: "Source Agent",
+ user_id: "user-1",
+ status: "stopped",
+ sandbox_type: "standard",
+ vcpu: 4,
+ ram_mb: 4096,
+ disk_gb: 50,
+ image: "custom/image:latest",
+ template_payload: JSON.stringify({
+ files: [{ path: "AGENT.md", content: "hello" }],
+ memoryFiles: [{ path: "workspace/note.txt", content: "memory" }],
+ metadata: { source: "template" },
+ }),
+ },
+ ],
})
.mockResolvedValueOnce({
- rows: [{
- provider: "slack",
- catalog_id: "slack",
- access_token: "secret",
- config: { token: "secret" },
- status: "active",
- }],
+ rows: [
+ {
+ provider: "slack",
+ catalog_id: "slack",
+ access_token: "secret",
+ config: { token: "secret" },
+ status: "active",
+ },
+ ],
})
.mockResolvedValueOnce({
- rows: [{
- type: "email",
- name: "Ops Email",
- config: { smtp_pass: "secret" },
- enabled: true,
- }],
+ rows: [
+ {
+ type: "email",
+ name: "Ops Email",
+ config: { smtp_pass: "secret" },
+ enabled: true,
+ },
+ ],
})
.mockResolvedValueOnce({
- rows: [{ id: "a-duplicate", name: "Source Agent Copy", status: "queued", user_id: "user-1" }],
+ rows: [
+ { id: "a-duplicate", name: "Source Agent Copy", status: "queued", user_id: "user-1" },
+ ],
})
.mockResolvedValueOnce({ rows: [] })
.mockResolvedValueOnce({ rows: [] })
@@ -2204,7 +2342,7 @@ describe("POST /agents/:id/duplicate", () => {
request(app).post("/agents/a-source/duplicate").send({
name: "Source Agent Copy",
clone_mode: "full_clone",
- })
+ }),
);
expect(res.status).toBe(200);
@@ -2221,7 +2359,7 @@ describe("POST /agents/:id/duplicate", () => {
"USER.md",
"HEARTBEAT.md",
"MEMORY.md",
- ])
+ ]),
);
expect(templatePayload.memoryFiles).toEqual([
expect.objectContaining({ path: "workspace/note.txt" }),
@@ -2232,14 +2370,16 @@ describe("POST /agents/:id/duplicate", () => {
expect(templatePayload.wiring.channels).toEqual([
expect.objectContaining({ type: "email", enabled: false }),
]);
- expect(mockAddDeploymentJob).toHaveBeenCalledWith(expect.objectContaining({
- id: "a-duplicate",
- name: "Source Agent Copy",
- backend: "docker",
- image: "custom/image:latest",
- sandbox: "standard",
- specs: { vcpu: 4, ram_mb: 4096, disk_gb: 50 },
- }));
+ expect(mockAddDeploymentJob).toHaveBeenCalledWith(
+ expect.objectContaining({
+ id: "a-duplicate",
+ name: "Source Agent Copy",
+ backend: "docker",
+ image: "custom/image:latest",
+ sandbox: "standard",
+ specs: { vcpu: 4, ram_mb: 4096, disk_gb: 50 },
+ }),
+ );
});
it("recomputes the default image when duplicating onto a different execution target", async () => {
@@ -2248,39 +2388,43 @@ describe("POST /agents/:id/duplicate", () => {
mockDb.query
.mockResolvedValueOnce({
- rows: [{
- id: "a-source-k8s",
- name: "Source Agent",
- user_id: "user-1",
- status: "stopped",
- runtime_family: "openclaw",
- deploy_target: "docker",
- sandbox_profile: "standard",
- vcpu: 2,
- ram_mb: 2048,
- disk_gb: 20,
- image: "nora-openclaw-agent:local",
- template_payload: JSON.stringify({
- files: [{ path: "AGENT.md", content: "hello" }],
- memoryFiles: [],
- metadata: { source: "template" },
- }),
- }],
+ rows: [
+ {
+ id: "a-source-k8s",
+ name: "Source Agent",
+ user_id: "user-1",
+ status: "stopped",
+ runtime_family: "openclaw",
+ deploy_target: "docker",
+ sandbox_profile: "standard",
+ vcpu: 2,
+ ram_mb: 2048,
+ disk_gb: 20,
+ image: "nora-openclaw-agent:local",
+ template_payload: JSON.stringify({
+ files: [{ path: "AGENT.md", content: "hello" }],
+ memoryFiles: [],
+ metadata: { source: "template" },
+ }),
+ },
+ ],
})
.mockResolvedValueOnce({ rows: [] })
.mockResolvedValueOnce({ rows: [] })
.mockResolvedValueOnce({
- rows: [{
- id: "a-duplicate-k8s",
- name: "Source Agent K8s",
- status: "queued",
- user_id: "user-1",
- backend_type: "k8s",
- sandbox_type: "standard",
- runtime_family: "openclaw",
- deploy_target: "k8s",
- sandbox_profile: "standard",
- }],
+ rows: [
+ {
+ id: "a-duplicate-k8s",
+ name: "Source Agent K8s",
+ status: "queued",
+ user_id: "user-1",
+ backend_type: "k8s",
+ sandbox_type: "standard",
+ runtime_family: "openclaw",
+ deploy_target: "k8s",
+ sandbox_profile: "standard",
+ },
+ ],
})
.mockResolvedValueOnce({ rows: [] })
.mockResolvedValueOnce({ rows: [] })
@@ -2291,7 +2435,7 @@ describe("POST /agents/:id/duplicate", () => {
name: "Source Agent K8s",
clone_mode: "full_clone",
deploy_target: "k8s",
- })
+ }),
);
expect(res.status).toBe(200);
@@ -2303,7 +2447,7 @@ describe("POST /agents/:id/duplicate", () => {
backend: "k8s",
sandbox: "standard",
image: "node:24-slim",
- })
+ }),
);
});
@@ -2312,36 +2456,40 @@ describe("POST /agents/:id/duplicate", () => {
mockDb.query
.mockResolvedValueOnce({
- rows: [{
- id: "a-source-hermes",
- name: "Desk Bot",
- user_id: "user-1",
- status: "stopped",
- runtime_family: "openclaw",
- deploy_target: "docker",
- sandbox_profile: "standard",
- vcpu: 2,
- ram_mb: 2048,
- disk_gb: 20,
- image: "nora-openclaw-agent:local",
- template_payload: JSON.stringify({
- files: [{ path: "AGENT.md", content: "hello" }],
- memoryFiles: [],
- metadata: { source: "template" },
- }),
- }],
+ rows: [
+ {
+ id: "a-source-hermes",
+ name: "Desk Bot",
+ user_id: "user-1",
+ status: "stopped",
+ runtime_family: "openclaw",
+ deploy_target: "docker",
+ sandbox_profile: "standard",
+ vcpu: 2,
+ ram_mb: 2048,
+ disk_gb: 20,
+ image: "nora-openclaw-agent:local",
+ template_payload: JSON.stringify({
+ files: [{ path: "AGENT.md", content: "hello" }],
+ memoryFiles: [],
+ metadata: { source: "template" },
+ }),
+ },
+ ],
})
.mockResolvedValueOnce({
- rows: [{
- id: "a-duplicate-hermes",
- name: "Desk Bot Hermes",
- status: "queued",
- user_id: "user-1",
- runtime_family: "hermes",
- backend_type: "hermes",
- deploy_target: "docker",
- sandbox_profile: "standard",
- }],
+ rows: [
+ {
+ id: "a-duplicate-hermes",
+ name: "Desk Bot Hermes",
+ status: "queued",
+ user_id: "user-1",
+ runtime_family: "hermes",
+ backend_type: "hermes",
+ deploy_target: "docker",
+ sandbox_profile: "standard",
+ },
+ ],
})
.mockResolvedValueOnce({ rows: [] });
@@ -2350,7 +2498,7 @@ describe("POST /agents/:id/duplicate", () => {
name: "Desk Bot Hermes",
runtime_family: "hermes",
clone_mode: "files_only",
- })
+ }),
);
expect(res.status).toBe(200);
@@ -2361,7 +2509,7 @@ describe("POST /agents/:id/duplicate", () => {
id: "a-duplicate-hermes",
backend: "hermes",
container_name: expect.stringMatching(/^hermes-agent-desk-bot-hermes-/),
- })
+ }),
);
});
});
@@ -2401,14 +2549,16 @@ describe("POST /marketplace/install", () => {
});
mockDb.query
.mockResolvedValueOnce({
- rows: [{
- id: "a-market",
- name: "COS Agent",
- status: "queued",
- user_id: "user-1",
- backend_type: "docker",
- sandbox_type: "standard",
- }],
+ rows: [
+ {
+ id: "a-market",
+ name: "COS Agent",
+ status: "queued",
+ user_id: "user-1",
+ backend_type: "docker",
+ sandbox_type: "standard",
+ },
+ ],
})
.mockResolvedValueOnce({ rows: [] })
.mockResolvedValueOnce({ rows: [] });
@@ -2417,7 +2567,7 @@ describe("POST /marketplace/install", () => {
request(app).post("/marketplace/install").send({
listingId: "listing-1",
name: "COS Agent",
- })
+ }),
);
expect(res.status).toBe(200);
@@ -2426,17 +2576,13 @@ describe("POST /marketplace/install", () => {
runtime_family: "openclaw",
deploy_target: "docker",
sandbox_profile: "standard",
- })
- );
- expect(mockDb.query.mock.calls[0][0]).toEqual(
- expect.stringContaining("runtime_family")
+ }),
);
+ expect(mockDb.query.mock.calls[0][0]).toEqual(expect.stringContaining("runtime_family"));
const insertParams = mockDb.query.mock.calls[0][1];
expect(insertParams[1]).toBe("COS Agent");
expect(insertParams[9]).toBe("nora-openclaw-agent:local");
- expect(
- JSON.parse(insertParams[10]).files.map((file) => file.path)
- ).toEqual(
+ expect(JSON.parse(insertParams[10]).files.map((file) => file.path)).toEqual(
expect.arrayContaining([
"AGENT.md",
"AGENTS.md",
@@ -2447,15 +2593,17 @@ describe("POST /marketplace/install", () => {
"HEARTBEAT.md",
"MEMORY.md",
"BOOTSTRAP.md",
- ])
+ ]),
+ );
+ expect(mockAddDeploymentJob).toHaveBeenCalledWith(
+ expect.objectContaining({
+ id: "a-market",
+ name: "COS Agent",
+ backend: "docker",
+ image: "nora-openclaw-agent:local",
+ sandbox: "standard",
+ }),
);
- expect(mockAddDeploymentJob).toHaveBeenCalledWith(expect.objectContaining({
- id: "a-market",
- name: "COS Agent",
- backend: "docker",
- image: "nora-openclaw-agent:local",
- sandbox: "standard",
- }));
});
it("rejects NemoClaw sandbox installs on non-Docker execution targets", async () => {
@@ -2486,7 +2634,7 @@ describe("POST /marketplace/install", () => {
name: "COS Agent",
deploy_target: "k8s",
sandbox_profile: "nemoclaw",
- })
+ }),
);
expect(res.status).toBe(400);
@@ -2526,17 +2674,19 @@ describe("POST /marketplace/install", () => {
});
mockDb.query
.mockResolvedValueOnce({
- rows: [{
- id: "a-market-k8s",
- name: "COS Agent K8s",
- status: "queued",
- user_id: "user-1",
- backend_type: "k8s",
- sandbox_type: "standard",
- runtime_family: "openclaw",
- deploy_target: "k8s",
- sandbox_profile: "standard",
- }],
+ rows: [
+ {
+ id: "a-market-k8s",
+ name: "COS Agent K8s",
+ status: "queued",
+ user_id: "user-1",
+ backend_type: "k8s",
+ sandbox_type: "standard",
+ runtime_family: "openclaw",
+ deploy_target: "k8s",
+ sandbox_profile: "standard",
+ },
+ ],
})
.mockResolvedValueOnce({ rows: [] })
.mockResolvedValueOnce({ rows: [] });
@@ -2546,7 +2696,7 @@ describe("POST /marketplace/install", () => {
listingId: "listing-1",
name: "COS Agent K8s",
deploy_target: "k8s",
- })
+ }),
);
expect(res.status).toBe(200);
@@ -2558,7 +2708,7 @@ describe("POST /marketplace/install", () => {
backend: "k8s",
sandbox: "standard",
image: "node:24-slim",
- })
+ }),
);
});
@@ -2589,7 +2739,7 @@ describe("POST /marketplace/install", () => {
listingId: "listing-1",
name: "COS Agent",
runtime_family: "future-runtime",
- })
+ }),
);
expect(res.status).toBe(400);
@@ -2602,9 +2752,7 @@ describe("POST /marketplace/install", () => {
describe("marketplace browse, publish, download, and report", () => {
it("lists published marketplace entries for authenticated users", async () => {
const marketplaceModule = require("../marketplace");
- marketplaceModule.listMarketplace.mockResolvedValueOnce([
- { id: "listing-1", name: "Preset" },
- ]);
+ marketplaceModule.listMarketplace.mockResolvedValueOnce([{ id: "listing-1", name: "Preset" }]);
const res = await auth(request(app).get("/marketplace"));
@@ -2623,7 +2771,7 @@ describe("marketplace browse, publish, download, and report", () => {
expect(res.status).toBe(200);
expect(marketplaceModule.listUserListings).toHaveBeenCalledWith("user-1");
expect(res.body[0]).toEqual(
- expect.objectContaining({ id: "listing-1", status: "pending_review" })
+ expect.objectContaining({ id: "listing-1", status: "pending_review" }),
);
});
@@ -2679,7 +2827,7 @@ describe("marketplace browse, publish, download, and report", () => {
expect.objectContaining({ path: "SOUL.md", content: expect.any(String) }),
]),
}),
- })
+ }),
);
});
@@ -2719,35 +2867,31 @@ describe("marketplace browse, publish, download, and report", () => {
},
};
- marketplaceModule.getListing
- .mockResolvedValueOnce(listing)
- .mockResolvedValueOnce({
- ...listing,
- name: "Updated Preset",
- status: "pending_review",
- category: "Support",
- current_version: 3,
- });
- snapshotsModule.getSnapshot
- .mockResolvedValueOnce(snapshot)
- .mockResolvedValueOnce({
- ...snapshot,
- name: "Updated Preset",
- description: "Updated description",
- config: {
- defaults: {
- sandbox: "nemoclaw",
- vcpu: 4,
- ram_mb: 4096,
- disk_gb: 40,
- },
- templatePayload: {
- files: [{ path: "AGENTS.md", content: "updated" }],
- memoryFiles: [],
- wiring: { channels: [], integrations: [] },
- },
+ marketplaceModule.getListing.mockResolvedValueOnce(listing).mockResolvedValueOnce({
+ ...listing,
+ name: "Updated Preset",
+ status: "pending_review",
+ category: "Support",
+ current_version: 3,
+ });
+ snapshotsModule.getSnapshot.mockResolvedValueOnce(snapshot).mockResolvedValueOnce({
+ ...snapshot,
+ name: "Updated Preset",
+ description: "Updated description",
+ config: {
+ defaults: {
+ sandbox: "nemoclaw",
+ vcpu: 4,
+ ram_mb: 4096,
+ disk_gb: 40,
},
- });
+ templatePayload: {
+ files: [{ path: "AGENTS.md", content: "updated" }],
+ memoryFiles: [],
+ wiring: { channels: [], integrations: [] },
+ },
+ },
+ });
snapshotsModule.updateSnapshot.mockResolvedValueOnce({
...snapshot,
name: "Updated Preset",
@@ -2759,23 +2903,25 @@ describe("marketplace browse, publish, download, and report", () => {
});
const res = await auth(
- request(app).patch("/marketplace/listing-1").send({
- name: "Updated Preset",
- description: "Updated description",
- category: "Support",
- slug: "updated-preset",
- currentVersion: 3,
- sandbox: "nemoclaw",
- vcpu: 4,
- ram_mb: 4096,
- disk_gb: 40,
- files: [
- {
- path: "AGENTS.md",
- content: "# Updated\n",
- },
- ],
- })
+ request(app)
+ .patch("/marketplace/listing-1")
+ .send({
+ name: "Updated Preset",
+ description: "Updated description",
+ category: "Support",
+ slug: "updated-preset",
+ currentVersion: 3,
+ sandbox: "nemoclaw",
+ vcpu: 4,
+ ram_mb: 4096,
+ disk_gb: 40,
+ files: [
+ {
+ path: "AGENTS.md",
+ content: "# Updated\n",
+ },
+ ],
+ }),
);
expect(res.status).toBe(200);
@@ -2798,7 +2944,7 @@ describe("marketplace browse, publish, download, and report", () => {
]),
}),
}),
- })
+ }),
);
expect(marketplaceModule.upsertListing).toHaveBeenCalledWith(
expect.objectContaining({
@@ -2806,7 +2952,7 @@ describe("marketplace browse, publish, download, and report", () => {
status: "pending_review",
currentVersion: 3,
category: "Support",
- })
+ }),
);
expect(res.body).toEqual(
expect.objectContaining({
@@ -2814,7 +2960,7 @@ describe("marketplace browse, publish, download, and report", () => {
status: "pending_review",
category: "Support",
current_version: 3,
- })
+ }),
);
});
@@ -2864,7 +3010,7 @@ describe("marketplace browse, publish, download, and report", () => {
description: "Shared operations template",
category: "Operations",
price: "$99/mo",
- })
+ }),
);
expect(res.status).toBe(200);
@@ -2894,7 +3040,7 @@ describe("marketplace browse, publish, download, and report", () => {
wiring: { channels: [], integrations: [] },
}),
}),
- expect.objectContaining({ kind: "community-template", builtIn: false })
+ expect.objectContaining({ kind: "community-template", builtIn: false }),
);
expect(marketplaceModule.upsertListing).toHaveBeenCalledWith(
expect.objectContaining({
@@ -2903,13 +3049,13 @@ describe("marketplace browse, publish, download, and report", () => {
sourceType: "community",
status: "pending_review",
visibility: "public",
- })
+ }),
);
expect(res.body).toEqual(
expect.objectContaining({
id: "listing-community-1",
status: "pending_review",
- })
+ }),
);
});
@@ -2942,7 +3088,7 @@ describe("marketplace browse, publish, download, and report", () => {
name: "Sensitive Template",
description: "Should fail",
category: "Operations",
- })
+ }),
);
expect(res.status).toBe(400);
@@ -2990,9 +3136,7 @@ describe("marketplace browse, publish, download, and report", () => {
const res = await auth(request(app).get("/marketplace/listing-1/download"));
expect(res.status).toBe(200);
- expect(res.headers["content-disposition"]).toContain(
- "chief-of-staff-claw.nora-template.json"
- );
+ expect(res.headers["content-disposition"]).toContain("chief-of-staff-claw.nora-template.json");
expect(marketplaceModule.recordDownload).toHaveBeenCalledWith("listing-1");
expect(res.body).toEqual(
expect.objectContaining({
@@ -3009,7 +3153,7 @@ describe("marketplace browse, publish, download, and report", () => {
expect.objectContaining({ path: "BOOTSTRAP.md" }),
]),
}),
- })
+ }),
);
});
@@ -3033,7 +3177,7 @@ describe("marketplace browse, publish, download, and report", () => {
request(app).post("/marketplace/listing-1/report").send({
reason: "spam",
details: "Low-quality content",
- })
+ }),
);
expect(res.status).toBe(200);
@@ -3043,7 +3187,7 @@ describe("marketplace browse, publish, download, and report", () => {
reporterUserId: "user-1",
reason: "spam",
details: "Low-quality content",
- })
+ }),
);
expect(monitoringModule.logEvent).toHaveBeenCalledWith(
"marketplace_reported",
@@ -3058,7 +3202,7 @@ describe("marketplace browse, publish, download, and report", () => {
reporterUserId: "user-1",
reporterEmail: "user@nora.test",
}),
- })
+ }),
);
});
});
@@ -3080,16 +3224,18 @@ describe("POST /agents/:id/redeploy", () => {
it("allows redeploy when an agent is in warning state", async () => {
mockDb.query
.mockResolvedValueOnce({
- rows: [{
- id: "a-warning",
- name: "Warning Agent",
- status: "warning",
- sandbox_type: "standard",
- vcpu: 2,
- ram_mb: 2048,
- disk_gb: 20,
- container_name: "oclaw-agent-warning",
- }],
+ rows: [
+ {
+ id: "a-warning",
+ name: "Warning Agent",
+ status: "warning",
+ sandbox_type: "standard",
+ vcpu: 2,
+ ram_mb: 2048,
+ disk_gb: 20,
+ container_name: "oclaw-agent-warning",
+ },
+ ],
})
.mockResolvedValueOnce({ rows: [] })
.mockResolvedValueOnce({ rows: [] });
@@ -3110,17 +3256,19 @@ describe("POST /agents/:id/redeploy", () => {
"standard",
"oclaw-agent-warning",
"nora-openclaw-agent:local",
- ]
+ ],
+ );
+ expect(mockAddDeploymentJob).toHaveBeenCalledWith(
+ expect.objectContaining({
+ id: "a-warning",
+ name: "Warning Agent",
+ userId: "user-1",
+ backend: "docker",
+ sandbox: "standard",
+ specs: { vcpu: 2, ram_mb: 2048, disk_gb: 20 },
+ container_name: "oclaw-agent-warning",
+ }),
);
- expect(mockAddDeploymentJob).toHaveBeenCalledWith(expect.objectContaining({
- id: "a-warning",
- name: "Warning Agent",
- userId: "user-1",
- backend: "docker",
- sandbox: "standard",
- specs: { vcpu: 2, ram_mb: 2048, disk_gb: 20 },
- container_name: "oclaw-agent-warning",
- }));
});
it("accepts deploy-target overrides during redeploy and resets the sandbox when needed", async () => {
@@ -3129,19 +3277,21 @@ describe("POST /agents/:id/redeploy", () => {
mockDb.query
.mockResolvedValueOnce({
- rows: [{
- id: "a-nemo-redeploy",
- name: "Nemo Agent",
- status: "stopped",
- runtime_family: "openclaw",
- deploy_target: "docker",
- sandbox_profile: "nemoclaw",
- vcpu: 2,
- ram_mb: 2048,
- disk_gb: 20,
- container_name: "oclaw-agent-nemo",
- image: null,
- }],
+ rows: [
+ {
+ id: "a-nemo-redeploy",
+ name: "Nemo Agent",
+ status: "stopped",
+ runtime_family: "openclaw",
+ deploy_target: "docker",
+ sandbox_profile: "nemoclaw",
+ vcpu: 2,
+ ram_mb: 2048,
+ disk_gb: 20,
+ container_name: "oclaw-agent-nemo",
+ image: null,
+ },
+ ],
})
.mockResolvedValueOnce({ rows: [] })
.mockResolvedValueOnce({ rows: [] });
@@ -3149,32 +3299,28 @@ describe("POST /agents/:id/redeploy", () => {
const res = await auth(
request(app).post("/agents/a-nemo-redeploy/redeploy").send({
deploy_target: "k8s",
- })
+ }),
);
expect(res.status).toBe(200);
expect(res.body).toEqual({ success: true, status: "queued" });
- expect(mockDb.query).toHaveBeenNthCalledWith(
- 2,
- expect.stringContaining("deploy_target = $5"),
- [
- "a-nemo-redeploy",
- "k8s",
- "standard",
- "openclaw",
- "k8s",
- "standard",
- "oclaw-agent-nemo",
- "node:24-slim",
- ]
- );
+ expect(mockDb.query).toHaveBeenNthCalledWith(2, expect.stringContaining("deploy_target = $5"), [
+ "a-nemo-redeploy",
+ "k8s",
+ "standard",
+ "openclaw",
+ "k8s",
+ "standard",
+ "oclaw-agent-nemo",
+ "node:24-slim",
+ ]);
expect(mockAddDeploymentJob).toHaveBeenCalledWith(
expect.objectContaining({
id: "a-nemo-redeploy",
backend: "k8s",
sandbox: "standard",
image: "node:24-slim",
- })
+ }),
);
});
@@ -3184,19 +3330,21 @@ describe("POST /agents/:id/redeploy", () => {
mockDb.query
.mockResolvedValueOnce({
- rows: [{
- id: "a-docker-redeploy",
- name: "Docker Agent",
- status: "stopped",
- runtime_family: "openclaw",
- deploy_target: "docker",
- sandbox_profile: "standard",
- vcpu: 2,
- ram_mb: 2048,
- disk_gb: 20,
- container_name: "oclaw-agent-docker",
- image: "nora-openclaw-agent:local",
- }],
+ rows: [
+ {
+ id: "a-docker-redeploy",
+ name: "Docker Agent",
+ status: "stopped",
+ runtime_family: "openclaw",
+ deploy_target: "docker",
+ sandbox_profile: "standard",
+ vcpu: 2,
+ ram_mb: 2048,
+ disk_gb: 20,
+ container_name: "oclaw-agent-docker",
+ image: "nora-openclaw-agent:local",
+ },
+ ],
})
.mockResolvedValueOnce({ rows: [] })
.mockResolvedValueOnce({ rows: [] });
@@ -3204,31 +3352,27 @@ describe("POST /agents/:id/redeploy", () => {
const res = await auth(
request(app).post("/agents/a-docker-redeploy/redeploy").send({
deploy_target: "k8s",
- })
+ }),
);
expect(res.status).toBe(200);
- expect(mockDb.query).toHaveBeenNthCalledWith(
- 2,
- expect.stringContaining("image = $8"),
- [
- "a-docker-redeploy",
- "k8s",
- "standard",
- "openclaw",
- "k8s",
- "standard",
- "oclaw-agent-docker",
- "node:24-slim",
- ]
- );
+ expect(mockDb.query).toHaveBeenNthCalledWith(2, expect.stringContaining("image = $8"), [
+ "a-docker-redeploy",
+ "k8s",
+ "standard",
+ "openclaw",
+ "k8s",
+ "standard",
+ "oclaw-agent-docker",
+ "node:24-slim",
+ ]);
expect(mockAddDeploymentJob).toHaveBeenCalledWith(
expect.objectContaining({
id: "a-docker-redeploy",
backend: "k8s",
sandbox: "standard",
image: "node:24-slim",
- })
+ }),
);
});
@@ -3237,19 +3381,21 @@ describe("POST /agents/:id/redeploy", () => {
mockDb.query
.mockResolvedValueOnce({
- rows: [{
- id: "a-hermes-redeploy",
- name: "Desk Bot",
- status: "stopped",
- runtime_family: "openclaw",
- deploy_target: "docker",
- sandbox_profile: "standard",
- vcpu: 2,
- ram_mb: 2048,
- disk_gb: 20,
- container_name: "oclaw-agent-desk-bot-old123",
- image: "nora-openclaw-agent:local",
- }],
+ rows: [
+ {
+ id: "a-hermes-redeploy",
+ name: "Desk Bot",
+ status: "stopped",
+ runtime_family: "openclaw",
+ deploy_target: "docker",
+ sandbox_profile: "standard",
+ vcpu: 2,
+ ram_mb: 2048,
+ disk_gb: 20,
+ container_name: "oclaw-agent-desk-bot-old123",
+ image: "nora-openclaw-agent:local",
+ },
+ ],
})
.mockResolvedValueOnce({ rows: [] })
.mockResolvedValueOnce({ rows: [] });
@@ -3257,7 +3403,7 @@ describe("POST /agents/:id/redeploy", () => {
const res = await auth(
request(app).post("/agents/a-hermes-redeploy/redeploy").send({
runtime_family: "hermes",
- })
+ }),
);
expect(res.status).toBe(200);
@@ -3273,7 +3419,7 @@ describe("POST /agents/:id/redeploy", () => {
"standard",
expect.stringMatching(/^hermes-agent-desk-bot-/),
"nousresearch/hermes-agent:latest",
- ]
+ ],
);
expect(mockAddDeploymentJob).toHaveBeenCalledWith(
expect.objectContaining({
@@ -3281,7 +3427,7 @@ describe("POST /agents/:id/redeploy", () => {
backend: "hermes",
container_name: expect.stringMatching(/^hermes-agent-desk-bot-/),
image: "nousresearch/hermes-agent:latest",
- })
+ }),
);
});
diff --git a/backend-api/__tests__/clawhub.test.ts b/backend-api/__tests__/clawhub.test.ts
new file mode 100644
index 0000000..48424e5
--- /dev/null
+++ b/backend-api/__tests__/clawhub.test.ts
@@ -0,0 +1,560 @@
+// @ts-nocheck
+jest.mock("../db", () => ({
+ query: jest.fn(),
+}));
+
+jest.mock("../authSync", () => ({
+ runContainerCommand: jest.fn(),
+}));
+
+jest.mock("../redisQueue", () => ({
+ addClawhubInstallJob: jest.fn(),
+ findInFlightClawhubInstallJob: jest.fn(),
+ getClawhubInstallJobStatus: jest.fn(),
+}));
+
+const { normalizeSkillDetailPayload, parseSkillMarkdown } = require("../clawhubClient");
+const db = require("../db");
+const { runContainerCommand } = require("../authSync");
+const {
+ addClawhubInstallJob,
+ findInFlightClawhubInstallJob,
+ getClawhubInstallJobStatus,
+} = require("../redisQueue");
+const router = require("../routes/clawhub");
+
+function mockJsonResponse(status, payload) {
+ return {
+ ok: status >= 200 && status < 300,
+ status,
+ text: jest.fn().mockResolvedValue(JSON.stringify(payload)),
+ };
+}
+
+function mockTextResponse(status, text) {
+ return {
+ ok: status >= 200 && status < 300,
+ status,
+ text: jest.fn().mockResolvedValue(text),
+ };
+}
+
+describe("clawhub client markdown parsing", () => {
+ it("parses requirements from SKILL.md frontmatter", () => {
+ const parsed = parseSkillMarkdown(`---
+metadata:
+ openclaw:
+ requires:
+ bins:
+ - gh
+ env:
+ - GITHUB_TOKEN
+ config: []
+ install:
+ - kind: node
+ package: "@github/gh-cli"
+---
+# GitHub Skill
+
+Ship pull requests fast.
+`);
+
+ expect(parsed).toEqual({
+ readme: "# GitHub Skill\n\nShip pull requests fast.",
+ requirements: {
+ bins: ["gh"],
+ env: ["GITHUB_TOKEN"],
+ config: [],
+ install: [{ kind: "node", package: "@github/gh-cli" }],
+ },
+ });
+ });
+
+ it("returns null requirements when no openclaw metadata exists", () => {
+ const detail = normalizeSkillDetailPayload(
+ {
+ slug: "plain-skill",
+ name: "Plain Skill",
+ },
+ "# Plain Skill\n\nNo frontmatter here.",
+ );
+
+ expect(detail).toMatchObject({
+ slug: "plain-skill",
+ readme: "# Plain Skill\n\nNo frontmatter here.",
+ requirements: null,
+ });
+ });
+});
+
+describe("clawhub routes", () => {
+ let fetchMock;
+
+ beforeEach(() => {
+ fetchMock = jest.fn();
+ global.fetch = fetchMock;
+ jest.clearAllMocks();
+ });
+
+ afterEach(() => {
+ delete global.fetch;
+ });
+
+ function getRouteHandler(path, method = "get") {
+ const layer = router.stack.find(
+ (entry) => entry.route?.path === path && entry.route.methods?.[method],
+ );
+ if (!layer) {
+ throw new Error(`Route not found: ${method.toUpperCase()} ${path}`);
+ }
+ return layer.route.stack[0].handle;
+ }
+
+ function createMockRes() {
+ return {
+ statusCode: 200,
+ body: undefined,
+ status(code) {
+ this.statusCode = code;
+ return this;
+ },
+ json(payload) {
+ this.body = payload;
+ return this;
+ },
+ };
+ }
+
+ it("returns normalized browse results and caps limit at 50", async () => {
+ const handler = getRouteHandler("/skills");
+ fetchMock
+ .mockResolvedValueOnce(
+ mockJsonResponse(200, { registryBaseUrl: "https://registry.clawhub.ai" }),
+ )
+ .mockResolvedValueOnce(
+ mockJsonResponse(200, {
+ skills: [
+ {
+ slug: "github",
+ name: "GitHub",
+ description: "Manage issues.",
+ downloads: 94200,
+ stars: 1200,
+ updated_at: "2026-04-01T12:00:00Z",
+ },
+ ],
+ next_cursor: "next-page",
+ }),
+ );
+
+ const req = { query: { limit: "70", cursor: "abc" } };
+ const res = createMockRes();
+ await handler(req, res);
+
+ expect(res.statusCode).toBe(200);
+ expect(res.body).toEqual({
+ skills: [
+ {
+ slug: "github",
+ name: "GitHub",
+ description: "Manage issues.",
+ downloads: 94200,
+ stars: 1200,
+ updatedAt: "2026-04-01T12:00:00.000Z",
+ },
+ ],
+ cursor: "next-page",
+ });
+ expect(fetchMock).toHaveBeenNthCalledWith(
+ 2,
+ "https://registry.clawhub.ai/api/v1/skills?limit=50&cursor=abc",
+ expect.any(Object),
+ );
+ });
+
+ it("returns missing_query when search input is empty", async () => {
+ const handler = getRouteHandler("/skills/search");
+ const req = { query: { q: "" } };
+ const res = createMockRes();
+ await handler(req, res);
+
+ expect(res.statusCode).toBe(400);
+ expect(res.body).toEqual({
+ error: "missing_query",
+ message: "q is required.",
+ });
+ expect(fetchMock).not.toHaveBeenCalled();
+ });
+
+ it("returns normalized detail with parsed requirements from SKILL.md", async () => {
+ const handler = getRouteHandler("/skills/:slug");
+ fetchMock
+ .mockResolvedValueOnce(
+ mockJsonResponse(200, { registryBaseUrl: "https://registry.clawhub.ai" }),
+ )
+ .mockResolvedValueOnce(
+ mockJsonResponse(200, {
+ skill: {
+ slug: "github",
+ name: "GitHub",
+ description: "Manage issues.",
+ downloads: 94200,
+ stars: 1200,
+ updatedAt: "2026-04-01T12:00:00Z",
+ },
+ owner: {
+ handle: "steipete",
+ },
+ }),
+ )
+ .mockResolvedValueOnce(
+ mockJsonResponse(200, { registryBaseUrl: "https://registry.clawhub.ai" }),
+ )
+ .mockResolvedValueOnce(
+ mockTextResponse(
+ 200,
+ `---
+metadata:
+ openclaw:
+ requires:
+ bins:
+ - gh
+ env:
+ - GITHUB_TOKEN
+ install:
+ - kind: node
+ package: "@github/gh-cli"
+---
+# GitHub Skill
+
+Install and manage repos.
+`,
+ ),
+ );
+
+ const req = { params: { slug: "github" } };
+ const res = createMockRes();
+ await handler(req, res);
+
+ expect(res.statusCode).toBe(200);
+ expect(res.body).toEqual({
+ slug: "github",
+ name: "GitHub",
+ description: "Manage issues.",
+ downloads: 94200,
+ stars: 1200,
+ updatedAt: "2026-04-01T12:00:00.000Z",
+ author: "steipete",
+ pagePath: "steipete/github",
+ readme: "# GitHub Skill\n\nInstall and manage repos.",
+ requirements: {
+ bins: ["gh"],
+ env: ["GITHUB_TOKEN"],
+ config: [],
+ install: [{ kind: "node", package: "@github/gh-cli" }],
+ },
+ });
+ });
+
+ it("returns skill_not_found when the skill metadata is missing", async () => {
+ const handler = getRouteHandler("/skills/:slug");
+ fetchMock
+ .mockResolvedValueOnce(
+ mockJsonResponse(200, { registryBaseUrl: "https://registry.clawhub.ai" }),
+ )
+ .mockResolvedValueOnce(mockJsonResponse(404, { error: "not_found" }));
+
+ const req = { params: { slug: "unknown-skill" } };
+ const res = createMockRes();
+ await handler(req, res);
+
+ expect(res.statusCode).toBe(404);
+ expect(res.body).toEqual({
+ error: "skill_not_found",
+ message: "No skill found with slug: unknown-skill",
+ });
+ });
+
+ it("returns clawhub_unavailable when ClawHub cannot be reached", async () => {
+ const handler = getRouteHandler("/skills");
+ fetchMock.mockRejectedValue(new Error("network down"));
+
+ const req = { query: {} };
+ const res = createMockRes();
+ await handler(req, res);
+
+ expect(res.statusCode).toBe(502);
+ expect(res.body).toEqual({
+ error: "clawhub_unavailable",
+ message: "Could not reach ClawHub registry.",
+ });
+ });
+
+ it("returns installed skills normalized from the agent lockfile", async () => {
+ const handler = getRouteHandler("/agents/:agentId/skills");
+ db.query.mockResolvedValueOnce({
+ rows: [
+ {
+ id: "agent-1",
+ user_id: "user-1",
+ status: "running",
+ container_id: "container-1",
+ backend_type: "docker",
+ runtime_family: "openclaw",
+ deploy_target: "docker",
+ sandbox_profile: "standard",
+ clawhub_skills: [],
+ },
+ ],
+ });
+ runContainerCommand.mockResolvedValueOnce({
+ output: JSON.stringify({
+ version: 1,
+ skills: {
+ github: { version: "2.1.0", installedAt: 1700000000000 },
+ notion: { version: "1.0.0", installedAt: 1700000000001 },
+ },
+ }),
+ });
+
+ const req = { params: { agentId: "agent-1" }, user: { id: "user-1" } };
+ const res = createMockRes();
+ await handler(req, res);
+
+ expect(res.statusCode).toBe(200);
+ expect(res.body).toEqual({
+ skills: [
+ { slug: "github", version: "2.1.0" },
+ { slug: "notion", version: "1.0.0" },
+ ],
+ });
+ });
+
+ it("returns unsupported_runtime for non-docker or non-openclaw agents", async () => {
+ const handler = getRouteHandler("/agents/:agentId/skills/:slug/install", "post");
+ db.query.mockResolvedValueOnce({
+ rows: [
+ {
+ id: "agent-1",
+ user_id: "user-1",
+ status: "running",
+ container_id: "container-1",
+ backend_type: "k8s",
+ runtime_family: "openclaw",
+ clawhub_skills: [],
+ },
+ ],
+ });
+
+ const req = {
+ params: { agentId: "agent-1", slug: "github" },
+ user: { id: "user-1" },
+ body: {},
+ };
+ const res = createMockRes();
+ await handler(req, res);
+
+ expect(res.statusCode).toBe(409);
+ expect(res.body).toEqual({
+ error: "unsupported_runtime",
+ message: "ClawHub installs are only available for Docker-backed OpenClaw agents.",
+ });
+ });
+
+ it("returns container_not_running when the agent is stopped", async () => {
+ const handler = getRouteHandler("/agents/:agentId/skills/:slug/install", "post");
+ db.query.mockResolvedValueOnce({
+ rows: [
+ {
+ id: "agent-1",
+ user_id: "user-1",
+ status: "stopped",
+ container_id: "container-1",
+ backend_type: "docker",
+ runtime_family: "openclaw",
+ clawhub_skills: [],
+ },
+ ],
+ });
+
+ const req = {
+ params: { agentId: "agent-1", slug: "github" },
+ user: { id: "user-1" },
+ body: {},
+ };
+ const res = createMockRes();
+ await handler(req, res);
+
+ expect(res.statusCode).toBe(409);
+ expect(res.body).toEqual({
+ error: "container_not_running",
+ message: "Start the agent before installing skills.",
+ });
+ });
+
+ it("returns npm_unavailable when clawhub CLI bootstrap cannot use npm", async () => {
+ const handler = getRouteHandler("/agents/:agentId/skills/:slug/install", "post");
+ db.query.mockResolvedValueOnce({
+ rows: [
+ {
+ id: "agent-1",
+ user_id: "user-1",
+ status: "running",
+ container_id: "container-1",
+ backend_type: "docker",
+ runtime_family: "openclaw",
+ clawhub_skills: [],
+ },
+ ],
+ });
+ runContainerCommand.mockRejectedValueOnce(new Error("Container command exited with exit 42"));
+
+ const req = {
+ params: { agentId: "agent-1", slug: "github" },
+ user: { id: "user-1" },
+ body: {},
+ };
+ const res = createMockRes();
+ await handler(req, res);
+
+ expect(res.statusCode).toBe(422);
+ expect(res.body).toEqual({
+ error: "npm_unavailable",
+ message: "The clawhub CLI could not be installed. Ensure Node.js is in your base image.",
+ });
+ });
+
+ it("reuses an in-flight install job when one already exists", async () => {
+ const handler = getRouteHandler("/agents/:agentId/skills/:slug/install", "post");
+ db.query.mockResolvedValueOnce({
+ rows: [
+ {
+ id: "agent-1",
+ user_id: "user-1",
+ status: "running",
+ container_id: "container-1",
+ backend_type: "docker",
+ runtime_family: "openclaw",
+ clawhub_skills: [],
+ },
+ ],
+ });
+ runContainerCommand.mockResolvedValueOnce({ output: "" });
+ findInFlightClawhubInstallJob.mockResolvedValueOnce({ id: "job-1" });
+ getClawhubInstallJobStatus.mockResolvedValueOnce({
+ jobId: "job-1",
+ agentId: "agent-1",
+ slug: "github",
+ status: "running",
+ error: null,
+ completedAt: null,
+ });
+
+ const req = {
+ params: { agentId: "agent-1", slug: "github" },
+ user: { id: "user-1" },
+ body: {},
+ };
+ const res = createMockRes();
+ await handler(req, res);
+
+ expect(res.statusCode).toBe(202);
+ expect(res.body).toEqual({
+ jobId: "job-1",
+ agentId: "agent-1",
+ slug: "github",
+ status: "running",
+ });
+ expect(addClawhubInstallJob).not.toHaveBeenCalled();
+ });
+
+ it("enqueues a new install job and marks persistOnSuccess false when already saved", async () => {
+ const handler = getRouteHandler("/agents/:agentId/skills/:slug/install", "post");
+ db.query.mockResolvedValueOnce({
+ rows: [
+ {
+ id: "agent-1",
+ user_id: "user-1",
+ status: "running",
+ container_id: "container-1",
+ backend_type: "docker",
+ runtime_family: "openclaw",
+ clawhub_skills: [{ installSlug: "github", author: "steipete" }],
+ },
+ ],
+ });
+ runContainerCommand.mockResolvedValueOnce({ output: "" });
+ findInFlightClawhubInstallJob.mockResolvedValueOnce(null);
+ addClawhubInstallJob.mockResolvedValueOnce({ id: "job-2" });
+
+ const req = {
+ params: { agentId: "agent-1", slug: "github" },
+ user: { id: "user-1" },
+ body: {
+ author: "steipete",
+ pagePath: "steipete/github",
+ installedAt: "2026-04-21T00:00:00.000Z",
+ },
+ };
+ const res = createMockRes();
+ await handler(req, res);
+
+ expect(addClawhubInstallJob).toHaveBeenCalledWith({
+ agentId: "agent-1",
+ slug: "github",
+ skillEntry: {
+ source: "clawhub",
+ installSlug: "github",
+ author: "steipete",
+ pagePath: "steipete/github",
+ installedAt: "2026-04-21T00:00:00.000Z",
+ },
+ persistOnSuccess: false,
+ });
+ expect(res.statusCode).toBe(202);
+ expect(res.body).toEqual({
+ jobId: "job-2",
+ agentId: "agent-1",
+ slug: "github",
+ status: "pending",
+ });
+ });
+
+ it("returns job_not_found when the install job lookup misses", async () => {
+ const handler = getRouteHandler("/jobs/:jobId");
+ getClawhubInstallJobStatus.mockResolvedValueOnce(null);
+
+ const req = { params: { jobId: "missing-job" } };
+ const res = createMockRes();
+ await handler(req, res);
+
+ expect(res.statusCode).toBe(404);
+ expect(res.body).toEqual({ error: "job_not_found" });
+ });
+
+ it("returns normalized install job status when the job exists", async () => {
+ const handler = getRouteHandler("/jobs/:jobId");
+ getClawhubInstallJobStatus.mockResolvedValueOnce({
+ jobId: "job-3",
+ agentId: "agent-1",
+ slug: "github",
+ status: "success",
+ error: null,
+ completedAt: "2026-04-21T01:00:00.000Z",
+ });
+
+ const req = { params: { jobId: "job-3" } };
+ const res = createMockRes();
+ await handler(req, res);
+
+ expect(res.statusCode).toBe(200);
+ expect(res.body).toEqual({
+ jobId: "job-3",
+ agentId: "agent-1",
+ slug: "github",
+ status: "success",
+ error: null,
+ completedAt: "2026-04-21T01:00:00.000Z",
+ });
+ });
+});
diff --git a/backend-api/__tests__/clawhubReconciliation.test.ts b/backend-api/__tests__/clawhubReconciliation.test.ts
new file mode 100644
index 0000000..6a8897a
--- /dev/null
+++ b/backend-api/__tests__/clawhubReconciliation.test.ts
@@ -0,0 +1,55 @@
+// @ts-nocheck
+const {
+ computeMissingSavedSkills,
+ normalizeSavedSkillEntries,
+} = require("../../agent-runtime/lib/clawhubReconciliation");
+
+describe("clawhub reconciliation helpers", () => {
+ it("returns no missing skills when nothing is saved", () => {
+ expect(computeMissingSavedSkills([], [{ slug: "github", version: "1.0.0" }])).toEqual([]);
+ });
+
+ it("returns no missing skills when all saved skills are already installed", () => {
+ const savedSkills = [
+ { installSlug: "github", author: "steipete" },
+ { installSlug: "notion", author: "dimagious" },
+ ];
+ const installedSkills = [
+ { slug: "github", version: "1.0.0" },
+ { slug: "notion", version: "2.0.0" },
+ ];
+
+ expect(computeMissingSavedSkills(savedSkills, installedSkills)).toEqual([]);
+ });
+
+ it("returns only the saved skills missing from the new container", () => {
+ const savedSkills = [
+ { installSlug: "github", author: "steipete" },
+ { installSlug: "notion", author: "dimagious" },
+ { installSlug: "slack", author: "acme" },
+ ];
+ const installedSkills = [{ slug: "github", version: "1.0.0" }];
+
+ expect(computeMissingSavedSkills(savedSkills, installedSkills)).toEqual([
+ expect.objectContaining({ installSlug: "notion", author: "dimagious" }),
+ expect.objectContaining({ installSlug: "slack", author: "acme" }),
+ ]);
+ });
+
+ it("deduplicates repeated saved entries and ignores invalid ones", () => {
+ const normalized = normalizeSavedSkillEntries([
+ { installSlug: "github", author: "steipete" },
+ { installSlug: "github", author: "steipete" },
+ { slug: "github", author: "steipete" },
+ { installSlug: "notion", author: "dimagious" },
+ { installSlug: "", author: "nobody" },
+ null,
+ ]);
+
+ expect(normalized).toHaveLength(2);
+ expect(normalized).toEqual([
+ expect.objectContaining({ installSlug: "github", author: "steipete" }),
+ expect.objectContaining({ installSlug: "notion", author: "dimagious" }),
+ ]);
+ });
+});
diff --git a/backend-api/__tests__/containerManager.test.ts b/backend-api/__tests__/containerManager.test.ts
index 75154e6..afddfbd 100644
--- a/backend-api/__tests__/containerManager.test.ts
+++ b/backend-api/__tests__/containerManager.test.ts
@@ -16,7 +16,7 @@ const mockHermesStats = jest.fn();
const mockHermesLogs = jest.fn();
const mockHermesExec = jest.fn();
-jest.mock("../../workers/provisioner/backends/hermes", () => {
+jest.mock("../backends/hermes", () => {
return jest.fn().mockImplementation(() => ({
start: mockHermesStart,
stop: mockHermesStop,
@@ -29,7 +29,7 @@ jest.mock("../../workers/provisioner/backends/hermes", () => {
}));
});
-jest.mock("../../workers/provisioner/backends/nemoclaw", () => {
+jest.mock("../backends/nemoclaw", () => {
return jest.fn().mockImplementation(() => ({
start: mockStart,
stop: mockStop,
diff --git a/backend-api/backends/hermes.ts b/backend-api/backends/hermes.ts
new file mode 100644
index 0000000..e223579
--- /dev/null
+++ b/backend-api/backends/hermes.ts
@@ -0,0 +1,2 @@
+// @ts-nocheck
+module.exports = require("../../workers/provisioner/backends/hermes");
diff --git a/backend-api/backends/nemoclaw.ts b/backend-api/backends/nemoclaw.ts
new file mode 100644
index 0000000..ac4fe82
--- /dev/null
+++ b/backend-api/backends/nemoclaw.ts
@@ -0,0 +1,2 @@
+// @ts-nocheck
+module.exports = require("../../workers/provisioner/backends/nemoclaw");
diff --git a/backend-api/clawhubClient.ts b/backend-api/clawhubClient.ts
new file mode 100644
index 0000000..4c8b487
--- /dev/null
+++ b/backend-api/clawhubClient.ts
@@ -0,0 +1,419 @@
+// @ts-nocheck
+const matter = require("gray-matter");
+
+const DEFAULT_CLAWHUB_BASE_URL = "https://clawhub.ai";
+const CANDIDATE_BASE_URL_KEYS = [
+ "registryBaseUrl",
+ "registryURL",
+ "registryUrl",
+ "registry_base_url",
+ "apiBaseUrl",
+ "apiURL",
+ "apiUrl",
+ "api_base_url",
+ "baseUrl",
+ "baseURL",
+ "base_url",
+ "url",
+ "origin",
+];
+
+function createClawhubError(statusCode, code, message) {
+ const error = new Error(message);
+ error.statusCode = statusCode;
+ error.code = code;
+ return error;
+}
+
+function normalizeText(value, fallback = "") {
+ if (typeof value === "string") {
+ return value.trim();
+ }
+ if (typeof value === "number" && Number.isFinite(value)) {
+ return String(value);
+ }
+ return fallback;
+}
+
+function normalizeNumber(value, fallback = 0) {
+ const numeric = Number(value);
+ return Number.isFinite(numeric) && numeric >= 0 ? numeric : fallback;
+}
+
+function normalizeOptionalNumber(value) {
+ if (value == null || value === "") return null;
+ const numeric = Number(value);
+ return Number.isFinite(numeric) && numeric >= 0 ? numeric : null;
+}
+
+function normalizeDate(value) {
+ if (value == null || value === "") return null;
+ const parsed = new Date(value);
+ if (Number.isNaN(parsed.getTime())) return null;
+ return parsed.toISOString();
+}
+
+function normalizeStringArray(value) {
+ if (!Array.isArray(value)) {
+ if (typeof value === "string" && value.trim()) return [value.trim()];
+ return [];
+ }
+
+ return value.flatMap((entry) => {
+ if (typeof entry === "string") {
+ const trimmed = entry.trim();
+ return trimmed ? [trimmed] : [];
+ }
+ if (typeof entry === "number" && Number.isFinite(entry)) {
+ return [String(entry)];
+ }
+ return [];
+ });
+}
+
+function normalizeInstallEntry(entry) {
+ if (!entry || typeof entry !== "object" || Array.isArray(entry)) {
+ const rawValue = normalizeText(entry);
+ return rawValue ? { kind: "unknown", package: rawValue } : null;
+ }
+
+ const normalized = {};
+ const rawKind =
+ normalizeText(entry.kind) ||
+ normalizeText(entry.type) ||
+ normalizeText(entry.manager) ||
+ "unknown";
+ const rawPackage =
+ normalizeText(entry.package) || normalizeText(entry.name) || normalizeText(entry.value);
+
+ if (rawKind) normalized.kind = rawKind;
+ if (rawPackage) normalized.package = rawPackage;
+
+ for (const [key, value] of Object.entries(entry)) {
+ if (
+ key === "kind" ||
+ key === "package" ||
+ key === "type" ||
+ key === "name" ||
+ key === "value"
+ ) {
+ continue;
+ }
+ if (value == null) continue;
+ normalized[key] = value;
+ }
+
+ return Object.keys(normalized).length > 0 ? normalized : null;
+}
+
+function normalizeRequirements(openClaw = null) {
+ if (!openClaw || typeof openClaw !== "object") return null;
+
+ const bins = normalizeStringArray(openClaw.requires?.bins ?? openClaw.bins);
+ const env = normalizeStringArray(openClaw.requires?.env ?? openClaw.env);
+ const config = normalizeStringArray(openClaw.requires?.config ?? openClaw.config);
+ const installEntries = Array.isArray(openClaw.install)
+ ? openClaw.install.map((entry) => normalizeInstallEntry(entry)).filter(Boolean)
+ : [];
+
+ if (!bins.length && !env.length && !config.length && !installEntries.length) {
+ return null;
+ }
+
+ return {
+ bins,
+ env,
+ config,
+ install: installEntries,
+ };
+}
+
+function parseSkillMarkdown(readme = "") {
+ const raw = typeof readme === "string" ? readme : "";
+ if (!raw.trim()) {
+ return {
+ readme: "",
+ requirements: null,
+ };
+ }
+
+ try {
+ const parsed = matter(raw);
+ const openClaw = parsed?.data?.metadata?.openclaw ?? parsed?.data?.openclaw ?? null;
+
+ return {
+ readme: typeof parsed.content === "string" ? parsed.content.trim() : raw,
+ requirements: normalizeRequirements(openClaw),
+ };
+ } catch {
+ return {
+ readme: raw,
+ requirements: null,
+ };
+ }
+}
+
+function normalizeSkillSummary(item = {}) {
+ const source =
+ item && typeof item === "object" && item.skill && typeof item.skill === "object"
+ ? item.skill
+ : item;
+
+ const slug = normalizeText(source.slug || source.installSlug || source.pagePath || source.id);
+ if (!slug) return null;
+
+ return {
+ slug,
+ name: normalizeText(source.name || source.displayName, slug),
+ description: normalizeText(source.description || source.summary),
+ downloads: normalizeOptionalNumber(
+ source.downloads ?? source.download_count ?? source.downloadCount ?? source.stats?.downloads,
+ ),
+ stars: normalizeOptionalNumber(
+ source.stars ?? source.star_count ?? source.starCount ?? source.stats?.stars,
+ ),
+ updatedAt: normalizeDate(
+ source.updatedAt ?? source.updated_at ?? source.updated_at_at ?? source.updated,
+ ),
+ };
+}
+
+function extractSkillsList(payload) {
+ if (Array.isArray(payload)) return payload;
+ if (Array.isArray(payload?.skills)) return payload.skills;
+ if (Array.isArray(payload?.results)) return payload.results;
+ if (Array.isArray(payload?.items)) return payload.items;
+ return [];
+}
+
+function normalizeSkillListPayload(payload = {}) {
+ return {
+ skills: extractSkillsList(payload)
+ .map((item) => normalizeSkillSummary(item))
+ .filter(Boolean),
+ cursor:
+ normalizeText(
+ payload?.cursor ?? payload?.nextCursor ?? payload?.next_cursor ?? payload?.next,
+ ) || null,
+ };
+}
+
+function normalizeSkillDetailPayload(metadata = {}, readme = "") {
+ const skillMetadata =
+ metadata && typeof metadata === "object" && metadata.skill && typeof metadata.skill === "object"
+ ? metadata.skill
+ : metadata;
+ const owner =
+ metadata && typeof metadata === "object" && metadata.owner && typeof metadata.owner === "object"
+ ? metadata.owner
+ : null;
+ const summary = normalizeSkillSummary(skillMetadata);
+ if (!summary) {
+ return null;
+ }
+
+ const author = normalizeText(owner?.handle);
+ const pagePath = author ? `${author}/${summary.slug}` : summary.slug;
+
+ const parsedMarkdown = parseSkillMarkdown(readme);
+ const metadataRequirements = normalizeRequirements(
+ skillMetadata?.metadata?.openclaw ?? skillMetadata?.openClaw ?? null,
+ );
+
+ return {
+ ...summary,
+ author,
+ pagePath,
+ readme: parsedMarkdown.readme,
+ requirements: parsedMarkdown.requirements ?? metadataRequirements,
+ };
+}
+
+function pickDiscoveryBaseUrl(payload) {
+ if (!payload) return "";
+ if (typeof payload === "string") return payload;
+ for (const key of CANDIDATE_BASE_URL_KEYS) {
+ const value = payload[key];
+ if (typeof value === "string" && value.trim()) {
+ return value.trim();
+ }
+ }
+ if (payload.registry && typeof payload.registry === "object") {
+ const nested = pickDiscoveryBaseUrl(payload.registry);
+ if (nested) return nested;
+ }
+ if (payload.api && typeof payload.api === "object") {
+ const nested = pickDiscoveryBaseUrl(payload.api);
+ if (nested) return nested;
+ }
+ return "";
+}
+
+function ensureTrailingSlash(value) {
+ const normalized = normalizeText(value, DEFAULT_CLAWHUB_BASE_URL);
+ return normalized.endsWith("/") ? normalized : `${normalized}/`;
+}
+
+async function readResponseText(response) {
+ if (!response || typeof response.text !== "function") {
+ return "";
+ }
+ return response.text();
+}
+
+async function parseJsonResponse(response, fallbackErrorMessage) {
+ const body = await readResponseText(response);
+ if (!body) return {};
+
+ try {
+ return JSON.parse(body);
+ } catch {
+ throw createClawhubError(502, "clawhub_unavailable", fallbackErrorMessage);
+ }
+}
+
+async function fetchRegistryDiscoveryBaseUrl() {
+ let response;
+ try {
+ response = await fetch(`${DEFAULT_CLAWHUB_BASE_URL}/.well-known/clawhub.json`, {
+ headers: { Accept: "application/json" },
+ signal: typeof AbortSignal?.timeout === "function" ? AbortSignal.timeout(10000) : undefined,
+ });
+ } catch {
+ return DEFAULT_CLAWHUB_BASE_URL;
+ }
+
+ if (!response || !response.ok) {
+ return DEFAULT_CLAWHUB_BASE_URL;
+ }
+
+ const payload = await parseJsonResponse(response, "Could not reach ClawHub registry.");
+ return pickDiscoveryBaseUrl(payload) || DEFAULT_CLAWHUB_BASE_URL;
+}
+
+async function fetchRegistryJson(pathname, { allowNotFound = false } = {}) {
+ const baseUrl = ensureTrailingSlash(await fetchRegistryDiscoveryBaseUrl());
+ const url = new URL(pathname.replace(/^\/+/, ""), baseUrl);
+
+ let response;
+ try {
+ response = await fetch(url.toString(), {
+ headers: { Accept: "application/json" },
+ signal: typeof AbortSignal?.timeout === "function" ? AbortSignal.timeout(10000) : undefined,
+ });
+ } catch {
+ if (allowNotFound) {
+ throw createClawhubError(404, "skill_not_found", "No skill found with slug: unknown");
+ }
+ throw createClawhubError(502, "clawhub_unavailable", "Could not reach ClawHub registry.");
+ }
+
+ if (!response.ok) {
+ if (allowNotFound && response.status === 404) {
+ throw createClawhubError(404, "skill_not_found", "No skill found with slug: unknown");
+ }
+ throw createClawhubError(502, "clawhub_unavailable", "Could not reach ClawHub registry.");
+ }
+
+ return parseJsonResponse(response, "Could not reach ClawHub registry.");
+}
+
+async function fetchRegistryText(pathname, { allowNotFound = false } = {}) {
+ const baseUrl = ensureTrailingSlash(await fetchRegistryDiscoveryBaseUrl());
+ const url = new URL(pathname.replace(/^\/+/, ""), baseUrl);
+
+ let response;
+ try {
+ response = await fetch(url.toString(), {
+ headers: { Accept: "text/markdown, text/plain, */*" },
+ signal: typeof AbortSignal?.timeout === "function" ? AbortSignal.timeout(10000) : undefined,
+ });
+ } catch {
+ if (allowNotFound) {
+ throw createClawhubError(404, "skill_not_found", "No skill found with slug: unknown");
+ }
+ throw createClawhubError(502, "clawhub_unavailable", "Could not reach ClawHub registry.");
+ }
+
+ if (!response.ok) {
+ if (allowNotFound && response.status === 404) {
+ throw createClawhubError(404, "skill_not_found", "No skill found with slug: unknown");
+ }
+ throw createClawhubError(502, "clawhub_unavailable", "Could not reach ClawHub registry.");
+ }
+
+ return readResponseText(response);
+}
+
+async function listSkills({ limit = 20, cursor = null } = {}) {
+ const params = new URLSearchParams();
+ params.set("limit", String(limit));
+ if (cursor) params.set("cursor", cursor);
+
+ const payload = await fetchRegistryJson(`/api/v1/skills?${params.toString()}`);
+ return normalizeSkillListPayload(payload);
+}
+
+async function searchSkills({ q, limit = 20 } = {}) {
+ const params = new URLSearchParams();
+ params.set("q", q);
+ params.set("limit", String(limit));
+
+ const payload = await fetchRegistryJson(`/api/v1/search?${params.toString()}`);
+ return normalizeSkillListPayload(payload);
+}
+
+async function getSkillDetail(slug) {
+ const normalizedSlug = normalizeText(slug);
+ if (!normalizedSlug) {
+ throw createClawhubError(404, "skill_not_found", "No skill found with slug: unknown");
+ }
+
+ const metadata = await fetchRegistryJson(`/api/v1/skills/${encodeURIComponent(normalizedSlug)}`, {
+ allowNotFound: true,
+ }).catch((error) => {
+ if (error?.statusCode === 404) {
+ throw createClawhubError(
+ 404,
+ "skill_not_found",
+ `No skill found with slug: ${normalizedSlug}`,
+ );
+ }
+ throw error;
+ });
+
+ const readme = await fetchRegistryText(
+ `/api/v1/skills/${encodeURIComponent(normalizedSlug)}/file?path=${encodeURIComponent("SKILL.md")}`,
+ { allowNotFound: true },
+ ).catch((error) => {
+ if (error?.statusCode === 404) {
+ throw createClawhubError(
+ 404,
+ "skill_not_found",
+ `No skill found with slug: ${normalizedSlug}`,
+ );
+ }
+ throw error;
+ });
+
+ const detail = normalizeSkillDetailPayload(metadata, readme);
+ if (!detail) {
+ throw createClawhubError(404, "skill_not_found", `No skill found with slug: ${normalizedSlug}`);
+ }
+ return detail;
+}
+
+module.exports = {
+ DEFAULT_CLAWHUB_BASE_URL,
+ createClawhubError,
+ fetchRegistryDiscoveryBaseUrl,
+ getSkillDetail,
+ listSkills,
+ normalizeInstallEntry,
+ normalizeRequirements,
+ parseSkillMarkdown,
+ normalizeSkillDetailPayload,
+ normalizeSkillListPayload,
+ normalizeSkillSummary,
+ searchSkills,
+};
diff --git a/backend-api/db_schema.sql b/backend-api/db_schema.sql
index e8a2be0..4f623f4 100644
--- a/backend-api/db_schema.sql
+++ b/backend-api/db_schema.sql
@@ -34,6 +34,7 @@ CREATE TABLE IF NOT EXISTS agents (
container_name TEXT,
image TEXT,
template_payload JSONB DEFAULT '{}',
+ clawhub_skills JSONB DEFAULT '[]',
vcpu INTEGER DEFAULT 1,
ram_mb INTEGER DEFAULT 1024,
disk_gb INTEGER DEFAULT 10,
diff --git a/backend-api/middleware/ownership.ts b/backend-api/middleware/ownership.ts
index a6e19be..3121ffa 100644
--- a/backend-api/middleware/ownership.ts
+++ b/backend-api/middleware/ownership.ts
@@ -4,8 +4,11 @@ const db = require("../db");
async function findOwnedAgent(agentId, userId) {
if (!agentId) return null;
const result = await db.query(
- "SELECT id, user_id, name, status, host FROM agents WHERE id = $1 AND user_id = $2",
- [agentId, userId]
+ `SELECT id, user_id, name, status, host, container_id, backend_type, runtime_family,
+ deploy_target, sandbox_profile, clawhub_skills
+ FROM agents
+ WHERE id = $1 AND user_id = $2`,
+ [agentId, userId],
);
return result.rows[0] || null;
}
@@ -14,7 +17,7 @@ async function findOwnedWorkspace(workspaceId, userId) {
if (!workspaceId) return null;
const result = await db.query(
"SELECT id, user_id, name, created_at FROM workspaces WHERE id = $1 AND user_id = $2",
- [workspaceId, userId]
+ [workspaceId, userId],
);
return result.rows[0] || null;
}
diff --git a/backend-api/package-lock.json b/backend-api/package-lock.json
index 764200b..79f0af4 100644
--- a/backend-api/package-lock.json
+++ b/backend-api/package-lock.json
@@ -13,6 +13,7 @@
"dockerode": "^4.0.10",
"express": "^5.2.1",
"express-rate-limit": "^8.3.2",
+ "gray-matter": "^4.0.3",
"helmet": "^8.1.0",
"ioredis": "^5.10.1",
"jsonwebtoken": "^9.0.2",
@@ -2215,9 +2216,6 @@
"arm64"
],
"dev": true,
- "libc": [
- "glibc"
- ],
"license": "MIT",
"optional": true,
"os": [
@@ -2232,9 +2230,6 @@
"arm64"
],
"dev": true,
- "libc": [
- "musl"
- ],
"license": "MIT",
"optional": true,
"os": [
@@ -2249,9 +2244,6 @@
"ppc64"
],
"dev": true,
- "libc": [
- "glibc"
- ],
"license": "MIT",
"optional": true,
"os": [
@@ -2266,9 +2258,6 @@
"riscv64"
],
"dev": true,
- "libc": [
- "glibc"
- ],
"license": "MIT",
"optional": true,
"os": [
@@ -2283,9 +2272,6 @@
"riscv64"
],
"dev": true,
- "libc": [
- "musl"
- ],
"license": "MIT",
"optional": true,
"os": [
@@ -2300,9 +2286,6 @@
"s390x"
],
"dev": true,
- "libc": [
- "glibc"
- ],
"license": "MIT",
"optional": true,
"os": [
@@ -2317,9 +2300,6 @@
"x64"
],
"dev": true,
- "libc": [
- "glibc"
- ],
"license": "MIT",
"optional": true,
"os": [
@@ -2334,9 +2314,6 @@
"x64"
],
"dev": true,
- "libc": [
- "musl"
- ],
"license": "MIT",
"optional": true,
"os": [
@@ -2540,7 +2517,6 @@
"version": "1.0.10",
"resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
"integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
- "dev": true,
"license": "MIT",
"dependencies": {
"sprintf-js": "~1.0.2"
@@ -3695,7 +3671,6 @@
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
"integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==",
- "dev": true,
"license": "BSD-2-Clause",
"bin": {
"esparse": "bin/esparse.js",
@@ -3861,6 +3836,18 @@
"url": "https://opencollective.com/express"
}
},
+ "node_modules/extend-shallow": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+ "integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==",
+ "license": "MIT",
+ "dependencies": {
+ "is-extendable": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
"node_modules/fast-fifo": {
"version": "1.3.2",
"resolved": "https://registry.npmjs.org/fast-fifo/-/fast-fifo-1.3.2.tgz",
@@ -4175,6 +4162,21 @@
"dev": true,
"license": "ISC"
},
+ "node_modules/gray-matter": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz",
+ "integrity": "sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==",
+ "license": "MIT",
+ "dependencies": {
+ "js-yaml": "^3.13.1",
+ "kind-of": "^6.0.2",
+ "section-matter": "^1.0.0",
+ "strip-bom-string": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=6.0"
+ }
+ },
"node_modules/handlebars": {
"version": "4.7.9",
"resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.9.tgz",
@@ -4434,6 +4436,15 @@
"dev": true,
"license": "MIT"
},
+ "node_modules/is-extendable": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz",
+ "integrity": "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
"node_modules/is-fullwidth-code-point": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
@@ -5212,7 +5223,6 @@
"version": "3.14.2",
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz",
"integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==",
- "dev": true,
"license": "MIT",
"dependencies": {
"argparse": "^1.0.7",
@@ -5325,6 +5335,15 @@
"safe-buffer": "^5.0.1"
}
},
+ "node_modules/kind-of": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz",
+ "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
"node_modules/leven": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz",
@@ -6439,6 +6458,19 @@
"integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
"license": "MIT"
},
+ "node_modules/section-matter": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz",
+ "integrity": "sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==",
+ "license": "MIT",
+ "dependencies": {
+ "extend-shallow": "^2.0.1",
+ "kind-of": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
"node_modules/semver": {
"version": "7.7.4",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.7.4.tgz",
@@ -6717,7 +6749,6 @@
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
"integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==",
- "dev": true,
"license": "BSD-3-Clause"
},
"node_modules/ssh2": {
@@ -6873,6 +6904,15 @@
"node": ">=8"
}
},
+ "node_modules/strip-bom-string": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz",
+ "integrity": "sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
"node_modules/strip-final-newline": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz",
diff --git a/backend-api/package.json b/backend-api/package.json
index 992ccc3..48cbe28 100644
--- a/backend-api/package.json
+++ b/backend-api/package.json
@@ -17,6 +17,7 @@
"dockerode": "^4.0.10",
"express": "^5.2.1",
"express-rate-limit": "^8.3.2",
+ "gray-matter": "^4.0.3",
"helmet": "^8.1.0",
"ioredis": "^5.10.1",
"jsonwebtoken": "^9.0.2",
diff --git a/backend-api/redisQueue.ts b/backend-api/redisQueue.ts
index ec52ee3..7d7c08a 100644
--- a/backend-api/redisQueue.ts
+++ b/backend-api/redisQueue.ts
@@ -1,52 +1,152 @@
// @ts-nocheck
// Redis based job queue using BullMQ
-const { Queue } = require('bullmq')
-const IORedis = require('ioredis')
+const { Queue } = require("bullmq");
+const { randomUUID } = require("crypto");
+const IORedis = require("ioredis");
function parseTimeoutMs(rawValue, fallbackMs) {
- const parsed = Number.parseInt(rawValue, 10)
- return Number.isFinite(parsed) && parsed >= 60000 ? parsed : fallbackMs
+ const parsed = Number.parseInt(rawValue, 10);
+ return Number.isFinite(parsed) && parsed >= 60000 ? parsed : fallbackMs;
}
const DEPLOYMENT_JOB_TIMEOUT_MS = parseTimeoutMs(
process.env.DEPLOYMENT_JOB_TIMEOUT_MS || process.env.PROVISION_TIMEOUT_MS,
- 900000
-)
+ 900000,
+);
+const CLAWHUB_INSTALL_JOB_TIMEOUT_MS = parseTimeoutMs(
+ process.env.CLAWHUB_INSTALL_TIMEOUT_MS,
+ 300000,
+);
const connection = new IORedis({
- host: process.env.REDIS_HOST || 'redis',
- port: parseInt(process.env.REDIS_PORT || '6379'),
+ host: process.env.REDIS_HOST || "redis",
+ port: parseInt(process.env.REDIS_PORT || "6379"),
...(process.env.REDIS_PASSWORD ? { password: process.env.REDIS_PASSWORD } : {}),
maxRetriesPerRequest: null,
-})
+});
-const deployQueue = new Queue('deployments', {
+const deployQueue = new Queue("deployments", {
connection,
defaultJobOptions: {
attempts: 5,
- backoff: { type: 'exponential', delay: 3000 },
+ backoff: { type: "exponential", delay: 3000 },
timeout: DEPLOYMENT_JOB_TIMEOUT_MS,
removeOnComplete: { count: 200 },
removeOnFail: false, // keep failed jobs for DLQ inspection
},
-})
+});
-async function addDeploymentJob(agent){
- await deployQueue.add('deploy-agent', agent)
+const clawhubInstallsQueue = new Queue("clawhub-installs", {
+ connection,
+ defaultJobOptions: {
+ attempts: 1,
+ backoff: { type: "exponential", delay: 3000 },
+ timeout: CLAWHUB_INSTALL_JOB_TIMEOUT_MS,
+ removeOnComplete: { count: 200 },
+ removeOnFail: false,
+ },
+});
+
+async function addDeploymentJob(agent) {
+ await deployQueue.add("deploy-agent", agent);
+}
+
+async function addClawhubInstallJob(payload) {
+ const jobId = payload?.jobId || randomUUID();
+ return clawhubInstallsQueue.add("install-skill", { ...payload, jobId }, { jobId });
+}
+
+async function findInFlightClawhubInstallJob(agentId, slug) {
+ if (!agentId || !slug) return null;
+
+ const jobs = await clawhubInstallsQueue.getJobs([
+ "active",
+ "waiting",
+ "waiting-children",
+ "delayed",
+ "prioritized",
+ ]);
+
+ const normalizedAgentId = String(agentId);
+ const normalizedSlug = String(slug).trim();
+
+ for (const job of jobs) {
+ if (!job) continue;
+ const matchesAgent = String(job.data?.agentId || "") === normalizedAgentId;
+ const matchesSlug = String(job.data?.slug || "").trim() === normalizedSlug;
+ if (matchesAgent && matchesSlug) {
+ return job;
+ }
+ }
+
+ return null;
+}
+
+function mapClawhubJobState(state) {
+ switch (state) {
+ case "active":
+ return "running";
+ case "completed":
+ return "success";
+ case "failed":
+ return "failed";
+ case "waiting":
+ case "waiting-children":
+ case "delayed":
+ case "prioritized":
+ default:
+ return "pending";
+ }
+}
+
+async function getClawhubInstallJob(jobId) {
+ if (!jobId) return null;
+ return clawhubInstallsQueue.getJob(jobId);
+}
+
+async function getClawhubInstallJobStatus(jobId) {
+ const job = await getClawhubInstallJob(jobId);
+ if (!job) return null;
+
+ const state = await job.getState();
+ const failedReason =
+ typeof job.failedReason === "string" && job.failedReason.trim()
+ ? job.failedReason.trim()
+ : null;
+
+ return {
+ jobId: String(job.id),
+ agentId: job.data?.agentId || null,
+ slug: job.data?.slug || null,
+ status: mapClawhubJobState(state),
+ error: failedReason,
+ completedAt: job.finishedOn ? new Date(job.finishedOn).toISOString() : null,
+ };
}
/** Retrieve failed jobs (dead letter queue) for inspection. */
async function getDLQJobs(start = 0, end = 50) {
- return deployQueue.getFailed(start, end)
+ return deployQueue.getFailed(start, end);
}
/** Retry a specific failed job by its ID. */
async function retryDLQJob(jobId) {
- const job = await deployQueue.getJob(jobId)
- if (!job) throw new Error(`Job ${jobId} not found`)
- await job.retry()
- return { jobId, status: 'retried' }
+ const job = await deployQueue.getJob(jobId);
+ if (!job) throw new Error(`Job ${jobId} not found`);
+ await job.retry();
+ return { jobId, status: "retried" };
}
-module.exports = { deployQueue, addDeploymentJob, getDLQJobs, retryDLQJob, connection }
+module.exports = {
+ deployQueue,
+ clawhubInstallsQueue,
+ addDeploymentJob,
+ addClawhubInstallJob,
+ findInFlightClawhubInstallJob,
+ getClawhubInstallJob,
+ getClawhubInstallJobStatus,
+ getDLQJobs,
+ retryDLQJob,
+ connection,
+};
diff --git a/backend-api/routes/agents.ts b/backend-api/routes/agents.ts
index 845045c..b27a2eb 100644
--- a/backend-api/routes/agents.ts
+++ b/backend-api/routes/agents.ts
@@ -15,6 +15,7 @@ const {
CLONE_MODES,
buildTemplatePayloadFromAgent,
createEmptyTemplatePayload,
+ ensureCoreTemplateFiles,
materializeTemplateWiring,
resolveContainerName,
sanitizeAgentName,
@@ -44,10 +45,7 @@ const {
normalizeRuntimeFamilyName,
} = require("../../agent-runtime/lib/backendCatalog");
const { asyncHandler } = require("../middleware/errorHandler");
-const {
- buildAgentHistoryResponse,
- buildAgentStatsResponse,
-} = require("../agentTelemetry");
+const { buildAgentHistoryResponse, buildAgentStatsResponse } = require("../agentTelemetry");
const {
buildAgentRuntimeFields,
isSameRuntimePath,
@@ -77,8 +75,7 @@ function resolveRequestedImage({
fallbackImage = null,
fallbackRuntimeFields = null,
} = {}) {
- const explicitRequestedImage =
- typeof requestedImage === "string" ? requestedImage.trim() : "";
+ const explicitRequestedImage = typeof requestedImage === "string" ? requestedImage.trim() : "";
if (explicitRequestedImage) return explicitRequestedImage;
if (
@@ -89,13 +86,11 @@ function resolveRequestedImage({
return fallbackImage;
}
- return (
- getDefaultAgentImage({
- backend: runtimeFields?.backend_type,
- deploy_target: runtimeFields?.deploy_target,
- sandbox_profile: runtimeFields?.sandbox_profile,
- })
- );
+ return getDefaultAgentImage({
+ backend: runtimeFields?.backend_type,
+ deploy_target: runtimeFields?.deploy_target,
+ sandbox_profile: runtimeFields?.sandbox_profile,
+ });
}
function normalizeRequestedRuntimeFamily(value) {
@@ -106,15 +101,13 @@ function normalizeRequestedRuntimeFamily(value) {
function assertSupportedRuntimeSelection(runtimeFields) {
if (runtimeFields?.runtime_family === "hermes") {
if (runtimeFields.deploy_target !== "docker") {
- const error = new Error(
- "Hermes runtime is only supported on the Docker execution target."
- );
+ const error = new Error("Hermes runtime is only supported on the Docker execution target.");
error.statusCode = 400;
throw error;
}
if (runtimeFields.sandbox_profile !== "standard") {
const error = new Error(
- "Hermes runtime currently supports only the Standard sandbox profile."
+ "Hermes runtime currently supports only the Standard sandbox profile.",
);
error.statusCode = 400;
throw error;
@@ -125,9 +118,7 @@ function assertSupportedRuntimeSelection(runtimeFields) {
if (runtimeFields?.sandbox_profile !== "nemoclaw") return;
if (runtimeFields.deploy_target !== "docker") {
- const error = new Error(
- "NemoClaw sandbox is only supported on the Docker execution target."
- );
+ const error = new Error("NemoClaw sandbox is only supported on the Docker execution target.");
error.statusCode = 400;
throw error;
}
@@ -193,7 +184,7 @@ function assertBackendAvailable(backend) {
}
if (!status.configured) {
const error = new Error(
- status.issue || `${status.label} is not configured for this Nora control plane.`
+ status.issue || `${status.label} is not configured for this Nora control plane.`,
);
error.statusCode = 400;
throw error;
@@ -201,73 +192,140 @@ function assertBackendAvailable(backend) {
return status;
}
-router.get("/", asyncHandler(async (req, res) => {
- const result = await db.query(
- "SELECT * FROM agents WHERE user_id = $1 ORDER BY created_at DESC",
- [req.user.id]
- );
- res.json(result.rows.map(serializeAgent));
-}));
+function normalizeClawhubSkillEntry(entry) {
+ if (!entry || typeof entry !== "object" || Array.isArray(entry)) {
+ return null;
+ }
-router.get("/:id", asyncHandler(async (req, res) => {
- const result = await db.query(
- "SELECT * FROM agents WHERE id = $1 AND user_id = $2",
- [req.params.id, req.user.id]
- );
- if (!result.rows[0]) return res.status(404).json({ error: "Agent not found" });
+ const installSlug =
+ typeof entry.installSlug === "string"
+ ? entry.installSlug.trim()
+ : typeof entry.slug === "string"
+ ? entry.slug.trim()
+ : "";
+ if (!installSlug) return null;
+
+ const author = typeof entry.author === "string" ? entry.author.trim() : "";
+ const pagePath =
+ typeof entry.pagePath === "string" && entry.pagePath.trim()
+ ? entry.pagePath.trim()
+ : author
+ ? `${author}/${installSlug}`
+ : installSlug;
+
+ const installedAtRaw = typeof entry.installedAt === "string" ? entry.installedAt.trim() : "";
+ const installedAt =
+ installedAtRaw && !Number.isNaN(new Date(installedAtRaw).getTime())
+ ? new Date(installedAtRaw).toISOString()
+ : new Date().toISOString();
- const agent = result.rows[0];
+ return {
+ source: "clawhub",
+ installSlug,
+ author,
+ pagePath,
+ installedAt,
+ };
+}
- // Live status reconciliation — check actual container state while preserving
- // warning as a first-class degraded state until the container actually stops.
- if (agent.container_id && ["running", "warning", "error", "stopped"].includes(agent.status)) {
- try {
- const live = await containerManager.status(agent);
- const reconciledStatus = reconcileAgentStatus(agent.status, Boolean(live.running));
- if (reconciledStatus !== agent.status) {
- await db.query("UPDATE agents SET status = $1 WHERE id = $2", [reconciledStatus, agent.id]);
- agent.status = reconciledStatus;
+function normalizeClawhubSkills(entries) {
+ if (!Array.isArray(entries)) return [];
+
+ const seen = new Set();
+ const normalized = [];
+
+ for (const entry of entries) {
+ const skill = normalizeClawhubSkillEntry(entry);
+ if (!skill) continue;
+ const dedupeKey = `${skill.author}::${skill.installSlug}`;
+ if (seen.has(dedupeKey)) continue;
+ seen.add(dedupeKey);
+ normalized.push(skill);
+ }
+
+ return normalized;
+}
+
+router.get(
+ "/",
+ asyncHandler(async (req, res) => {
+ const result = await db.query(
+ "SELECT * FROM agents WHERE user_id = $1 ORDER BY created_at DESC",
+ [req.user.id],
+ );
+ res.json(result.rows.map(serializeAgent));
+ }),
+);
+
+router.get(
+ "/:id",
+ asyncHandler(async (req, res) => {
+ const result = await db.query("SELECT * FROM agents WHERE id = $1 AND user_id = $2", [
+ req.params.id,
+ req.user.id,
+ ]);
+ if (!result.rows[0]) return res.status(404).json({ error: "Agent not found" });
+
+ const agent = result.rows[0];
+
+ // Live status reconciliation — check actual container state while preserving
+ // warning as a first-class degraded state until the container actually stops.
+ if (agent.container_id && ["running", "warning", "error", "stopped"].includes(agent.status)) {
+ try {
+ const live = await containerManager.status(agent);
+ const reconciledStatus = reconcileAgentStatus(agent.status, Boolean(live.running));
+ if (reconciledStatus !== agent.status) {
+ await db.query("UPDATE agents SET status = $1 WHERE id = $2", [
+ reconciledStatus,
+ agent.id,
+ ]);
+ agent.status = reconciledStatus;
+ }
+ } catch {
+ // Can't reach container runtime — leave DB status as-is
}
- } catch {
- // Can't reach container runtime — leave DB status as-is
}
- }
- res.json(serializeAgent(agent));
-}));
+ res.json(serializeAgent(agent));
+ }),
+);
// Historical container stats with time range
// Query params: ?range=5m|15m|30m|1h|6h|24h|3d|7d (default 15m) or ?from=ISO&to=ISO
-router.get("/:id/stats/history", asyncHandler(async (req, res) => {
- const agentCheck = await db.query(
- "SELECT * FROM agents WHERE id = $1 AND user_id = $2", [req.params.id, req.user.id]
- );
- const agent = agentCheck.rows[0];
- if (!agent) return res.status(404).json({ error: "Agent not found" });
+router.get(
+ "/:id/stats/history",
+ asyncHandler(async (req, res) => {
+ const agentCheck = await db.query("SELECT * FROM agents WHERE id = $1 AND user_id = $2", [
+ req.params.id,
+ req.user.id,
+ ]);
+ const agent = agentCheck.rows[0];
+ if (!agent) return res.status(404).json({ error: "Agent not found" });
- const rangeMap = {
- "5m": "5 minutes",
- "15m": "15 minutes",
- "30m": "30 minutes",
- "1h": "1 hour",
- "6h": "6 hours",
- "24h": "24 hours",
- "3d": "3 days",
- "7d": "7 days",
- };
- let fromTime, toTime;
-
- if (req.query.from && req.query.to) {
- fromTime = new Date(req.query.from);
- toTime = new Date(req.query.to);
- } else {
- const range = rangeMap[req.query.range || "15m"] || "15 minutes";
- toTime = new Date();
- fromTime = new Date(Date.now() - parseInterval(range));
- }
+ const rangeMap = {
+ "5m": "5 minutes",
+ "15m": "15 minutes",
+ "30m": "30 minutes",
+ "1h": "1 hour",
+ "6h": "6 hours",
+ "24h": "24 hours",
+ "3d": "3 days",
+ "7d": "7 days",
+ };
+ let fromTime, toTime;
- res.json(await buildAgentHistoryResponse(agent, fromTime, toTime));
-}));
+ if (req.query.from && req.query.to) {
+ fromTime = new Date(req.query.from);
+ toTime = new Date(req.query.to);
+ } else {
+ const range = rangeMap[req.query.range || "15m"] || "15 minutes";
+ toTime = new Date();
+ fromTime = new Date(Date.now() - parseInterval(range));
+ }
+
+ res.json(await buildAgentHistoryResponse(agent, fromTime, toTime));
+ }),
+);
function parseInterval(pg) {
const m = pg.match(/(\d+)\s*(day|minute|hour|second)/);
@@ -285,72 +343,75 @@ function agentAuditMetadata(req, agent, extra = {}) {
buildAgentContext(agent, {
ownerEmail: req?.user?.email || null,
...extra,
- })
+ }),
);
}
// Get the gateway control UI URL (published host port for direct browser access)
-router.get("/:id/gateway-url", asyncHandler(async (req, res) => {
- const result = await db.query(
- `SELECT id, host, container_id, backend_type, runtime_family, deploy_target,
+router.get(
+ "/:id/gateway-url",
+ asyncHandler(async (req, res) => {
+ const result = await db.query(
+ `SELECT id, host, container_id, backend_type, runtime_family, deploy_target,
sandbox_profile, gateway_token, gateway_host_port,
gateway_host, gateway_port, user_id, status
FROM agents
WHERE id = $1 AND user_id = $2`,
- [req.params.id, req.user.id]
- );
- const agent = result.rows[0];
- if (!agent) return res.status(404).json({ error: "Agent not found" });
- res.locals.auditContext = buildAgentContext(agent, {
- ownerEmail: req.user.email || null,
- });
- const runtimeFields = buildAgentRuntimeFields(agent);
- if (!isGatewayAvailableStatus(agent.status)) {
- return res.status(409).json({ error: "Agent gateway is only available while running" });
- }
- if (runtimeFields.runtime_family !== "openclaw") {
- return res.status(409).json({
- error: "This runtime family does not expose an OpenClaw gateway",
+ [req.params.id, req.user.id],
+ );
+ const agent = result.rows[0];
+ if (!agent) return res.status(404).json({ error: "Agent not found" });
+ res.locals.auditContext = buildAgentContext(agent, {
+ ownerEmail: req.user.email || null,
});
- }
- if (!agent.container_id) return res.status(409).json({ error: "No container" });
+ const runtimeFields = buildAgentRuntimeFields(agent);
+ if (!isGatewayAvailableStatus(agent.status)) {
+ return res.status(409).json({ error: "Agent gateway is only available while running" });
+ }
+ if (runtimeFields.runtime_family !== "openclaw") {
+ return res.status(409).json({
+ error: "This runtime family does not expose an OpenClaw gateway",
+ });
+ }
+ if (!agent.container_id) return res.status(409).json({ error: "No container" });
- // Prefer the stored published port when present. This keeps browser access on
- // the control-plane host for Docker and local kind NodePort verification.
- let hostPort = agent.gateway_host_port;
- const backendType = runtimeFields.backend_type;
- if (!hostPort && agent.container_id && ["docker", "nemoclaw"].includes(backendType)) {
- try {
- const Docker = require("dockerode");
- const docker = new Docker({ socketPath: "/var/run/docker.sock" });
- const info = await docker.getContainer(agent.container_id).inspect();
- const portBindings = info.NetworkSettings?.Ports?.[`${OPENCLAW_GATEWAY_PORT}/tcp`];
- hostPort = portBindings?.[0]?.HostPort || null;
- } catch (e) {
- return res.status(502).json({ error: "Could not inspect container", details: e.message });
+ // Prefer the stored published port when present. This keeps browser access on
+ // the control-plane host for Docker and local kind NodePort verification.
+ let hostPort = agent.gateway_host_port;
+ const backendType = runtimeFields.backend_type;
+ if (!hostPort && agent.container_id && ["docker", "nemoclaw"].includes(backendType)) {
+ try {
+ const Docker = require("dockerode");
+ const docker = new Docker({ socketPath: "/var/run/docker.sock" });
+ const info = await docker.getContainer(agent.container_id).inspect();
+ const portBindings = info.NetworkSettings?.Ports?.[`${OPENCLAW_GATEWAY_PORT}/tcp`];
+ hostPort = portBindings?.[0]?.HostPort || null;
+ } catch (e) {
+ return res.status(502).json({ error: "Could not inspect container", details: e.message });
+ }
}
- }
- const publishedGatewayHost = resolvePublishedGatewayHost(req);
- const publishedGatewayProtocol = resolvePublishedGatewayProtocol(req);
+ const publishedGatewayHost = resolvePublishedGatewayHost(req);
+ const publishedGatewayProtocol = resolvePublishedGatewayProtocol(req);
- if (hostPort) {
- return res.json({
- url: `${publishedGatewayProtocol}://${publishedGatewayHost}:${hostPort}`,
- port: parseInt(hostPort, 10),
- });
- }
+ if (hostPort) {
+ return res.json({
+ url: `${publishedGatewayProtocol}://${publishedGatewayHost}:${hostPort}`,
+ port: parseInt(hostPort, 10),
+ });
+ }
- const directAddress = resolveGatewayAddress(agent, {
- publishedHost: publishedGatewayHost,
- });
- if (!directAddress) return res.status(409).json({ error: "Gateway address not available" });
+ const directAddress = resolveGatewayAddress(agent, {
+ publishedHost: publishedGatewayHost,
+ });
+ if (!directAddress) return res.status(409).json({ error: "Gateway address not available" });
- res.json({
- url: `${publishedGatewayProtocol}://${directAddress.host}:${directAddress.port}`,
- port: parseInt(directAddress.port, 10),
- });
-}));
+ res.json({
+ url: `${publishedGatewayProtocol}://${directAddress.host}:${directAddress.port}`,
+ port: parseInt(directAddress.port, 10),
+ });
+ }),
+);
function extractHermesApiError(payload, fallbackMessage) {
if (payload && typeof payload === "object") {
@@ -380,10 +441,10 @@ function createStatusCodeError(message, statusCode) {
}
async function loadHermesUiAgent(req) {
- const result = await db.query(
- "SELECT * FROM agents WHERE id = $1 AND user_id = $2",
- [req.params.id, req.user.id]
- );
+ const result = await db.query("SELECT * FROM agents WHERE id = $1 AND user_id = $2", [
+ req.params.id,
+ req.user.id,
+ ]);
const agent = result.rows[0];
if (!agent) {
throw createStatusCodeError("Agent not found", 404);
@@ -393,15 +454,12 @@ async function loadHermesUiAgent(req) {
if (runtimeFields.runtime_family !== "hermes") {
throw createStatusCodeError(
"This runtime family does not expose the Hermes WebUI surface",
- 409
+ 409,
);
}
if (!isGatewayAvailableStatus(agent.status)) {
- throw createStatusCodeError(
- "Hermes WebUI is only available while the agent is running",
- 409
- );
+ throw createStatusCodeError("Hermes WebUI is only available while the agent is running", 409);
}
return agent;
@@ -410,11 +468,11 @@ async function loadHermesUiAgent(req) {
function buildHermesGatewaySummary(snapshot = {}) {
const directoryPlatforms = snapshot?.directory?.platforms || {};
const configuredPlatforms = Object.values(snapshot?.platformDetails || {}).filter(
- (entry) => entry?.connected || entry?.enabled
+ (entry) => entry?.connected || entry?.enabled,
);
const discoveredTargetsCount = Object.values(directoryPlatforms).reduce(
(count, entries) => count + (Array.isArray(entries) ? entries.length : 0),
- 0
+ 0,
);
return {
@@ -425,8 +483,7 @@ function buildHermesGatewaySummary(snapshot = {}) {
updatedAt: snapshot?.runtimeStatus?.updated_at || null,
configuredPlatformsCount: configuredPlatforms.length,
discoveredTargetsCount,
- jobsCount:
- typeof snapshot?.jobsCount === "number" ? snapshot.jobsCount : null,
+ jobsCount: typeof snapshot?.jobsCount === "number" ? snapshot.jobsCount : null,
platformStates: snapshot?.runtimeStatus?.platforms || {},
};
}
@@ -442,18 +499,13 @@ function buildHermesDashboardSummary(payload = {}) {
typeof payload?.gateway_state === "string" && payload.gateway_state.trim()
? payload.gateway_state.trim()
: null,
- activeSessions:
- typeof payload?.active_sessions === "number"
- ? payload.active_sessions
- : null,
+ activeSessions: typeof payload?.active_sessions === "number" ? payload.active_sessions : null,
};
}
function buildHermesDashboardUnsupportedMessage(versionLine = "") {
const versionSuffix =
- typeof versionLine === "string" && versionLine.trim()
- ? ` (${versionLine.trim()})`
- : "";
+ typeof versionLine === "string" && versionLine.trim() ? ` (${versionLine.trim()})` : "";
return (
`This Hermes image${versionSuffix} does not include the official dashboard yet. ` +
"Pull a current Hermes image and redeploy this agent."
@@ -471,7 +523,7 @@ function buildHermesDashboardEnsureCommand() {
'if ! python3 -c \'import importlib.util,sys;sys.exit(0 if importlib.util.find_spec("hermes_cli.web_server") else 1)\'; then echo "STATUS=missing-web-server"; printf "VERSION=%s\\n" "$VERSION"; exit 0; fi',
'if python3 -c \'import socket,sys;s=socket.socket();s.settimeout(1);rc=s.connect_ex(("127.0.0.1",9119));s.close();sys.exit(0 if rc==0 else 1)\'; then echo "STATUS=already-running"; printf "VERSION=%s\\n" "$VERSION"; exit 0; fi',
'if [ -d "$LOG_DIR" ]; then chown -R hermes:hermes "$LOG_DIR" 2>/dev/null || true; else mkdir -p "$LOG_DIR"; chown hermes:hermes "$LOG_DIR" 2>/dev/null || true; fi',
- 'nohup /opt/hermes/docker/entrypoint.sh dashboard --host 0.0.0.0 --insecure --no-open >> /proc/1/fd/1 2>> /proc/1/fd/2 &',
+ "nohup /opt/hermes/docker/entrypoint.sh dashboard --host 0.0.0.0 --insecure --no-open >> /proc/1/fd/1 2>> /proc/1/fd/2 &",
"sleep 2",
'if python3 -c \'import socket,sys;s=socket.socket();s.settimeout(1);rc=s.connect_ex(("127.0.0.1",9119));s.close();sys.exit(0 if rc==0 else 1)\'; then echo "STATUS=started"; else echo "STATUS=start-failed"; fi',
'printf "VERSION=%s\\n" "$VERSION"',
@@ -480,11 +532,9 @@ function buildHermesDashboardEnsureCommand() {
async function ensureHermesDashboardProcess(agent) {
try {
- const { output } = await runContainerCommand(
- agent,
- buildHermesDashboardEnsureCommand(),
- { timeout: 15000 }
- );
+ const { output } = await runContainerCommand(agent, buildHermesDashboardEnsureCommand(), {
+ timeout: 15000,
+ });
const lines = String(output || "")
.split(/\r?\n/)
.map((line) => line.trim())
@@ -545,11 +595,7 @@ function normalizeHermesCronListPayload(payload) {
}
function resolveHermesChannelConfig(body = {}) {
- if (
- body?.config &&
- typeof body.config === "object" &&
- !Array.isArray(body.config)
- ) {
+ if (body?.config && typeof body.config === "object" && !Array.isArray(body.config)) {
return body.config;
}
@@ -571,12 +617,10 @@ async function resolveHermesApiToken(agent) {
const docker = new Docker({ socketPath: "/var/run/docker.sock" });
const info = await docker.getContainer(agent.container_id).inspect();
const envVars = Array.isArray(info?.Config?.Env) ? info.Config.Env : [];
- const keyEntry = envVars.find((entry) =>
- typeof entry === "string" && entry.startsWith("API_SERVER_KEY=")
+ const keyEntry = envVars.find(
+ (entry) => typeof entry === "string" && entry.startsWith("API_SERVER_KEY="),
);
- const resolvedToken = keyEntry
- ? keyEntry.slice("API_SERVER_KEY=".length).trim()
- : "";
+ const resolvedToken = keyEntry ? keyEntry.slice("API_SERVER_KEY=".length).trim() : "";
if (!resolvedToken) return null;
@@ -607,7 +651,7 @@ async function fetchHermesApi(agent, path, options = {}) {
const apiToken = await resolveHermesApiToken(agent);
if (!apiToken) {
const error = new Error(
- "Hermes API auth token unavailable. Redeploy the agent to refresh runtime auth."
+ "Hermes API auth token unavailable. Redeploy the agent to refresh runtime auth.",
);
error.statusCode = 409;
throw error;
@@ -621,10 +665,7 @@ async function fetchHermesApi(agent, path, options = {}) {
let body;
if (options.body != null) {
- body =
- typeof options.body === "string"
- ? options.body
- : JSON.stringify(options.body);
+ body = typeof options.body === "string" ? options.body : JSON.stringify(options.body);
if (!requestHeaders["Content-Type"]) {
requestHeaders["Content-Type"] = "application/json";
}
@@ -691,488 +732,497 @@ async function fetchHermesDashboard(agent, path, options = {}) {
}
// Hermes runtime status and model metadata for the agent-details WebUI tab.
-router.get("/:id/hermes-ui", asyncHandler(async (req, res) => {
- const agent = await loadHermesUiAgent(req);
-
- const runtimeAddress = resolveRuntimeAddress(agent);
- const dashboardAddress = resolveHermesDashboardAddress(agent);
- if (!runtimeAddress) {
- return res.status(409).json({ error: "Hermes runtime address not available" });
- }
+router.get(
+ "/:id/hermes-ui",
+ asyncHandler(async (req, res) => {
+ const agent = await loadHermesUiAgent(req);
+
+ const runtimeAddress = resolveRuntimeAddress(agent);
+ const dashboardAddress = resolveHermesDashboardAddress(agent);
+ if (!runtimeAddress) {
+ return res.status(409).json({ error: "Hermes runtime address not available" });
+ }
- let health = { ok: false, error: "Hermes runtime not ready yet" };
- let models = [];
- let modelsError = null;
- let gateway = null;
- let gatewayError = null;
- let directoryUpdatedAt = null;
- let configuredModel = null;
- let configuredProvider = null;
- let configuredBaseUrl = null;
- const dashboardBaseUrl = dashboardAddress
- ? dashboardUrlForAgent(agent, "")
- : null;
- let dashboard = {
- ready: false,
- url: dashboardBaseUrl,
- port: dashboardAddress?.port || null,
- health: null,
- retryable: true,
- error: dashboardAddress
- ? "Hermes dashboard not ready yet"
- : "Hermes dashboard endpoint not available",
- };
+ let health = { ok: false, error: "Hermes runtime not ready yet" };
+ let models = [];
+ let modelsError = null;
+ let gateway = null;
+ let gatewayError = null;
+ let directoryUpdatedAt = null;
+ let configuredModel = null;
+ let configuredProvider = null;
+ let configuredBaseUrl = null;
+ const dashboardBaseUrl = dashboardAddress ? dashboardUrlForAgent(agent, "") : null;
+ let dashboard = {
+ ready: false,
+ url: dashboardBaseUrl,
+ port: dashboardAddress?.port || null,
+ health: null,
+ retryable: true,
+ error: dashboardAddress
+ ? "Hermes dashboard not ready yet"
+ : "Hermes dashboard endpoint not available",
+ };
- try {
- const healthResponse = await fetchHermesApi(agent, "/health", {
- timeoutMs: 5000,
- });
- if (healthResponse.ok && healthResponse.data?.status === "ok") {
- health = {
- ok: true,
- ...healthResponse.data,
- };
- const modelsResponse = await fetchHermesApi(agent, "/v1/models", {
+ try {
+ const healthResponse = await fetchHermesApi(agent, "/health", {
timeoutMs: 5000,
});
- if (modelsResponse.ok && Array.isArray(modelsResponse.data?.data)) {
- models = modelsResponse.data.data;
+ if (healthResponse.ok && healthResponse.data?.status === "ok") {
+ health = {
+ ok: true,
+ ...healthResponse.data,
+ };
+ const modelsResponse = await fetchHermesApi(agent, "/v1/models", {
+ timeoutMs: 5000,
+ });
+ if (modelsResponse.ok && Array.isArray(modelsResponse.data?.data)) {
+ models = modelsResponse.data.data;
+ } else {
+ modelsError = extractHermesApiError(
+ modelsResponse.data,
+ `Hermes model listing returned ${modelsResponse.status}`,
+ );
+ }
} else {
- modelsError = extractHermesApiError(
- modelsResponse.data,
- `Hermes model listing returned ${modelsResponse.status}`
- );
+ health = {
+ ok: false,
+ error: extractHermesApiError(
+ healthResponse.data,
+ `Hermes runtime returned ${healthResponse.status}`,
+ ),
+ };
}
- } else {
+ } catch (error) {
health = {
ok: false,
- error: extractHermesApiError(
- healthResponse.data,
- `Hermes runtime returned ${healthResponse.status}`
- ),
- };
- }
- } catch (error) {
- health = {
- ok: false,
- error: error.message || "Hermes runtime not reachable",
- };
- }
-
- try {
- const dashboardResponse = await fetchHermesDashboard(agent, "/api/status", {
- timeoutMs: 5000,
- });
- if (dashboardResponse.ok) {
- dashboard = {
- ready: true,
- url: dashboardBaseUrl,
- port: dashboardAddress?.port || null,
- health: buildHermesDashboardSummary(dashboardResponse.data),
- retryable: false,
- error: null,
- };
- } else {
- dashboard = {
- ready: false,
- url: dashboardBaseUrl,
- port: dashboardAddress?.port || null,
- health: null,
- retryable: true,
- error: extractHermesApiError(
- dashboardResponse.data,
- `Hermes dashboard returned ${dashboardResponse.status}`
- ),
+ error: error.message || "Hermes runtime not reachable",
};
}
- } catch (error) {
- const ensuredDashboard = await ensureHermesDashboardProcess(agent);
- if (
- ensuredDashboard.status === "started" ||
- ensuredDashboard.status === "already-running"
- ) {
- try {
- const dashboardResponse = await fetchHermesDashboard(agent, "/api/status", {
- timeoutMs: 5000,
- });
- if (dashboardResponse.ok) {
- dashboard = {
- ready: true,
- url: dashboardBaseUrl,
- port: dashboardAddress?.port || null,
- health: buildHermesDashboardSummary(dashboardResponse.data),
- retryable: false,
- error: null,
- };
- } else {
+ try {
+ const dashboardResponse = await fetchHermesDashboard(agent, "/api/status", {
+ timeoutMs: 5000,
+ });
+ if (dashboardResponse.ok) {
+ dashboard = {
+ ready: true,
+ url: dashboardBaseUrl,
+ port: dashboardAddress?.port || null,
+ health: buildHermesDashboardSummary(dashboardResponse.data),
+ retryable: false,
+ error: null,
+ };
+ } else {
+ dashboard = {
+ ready: false,
+ url: dashboardBaseUrl,
+ port: dashboardAddress?.port || null,
+ health: null,
+ retryable: true,
+ error: extractHermesApiError(
+ dashboardResponse.data,
+ `Hermes dashboard returned ${dashboardResponse.status}`,
+ ),
+ };
+ }
+ } catch (error) {
+ const ensuredDashboard = await ensureHermesDashboardProcess(agent);
+
+ if (ensuredDashboard.status === "started" || ensuredDashboard.status === "already-running") {
+ try {
+ const dashboardResponse = await fetchHermesDashboard(agent, "/api/status", {
+ timeoutMs: 5000,
+ });
+ if (dashboardResponse.ok) {
+ dashboard = {
+ ready: true,
+ url: dashboardBaseUrl,
+ port: dashboardAddress?.port || null,
+ health: buildHermesDashboardSummary(dashboardResponse.data),
+ retryable: false,
+ error: null,
+ };
+ } else {
+ dashboard = {
+ ready: false,
+ url: dashboardBaseUrl,
+ port: dashboardAddress?.port || null,
+ health: null,
+ retryable: true,
+ error: extractHermesApiError(
+ dashboardResponse.data,
+ `Hermes dashboard returned ${dashboardResponse.status}`,
+ ),
+ };
+ }
+ } catch (retryError) {
dashboard = {
ready: false,
url: dashboardBaseUrl,
port: dashboardAddress?.port || null,
health: null,
retryable: true,
- error: extractHermesApiError(
- dashboardResponse.data,
- `Hermes dashboard returned ${dashboardResponse.status}`
- ),
+ error: retryError.message || "Hermes dashboard not reachable",
};
}
- } catch (retryError) {
+ } else if (
+ ensuredDashboard.status === "missing-dashboard" ||
+ ensuredDashboard.status === "missing-web-server" ||
+ ensuredDashboard.status === "missing-cli"
+ ) {
+ dashboard = {
+ ready: false,
+ url: dashboardBaseUrl,
+ port: dashboardAddress?.port || null,
+ health: null,
+ retryable: false,
+ error: buildHermesDashboardUnsupportedMessage(ensuredDashboard.version),
+ };
+ } else if (ensuredDashboard.status === "start-failed") {
dashboard = {
ready: false,
url: dashboardBaseUrl,
port: dashboardAddress?.port || null,
health: null,
retryable: true,
- error: retryError.message || "Hermes dashboard not reachable",
+ error:
+ "Hermes dashboard failed to start inside the running agent. Check the container logs or redeploy the agent.",
+ };
+ } else {
+ dashboard = {
+ ready: false,
+ url: dashboardBaseUrl,
+ port: dashboardAddress?.port || null,
+ health: null,
+ retryable: true,
+ error: error.message || "Hermes dashboard not reachable",
};
}
- } else if (
- ensuredDashboard.status === "missing-dashboard" ||
- ensuredDashboard.status === "missing-web-server" ||
- ensuredDashboard.status === "missing-cli"
- ) {
- dashboard = {
- ready: false,
- url: dashboardBaseUrl,
- port: dashboardAddress?.port || null,
- health: null,
- retryable: false,
- error: buildHermesDashboardUnsupportedMessage(ensuredDashboard.version),
- };
- } else if (ensuredDashboard.status === "start-failed") {
- dashboard = {
- ready: false,
- url: dashboardBaseUrl,
- port: dashboardAddress?.port || null,
- health: null,
- retryable: true,
- error:
- "Hermes dashboard failed to start inside the running agent. Check the container logs or redeploy the agent.",
- };
- } else {
- dashboard = {
- ready: false,
- url: dashboardBaseUrl,
- port: dashboardAddress?.port || null,
- health: null,
- retryable: true,
- error: error.message || "Hermes dashboard not reachable",
- };
}
- }
-
- try {
- const snapshot = await readHermesRuntimeSnapshot(agent);
- gateway = buildHermesGatewaySummary(snapshot);
- directoryUpdatedAt = snapshot?.directory?.updated_at || null;
- configuredModel =
- typeof snapshot?.modelConfig?.defaultModel === "string" &&
- snapshot.modelConfig.defaultModel.trim()
- ? snapshot.modelConfig.defaultModel.trim()
- : null;
- configuredProvider =
- typeof snapshot?.modelConfig?.provider === "string" &&
- snapshot.modelConfig.provider.trim()
- ? snapshot.modelConfig.provider.trim()
- : null;
- configuredBaseUrl =
- typeof snapshot?.modelConfig?.baseUrl === "string" &&
- snapshot.modelConfig.baseUrl.trim()
- ? snapshot.modelConfig.baseUrl.trim()
- : null;
- } catch (error) {
- gatewayError = error.message || "Failed to read Hermes gateway state";
- }
-
- res.json({
- url: runtimeUrlForAgent(agent, "/v1"),
- runtime: runtimeAddress,
- health,
- dashboard,
- models,
- defaultModel: configuredModel || models[0]?.id || null,
- configuredModel,
- configuredProvider,
- configuredBaseUrl,
- directoryUpdatedAt,
- ...(gateway ? { gateway } : {}),
- ...(modelsError ? { modelsError } : {}),
- ...(gatewayError ? { gatewayError } : {}),
- });
-}));
-
-router.post("/:id/hermes-ui/chat", asyncHandler(async (req, res) => {
- const agent = await loadHermesUiAgent(req);
-
- const messages = (Array.isArray(req.body?.messages) ? req.body.messages : [])
- .map((entry) => ({
- role: String(entry?.role || "").trim(),
- content: String(entry?.content || ""),
- }))
- .filter(
- (entry) =>
- ["system", "user", "assistant"].includes(entry.role) &&
- entry.content.trim()
- );
-
- if (!messages.length) {
- return res.status(400).json({ error: "At least one chat message is required" });
- }
- if (messages[messages.length - 1]?.role !== "user") {
- return res.status(400).json({
- error: "Hermes chat requests must end with a user message",
- });
- }
-
- const requestedModel =
- typeof req.body?.model === "string" ? req.body.model.trim() : "";
- const sessionId =
- typeof req.body?.sessionId === "string" ? req.body.sessionId.trim() : "";
+ try {
+ const snapshot = await readHermesRuntimeSnapshot(agent);
+ gateway = buildHermesGatewaySummary(snapshot);
+ directoryUpdatedAt = snapshot?.directory?.updated_at || null;
+ configuredModel =
+ typeof snapshot?.modelConfig?.defaultModel === "string" &&
+ snapshot.modelConfig.defaultModel.trim()
+ ? snapshot.modelConfig.defaultModel.trim()
+ : null;
+ configuredProvider =
+ typeof snapshot?.modelConfig?.provider === "string" && snapshot.modelConfig.provider.trim()
+ ? snapshot.modelConfig.provider.trim()
+ : null;
+ configuredBaseUrl =
+ typeof snapshot?.modelConfig?.baseUrl === "string" && snapshot.modelConfig.baseUrl.trim()
+ ? snapshot.modelConfig.baseUrl.trim()
+ : null;
+ } catch (error) {
+ gatewayError = error.message || "Failed to read Hermes gateway state";
+ }
- let chatResponse;
- try {
- chatResponse = await fetchHermesApi(agent, "/v1/chat/completions", {
- method: "POST",
- timeoutMs: 240000,
- headers: sessionId
- ? {
- "X-Hermes-Session-Id": sessionId,
- }
- : undefined,
- body: {
- ...(requestedModel ? { model: requestedModel } : {}),
- stream: false,
- messages,
- },
+ res.json({
+ url: runtimeUrlForAgent(agent, "/v1"),
+ runtime: runtimeAddress,
+ health,
+ dashboard,
+ models,
+ defaultModel: configuredModel || models[0]?.id || null,
+ configuredModel,
+ configuredProvider,
+ configuredBaseUrl,
+ directoryUpdatedAt,
+ ...(gateway ? { gateway } : {}),
+ ...(modelsError ? { modelsError } : {}),
+ ...(gatewayError ? { gatewayError } : {}),
});
- } catch (error) {
- return res
- .status(error.statusCode || 502)
- .json({ error: error.message || "Hermes runtime unreachable" });
- }
+ }),
+);
+
+router.post(
+ "/:id/hermes-ui/chat",
+ asyncHandler(async (req, res) => {
+ const agent = await loadHermesUiAgent(req);
+
+ const messages = (Array.isArray(req.body?.messages) ? req.body.messages : [])
+ .map((entry) => ({
+ role: String(entry?.role || "").trim(),
+ content: String(entry?.content || ""),
+ }))
+ .filter(
+ (entry) => ["system", "user", "assistant"].includes(entry.role) && entry.content.trim(),
+ );
- if (!chatResponse.ok) {
- const upstreamStatus =
- chatResponse.status >= 500 ? 502 : chatResponse.status;
- return res.status(upstreamStatus).json({
- error: extractHermesApiError(
- chatResponse.data,
- `Hermes chat returned ${chatResponse.status}`
- ),
- });
- }
+ if (!messages.length) {
+ return res.status(400).json({ error: "At least one chat message is required" });
+ }
- const assistantMessage =
- chatResponse.data?.choices?.[0]?.message?.content || "";
- if (!assistantMessage) {
- return res.status(502).json({
- error: "Hermes chat returned an empty assistant message",
- });
- }
+ if (messages[messages.length - 1]?.role !== "user") {
+ return res.status(400).json({
+ error: "Hermes chat requests must end with a user message",
+ });
+ }
- res.json({
- message: assistantMessage,
- usage: chatResponse.data?.usage || null,
- model: chatResponse.data?.model || requestedModel || null,
- sessionId:
- chatResponse.headers.get("x-hermes-session-id") || sessionId || null,
- });
-}));
+ const requestedModel = typeof req.body?.model === "string" ? req.body.model.trim() : "";
+ const sessionId = typeof req.body?.sessionId === "string" ? req.body.sessionId.trim() : "";
-router.get("/:id/hermes-ui/cron", asyncHandler(async (req, res) => {
- const agent = await loadHermesUiAgent(req);
+ let chatResponse;
+ try {
+ chatResponse = await fetchHermesApi(agent, "/v1/chat/completions", {
+ method: "POST",
+ timeoutMs: 240000,
+ headers: sessionId
+ ? {
+ "X-Hermes-Session-Id": sessionId,
+ }
+ : undefined,
+ body: {
+ ...(requestedModel ? { model: requestedModel } : {}),
+ stream: false,
+ messages,
+ },
+ });
+ } catch (error) {
+ return res
+ .status(error.statusCode || 502)
+ .json({ error: error.message || "Hermes runtime unreachable" });
+ }
- try {
- const cronResponse = await fetchHermesApi(
- agent,
- "/api/jobs?include_disabled=true",
- { timeoutMs: 10000 }
- );
- if (!cronResponse.ok) {
- return res.status(cronResponse.status >= 500 ? 502 : cronResponse.status).json({
+ if (!chatResponse.ok) {
+ const upstreamStatus = chatResponse.status >= 500 ? 502 : chatResponse.status;
+ return res.status(upstreamStatus).json({
error: extractHermesApiError(
- cronResponse.data,
- `Hermes cron listing returned ${cronResponse.status}`
+ chatResponse.data,
+ `Hermes chat returned ${chatResponse.status}`,
),
});
}
- res.json(normalizeHermesCronListPayload(cronResponse.data));
- } catch (error) {
- res.status(error.statusCode || 502).json({
- error: error.message || "Hermes cron endpoint unreachable",
+ const assistantMessage = chatResponse.data?.choices?.[0]?.message?.content || "";
+ if (!assistantMessage) {
+ return res.status(502).json({
+ error: "Hermes chat returned an empty assistant message",
+ });
+ }
+
+ res.json({
+ message: assistantMessage,
+ usage: chatResponse.data?.usage || null,
+ model: chatResponse.data?.model || requestedModel || null,
+ sessionId: chatResponse.headers.get("x-hermes-session-id") || sessionId || null,
});
- }
-}));
+ }),
+);
-router.post("/:id/hermes-ui/cron", asyncHandler(async (req, res) => {
- const agent = await loadHermesUiAgent(req);
+router.get(
+ "/:id/hermes-ui/cron",
+ asyncHandler(async (req, res) => {
+ const agent = await loadHermesUiAgent(req);
- try {
- const cronResponse = await fetchHermesApi(agent, "/api/jobs", {
- method: "POST",
- timeoutMs: 15000,
- body: normalizeHermesCronPayload(req.body),
- });
- if (!cronResponse.ok) {
- return res.status(cronResponse.status >= 500 ? 502 : cronResponse.status).json({
- error: extractHermesApiError(
- cronResponse.data,
- `Hermes cron creation returned ${cronResponse.status}`
- ),
+ try {
+ const cronResponse = await fetchHermesApi(agent, "/api/jobs?include_disabled=true", {
+ timeoutMs: 10000,
});
- }
+ if (!cronResponse.ok) {
+ return res.status(cronResponse.status >= 500 ? 502 : cronResponse.status).json({
+ error: extractHermesApiError(
+ cronResponse.data,
+ `Hermes cron listing returned ${cronResponse.status}`,
+ ),
+ });
+ }
- res.json(
- cronResponse.data && typeof cronResponse.data === "object"
- ? cronResponse.data
- : { job: null }
- );
- } catch (error) {
- res.status(error.statusCode || 502).json({
- error: error.message || "Hermes cron endpoint unreachable",
- });
- }
-}));
+ res.json(normalizeHermesCronListPayload(cronResponse.data));
+ } catch (error) {
+ res.status(error.statusCode || 502).json({
+ error: error.message || "Hermes cron endpoint unreachable",
+ });
+ }
+ }),
+);
-router.delete("/:id/hermes-ui/cron/:jobId", asyncHandler(async (req, res) => {
- const agent = await loadHermesUiAgent(req);
+router.post(
+ "/:id/hermes-ui/cron",
+ asyncHandler(async (req, res) => {
+ const agent = await loadHermesUiAgent(req);
- try {
- const cronResponse = await fetchHermesApi(
- agent,
- `/api/jobs/${encodeURIComponent(req.params.jobId)}`,
- {
- method: "DELETE",
+ try {
+ const cronResponse = await fetchHermesApi(agent, "/api/jobs", {
+ method: "POST",
timeoutMs: 15000,
+ body: normalizeHermesCronPayload(req.body),
+ });
+ if (!cronResponse.ok) {
+ return res.status(cronResponse.status >= 500 ? 502 : cronResponse.status).json({
+ error: extractHermesApiError(
+ cronResponse.data,
+ `Hermes cron creation returned ${cronResponse.status}`,
+ ),
+ });
}
- );
- if (!cronResponse.ok) {
- return res.status(cronResponse.status >= 500 ? 502 : cronResponse.status).json({
- error: extractHermesApiError(
- cronResponse.data,
- `Hermes cron deletion returned ${cronResponse.status}`
- ),
+
+ res.json(
+ cronResponse.data && typeof cronResponse.data === "object"
+ ? cronResponse.data
+ : { job: null },
+ );
+ } catch (error) {
+ res.status(error.statusCode || 502).json({
+ error: error.message || "Hermes cron endpoint unreachable",
});
}
+ }),
+);
- res.json({
- success: true,
- ...(cronResponse.data && typeof cronResponse.data === "object"
- ? cronResponse.data
- : {}),
- });
- } catch (error) {
- res.status(error.statusCode || 502).json({
- error: error.message || "Hermes cron endpoint unreachable",
- });
- }
-}));
+router.delete(
+ "/:id/hermes-ui/cron/:jobId",
+ asyncHandler(async (req, res) => {
+ const agent = await loadHermesUiAgent(req);
-router.get("/:id/hermes-ui/channels", asyncHandler(async (req, res) => {
- const agent = await loadHermesUiAgent(req);
+ try {
+ const cronResponse = await fetchHermesApi(
+ agent,
+ `/api/jobs/${encodeURIComponent(req.params.jobId)}`,
+ {
+ method: "DELETE",
+ timeoutMs: 15000,
+ },
+ );
+ if (!cronResponse.ok) {
+ return res.status(cronResponse.status >= 500 ? 502 : cronResponse.status).json({
+ error: extractHermesApiError(
+ cronResponse.data,
+ `Hermes cron deletion returned ${cronResponse.status}`,
+ ),
+ });
+ }
- try {
- res.json(await listHermesChannels(agent));
- } catch (error) {
- res.status(error.statusCode || 500).json({
- error: error.message || "Failed to load Hermes channels",
- });
- }
-}));
+ res.json({
+ success: true,
+ ...(cronResponse.data && typeof cronResponse.data === "object" ? cronResponse.data : {}),
+ });
+ } catch (error) {
+ res.status(error.statusCode || 502).json({
+ error: error.message || "Hermes cron endpoint unreachable",
+ });
+ }
+ }),
+);
-router.post("/:id/hermes-ui/channels", asyncHandler(async (req, res) => {
- const agent = await loadHermesUiAgent(req);
- const type =
- typeof req.body?.type === "string" ? req.body.type.trim().toLowerCase() : "";
+router.get(
+ "/:id/hermes-ui/channels",
+ asyncHandler(async (req, res) => {
+ const agent = await loadHermesUiAgent(req);
- if (!type) {
- return res.status(400).json({ error: "Channel type is required" });
- }
+ try {
+ res.json(await listHermesChannels(agent));
+ } catch (error) {
+ res.status(error.statusCode || 500).json({
+ error: error.message || "Failed to load Hermes channels",
+ });
+ }
+ }),
+);
- try {
- res.json(
- await saveHermesChannel(agent, type, resolveHermesChannelConfig(req.body), {
- create: true,
- })
- );
- } catch (error) {
- res.status(error.statusCode || 500).json({
- error: error.message || "Failed to save Hermes channel",
- });
- }
-}));
+router.post(
+ "/:id/hermes-ui/channels",
+ asyncHandler(async (req, res) => {
+ const agent = await loadHermesUiAgent(req);
+ const type = typeof req.body?.type === "string" ? req.body.type.trim().toLowerCase() : "";
-router.patch("/:id/hermes-ui/channels/:channelId", asyncHandler(async (req, res) => {
- const agent = await loadHermesUiAgent(req);
+ if (!type) {
+ return res.status(400).json({ error: "Channel type is required" });
+ }
- try {
- res.json(
- await saveHermesChannel(
- agent,
- req.params.channelId,
- resolveHermesChannelConfig(req.body)
- )
- );
- } catch (error) {
- res.status(error.statusCode || 500).json({
- error: error.message || "Failed to update Hermes channel",
- });
- }
-}));
+ try {
+ res.json(
+ await saveHermesChannel(agent, type, resolveHermesChannelConfig(req.body), {
+ create: true,
+ }),
+ );
+ } catch (error) {
+ res.status(error.statusCode || 500).json({
+ error: error.message || "Failed to save Hermes channel",
+ });
+ }
+ }),
+);
-router.delete("/:id/hermes-ui/channels/:channelId", asyncHandler(async (req, res) => {
- const agent = await loadHermesUiAgent(req);
+router.patch(
+ "/:id/hermes-ui/channels/:channelId",
+ asyncHandler(async (req, res) => {
+ const agent = await loadHermesUiAgent(req);
- try {
- res.json(await deleteHermesChannel(agent, req.params.channelId));
- } catch (error) {
- res.status(error.statusCode || 500).json({
- error: error.message || "Failed to delete Hermes channel",
- });
- }
-}));
+ try {
+ res.json(
+ await saveHermesChannel(agent, req.params.channelId, resolveHermesChannelConfig(req.body)),
+ );
+ } catch (error) {
+ res.status(error.statusCode || 500).json({
+ error: error.message || "Failed to update Hermes channel",
+ });
+ }
+ }),
+);
-router.post("/:id/hermes-ui/channels/:channelId/test", asyncHandler(async (req, res) => {
- const agent = await loadHermesUiAgent(req);
+router.delete(
+ "/:id/hermes-ui/channels/:channelId",
+ asyncHandler(async (req, res) => {
+ const agent = await loadHermesUiAgent(req);
- try {
- res.json(await testHermesChannel(agent, req.params.channelId));
- } catch (error) {
- res.status(error.statusCode || 500).json({
- error: error.message || "Failed to test Hermes channel",
- });
- }
-}));
+ try {
+ res.json(await deleteHermesChannel(agent, req.params.channelId));
+ } catch (error) {
+ res.status(error.statusCode || 500).json({
+ error: error.message || "Failed to delete Hermes channel",
+ });
+ }
+ }),
+);
+
+router.post(
+ "/:id/hermes-ui/channels/:channelId/test",
+ asyncHandler(async (req, res) => {
+ const agent = await loadHermesUiAgent(req);
+
+ try {
+ res.json(await testHermesChannel(agent, req.params.channelId));
+ } catch (error) {
+ res.status(error.statusCode || 500).json({
+ error: error.message || "Failed to test Hermes channel",
+ });
+ }
+ }),
+);
// Live container resource stats (CPU, memory, network, PIDs)
-router.get("/:id/stats", asyncHandler(async (req, res) => {
- const result = await db.query(
- "SELECT * FROM agents WHERE id = $1 AND user_id = $2",
- [req.params.id, req.user.id]
- );
- const agent = result.rows[0];
- if (!agent) return res.status(404).json({ error: "Agent not found" });
- res.json(await buildAgentStatsResponse(agent));
-}));
+router.get(
+ "/:id/stats",
+ asyncHandler(async (req, res) => {
+ const result = await db.query("SELECT * FROM agents WHERE id = $1 AND user_id = $2", [
+ req.params.id,
+ req.user.id,
+ ]);
+ const agent = result.rows[0];
+ if (!agent) return res.status(404).json({ error: "Agent not found" });
+ res.json(await buildAgentStatsResponse(agent));
+ }),
+);
router.post("/deploy", async (req, res) => {
try {
const requestBody = req.body || {};
+ const clawhubSkills = normalizeClawhubSkills(requestBody.clawhub_skills);
// Enforce billing limits
const limits = await billing.enforceLimits(req.user.id);
- if (!limits.allowed) return res.status(402).json({ error: limits.error, subscription: limits.subscription });
+ if (!limits.allowed)
+ return res.status(402).json({ error: limits.error, subscription: limits.subscription });
const sub = limits.subscription;
let migrationDraft = null;
if (requestBody.migration_draft_id) {
- migrationDraft = await getOwnedMigrationDraft(
- requestBody.migration_draft_id,
- req.user.id
- );
+ migrationDraft = await getOwnedMigrationDraft(requestBody.migration_draft_id, req.user.id);
if (!migrationDraft) {
return res.status(404).json({ error: "Migration draft not found" });
}
@@ -1191,11 +1241,10 @@ router.post("/deploy", async (req, res) => {
const name = sanitizeAgentName(
requestBody.name,
migrationDraft?.manifest?.name ||
- (migrationDraft?.manifest?.runtimeFamily === "hermes"
- ? "Hermes-Agent"
- : "OpenClaw-Agent")
+ (migrationDraft?.manifest?.runtimeFamily === "hermes" ? "Hermes-Agent" : "OpenClaw-Agent"),
);
- if (name.length > 100) return res.status(400).json({ error: "Agent name must be 100 characters or less" });
+ if (name.length > 100)
+ return res.status(400).json({ error: "Agent name must be 100 characters or less" });
const runtimeFields = resolveRequestedRuntimeFields({
request: {
...requestBody,
@@ -1208,10 +1257,7 @@ router.post("/deploy", async (req, res) => {
runtimeSelection: runtimeFields,
});
assertSupportedRuntimeSelection(runtimeFields);
- if (
- migrationDraft &&
- runtimeFields.runtime_family !== migrationDraft.manifest.runtimeFamily
- ) {
+ if (migrationDraft && runtimeFields.runtime_family !== migrationDraft.manifest.runtimeFamily) {
return res.status(400).json({
error: `Migration draft targets the ${migrationDraft.manifest.runtimeFamily} runtime family and cannot be deployed as ${runtimeFields.runtime_family}.`,
});
@@ -1228,7 +1274,7 @@ router.post("/deploy", async (req, res) => {
// Self-hosted: accept user-chosen values clamped to operator limits
specs = clampDeploymentDefaults(
normalizeDeploymentDefaults(requestBody, deploymentDefaults),
- billing.SELFHOSTED_LIMITS
+ billing.SELFHOSTED_LIMITS,
);
} else {
// PaaS: resources are controlled by the operator-managed deployment defaults.
@@ -1240,23 +1286,42 @@ router.post("/deploy", async (req, res) => {
});
const templatePayload = migrationDraft
? migrationDraft.manifest.runtimeFamily === "openclaw"
- ? migrationDraft.manifest.templatePayload || createEmptyTemplatePayload({
- source: "migration-draft",
- })
+ ? migrationDraft.manifest.templatePayload ||
+ ensureCoreTemplateFiles(
+ createEmptyTemplatePayload({
+ source: "migration-draft",
+ }),
+ {
+ name,
+ sourceType: "platform",
+ includeBootstrap: true,
+ },
+ )
: createEmptyTemplatePayload({
source: "migration-draft",
migrationDraftId: migrationDraft.id,
})
- : createEmptyTemplatePayload({
- source: "blank-deploy",
- });
+ : runtimeFields.runtime_family === "openclaw"
+ ? ensureCoreTemplateFiles(
+ createEmptyTemplatePayload({
+ source: "blank-deploy",
+ }),
+ {
+ name,
+ sourceType: "platform",
+ includeBootstrap: true,
+ },
+ )
+ : createEmptyTemplatePayload({
+ source: "blank-deploy",
+ });
const result = await db.query(
`INSERT INTO agents(
user_id, name, status, node, backend_type, sandbox_type, vcpu, ram_mb, disk_gb,
- container_name, image, template_payload, runtime_family, deploy_target,
+ container_name, image, template_payload, clawhub_skills, runtime_family, deploy_target,
sandbox_profile
- ) VALUES($1, $2, 'queued', $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14) RETURNING *`,
+ ) VALUES($1, $2, 'queued', $3, $4, $5, $6, $7, $8, $9, $10, $11, $12::jsonb, $13, $14, $15) RETURNING *`,
[
req.user.id,
name,
@@ -1269,10 +1334,11 @@ router.post("/deploy", async (req, res) => {
containerName,
image,
JSON.stringify(templatePayload),
+ JSON.stringify(clawhubSkills),
runtimeFields.runtime_family,
runtimeFields.deploy_target,
runtimeFields.sandbox_profile,
- ]
+ ],
);
const agent = result.rows[0];
@@ -1294,10 +1360,7 @@ router.post("/deploy", async (req, res) => {
await attachDraftToAgent(migrationDraft.id, agent.id);
}
- await db.query(
- "INSERT INTO deployments(agent_id, status) VALUES($1, 'queued')",
- [agent.id]
- );
+ await db.query("INSERT INTO deployments(agent_id, status) VALUES($1, 'queued')", [agent.id]);
await addDeploymentJob({
id: agent.id,
@@ -1311,6 +1374,7 @@ router.post("/deploy", async (req, res) => {
image,
model: runtimeFields.sandbox_profile === "nemoclaw" ? req.body.model || null : null,
migration_draft_id: migrationDraft?.id || null,
+ clawhub_skills: clawhubSkills,
});
const deployType = backendStatus.label;
@@ -1329,7 +1393,7 @@ router.post("/deploy", async (req, res) => {
containerName,
migrationDraftId: migrationDraft?.id || null,
},
- })
+ }),
);
res.json(serializeAgent(agent));
@@ -1338,185 +1402,190 @@ router.post("/deploy", async (req, res) => {
}
});
-router.patch("/:id", asyncHandler(async (req, res) => {
- const result = await db.query(
- "SELECT * FROM agents WHERE id = $1 AND user_id = $2",
- [req.params.id, req.user.id]
- );
- const agent = result.rows[0];
- if (!agent) return res.status(404).json({ error: "Agent not found" });
+router.patch(
+ "/:id",
+ asyncHandler(async (req, res) => {
+ const result = await db.query("SELECT * FROM agents WHERE id = $1 AND user_id = $2", [
+ req.params.id,
+ req.user.id,
+ ]);
+ const agent = result.rows[0];
+ if (!agent) return res.status(404).json({ error: "Agent not found" });
- const name = sanitizeAgentName(req.body.name, agent.name || "OpenClaw-Agent");
- if (name.length > 100) {
- return res.status(400).json({ error: "Agent name must be 100 characters or less" });
- }
+ const name = sanitizeAgentName(req.body.name, agent.name || "OpenClaw-Agent");
+ if (name.length > 100) {
+ return res.status(400).json({ error: "Agent name must be 100 characters or less" });
+ }
- const updated = await db.query(
- "UPDATE agents SET name = $1 WHERE id = $2 RETURNING *",
- [name, agent.id]
- );
- await monitoring.logEvent(
- "agent_renamed",
- `Agent renamed to "${name}"`,
- agentAuditMetadata(req, updated.rows[0], {
- result: {
- previousName: agent.name,
- nextName: name,
- },
- })
- );
- res.json(serializeAgent(updated.rows[0]));
-}));
-
-router.post("/:id/duplicate", asyncHandler(async (req, res) => {
- const requestBody = req.body || {};
- const limits = await billing.enforceLimits(req.user.id);
- if (!limits.allowed) {
- return res.status(402).json({ error: limits.error, subscription: limits.subscription });
- }
+ const updated = await db.query("UPDATE agents SET name = $1 WHERE id = $2 RETURNING *", [
+ name,
+ agent.id,
+ ]);
+ await monitoring.logEvent(
+ "agent_renamed",
+ `Agent renamed to "${name}"`,
+ agentAuditMetadata(req, updated.rows[0], {
+ result: {
+ previousName: agent.name,
+ nextName: name,
+ },
+ }),
+ );
+ res.json(serializeAgent(updated.rows[0]));
+ }),
+);
- const sourceResult = await db.query(
- "SELECT * FROM agents WHERE id = $1 AND user_id = $2",
- [req.params.id, req.user.id]
- );
- const sourceAgent = sourceResult.rows[0];
- if (!sourceAgent) return res.status(404).json({ error: "Agent not found" });
- const sourceRuntime = buildAgentRuntimeFields(sourceAgent);
- res.locals.auditContext = buildAgentContext(sourceAgent, {
- ownerEmail: req.user.email || null,
- });
+router.post(
+ "/:id/duplicate",
+ asyncHandler(async (req, res) => {
+ const requestBody = req.body || {};
+ const limits = await billing.enforceLimits(req.user.id);
+ if (!limits.allowed) {
+ return res.status(402).json({ error: limits.error, subscription: limits.subscription });
+ }
- const cloneMode = CLONE_MODES.has(requestBody.clone_mode)
- ? requestBody.clone_mode
- : "files_only";
- const runtimeFamily = normalizeRequestedRuntimeFamily(requestBody.runtime_family);
- if (requestBody.runtime_family != null && runtimeFamily == null) {
- return res.status(400).json({
- error: `Unsupported runtime_family. Nora currently supports: ${KNOWN_RUNTIME_FAMILIES.map((value) => `"${value}"`).join(", ")}.`,
+ const sourceResult = await db.query("SELECT * FROM agents WHERE id = $1 AND user_id = $2", [
+ req.params.id,
+ req.user.id,
+ ]);
+ const sourceAgent = sourceResult.rows[0];
+ if (!sourceAgent) return res.status(404).json({ error: "Agent not found" });
+ const sourceRuntime = buildAgentRuntimeFields(sourceAgent);
+ res.locals.auditContext = buildAgentContext(sourceAgent, {
+ ownerEmail: req.user.email || null,
});
- }
- const name = sanitizeAgentName(
- requestBody.name,
- `${sourceAgent.name || "OpenClaw-Agent"} Copy`
- );
- if (name.length > 100) {
- return res.status(400).json({ error: "Agent name must be 100 characters or less" });
- }
- const runtimeFields = resolveRequestedRuntimeFields({
- request: {
- ...requestBody,
- runtime_family: runtimeFamily || sourceRuntime.runtime_family,
- },
- fallback: sourceRuntime,
- });
- assertSupportedRuntimeSelection(runtimeFields);
- assertBackendAvailable(runtimeFields.backend_type);
- const node = await scheduler.selectNode({
- fallback: runtimeFields.deploy_target,
- });
- const specs = {
- vcpu: sourceAgent.vcpu || 2,
- ram_mb: sourceAgent.ram_mb || 2048,
- disk_gb: sourceAgent.disk_gb || 20,
- };
- const image = resolveRequestedImage({
- requestedImage: requestBody.image,
- runtimeFields,
- fallbackImage: sourceAgent.image || null,
- fallbackRuntimeFields: sourceRuntime,
- });
- const containerName = resolveContainerName({
- requestedName: requestBody.container_name,
- agentName: name,
- runtimeSelection: runtimeFields,
- });
+ const cloneMode = CLONE_MODES.has(requestBody.clone_mode)
+ ? requestBody.clone_mode
+ : "files_only";
+ const runtimeFamily = normalizeRequestedRuntimeFamily(requestBody.runtime_family);
+ if (requestBody.runtime_family != null && runtimeFamily == null) {
+ return res.status(400).json({
+ error: `Unsupported runtime_family. Nora currently supports: ${KNOWN_RUNTIME_FAMILIES.map((value) => `"${value}"`).join(", ")}.`,
+ });
+ }
+ const name = sanitizeAgentName(
+ requestBody.name,
+ `${sourceAgent.name || "OpenClaw-Agent"} Copy`,
+ );
+ if (name.length > 100) {
+ return res.status(400).json({ error: "Agent name must be 100 characters or less" });
+ }
- let templatePayload;
- try {
- templatePayload = await buildTemplatePayloadFromAgent(sourceAgent, cloneMode);
- } catch (err) {
- return res.status(409).json({ error: err.message });
- }
+ const runtimeFields = resolveRequestedRuntimeFields({
+ request: {
+ ...requestBody,
+ runtime_family: runtimeFamily || sourceRuntime.runtime_family,
+ },
+ fallback: sourceRuntime,
+ });
+ assertSupportedRuntimeSelection(runtimeFields);
+ assertBackendAvailable(runtimeFields.backend_type);
+ const node = await scheduler.selectNode({
+ fallback: runtimeFields.deploy_target,
+ });
+ const specs = {
+ vcpu: sourceAgent.vcpu || 2,
+ ram_mb: sourceAgent.ram_mb || 2048,
+ disk_gb: sourceAgent.disk_gb || 20,
+ };
+ const image = resolveRequestedImage({
+ requestedImage: requestBody.image,
+ runtimeFields,
+ fallbackImage: sourceAgent.image || null,
+ fallbackRuntimeFields: sourceRuntime,
+ });
+ const containerName = resolveContainerName({
+ requestedName: requestBody.container_name,
+ agentName: name,
+ runtimeSelection: runtimeFields,
+ });
+
+ let templatePayload;
+ try {
+ templatePayload = await buildTemplatePayloadFromAgent(sourceAgent, cloneMode);
+ } catch (err) {
+ return res.status(409).json({ error: err.message });
+ }
- const inserted = await db.query(
- `INSERT INTO agents(
+ const inserted = await db.query(
+ `INSERT INTO agents(
user_id, name, status, node, backend_type, sandbox_type, vcpu, ram_mb, disk_gb,
container_name, image, template_payload, runtime_family, deploy_target,
sandbox_profile
) VALUES($1, $2, 'queued', $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14) RETURNING *`,
- [
- req.user.id,
- name,
- node?.name || runtimeFields.deploy_target,
- runtimeFields.backend_type,
- runtimeFields.sandbox_type,
- specs.vcpu,
- specs.ram_mb,
- specs.disk_gb,
- containerName,
- image,
- JSON.stringify(templatePayload),
- runtimeFields.runtime_family,
- runtimeFields.deploy_target,
- runtimeFields.sandbox_profile,
- ]
- );
- const agent = inserted.rows[0];
+ [
+ req.user.id,
+ name,
+ node?.name || runtimeFields.deploy_target,
+ runtimeFields.backend_type,
+ runtimeFields.sandbox_type,
+ specs.vcpu,
+ specs.ram_mb,
+ specs.disk_gb,
+ containerName,
+ image,
+ JSON.stringify(templatePayload),
+ runtimeFields.runtime_family,
+ runtimeFields.deploy_target,
+ runtimeFields.sandbox_profile,
+ ],
+ );
+ const agent = inserted.rows[0];
- await materializeTemplateWiring(agent.id, templatePayload);
- await db.query(
- "INSERT INTO deployments(agent_id, status) VALUES($1, 'queued')",
- [agent.id]
- );
- await addDeploymentJob({
- id: agent.id,
- name: agent.name,
- userId: req.user.id,
- plan: limits.subscription.plan,
- backend: runtimeFields.backend_type,
- sandbox: runtimeFields.sandbox_profile,
- specs,
- container_name: containerName,
- image,
- });
- await monitoring.logEvent(
- "agent_duplicated",
- `Agent "${sourceAgent.name}" duplicated as "${agent.name}"`,
- agentAuditMetadata(req, agent, {
- sourceAgent: {
- id: sourceAgent.id,
- name: sourceAgent.name,
- },
- clone: {
- mode: cloneMode,
- runtimeFamily: runtimeFields.runtime_family,
- deployTarget: runtimeFields.deploy_target,
- sandboxProfile: runtimeFields.sandbox_profile,
- },
- })
- );
+ await materializeTemplateWiring(agent.id, templatePayload);
+ await db.query("INSERT INTO deployments(agent_id, status) VALUES($1, 'queued')", [agent.id]);
+ await addDeploymentJob({
+ id: agent.id,
+ name: agent.name,
+ userId: req.user.id,
+ plan: limits.subscription.plan,
+ backend: runtimeFields.backend_type,
+ sandbox: runtimeFields.sandbox_profile,
+ specs,
+ container_name: containerName,
+ image,
+ });
+ await monitoring.logEvent(
+ "agent_duplicated",
+ `Agent "${sourceAgent.name}" duplicated as "${agent.name}"`,
+ agentAuditMetadata(req, agent, {
+ sourceAgent: {
+ id: sourceAgent.id,
+ name: sourceAgent.name,
+ },
+ clone: {
+ mode: cloneMode,
+ runtimeFamily: runtimeFields.runtime_family,
+ deployTarget: runtimeFields.deploy_target,
+ sandboxProfile: runtimeFields.sandbox_profile,
+ },
+ }),
+ );
- res.json(serializeAgent(agent));
-}));
+ res.json(serializeAgent(agent));
+ }),
+);
router.post("/:id/start", async (req, res) => {
try {
- const result = await db.query(
- "SELECT * FROM agents WHERE id = $1 AND user_id = $2",
- [req.params.id, req.user.id]
- );
+ const result = await db.query("SELECT * FROM agents WHERE id = $1 AND user_id = $2", [
+ req.params.id,
+ req.user.id,
+ ]);
const agent = result.rows[0];
if (!agent) return res.status(404).json({ error: "Agent not found" });
res.locals.auditContext = buildAgentContext(agent, {
ownerEmail: req.user.email || null,
});
- if (!agent.container_id) return res.status(400).json({ error: "No container — redeploy the agent first" });
+ if (!agent.container_id)
+ return res.status(400).json({ error: "No container — redeploy the agent first" });
await containerManager.start(agent);
const updated = await db.query(
- "UPDATE agents SET status = 'running' WHERE id = $1 RETURNING *", [agent.id]
+ "UPDATE agents SET status = 'running' WHERE id = $1 RETURNING *",
+ [agent.id],
);
try {
const authSyncResults = await syncAuthToUserAgents(req.user.id, agent.id, {
@@ -1525,13 +1594,11 @@ router.post("/:id/start", async (req, res) => {
const failedSync = authSyncResults.find((result) => result.status === "failed");
if (failedSync) {
console.warn(
- `[agents.start] Auth sync failed for agent ${agent.id}: ${failedSync.error || "unknown error"}`
+ `[agents.start] Auth sync failed for agent ${agent.id}: ${failedSync.error || "unknown error"}`,
);
}
} catch (syncError) {
- console.warn(
- `[agents.start] Auth sync errored for agent ${agent.id}: ${syncError.message}`
- );
+ console.warn(`[agents.start] Auth sync errored for agent ${agent.id}: ${syncError.message}`);
}
await monitoring.logEvent(
@@ -1539,7 +1606,7 @@ router.post("/:id/start", async (req, res) => {
`Agent "${agent.name}" started`,
agentAuditMetadata(req, updated.rows[0], {
result: { status: "running" },
- })
+ }),
);
res.json(serializeAgent(updated.rows[0]));
} catch (e) {
@@ -1549,10 +1616,10 @@ router.post("/:id/start", async (req, res) => {
router.post("/:id/stop", async (req, res) => {
try {
- const result = await db.query(
- "SELECT * FROM agents WHERE id = $1 AND user_id = $2",
- [req.params.id, req.user.id]
- );
+ const result = await db.query("SELECT * FROM agents WHERE id = $1 AND user_id = $2", [
+ req.params.id,
+ req.user.id,
+ ]);
const agent = result.rows[0];
if (!agent) return res.status(404).json({ error: "Agent not found" });
res.locals.auditContext = buildAgentContext(agent, {
@@ -1570,14 +1637,15 @@ router.post("/:id/stop", async (req, res) => {
}
const updated = await db.query(
- "UPDATE agents SET status = 'stopped' WHERE id = $1 RETURNING *", [agent.id]
+ "UPDATE agents SET status = 'stopped' WHERE id = $1 RETURNING *",
+ [agent.id],
);
await monitoring.logEvent(
"agent_stopped",
`Agent "${agent.name}" stopped`,
agentAuditMetadata(req, updated.rows[0], {
result: { status: "stopped" },
- })
+ }),
);
res.json(serializeAgent(updated.rows[0]));
} catch (e) {
@@ -1586,10 +1654,10 @@ router.post("/:id/stop", async (req, res) => {
});
async function destroyAgent(agentId, userId, req, res) {
- const result = await db.query(
- "SELECT * FROM agents WHERE id = $1 AND user_id = $2",
- [agentId, userId]
- );
+ const result = await db.query("SELECT * FROM agents WHERE id = $1 AND user_id = $2", [
+ agentId,
+ userId,
+ ]);
const agent = result.rows[0];
if (!agent) return res.status(404).json({ error: "Agent not found" });
res.locals.auditContext = buildAgentContext(agent, {
@@ -1610,7 +1678,7 @@ async function destroyAgent(agentId, userId, req, res) {
`Agent "${agent.name}" deleted`,
agentAuditMetadata(req, agent, {
result: { deleted: true },
- })
+ }),
);
res.json({ success: true });
}
@@ -1633,16 +1701,17 @@ router.delete("/:id", async (req, res) => {
router.post("/:id/restart", async (req, res) => {
try {
- const result = await db.query(
- "SELECT * FROM agents WHERE id = $1 AND user_id = $2",
- [req.params.id, req.user.id]
- );
+ const result = await db.query("SELECT * FROM agents WHERE id = $1 AND user_id = $2", [
+ req.params.id,
+ req.user.id,
+ ]);
const agent = result.rows[0];
if (!agent) return res.status(404).json({ error: "Agent not found" });
res.locals.auditContext = buildAgentContext(agent, {
ownerEmail: req.user.email || null,
});
- if (!agent.container_id) return res.status(400).json({ error: "No container — redeploy the agent first" });
+ if (!agent.container_id)
+ return res.status(400).json({ error: "No container — redeploy the agent first" });
await containerManager.restart(agent);
@@ -1652,7 +1721,7 @@ router.post("/:id/restart", async (req, res) => {
`Agent "${agent.name}" restarted`,
agentAuditMetadata(req, agent, {
result: { status: "running" },
- })
+ }),
);
res.json({ success: true });
} catch (e) {
@@ -1663,17 +1732,19 @@ router.post("/:id/restart", async (req, res) => {
router.post("/:id/redeploy", async (req, res) => {
try {
const requestBody = req.body || {};
- const result = await db.query(
- "SELECT * FROM agents WHERE id = $1 AND user_id = $2",
- [req.params.id, req.user.id]
- );
+ const result = await db.query("SELECT * FROM agents WHERE id = $1 AND user_id = $2", [
+ req.params.id,
+ req.user.id,
+ ]);
const agent = result.rows[0];
if (!agent) return res.status(404).json({ error: "Agent not found" });
res.locals.auditContext = buildAgentContext(agent, {
ownerEmail: req.user.email || null,
});
if (!["warning", "error", "stopped"].includes(agent.status)) {
- return res.status(400).json({ error: "Agent must be in warning, error, or stopped state to redeploy" });
+ return res
+ .status(400)
+ .json({ error: "Agent must be in warning, error, or stopped state to redeploy" });
}
const runtimeFamily = normalizeRequestedRuntimeFamily(requestBody.runtime_family);
@@ -1734,13 +1805,10 @@ router.post("/:id/redeploy", async (req, res) => {
runtimeFields.sandbox_profile,
containerName,
image,
- ]
+ ],
);
- await db.query(
- "INSERT INTO deployments(agent_id, status) VALUES($1, 'queued')",
- [agent.id]
- );
+ await db.query("INSERT INTO deployments(agent_id, status) VALUES($1, 'queued')", [agent.id]);
await addDeploymentJob({
id: agent.id,
@@ -1764,7 +1832,7 @@ router.post("/:id/redeploy", async (req, res) => {
deployTarget: runtimeFields.deploy_target,
sandboxProfile: runtimeFields.sandbox_profile,
},
- })
+ }),
);
res.json({ success: true, status: "queued" });
diff --git a/backend-api/routes/clawhub.ts b/backend-api/routes/clawhub.ts
new file mode 100644
index 0000000..65d8e52
--- /dev/null
+++ b/backend-api/routes/clawhub.ts
@@ -0,0 +1,321 @@
+// @ts-nocheck
+const express = require("express");
+const { getSkillDetail, listSkills, searchSkills } = require("../clawhubClient");
+const {
+ addClawhubInstallJob,
+ findInFlightClawhubInstallJob,
+ getClawhubInstallJobStatus,
+} = require("../redisQueue");
+const db = require("../db");
+const { runContainerCommand } = require("../authSync");
+
const router = express.Router();
// Paths inside the agent container: the OpenClaw workspace and the ClawHub
// lockfile that records which skills are installed.
const OPENCLAW_WORKSPACE_PATH = "/root/.openclaw/workspace";
const CLAWHUB_LOCKFILE_PATH = `${OPENCLAW_WORKSPACE_PATH}/.clawhub/lock.json`;
// Install timeout in ms. Configurable via CLAWHUB_INSTALL_TIMEOUT_MS but
// never below 60s; defaults to 5 minutes when unset or out of range.
const CLAWHUB_INSTALL_TIMEOUT_MS = (() => {
  const parsed = Number.parseInt(process.env.CLAWHUB_INSTALL_TIMEOUT_MS, 10);
  return Number.isFinite(parsed) && parsed >= 60000 ? parsed : 300000;
})();
+
/**
 * Coerce a query-string "limit" into a safe page size.
 * Repeated query params arrive as arrays; only the first value is used.
 * @param {string|string[]} value - raw query parameter value
 * @param {number} [fallback=20] - returned when the value is not numeric
 * @returns {number} integer clamped to the inclusive range [1, 50]
 */
function parseLimit(value, fallback = 20) {
  const raw = Array.isArray(value) ? value[0] : value;
  const parsed = Number.parseInt(raw, 10);
  if (!Number.isFinite(parsed)) {
    return fallback;
  }
  if (parsed < 1) return 1;
  if (parsed > 50) return 50;
  return parsed;
}
+
/**
 * Translate a ClawHub client error into a JSON HTTP response.
 * Known cases: 404 skill lookup miss, 400 missing search query, 502 registry
 * unreachable. Anything else falls through with its own statusCode (or 500).
 * @param {object} res - Express response
 * @param {Error & {statusCode?: number, code?: string}} error
 */
function sendClawhubError(res, error) {
  const statusCode = error?.statusCode;
  const code = error?.code;

  if (statusCode === 404) {
    return res.status(404).json({
      error: "skill_not_found",
      message: error.message || "No skill found with slug: unknown",
    });
  }

  if (statusCode === 400 && code === "missing_query") {
    return res.status(400).json({
      error: "missing_query",
      message: error.message || "q is required.",
    });
  }

  if (statusCode === 502 || code === "clawhub_unavailable") {
    return res.status(502).json({
      error: "clawhub_unavailable",
      message: "Could not reach ClawHub registry.",
    });
  }

  return res.status(statusCode || 500).json({
    error: code || error?.message || "Unexpected error",
    message: error?.message || "Unexpected error",
  });
}
+
/**
 * Flatten a parsed ClawHub lockfile ({ skills: { slug: { version } } }) into
 * an array of { slug, version } records.
 * Entries lacking a non-empty slug or a string version are dropped; any input
 * whose `skills` is not a plain object map yields [].
 * @param {object} parsed - lockfile JSON as parsed from the container
 * @returns {{slug: string, version: string}[]}
 */
function normalizeInstalledSkillsLockfile(parsed) {
  const skills = parsed?.skills;
  if (!skills || typeof skills !== "object" || Array.isArray(skills)) {
    return [];
  }

  const normalized = [];
  for (const [slug, entry] of Object.entries(skills)) {
    const version =
      entry && typeof entry === "object" && typeof entry.version === "string"
        ? entry.version
        : "";
    if (slug && version) {
      normalized.push({ slug, version });
    }
  }
  return normalized;
}
+
/**
 * Throw a coded error unless the agent can receive ClawHub installs.
 * Requirements, checked in order: the agent exists; it is a Docker-backed
 * OpenClaw agent; it is in a "running" or "warning" state with a live
 * container. Returns nothing on success.
 * @param {object|null} agent - agent row from loadOwnedAgent
 * @throws {Error & {statusCode: number, code: string}}
 */
function validateInstallableAgent(agent) {
  if (!agent) {
    const notFound = new Error("agent_not_found");
    notFound.statusCode = 404;
    notFound.code = "agent_not_found";
    throw notFound;
  }

  if (agent.backend_type !== "docker" || agent.runtime_family !== "openclaw") {
    const unsupported = new Error(
      "ClawHub installs are only available for Docker-backed OpenClaw agents.",
    );
    unsupported.statusCode = 409;
    unsupported.code = "unsupported_runtime";
    throw unsupported;
  }

  // Both "not running" and "no container" map to the same user-facing error,
  // so the two original checks collapse into one guard.
  const isRunnable = agent.status === "running" || agent.status === "warning";
  if (!isRunnable || !agent.container_id) {
    const notRunning = new Error("Start the agent before installing skills.");
    notRunning.statusCode = 409;
    notRunning.code = "container_not_running";
    throw notRunning;
  }
}
+
/**
 * Build the persisted record for a saved ClawHub skill.
 * NOTE(review): near-duplicate of normalizeSavedSkillEntry in
 * agent-runtime/lib/clawhubReconciliation.js — keep the two in sync.
 * @param {string} slug - install slug; trimmed, and an empty result yields null
 * @param {object} [input] - optional { author, pagePath, installedAt }
 * @returns {object|null} normalized entry, or null when slug is unusable
 */
function normalizeSavedSkillEntry(slug, input = {}) {
  const installSlug = typeof slug === "string" ? slug.trim() : "";
  if (!installSlug) return null;

  const author = typeof input.author === "string" ? input.author.trim() : "";

  // Prefer an explicit pagePath; otherwise derive "author/slug" (or just slug).
  let pagePath = typeof input.pagePath === "string" ? input.pagePath.trim() : "";
  if (!pagePath) {
    pagePath = author ? `${author}/${installSlug}` : installSlug;
  }

  // Keep a valid supplied timestamp (normalized to ISO); otherwise stamp now.
  const rawInstalledAt = typeof input.installedAt === "string" ? input.installedAt.trim() : "";
  const parsedInstalledAt = rawInstalledAt ? new Date(rawInstalledAt) : null;
  const installedAt =
    parsedInstalledAt && !Number.isNaN(parsedInstalledAt.getTime())
      ? parsedInstalledAt.toISOString()
      : new Date().toISOString();

  return {
    source: "clawhub",
    installSlug,
    author,
    pagePath,
    installedAt,
  };
}
+
/**
 * Map an install-flow error onto a JSON HTTP response.
 * Recognized codes: agent_not_found (404), container_not_running (409),
 * unsupported_runtime (409), npm_unavailable (422). Everything else responds
 * with the error's statusCode (default 500) and an "install_failed" code.
 * @param {object} res - Express response
 * @param {Error & {statusCode?: number, code?: string}} error
 */
function sendInstallError(res, error) {
  const code = error?.code;

  if (error?.statusCode === 404 || code === "agent_not_found") {
    return res.status(404).json({ error: "agent_not_found" });
  }

  if (code === "container_not_running") {
    return res.status(409).json({
      error: "container_not_running",
      message: "Start the agent before installing skills.",
    });
  }

  if (code === "unsupported_runtime") {
    return res.status(409).json({
      error: "unsupported_runtime",
      message: "ClawHub installs are only available for Docker-backed OpenClaw agents.",
    });
  }

  if (code === "npm_unavailable") {
    return res.status(422).json({
      error: "npm_unavailable",
      message: "The clawhub CLI could not be installed. Ensure Node.js is in your base image.",
    });
  }

  return res.status(error?.statusCode || 500).json({
    error: code || "install_failed",
    message: error?.message || "Unexpected error",
  });
}
+
/**
 * Fetch a single agent row scoped to its owner.
 * Selects only the columns the ClawHub routes need (including the persisted
 * clawhub_skills JSON) and returns null when no owned agent matches.
 * @param {string} agentId
 * @param {string} userId
 * @returns {Promise<object|null>}
 */
async function loadOwnedAgent(agentId, userId) {
  const result = await db.query(
    `SELECT id, user_id, name, status, host, container_id, backend_type, runtime_family,
       deploy_target, sandbox_profile, clawhub_skills
     FROM agents
     WHERE id = $1 AND user_id = $2
     LIMIT 1`,
    [agentId, userId],
  );
  return result.rows[0] || null;
}
+
+router.get("/skills", async (req, res) => {
+ try {
+ const limit = parseLimit(req.query.limit, 20);
+ const cursor =
+ typeof req.query.cursor === "string" && req.query.cursor.trim()
+ ? req.query.cursor.trim()
+ : null;
+ res.json(await listSkills({ limit, cursor }));
+ } catch (error) {
+ sendClawhubError(res, error);
+ }
+});
+
+router.get("/skills/search", async (req, res) => {
+ try {
+ const q = typeof req.query.q === "string" ? req.query.q.trim() : "";
+ if (!q) {
+ return res.status(400).json({
+ error: "missing_query",
+ message: "q is required.",
+ });
+ }
+
+ const limit = parseLimit(req.query.limit, 20);
+ res.json(await searchSkills({ q, limit }));
+ } catch (error) {
+ sendClawhubError(res, error);
+ }
+});
+
+router.get("/skills/:slug", async (req, res) => {
+ try {
+ const slug = typeof req.params.slug === "string" ? req.params.slug.trim() : "";
+ if (!slug) {
+ return res.status(404).json({
+ error: "skill_not_found",
+ message: "No skill found with slug: unknown",
+ });
+ }
+
+ res.json(await getSkillDetail(slug));
+ } catch (error) {
+ sendClawhubError(res, error);
+ }
+});
+
+router.get("/agents/:agentId/skills", async (req, res) => {
+ try {
+ const agent = await loadOwnedAgent(req.params.agentId, req.user.id);
+ validateInstallableAgent(agent);
+ const { output } = await runContainerCommand(
+ agent,
+ `if [ -f ${JSON.stringify(CLAWHUB_LOCKFILE_PATH)} ]; then cat ${JSON.stringify(
+ CLAWHUB_LOCKFILE_PATH,
+ )}; else printf '{"version":1,"skills":{}}'; fi`,
+ );
+ const parsed = JSON.parse(output || '{"version":1,"skills":{}}');
+ return res.json({
+ skills: normalizeInstalledSkillsLockfile(parsed),
+ });
+ } catch (error) {
+ return sendInstallError(res, error);
+ }
+});
+
+router.post("/agents/:agentId/skills/:slug/install", async (req, res) => {
+ try {
+ const agent = await loadOwnedAgent(req.params.agentId, req.user.id);
+ validateInstallableAgent(agent);
+ const slug = typeof req.params.slug === "string" ? req.params.slug.trim() : "";
+ if (!slug) {
+ return res.status(404).json({
+ error: "skill_not_found",
+ message: "No skill found with slug: unknown",
+ });
+ }
+
+ const skillEntry = normalizeSavedSkillEntry(slug, req.body || {});
+ const existingSavedSkills = Array.isArray(agent.clawhub_skills) ? agent.clawhub_skills : [];
+ const existingSaved = existingSavedSkills.some((entry) => {
+ const savedSlug = typeof entry?.installSlug === "string" ? entry.installSlug : entry?.slug;
+ return String(savedSlug || "").trim() === slug;
+ });
+
+ try {
+ await runContainerCommand(
+ agent,
+ "if command -v clawhub >/dev/null 2>&1; then exit 0; fi; " +
+ "if ! command -v npm >/dev/null 2>&1; then exit 42; fi; " +
+ "npm install -g clawhub",
+ { timeout: CLAWHUB_INSTALL_TIMEOUT_MS },
+ );
+ } catch (error) {
+ if (String(error?.message || "").includes("exit 42")) {
+ const npmError = new Error(
+ "The clawhub CLI could not be installed. Ensure Node.js is in your base image.",
+ );
+ npmError.statusCode = 422;
+ npmError.code = "npm_unavailable";
+ throw npmError;
+ }
+ throw error;
+ }
+
+ const existingJob = await findInFlightClawhubInstallJob(agent.id, slug);
+ if (existingJob) {
+ const existingStatus = await getClawhubInstallJobStatus(existingJob.id);
+ return res.status(202).json({
+ jobId: String(existingJob.id),
+ agentId: agent.id,
+ slug,
+ status: existingStatus?.status || "pending",
+ });
+ }
+
+ const job = await addClawhubInstallJob({
+ agentId: agent.id,
+ slug,
+ skillEntry,
+ persistOnSuccess: !existingSaved,
+ });
+
+ return res.status(202).json({
+ jobId: String(job.id),
+ agentId: agent.id,
+ slug,
+ status: "pending",
+ });
+ } catch (error) {
+ return sendInstallError(res, error);
+ }
+});
+
+router.get("/jobs/:jobId", async (req, res) => {
+ const jobId = typeof req.params.jobId === "string" ? req.params.jobId.trim() : "";
+ if (!jobId) {
+ return res.status(404).json({ error: "job_not_found" });
+ }
+
+ const status = await getClawhubInstallJobStatus(jobId);
+ if (!status) {
+ return res.status(404).json({ error: "job_not_found" });
+ }
+
+ return res.json(status);
+});
+
+module.exports = router;
diff --git a/backend-api/server.ts b/backend-api/server.ts
index c5fc7e5..c08e3cd 100644
--- a/backend-api/server.ts
+++ b/backend-api/server.ts
@@ -52,10 +52,14 @@ if (!process.env.JWT_SECRET) {
if (IS_TEST_ENV) {
process.env.JWT_SECRET = "secret";
} else if (process.env.NODE_ENV === "production") {
- console.error("FATAL: JWT_SECRET must be set in production. Refusing to start with an ephemeral secret.");
+ console.error(
+ "FATAL: JWT_SECRET must be set in production. Refusing to start with an ephemeral secret.",
+ );
process.exit(1);
} else {
- console.warn("SECURITY WARNING: JWT_SECRET not configured. Using ephemeral secret — all tokens will invalidate on restart. Set JWT_SECRET in .env.");
+ console.warn(
+ "SECURITY WARNING: JWT_SECRET not configured. Using ephemeral secret — all tokens will invalidate on restart. Set JWT_SECRET in .env.",
+ );
process.env.JWT_SECRET = crypto.randomBytes(32).toString("hex");
}
}
@@ -93,10 +97,7 @@ function requestProtocol(req) {
return req.protocol;
}
-function getEmbedSessionCookieName(
- agentId,
- prefix = EMBED_SESSION_COOKIE_PREFIX
-) {
+function getEmbedSessionCookieName(agentId, prefix = EMBED_SESSION_COOKIE_PREFIX) {
return `${prefix}${agentId}`;
}
@@ -251,7 +252,7 @@ function injectEmbedBootstrapScript(html, agentId) {
const bootstrapSrc = `/api/agents/${encodeURIComponent(agentId)}/gateway/embed/bootstrap.js`;
return html.replace(
/
]*>/i,
- (match) => `${match} `
+ (match) => `${match} `,
);
}
@@ -279,18 +280,12 @@ function rewriteHermesEmbedHtml(html, agentId) {
return html
.replace(/(["'])\/assets\//g, `$1${embedBase}/assets/`)
.replace(/(["'])\/fonts\//g, `$1${embedBase}/fonts/`)
- .replace(
- /(["'])\/favicon\.ico(["'])/g,
- `$1${embedBase}/favicon.ico$2`
- );
+ .replace(/(["'])\/favicon\.ico(["'])/g, `$1${embedBase}/favicon.ico$2`);
}
function rewriteHermesEmbedCss(css, agentId) {
const embedBase = hermesEmbedBasePath(agentId);
- return css.replace(
- /url\((['"]?)\/fonts\//g,
- `url($1${embedBase}/fonts/`
- );
+ return css.replace(/url\((['"]?)\/fonts\//g, `url($1${embedBase}/fonts/`);
}
function rewriteHermesEmbedJavascript(source, agentId) {
@@ -300,7 +295,7 @@ function rewriteHermesEmbedJavascript(source, agentId) {
if (rewritten.includes(routerMarker)) {
rewritten = rewritten.replace(
routerMarker,
- `jsx($y,{basename:${JSON.stringify(embedBase)},children:`
+ `jsx($y,{basename:${JSON.stringify(embedBase)},children:`,
);
}
return rewritten;
@@ -328,9 +323,13 @@ async function lookupEmbedAgent(agentId, userId) {
`SELECT host, gateway_token, gateway_host_port, gateway_host, gateway_port, status
FROM agents
WHERE id = $1 AND user_id = $2`,
- [agentId, userId]
+ [agentId, userId],
);
- if (!result.rows[0] || !isGatewayAvailableStatus(result.rows[0].status) || !hasGatewayEndpoint(result.rows[0])) {
+ if (
+ !result.rows[0] ||
+ !isGatewayAvailableStatus(result.rows[0].status) ||
+ !hasGatewayEndpoint(result.rows[0])
+ ) {
return null;
}
return result.rows[0];
@@ -341,7 +340,7 @@ async function lookupHermesEmbedAgent(agentId, userId) {
`SELECT host, runtime_host, runtime_port, status, runtime_family, backend_type
FROM agents
WHERE id = $1 AND user_id = $2`,
- [agentId, userId]
+ [agentId, userId],
);
if (
!result.rows[0] ||
@@ -361,7 +360,7 @@ async function resolveEmbedAccess(
lookupAgent = lookupEmbedAgent,
cookiePrefix = EMBED_SESSION_COOKIE_PREFIX,
scope = "gateway-embed",
- } = {}
+ } = {},
) {
const jwt = require("jsonwebtoken");
const agentId = req.params.agentId;
@@ -416,11 +415,9 @@ async function resolveEmbedAccess(
}
if (!relayToken) {
- relayToken = jwt.sign(
- { id: userId, agentId, scope },
- process.env.JWT_SECRET,
- { expiresIn: Math.floor(EMBED_SESSION_TTL_MS / 1000) }
- );
+ relayToken = jwt.sign({ id: userId, agentId, scope }, process.env.JWT_SECRET, {
+ expiresIn: Math.floor(EMBED_SESSION_TTL_MS / 1000),
+ });
res.cookie(embedCookieName, relayToken, {
httpOnly: true,
sameSite: "lax",
@@ -447,13 +444,22 @@ function getEmbeddedHermesPath(req) {
return suffix.replace(/^\/+/, "");
}
-const corsOrigins = (process.env.CORS_ORIGINS || process.env.NEXTAUTH_URL || "http://localhost:8080")
+const corsOrigins = (
+ process.env.CORS_ORIGINS ||
+ process.env.NEXTAUTH_URL ||
+ "http://localhost:8080"
+)
.split(",")
- .map(s => s.trim())
+ .map((s) => s.trim())
.filter(Boolean);
app.use(cors({ origin: corsOrigins }));
-const globalLimiter = rateLimit({ windowMs: 15 * 60 * 1000, max: 1000, standardHeaders: true, legacyHeaders: false });
+const globalLimiter = rateLimit({
+ windowMs: 15 * 60 * 1000,
+ max: 1000,
+ standardHeaders: true,
+ legacyHeaders: false,
+});
app.use(globalLimiter);
// Stripe webhook needs raw body — must come before express.json()
@@ -498,8 +504,7 @@ app.get("/config/platform", async (_req, res) => {
});
res.json({
mode: billing.PLATFORM_MODE,
- selfhosted:
- billing.PLATFORM_MODE !== "paas" ? billing.SELFHOSTED_LIMITS : null,
+ selfhosted: billing.PLATFORM_MODE !== "paas" ? billing.SELFHOSTED_LIMITS : null,
billingEnabled: billing.BILLING_ENABLED,
enabledBackends: getEnabledBackends(),
defaultBackend: getDefaultBackend(),
@@ -581,15 +586,7 @@ app.use("/auth", require("./routes/auth"));
// internal gateway API/config endpoints.
const gatewayUIAssetProxy = require("express").Router();
const PREAUTH_ASSET_METHODS = new Set(["GET", "HEAD"]);
-const EMBED_PROXY_METHODS = new Set([
- "DELETE",
- "GET",
- "HEAD",
- "OPTIONS",
- "PATCH",
- "POST",
- "PUT",
-]);
+const EMBED_PROXY_METHODS = new Set(["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"]);
gatewayUIAssetProxy.use("/agents/:agentId/gateway", (req, res, next) => {
if (!PREAUTH_ASSET_METHODS.has(req.method)) return next();
@@ -618,15 +615,19 @@ gatewayUIAssetProxy.get("/agents/:agentId/gateway/embed/bootstrap.js", async (re
res.setHeader("Referrer-Policy", "no-referrer");
res.setHeader("X-Content-Type-Options", "nosniff");
res.setHeader("Vary", "Cookie");
- res.send(buildEmbedBootstrapScript({
- agentId: access.agentId,
- requestHost: req.headers.host,
- requestScheme: requestProtocol(req),
- gatewayToken: access.agent.gateway_token,
- }));
+ res.send(
+ buildEmbedBootstrapScript({
+ agentId: access.agentId,
+ requestHost: req.headers.host,
+ requestScheme: requestProtocol(req),
+ gatewayToken: access.agent.gateway_token,
+ }),
+ );
} catch (err) {
console.error("[gateway-embed-bootstrap] error:", err);
- if (!res.headersSent) res.status(502).send(`embed bootstrap error: ${err.message}`);
+ if (!res.headersSent) {
+ res.status(502).type("text/plain").send("embed bootstrap error");
+ }
}
});
@@ -744,14 +745,8 @@ async function proxyEmbeddedHermes(req, res) {
return;
}
- if (
- /(?:javascript|ecmascript)/i.test(contentType) ||
- /\.js(?:$|\?)/i.test(hermesPath)
- ) {
- const javascript = rewriteHermesEmbedJavascript(
- await resp.text(),
- access.agentId
- );
+ if (/(?:javascript|ecmascript)/i.test(contentType) || /\.js(?:$|\?)/i.test(hermesPath)) {
+ const javascript = rewriteHermesEmbedJavascript(await resp.text(), access.agentId);
setProxyResponseHeaders(res, resp, {
cachePolicy: isApiRequest ? "no-store" : "asset",
});
@@ -795,16 +790,20 @@ async function proxyGatewayAsset(req, res) {
`SELECT host, gateway_host_port, gateway_host, gateway_port, status
FROM agents
WHERE id = $1`,
- [agentId]
+ [agentId],
);
- if (!result.rows[0] || !isGatewayAvailableStatus(result.rows[0].status) || !hasGatewayEndpoint(result.rows[0])) {
+ if (
+ !result.rows[0] ||
+ !isGatewayAvailableStatus(result.rows[0].status) ||
+ !hasGatewayEndpoint(result.rows[0])
+ ) {
return res.status(404).end();
}
const gatewayPath = req.path || "/";
const targetUrl = `${gatewayUrlForAgent(result.rows[0], gatewayPath)}${req._parsedUrl?.search || ""}`;
const resp = await fetch(targetUrl, {
method: req.method,
- headers: { "Accept": req.headers.accept || "*/*", "Accept-Encoding": "identity" },
+ headers: { Accept: req.headers.accept || "*/*", "Accept-Encoding": "identity" },
signal: AbortSignal.timeout(10000),
});
res.status(resp.status);
@@ -824,18 +823,19 @@ app.use(authenticateToken);
app.use(createGatewayRouter());
// ─── Protected Routes ─────────────────────────────────────────────
-app.use("/agents", require("./routes/agents"));
-app.use("/agents", require("./routes/agentFiles"));
-app.use("/agents", require("./routes/channels"));
-app.use("/agents", require("./routes/nemoclaw"));
+app.use("/agents", require("./routes/agents"));
+app.use("/agents", require("./routes/agentFiles"));
+app.use("/agents", require("./routes/channels"));
+app.use("/agents", require("./routes/nemoclaw"));
app.use("/agent-migrations", require("./routes/agentMigrations"));
-app.use("/", require("./routes/integrations")); // handles /agents/:id/integrations + /integrations/catalog
-app.use("/", require("./routes/monitoring")); // handles /monitoring/* + /agents/:id/metrics
+app.use("/", require("./routes/integrations")); // handles /agents/:id/integrations + /integrations/catalog
+app.use("/", require("./routes/monitoring")); // handles /monitoring/* + /agents/:id/metrics
app.use("/llm-providers", require("./routes/llmProviders"));
-app.use("/marketplace", require("./routes/marketplace"));
-app.use("/workspaces", require("./routes/workspaces"));
-app.use("/billing", require("./routes/billing"));
-app.use("/admin", require("./routes/admin"));
+app.use("/clawhub", require("./routes/clawhub"));
+app.use("/marketplace", require("./routes/marketplace"));
+app.use("/workspaces", require("./routes/workspaces"));
+app.use("/billing", require("./routes/billing"));
+app.use("/admin", require("./routes/admin"));
// ─── Central Error Handler ────────────────────────────────────────
app.use(errorHandler);
@@ -995,7 +995,7 @@ async function migrateDB() {
`DO $$ BEGIN ALTER TABLE platform_settings ADD COLUMN system_banner_severity TEXT NOT NULL DEFAULT 'warning'; EXCEPTION WHEN duplicate_column THEN NULL; END $$`,
`DO $$ BEGIN ALTER TABLE platform_settings ADD COLUMN system_banner_title TEXT NOT NULL DEFAULT ''; EXCEPTION WHEN duplicate_column THEN NULL; END $$`,
`DO $$ BEGIN ALTER TABLE platform_settings ADD COLUMN system_banner_message TEXT NOT NULL DEFAULT ''; EXCEPTION WHEN duplicate_column THEN NULL; END $$`,
- `CREATE TABLE IF NOT EXISTS usage_metrics (
+ `CREATE TABLE IF NOT EXISTS usage_metrics (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
agent_id UUID REFERENCES agents(id) ON DELETE CASCADE,
user_id UUID REFERENCES users(id) ON DELETE CASCADE,
@@ -1035,6 +1035,8 @@ async function migrateDB() {
`DO $$ BEGIN ALTER TABLE agents ADD COLUMN image TEXT; EXCEPTION WHEN duplicate_column THEN NULL; END $$`,
`DO $$ BEGIN ALTER TABLE agents ADD COLUMN template_payload JSONB DEFAULT '{}'; EXCEPTION WHEN duplicate_column THEN NULL; END $$`,
`UPDATE agents SET template_payload = '{}'::jsonb WHERE template_payload IS NULL`,
+ `DO $$ BEGIN ALTER TABLE agents ADD COLUMN clawhub_skills JSONB DEFAULT '[]'; EXCEPTION WHEN duplicate_column THEN NULL; END $$`,
+ `UPDATE agents SET clawhub_skills = '[]'::jsonb WHERE clawhub_skills IS NULL`,
`CREATE TABLE IF NOT EXISTS agent_migrations (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
user_id UUID REFERENCES users(id) ON DELETE CASCADE,
@@ -1164,9 +1166,7 @@ function stableStringify(value) {
async function seedStarterMarketplace() {
for (const template of STARTER_TEMPLATES) {
- const existingListing = await marketplace.getPlatformListingByTemplateKey(
- template.templateKey
- );
+ const existingListing = await marketplace.getPlatformListingByTemplateKey(template.templateKey);
let snapshotId = existingListing?.snapshot_id || null;
let shouldCreateSnapshot = !snapshotId;
@@ -1181,8 +1181,7 @@ async function seedStarterMarketplace() {
!currentSnapshot ||
currentSnapshot.name !== template.name ||
currentSnapshot.description !== template.description ||
- stableStringify(currentConfig) !==
- stableStringify(template.snapshotConfig);
+ stableStringify(currentConfig) !== stableStringify(template.snapshotConfig);
}
if (shouldCreateSnapshot) {
@@ -1195,7 +1194,7 @@ async function seedStarterMarketplace() {
kind: template.snapshotConfig.kind || "starter-template",
templateKey: template.templateKey,
builtIn: true,
- }
+ },
);
snapshotId = snapshot.id;
}
@@ -1228,7 +1227,11 @@ if (require.main === module) {
const server = app.listen(PORT, async () => {
console.log(`api running on ${PORT}`);
- try { await migrateDB(); } catch (e) { console.error("DB migration error:", e.message); }
+ try {
+ await migrateDB();
+ } catch (e) {
+ console.error("DB migration error:", e.message);
+ }
// Seed bootstrap admin account on first boot only when explicit secure credentials are provided.
try {
@@ -1240,29 +1243,43 @@ if (require.main === module) {
});
if (!bootstrapAdmin.shouldSeed) {
- console.warn("Skipping bootstrap admin seed: set explicit DEFAULT_ADMIN_EMAIL and a non-default DEFAULT_ADMIN_PASSWORD with at least 12 characters.");
+ console.warn(
+ "Skipping bootstrap admin seed: set explicit DEFAULT_ADMIN_EMAIL and a non-default DEFAULT_ADMIN_PASSWORD with at least 12 characters.",
+ );
} else {
const bcrypt = require("bcryptjs");
const hash = await bcrypt.hash(bootstrapAdmin.password, 10);
await db.query(
"INSERT INTO users(email, password_hash, role, name) VALUES($1, $2, 'admin', 'Admin') ON CONFLICT DO NOTHING",
- [bootstrapAdmin.email, hash]
+ [bootstrapAdmin.email, hash],
);
console.log(`Bootstrap admin account created: ${bootstrapAdmin.email}`);
}
}
- } catch (e) { console.error("Failed to seed admin account:", e.message); }
+ } catch (e) {
+ console.error("Failed to seed admin account:", e.message);
+ }
try {
const promotedUser = await ensureFirstRegisteredUserIsAdmin(db);
if (promotedUser) {
console.log(`Promoted first registered user to admin: ${promotedUser.email}`);
}
- } catch (e) { console.error("Failed to ensure an admin user exists:", e.message); }
+ } catch (e) {
+ console.error("Failed to ensure an admin user exists:", e.message);
+ }
- try { await integrations.seedCatalog(); } catch (e) { console.error("Failed to seed integration catalog:", e.message); }
+ try {
+ await integrations.seedCatalog();
+ } catch (e) {
+ console.error("Failed to seed integration catalog:", e.message);
+ }
- try { await seedStarterMarketplace(); } catch (e) { console.error("Failed to seed marketplace:", e.message); }
+ try {
+ await seedStarterMarketplace();
+ } catch (e) {
+ console.error("Failed to seed marketplace:", e.message);
+ }
_startupComplete = true;
console.log("Startup complete — health check now returning ok");
diff --git a/backend-api/starterTemplates.js b/backend-api/starterTemplates.js
new file mode 100644
index 0000000..d01fc4b
--- /dev/null
+++ b/backend-api/starterTemplates.js
@@ -0,0 +1,97 @@
+const fs = require("fs");
+const path = require("path");
+const { encodeContentBase64, normalizeTemplatePayload } = require("./agentPayloads");
+const { getDefaultAgentImage } = require("../agent-runtime/lib/agentImages");
+const { getDefaultBackend } = require("../agent-runtime/lib/backendCatalog");
+
+const TEMPLATES_DIR = path.join(__dirname, "marketplace-templates");
+const CORE_FILES = [
+ "AGENTS.md",
+ "SOUL.md",
+ "TOOLS.md",
+ "IDENTITY.md",
+ "USER.md",
+ "HEARTBEAT.md",
+ "MEMORY.md",
+ "BOOTSTRAP.md",
+];
+
+function textFile(filePath, content) {
+ return {
+ path: filePath,
+ contentBase64: encodeContentBase64(content.trim() + "\n"),
+ };
+}
+
+function buildStarterPayload(coreFiles, metadata = {}) {
+ return normalizeTemplatePayload({
+ files: coreFiles,
+ memoryFiles: [],
+ wiring: { channels: [], integrations: [] },
+ metadata,
+ });
+}
+
+function buildSnapshotConfig(templateKey, payload, defaults = {}) {
+ const backend = defaults.backend || getDefaultBackend(process.env, { sandbox: "standard" });
+ return {
+ kind: "starter-template",
+ templateKey,
+ builtIn: true,
+ defaults: {
+ backend,
+ sandbox: "standard",
+ vcpu: 2,
+ ram_mb: 2048,
+ disk_gb: 20,
+ image:
+ defaults.image ||
+ getDefaultAgentImage({
+ sandbox: "standard",
+ backend,
+ }),
+ },
+ templatePayload: payload,
+ };
+}
+
+function loadTemplatesFromDisk() {
+ const entries = fs.readdirSync(TEMPLATES_DIR, { withFileTypes: true });
+ const templates = [];
+
+ for (const entry of entries) {
+ if (!entry.isDirectory()) continue;
+
+ const dir = path.join(TEMPLATES_DIR, entry.name);
+ const manifestPath = path.join(dir, "manifest.json");
+ if (!fs.existsSync(manifestPath)) continue;
+
+ const manifest = JSON.parse(fs.readFileSync(manifestPath, "utf8"));
+ const { templateKey, name, description, price, category, starterType } = manifest;
+ if (!templateKey) continue;
+
+ const coreFiles = CORE_FILES.filter((f) => fs.existsSync(path.join(dir, f))).map((f) =>
+ textFile(f, fs.readFileSync(path.join(dir, f), "utf8")),
+ );
+
+ const payload = buildStarterPayload(coreFiles, { starterType });
+
+ templates.push({
+ templateKey,
+ name,
+ description,
+ price,
+ category,
+ payload,
+ snapshotConfig: buildSnapshotConfig(templateKey, payload),
+ });
+ }
+
+ return templates;
+}
+
+const STARTER_TEMPLATES = loadTemplatesFromDisk();
+
+module.exports = {
+ STARTER_TEMPLATES,
+};
diff --git a/docker-compose.e2e.yml b/docker-compose.e2e.yml
index 348b64d..1440143 100644
--- a/docker-compose.e2e.yml
+++ b/docker-compose.e2e.yml
@@ -47,6 +47,7 @@ services:
context: .
dockerfile: backend-api/Dockerfile
volumes:
+ - ./workers:/workers:ro
- ./agent-runtime:/agent-runtime:ro
extra_hosts:
- "host.docker.internal:host-gateway"
diff --git a/docker-compose.yml b/docker-compose.yml
index 2fdc504..68ea1ca 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -58,6 +58,7 @@ services:
working_dir: /app
volumes:
- ./backend-api:/app
+ - ./workers:/workers:ro
- ./workers/provisioner/backends:/app/backends
- ./agent-runtime:/agent-runtime:ro
- ./tsconfig.base.json:/tsconfig.base.json:ro
@@ -92,7 +93,7 @@ services:
- ./backend-api:/backend-api:ro
- ./tsconfig.base.json:/tsconfig.base.json:ro
- /var/run/docker.sock:/var/run/docker.sock
- command: sh -c "npm install && npm start"
+ command: sh -c "npm install --no-audit --no-fund --no-update-notifier --loglevel=error && npm start"
environment:
DEPLOYMENT_WORKER_CONCURRENCY: ${DEPLOYMENT_WORKER_CONCURRENCY:-6}
extra_hosts:
@@ -114,6 +115,8 @@ services:
postgres:
image: postgres:15
restart: always
+ ports:
+ - "5433:5432"
environment:
POSTGRES_USER: ${DB_USER:-nora}
POSTGRES_PASSWORD: ${DB_PASSWORD:-nora}
diff --git a/frontend-dashboard/components/agents/OpenClawTab.tsx b/frontend-dashboard/components/agents/OpenClawTab.tsx
index 323335f..6478a74 100644
--- a/frontend-dashboard/components/agents/OpenClawTab.tsx
+++ b/frontend-dashboard/components/agents/OpenClawTab.tsx
@@ -3,6 +3,7 @@ import { MessageSquare, Radio, CalendarClock, Puzzle, MonitorPlay } from "lucide
import StatusPanel from "./openclaw/StatusPanel";
import ChatPanel from "./openclaw/ChatPanel";
import IntegrationsTab from "./IntegrationsTab";
+import ClawHubTab from "./openclaw/ClawHubTab";
import CronPanel from "./openclaw/CronPanel";
import OpenClawUIPanel from "./openclaw/OpenClawUIPanel";
@@ -10,11 +11,17 @@ const subTabs = [
{ id: "status", label: "Status", icon: Radio },
{ id: "chat", label: "Chat", icon: MessageSquare },
{ id: "integrations", label: "Integrations", icon: Puzzle },
+ { id: "clawhub", label: "ClawHub", icon: Puzzle },
{ id: "cron", label: "Cron", icon: CalendarClock },
{ id: "ui", label: "UI", icon: MonitorPlay },
];
-export default function OpenClawTab({ agentId, agentStatus }) {
+export default function OpenClawTab({
+ agentId,
+ agentStatus,
+ agentContainerId,
+ onClawhubInstallSuccess,
+}) {
const [activeSubTab, setActiveSubTab] = useState("status");
if (agentStatus !== "running" && agentStatus !== "warning") {
@@ -61,6 +68,13 @@ export default function OpenClawTab({ agentId, agentStatus }) {
{activeSubTab === "status" && }
{activeSubTab === "chat" && }
{activeSubTab === "integrations" && }
+ {activeSubTab === "clawhub" && (
+
+ )}
{activeSubTab === "cron" && }
{activeSubTab === "ui" && }
diff --git a/frontend-dashboard/components/agents/openclaw/ClawHubTab.tsx b/frontend-dashboard/components/agents/openclaw/ClawHubTab.tsx
new file mode 100644
index 0000000..4f04e02
--- /dev/null
+++ b/frontend-dashboard/components/agents/openclaw/ClawHubTab.tsx
@@ -0,0 +1,530 @@
+import { useEffect, useMemo, useRef, useState } from "react";
+import { Boxes, RefreshCw } from "lucide-react";
+import { useToast } from "../../Toast";
+import { fetchWithAuth } from "../../../lib/api";
+import SkillDetailPanel, { SkillDetail, SkillDetailActionState } from "./SkillDetailPanel";
+import SkillGrid from "./SkillGrid";
+import SkillSearchBar from "./SkillSearchBar";
+import SkillSelectionTray from "./SkillSelectionTray";
+import { SkillSummary } from "./SkillCard";
+import { DeployClawHubSkill } from "../../../lib/clawhubDeploy";
+
+type ClawHubTabProps = {
+ agentId: string;
+ refreshToken?: string | null;
+ onInstallSuccess?: () => void;
+};
+
+type SkillListResponse = {
+ skills?: SkillSummary[];
+ cursor?: string | null;
+ error?: string;
+ message?: string;
+};
+
+type InstalledSkill = {
+ slug: string;
+ version: string;
+};
+
+type InstalledSkillsResponse = {
+ skills?: InstalledSkill[];
+ error?: string;
+ message?: string;
+};
+
+type InstallJobResponse = {
+ jobId: string;
+ agentId: string;
+ slug: string;
+ status: "pending" | "running" | "success" | "failed";
+};
+
+type InstallJobStatus = {
+ jobId: string;
+ agentId: string;
+ slug: string;
+ status: "pending" | "running" | "success" | "failed";
+ error: string | null;
+ completedAt: string | null;
+};
+
+function buildSelectedSkill(detail: SkillDetail): DeployClawHubSkill {
+ return {
+ source: "clawhub",
+ installSlug: detail.slug,
+ author: detail.author || "",
+ pagePath: detail.pagePath || (detail.author ? `${detail.author}/${detail.slug}` : detail.slug),
+ installedAt: new Date().toISOString(),
+ name: detail.name,
+ description: detail.description,
+ };
+}
+
+export default function ClawHubTab({ agentId, refreshToken, onInstallSuccess }: ClawHubTabProps) {
+ const toast = useToast();
+ const [query, setQuery] = useState("");
+ const [skills, setSkills] = useState([]);
+ const [loading, setLoading] = useState(true);
+ const [error, setError] = useState(null);
+ const [selectedSkill, setSelectedSkill] = useState(null);
+ const [selectedSkillDetail, setSelectedSkillDetail] = useState(null);
+ const [detailLoading, setDetailLoading] = useState(false);
+ const [detailError, setDetailError] = useState(null);
+ const [selectedSkills, setSelectedSkills] = useState([]);
+ const [selectionBusySlug, setSelectionBusySlug] = useState(null);
+ const [jobStatuses, setJobStatuses] = useState>({});
+ const [installError, setInstallError] = useState(null);
+ const [installedSkills, setInstalledSkills] = useState([]);
+ const requestIdRef = useRef(0);
+ const detailCacheRef = useRef>({});
+
+ const showingDefaultBrowseEmptyState = !query.trim() && !loading && !error && skills.length === 0;
+ const installedSlugs = useMemo(
+ () => new Set(installedSkills.map((skill) => skill.slug)),
+ [installedSkills],
+ );
+ const selectedSkillKeys = useMemo(
+ () => new Set(selectedSkills.map((skill) => `${skill.author}:${skill.installSlug}`)),
+ [selectedSkills],
+ );
+ const selectedSkillSlugs = useMemo(
+ () => new Set(selectedSkills.map((skill) => skill.installSlug)),
+ [selectedSkills],
+ );
+ const selectedCurrentSkill = selectedSkillDetail
+ ? selectedSkillKeys.has(`${selectedSkillDetail.author || ""}:${selectedSkillDetail.slug}`)
+ : false;
+ const activeInstallCount = useMemo(
+ () =>
+ Object.values(jobStatuses).filter(
+ (status) => status.status === "pending" || status.status === "running",
+ ).length,
+ [jobStatuses],
+ );
+
+ async function loadInstalledSkills() {
+ try {
+ const res = await fetchWithAuth(`/api/clawhub/agents/${agentId}/skills`);
+ const data: InstalledSkillsResponse = await res.json().catch(() => ({}));
+ if (!res.ok) {
+ throw new Error(data.message || data.error || "Could not load installed skills.");
+ }
+ setInstalledSkills(Array.isArray(data.skills) ? data.skills : []);
+ } catch (err: any) {
+ console.error(err);
+ }
+ }
+
+ async function loadBrowseResults() {
+ const requestId = ++requestIdRef.current;
+ setLoading(true);
+ setError(null);
+
+ try {
+ const res = await fetchWithAuth("/api/clawhub/skills");
+ const data: SkillListResponse = await res.json();
+ if (requestId !== requestIdRef.current) return;
+
+ if (!res.ok) {
+ throw new Error(
+ data.message || data.error || "Could not load skills. ClawHub may be unavailable.",
+ );
+ }
+
+ setSkills(Array.isArray(data.skills) ? data.skills : []);
+ } catch (err: any) {
+ if (requestId !== requestIdRef.current) return;
+ setSkills([]);
+ setError(err?.message || "Could not load skills. ClawHub may be unavailable.");
+ } finally {
+ if (requestId === requestIdRef.current) {
+ setLoading(false);
+ }
+ }
+ }
+
+ async function searchSkills() {
+ const trimmed = query.trim();
+ if (!trimmed) {
+ loadBrowseResults();
+ return;
+ }
+
+ const requestId = ++requestIdRef.current;
+ setLoading(true);
+ setError(null);
+
+ try {
+ const res = await fetchWithAuth(
+ `/api/clawhub/skills/search?q=${encodeURIComponent(trimmed)}`,
+ );
+ const data: SkillListResponse = await res.json();
+ if (requestId !== requestIdRef.current) return;
+
+ if (!res.ok) {
+ throw new Error(
+ data.message || data.error || "Could not load skills. ClawHub may be unavailable.",
+ );
+ }
+
+ setSkills(Array.isArray(data.skills) ? data.skills : []);
+ } catch (err: any) {
+ if (requestId !== requestIdRef.current) return;
+ setSkills([]);
+ setError(err?.message || "Could not load skills. ClawHub may be unavailable.");
+ } finally {
+ if (requestId === requestIdRef.current) {
+ setLoading(false);
+ }
+ }
+ }
+
+ async function fetchSkillDetail(skill: SkillSummary) {
+ const cached = detailCacheRef.current[skill.slug];
+ if (cached) {
+ return cached;
+ }
+
+ const res = await fetchWithAuth(`/api/clawhub/skills/${encodeURIComponent(skill.slug)}`);
+ const data = await res.json();
+
+ if (!res.ok) {
+ throw new Error(data.message || data.error || "Could not load skill details.");
+ }
+
+ detailCacheRef.current[skill.slug] = data;
+ return data as SkillDetail;
+ }
+
+ async function loadSkillDetail(skill: SkillSummary) {
+ setSelectedSkill(skill);
+ setSelectedSkillDetail(detailCacheRef.current[skill.slug] || null);
+ setDetailError(null);
+ setDetailLoading(true);
+
+ try {
+ const detail = await fetchSkillDetail(skill);
+ setSelectedSkill({
+ slug: detail.slug,
+ name: detail.name,
+ description: detail.description,
+ downloads: detail.downloads,
+ stars: detail.stars,
+ updatedAt: detail.updatedAt || null,
+ });
+ setSkills((current) =>
+ current.map((entry) =>
+ entry.slug === detail.slug
+ ? {
+ ...entry,
+ name: detail.name,
+ description: detail.description,
+ downloads: detail.downloads,
+ stars: detail.stars,
+ updatedAt: detail.updatedAt || entry.updatedAt,
+ }
+ : entry,
+ ),
+ );
+ setSelectedSkillDetail(detail);
+ } catch (err: any) {
+ setDetailError(err?.message || "Could not load skill details.");
+ } finally {
+ setDetailLoading(false);
+ }
+ }
+
+ function addSelectedSkill(detail: SkillDetail) {
+ const nextSkill = buildSelectedSkill(detail);
+ const nextKey = `${nextSkill.author}:${nextSkill.installSlug}`;
+ setSelectedSkills((current) => {
+ if (current.some((skill) => `${skill.author}:${skill.installSlug}` === nextKey)) {
+ return current;
+ }
+ return [...current, nextSkill];
+ });
+ }
+
+ function removeSelectedSkill(skill: SkillSummary | DeployClawHubSkill | SkillDetail) {
+ const installSlug = "installSlug" in skill ? skill.installSlug : skill.slug;
+ const author = "author" in skill ? skill.author || "" : "";
+ setSelectedSkills((current) =>
+ current.filter((entry) => !(entry.installSlug === installSlug && entry.author === author)),
+ );
+ }
+
+ function removeSelectedSkillBySlug(slug: string) {
+ setSelectedSkills((current) => current.filter((entry) => entry.installSlug !== slug));
+ }
+
+ function clearSelectedSkills() {
+ setSelectedSkills([]);
+ }
+
+ async function toggleSkillSelection(skill: SkillSummary) {
+ const cached = detailCacheRef.current[skill.slug];
+ const cachedKey = `${cached?.author || ""}:${skill.slug}`;
+ if (cached && selectedSkillKeys.has(cachedKey)) {
+ removeSelectedSkill(cached);
+ return;
+ }
+
+ setSelectionBusySlug(skill.slug);
+ try {
+ const detail = cached || (await fetchSkillDetail(skill));
+ const detailKey = `${detail.author || ""}:${detail.slug}`;
+ if (selectedSkillKeys.has(detailKey)) {
+ removeSelectedSkill(detail);
+ } else {
+ addSelectedSkill(detail);
+ }
+ } catch (err: any) {
+ toast.error(err?.message || "Could not update that selection.");
+ } finally {
+ setSelectionBusySlug(null);
+ }
+ }
+
+ async function handleInstallSelected() {
+ const installable = selectedSkills.filter((skill) => !installedSlugs.has(skill.installSlug));
+ if (!installable.length) {
+ setInstallError("All selected skills are already installed.");
+ return;
+ }
+
+ setInstallError(null);
+
+ for (const skill of installable) {
+ try {
+ const res = await fetchWithAuth(
+ `/api/clawhub/agents/${agentId}/skills/${encodeURIComponent(skill.installSlug)}/install`,
+ {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({
+ source: "clawhub",
+ author: skill.author,
+ pagePath: skill.pagePath,
+ installedAt: skill.installedAt,
+ }),
+ },
+ );
+ const data: InstallJobResponse & { error?: string; message?: string } = await res.json();
+ if (!res.ok) {
+ throw new Error(data.message || data.error || "Could not queue install.");
+ }
+
+ setJobStatuses((current) => ({
+ ...current,
+ [skill.installSlug]: {
+ jobId: data.jobId,
+ agentId: data.agentId,
+ slug: data.slug,
+ status: data.status,
+ error: null,
+ completedAt: null,
+ },
+ }));
+ } catch (err: any) {
+ setJobStatuses((current) => ({
+ ...current,
+ [skill.installSlug]: {
+ jobId: current[skill.installSlug]?.jobId || `${skill.installSlug}-failed`,
+ agentId,
+ slug: skill.installSlug,
+ status: "failed",
+ error: err?.message || "Could not queue install.",
+ completedAt: null,
+ },
+ }));
+ }
+ }
+ }
+
+ function handleQueryChange(value: string) {
+ setQuery(value);
+ if (!value.trim()) {
+ setSelectedSkill(null);
+ setSelectedSkillDetail(null);
+ setDetailError(null);
+ loadBrowseResults();
+ }
+ }
+
+ function handleClearSearch() {
+ setQuery("");
+ setSelectedSkill(null);
+ setSelectedSkillDetail(null);
+ setDetailError(null);
+ loadBrowseResults();
+ }
+
+ useEffect(() => {
+ loadBrowseResults();
+ }, [agentId]);
+
+ useEffect(() => {
+ loadInstalledSkills();
+ }, [agentId, refreshToken]);
+
+ useEffect(() => {
+ const activeJobs = Object.values(jobStatuses).filter(
+ (status) => status.status === "pending" || status.status === "running",
+ );
+ if (!activeJobs.length) return;
+
+ const intervalId = window.setInterval(async () => {
+ for (const job of activeJobs) {
+ try {
+ const res = await fetchWithAuth(`/api/clawhub/jobs/${encodeURIComponent(job.jobId)}`);
+ const data: InstallJobStatus & { error?: string } = await res.json();
+ if (!res.ok) {
+ continue;
+ }
+
+ setJobStatuses((current) => ({
+ ...current,
+ [data.slug]: data,
+ }));
+
+ if (data.status === "success") {
+ await loadInstalledSkills();
+ removeSelectedSkillBySlug(data.slug);
+ toast.success(`${data.slug} installed. Restart your agent session to activate it.`);
+ onInstallSuccess?.();
+ }
+
+ if (data.status === "failed" && data.error) {
+ toast.error(data.error);
+ }
+ } catch (err) {
+ console.error(err);
+ }
+ }
+ }, 2000);
+
+ return () => {
+ window.clearInterval(intervalId);
+ };
+ }, [agentId, jobStatuses, onInstallSuccess, toast]);
+
+ const detailActionState: SkillDetailActionState | undefined = selectedSkillDetail
+ ? installedSlugs.has(selectedSkillDetail.slug)
+ ? {
+ label: "Installed",
+ disabled: true,
+ }
+ : {
+ label: selectedCurrentSkill ? "Remove from selection" : "Add to selection",
+ disabled: Boolean(selectionBusySlug && selectionBusySlug !== selectedSkillDetail.slug),
+ loading: selectionBusySlug === selectedSkillDetail.slug,
+ onClick: () => {
+ if (selectedCurrentSkill) {
+ removeSelectedSkill(selectedSkillDetail);
+ return;
+ }
+ addSelectedSkill(selectedSkillDetail);
+ },
+ }
+ : undefined;
+
+ return (
+
+
+
+
+
+
+ ClawHub Catalog
+
+
Install skills on this agent
+
+ Browse the public ClawHub registry from Nora, select one or more skills, and queue
+ runtime installs for this running agent.
+
+
+
+
{
+ loadBrowseResults();
+ loadInstalledSkills();
+ }}
+ disabled={loading}
+ className="inline-flex items-center gap-2 self-start rounded-xl border border-slate-200 bg-white px-4 py-2 text-sm font-bold text-slate-700 transition-colors hover:bg-slate-50 disabled:opacity-60"
+ >
+
+ Refresh
+
+
+
+
+
0}
+ installError={installError}
+ onInstall={handleInstallSelected}
+ onRemoveSkill={removeSelectedSkill}
+ onClearAll={clearSelectedSkills}
+ />
+
+
+
+
+
+
+
+
+
+ {
+ setSelectedSkill(null);
+ setSelectedSkillDetail(null);
+ setDetailError(null);
+ setDetailLoading(false);
+ }}
+ />
+
+
+
+ );
+}
diff --git a/frontend-dashboard/components/agents/openclaw/SkillCard.tsx b/frontend-dashboard/components/agents/openclaw/SkillCard.tsx
new file mode 100644
index 0000000..a1d50bb
--- /dev/null
+++ b/frontend-dashboard/components/agents/openclaw/SkillCard.tsx
@@ -0,0 +1,123 @@
+import { ArrowUpRight, Check, Download, Plus, Star } from "lucide-react";
+
+export type SkillSummary = {
+ slug: string;
+ name: string;
+ description: string;
+ downloads: number | null;
+ stars: number | null;
+ updatedAt: string | null;
+};
+
+type SkillCardProps = {
+ skill: SkillSummary;
+ selected?: boolean;
+ installed?: boolean;
+ onSelect: (skill: SkillSummary) => void;
+ selectable?: boolean;
+ selectionBusy?: boolean;
+ selectedForAction?: boolean;
+ onToggleSelection?: (skill: SkillSummary) => void;
+};
+
+function formatCompactNumber(value: number) {
+ return new Intl.NumberFormat("en-US", {
+ notation: "compact",
+ maximumFractionDigits: 1,
+ }).format(value || 0);
+}
+
+function formatUpdatedAt(value: string | null) {
+ if (!value) return "Unknown update";
+ const parsed = new Date(value);
+ if (Number.isNaN(parsed.getTime())) return "Unknown update";
+ return `Updated ${parsed.toLocaleDateString()}`;
+}
+
+export default function SkillCard({
+ skill,
+ selected = false,
+ installed = false,
+ onSelect,
+ selectable = false,
+ selectionBusy = false,
+ selectedForAction = false,
+ onToggleSelection,
+}: SkillCardProps) {
+ const showStats = typeof skill.downloads === "number" || typeof skill.stars === "number";
+
+ return (
+
+
onSelect(skill)}
+ className="flex flex-1 flex-col text-left"
+ >
+
+
+
{skill.name || skill.slug}
+
+ {skill.slug}
+
+
+
+ {installed ? (
+
+ Installed
+
+ ) : null}
+
+
+
+
+
+ {skill.description || "No description provided by ClawHub."}
+
+
+ {showStats ? (
+
+ {typeof skill.downloads === "number" ? (
+
+
+ {formatCompactNumber(skill.downloads)}
+
+ ) : null}
+ {typeof skill.stars === "number" ? (
+
+
+ {formatCompactNumber(skill.stars)}
+
+ ) : null}
+
+ ) : null}
+
+ {formatUpdatedAt(skill.updatedAt)}
+
+
+ {selectable && onToggleSelection ? (
+
onToggleSelection(skill)}
+ disabled={selectionBusy}
+ className={`mt-4 inline-flex items-center justify-center gap-2 rounded-xl px-3 py-2 text-sm font-black transition-colors ${
+ selectedForAction
+ ? "bg-emerald-100 text-emerald-800 hover:bg-emerald-200"
+ : "bg-slate-100 text-slate-700 hover:bg-slate-200"
+ } disabled:opacity-60`}
+ >
+ {selectedForAction ? : }
+ {selectionBusy ? "Updating..." : selectedForAction ? "Selected" : "Add to selection"}
+
+ ) : null}
+
+ );
+}
diff --git a/frontend-dashboard/components/agents/openclaw/SkillDetailPanel.tsx b/frontend-dashboard/components/agents/openclaw/SkillDetailPanel.tsx
new file mode 100644
index 0000000..696c2d3
--- /dev/null
+++ b/frontend-dashboard/components/agents/openclaw/SkillDetailPanel.tsx
@@ -0,0 +1,501 @@
+import type { ReactNode } from "react";
+import {
+ ChevronLeft,
+ Download,
+ Star,
+ Box,
+ Cpu,
+ FileText,
+ Lock,
+ CircleAlert,
+ Check,
+ Plus,
+} from "lucide-react";
+
+export type SkillRequirementItem = {
+ kind?: string;
+ package?: string;
+ name?: string;
+};
+
+export type SkillRequirements = {
+ bins?: string[];
+ env?: string[];
+ config?: string[];
+ install?: SkillRequirementItem[];
+};
+
+export type SkillDetail = {
+ slug: string;
+ name: string;
+ description: string;
+ downloads: number;
+ stars: number;
+ updatedAt: string;
+ author?: string;
+ pagePath?: string;
+ installedAt?: string;
+ readme?: string;
+ requirements?: SkillRequirements | null;
+};
+
+export type SkillDetailActionState = {
+ label: string;
+ disabled?: boolean;
+ loading?: boolean;
+ onClick?: () => void;
+ onAction?: () => void;
+};
+
+type SkillDetailPanelProps = {
+ skill: SkillDetail | null;
+ detail?: SkillDetail | null;
+ loading: boolean;
+ error: string | null;
+ onClose: () => void;
+ action?: SkillDetailActionState;
+};
+
+function formatCount(value: number | undefined) {
+ const safeValue = Number.isFinite(value) ? value : 0;
+ if (safeValue >= 1000000) return `${(safeValue / 1000000).toFixed(1)}M`;
+ if (safeValue >= 1000) return `${Math.round(safeValue / 100) / 10}K`;
+ return `${safeValue}`;
+}
+
+function RequirementChip({ label, value }: { label: string; value: string }) {
+ return (
+
+ );
+}
+
+function renderInline(text: string) {
+ const tokens: Array = [];
+ const pattern = /(`[^`]+`|\[[^\]]+\]\([^)]+\)|\*\*[^*]+\*\*|\*[^*]+\*)/g;
+ let lastIndex = 0;
+ let key = 0;
+
+ for (const match of text.matchAll(pattern)) {
+ const index = match.index || 0;
+ if (index > lastIndex) {
+ tokens.push(text.slice(lastIndex, index));
+ }
+
+ const token = match[0];
+ if (token.startsWith("`") && token.endsWith("`")) {
+ tokens.push(
+
+ {token.slice(1, -1)}
+ ,
+ );
+ } else if (token.startsWith("**") && token.endsWith("**")) {
+ tokens.push({token.slice(2, -2)} );
+ } else if (token.startsWith("*") && token.endsWith("*")) {
+ tokens.push({token.slice(1, -1)} );
+ } else if (token.startsWith("[") && token.includes("](") && token.endsWith(")")) {
+ const labelEnd = token.indexOf("](");
+ const label = token.slice(1, labelEnd);
+ const href = token.slice(labelEnd + 2, -1);
+ tokens.push(
+
+ {label}
+ ,
+ );
+ } else {
+ tokens.push(token);
+ }
+
+ lastIndex = index + token.length;
+ }
+
+ if (lastIndex < text.length) {
+ tokens.push(text.slice(lastIndex));
+ }
+
+ return tokens;
+}
+
+function MarkdownView({ source }: { source: string }) {
+ const lines = source.replace(/\r\n/g, "\n").split("\n");
+ const blocks: Array = [];
+
+ let i = 0;
+ let key = 0;
+
+ while (i < lines.length) {
+ const line = lines[i];
+ const trimmed = line.trim();
+
+ if (!trimmed) {
+ i += 1;
+ continue;
+ }
+
+ if (trimmed.startsWith("```")) {
+ const codeLines: string[] = [];
+ i += 1;
+ while (i < lines.length && !lines[i].trim().startsWith("```")) {
+ codeLines.push(lines[i]);
+ i += 1;
+ }
+ if (i < lines.length) i += 1;
+ blocks.push(
+
+ {codeLines.join("\n")}
+ ,
+ );
+ continue;
+ }
+
+ if (/^#{1,3}\s+/.test(trimmed)) {
+ const level = Math.min(trimmed.match(/^#{1,3}/)?.[0].length || 1, 3);
+ const content = trimmed.replace(/^#{1,3}\s+/, "");
+ const Tag = level === 1 ? "h1" : level === 2 ? "h2" : "h3";
+ const className =
+ level === 1
+ ? "mb-3 text-2xl font-black tracking-tight text-slate-900"
+ : level === 2
+ ? "mb-2 mt-5 text-lg font-bold text-slate-900"
+ : "mb-2 mt-4 text-base font-bold text-slate-900";
+ blocks.push(
+
+ {renderInline(content)}
+ ,
+ );
+ i += 1;
+ continue;
+ }
+
+ if (/^>\s?/.test(trimmed)) {
+ const quoteLines: string[] = [];
+ while (i < lines.length && /^>\s?/.test(lines[i].trim())) {
+ quoteLines.push(lines[i].trim().replace(/^>\s?/, ""));
+ i += 1;
+ }
+ blocks.push(
+
+ {renderInline(quoteLines.join(" "))}
+ ,
+ );
+ continue;
+ }
+
+ if (/^(\s*[-*]\s+)/.test(trimmed)) {
+ const items: string[] = [];
+ while (i < lines.length && /^(\s*[-*]\s+)/.test(lines[i].trim())) {
+ items.push(lines[i].trim().replace(/^[-*]\s+/, ""));
+ i += 1;
+ }
+ blocks.push(
+
+ {items.map((item, idx) => (
+
+ {renderInline(item)}
+
+ ))}
+ ,
+ );
+ continue;
+ }
+
+ if (/^\d+\.\s+/.test(trimmed)) {
+ const items: string[] = [];
+ while (i < lines.length && /^\d+\.\s+/.test(lines[i].trim())) {
+ items.push(lines[i].trim().replace(/^\d+\.\s+/, ""));
+ i += 1;
+ }
+ blocks.push(
+
+ {items.map((item, idx) => (
+
+ {renderInline(item)}
+
+ ))}
+ ,
+ );
+ continue;
+ }
+
+ const paragraphLines = [trimmed];
+ i += 1;
+ while (
+ i < lines.length &&
+ lines[i].trim() &&
+ !/^#{1,3}\s+/.test(lines[i].trim()) &&
+ !/^>\s?/.test(lines[i].trim()) &&
+ !/^(\s*[-*]\s+)/.test(lines[i].trim()) &&
+ !/^\d+\.\s+/.test(lines[i].trim()) &&
+ !lines[i].trim().startsWith("```")
+ ) {
+ paragraphLines.push(lines[i].trim());
+ i += 1;
+ }
+
+ blocks.push(
+
+ {renderInline(paragraphLines.join(" "))}
+
,
+ );
+ }
+
+ return {blocks}
;
+}
+
+export default function SkillDetailPanel({
+ skill,
+ detail,
+ loading,
+ error,
+ onClose,
+ action,
+}: SkillDetailPanelProps) {
+ const activeSkill = detail || skill;
+ const helperText = action
+ ? action.label.toLowerCase().includes("selection")
+ ? "Use this action to add or remove the skill from the deploy selection."
+ : "This action is controlled by the current flow."
+ : "Install is disabled in Phase 1. This panel is read-only while we finish the browse and detail experience.";
+
+ return (
+
+
+
+
+
+ Skill Detail
+
+
+ {activeSkill?.name || "Select a skill to inspect"}
+
+
+ {activeSkill ? (
+
+
+ Close
+
+ ) : null}
+
+
+ {!activeSkill ? (
+
+
+
+ Pick a card to open the README and requirements.
+
+
+ The detail panel is read-only in Phase 1. Install actions will unlock in a later
+ phase.
+
+
+ ) : (
+
+
+
+
+ {activeSkill.slug}
+
+
+
+ {formatCount(activeSkill.downloads)} downloads
+
+
+
+ {formatCount(activeSkill.stars)} stars
+
+
+
+ {activeSkill.description || "No description provided."}
+
+
+
+
+
+
+ Install
+
+
{helperText}
+
+ {action?.loading ? (
+
+
+ {action.label}
+
+ ) : action ? (
+ action.disabled ? (
+ <>
+
+ {action.label}
+ >
+ ) : (
+ <>
+ {action.label === "Add to selection" ? (
+
+ ) : (
+
+ )}
+ {action.label}
+ >
+ )
+ ) : (
+ <>
+
+ Install
+ >
+ )}
+
+
+
+ {error ? (
+
+
+
+
+
Could not load skill details.
+
{error}
+
+
+
+ ) : null}
+
+
+
+
+ Requirements
+
+ {loading && !activeSkill.requirements ? (
+
+ Loading requirement metadata...
+
+ ) : activeSkill.requirements ? (
+
+
+
+
+
+ entry.kind || entry.package || entry.name || "unknown",
+ )
+ .join(", ")
+ : "None listed"
+ }
+ />
+
+
+ {activeSkill.requirements.install?.length ? (
+
+ {activeSkill.requirements.install.map((entry, index) => (
+
+
+
+ {entry.kind || entry.package || entry.name || "install"}
+
+
+ {entry.package || entry.name
+ ? `Package: ${entry.package || entry.name}`
+ : "No package name supplied."}
+
+
+ ))}
+
+ ) : null}
+
+ ) : (
+
+ No{" "}
+
+ metadata.openclaw
+ {" "}
+ requirements were declared for this skill.
+
+ )}
+
+
+
+
+
+ SKILL.md
+
+ {loading && !skill.readme ? (
+
+ ) : (
+
+ )}
+
+
+ )}
+
+
+ );
+}
diff --git a/frontend-dashboard/components/agents/openclaw/SkillGrid.tsx b/frontend-dashboard/components/agents/openclaw/SkillGrid.tsx
new file mode 100644
index 0000000..80a8dcb
--- /dev/null
+++ b/frontend-dashboard/components/agents/openclaw/SkillGrid.tsx
@@ -0,0 +1,106 @@
+import { Loader2, SearchX, WifiOff } from "lucide-react";
+import SkillCard, { SkillSummary } from "./SkillCard";
+
+type SkillGridProps = {
+ skills: SkillSummary[];
+ loading: boolean;
+ error: string | null;
+ query: string;
+ selectedSlug?: string | null;
+ installedSlugs?: Set;
+ selectedSkillSlugs?: Set;
+ selectionBusySlug?: string | null;
+ onSelect: (skill: SkillSummary) => void;
+ onToggleSelection?: (skill: SkillSummary) => void;
+ emptyTitle?: string;
+ emptyMessage?: string;
+};
+
+function LoadingSkeleton() {
+ return (
+
+ );
+}
+
+export default function SkillGrid({
+ skills,
+ loading,
+ error,
+ query,
+ selectedSlug = null,
+ installedSlugs,
+ selectedSkillSlugs,
+ selectionBusySlug = null,
+ onSelect,
+ onToggleSelection,
+ emptyTitle = "No skills found.",
+ emptyMessage,
+}: SkillGridProps) {
+ if (loading) {
+ return (
+
+
+
+ Loading ClawHub skills...
+
+
+ {Array.from({ length: 6 }).map((_, index) => (
+
+ ))}
+
+
+ );
+ }
+
+ if (error) {
+ return (
+
+
+
Could not load skills.
+
+ {error || "ClawHub may be unavailable."}
+
+
+ );
+ }
+
+ if (!skills.length) {
+ const message =
+ emptyMessage ||
+ (query
+ ? "No skills found for your search."
+ : "ClawHub did not return any skills for the default browse view.");
+
+ return (
+
+
+
{emptyTitle}
+
{message}
+
+ );
+ }
+
+ return (
+
+ {skills.map((skill) => (
+
+ ))}
+
+ );
+}
diff --git a/frontend-dashboard/components/agents/openclaw/SkillSearchBar.tsx b/frontend-dashboard/components/agents/openclaw/SkillSearchBar.tsx
new file mode 100644
index 0000000..fabf89f
--- /dev/null
+++ b/frontend-dashboard/components/agents/openclaw/SkillSearchBar.tsx
@@ -0,0 +1,64 @@
+import { FormEvent } from "react";
+import { Search, X } from "lucide-react";
+
+type SkillSearchBarProps = {
+ query: string;
+ loading?: boolean;
+ onQueryChange: (value: string) => void;
+ onSubmit: () => void;
+ onClear: () => void;
+};
+
+export default function SkillSearchBar({
+ query,
+ loading = false,
+ onQueryChange,
+ onSubmit,
+ onClear,
+}: SkillSearchBarProps) {
+ function handleSubmit(event: FormEvent) {
+ event.preventDefault();
+ onSubmit();
+ }
+
+ return (
+
+ );
+}
diff --git a/frontend-dashboard/components/agents/openclaw/SkillSelectionTray.tsx b/frontend-dashboard/components/agents/openclaw/SkillSelectionTray.tsx
new file mode 100644
index 0000000..2be5dd3
--- /dev/null
+++ b/frontend-dashboard/components/agents/openclaw/SkillSelectionTray.tsx
@@ -0,0 +1,125 @@
+import { CheckCircle2, ChevronLeft, Rocket, X } from "lucide-react";
+import { DeployClawHubSkill } from "../../../lib/clawhubDeploy";
+
+type SkillSelectionTrayProps = {
+ skills: DeployClawHubSkill[];
+ mode?: "deploy" | "install";
+ deploying?: boolean;
+ installLabel?: string;
+ installDisabled?: boolean;
+ installError?: string | null;
+ onBack?: () => void;
+ onDeploy?: () => void;
+ onInstall?: () => void;
+ onRemoveSkill?: (skill: DeployClawHubSkill) => void;
+ onClearAll?: () => void;
+};
+
+export default function SkillSelectionTray({
+ skills,
+ mode = "deploy",
+ deploying = false,
+ installLabel,
+ installDisabled = false,
+ installError = null,
+ onBack,
+ onDeploy,
+ onInstall,
+ onRemoveSkill,
+ onClearAll,
+}: SkillSelectionTrayProps) {
+ const isDeployMode = mode === "deploy";
+
+ return (
+
+
+
+
+ Selected Skills
+
+
+
+ {skills.length} {isDeployMode ? "chosen for this deploy" : "selected for install"}
+
+
+ {isDeployMode
+ ? "These skills will be saved onto the new agent record when you click deploy. Runtime installation happens later in the deploy lifecycle, not on this page."
+ : "Queue one install job per selected skill for this running agent. Successful installs will update the saved ClawHub skill list and prompt a session restart."}
+
+ {skills.length ? (
+
+
+
+ Click a selected skill chip to review it, or remove it with the close button.
+
+ {onClearAll ? (
+
+ Clear all
+
+ ) : null}
+
+
+ {skills.map((skill) => (
+
+ {skill.name || skill.installSlug}
+ {onRemoveSkill ? (
+ onRemoveSkill(skill)}
+ className="inline-flex h-4 w-4 items-center justify-center rounded-full text-blue-500 transition-colors hover:bg-blue-100 hover:text-blue-700"
+ aria-label={`Remove ${skill.name || skill.installSlug} from selection`}
+ >
+
+
+ ) : null}
+
+ ))}
+
+
+ ) : (
+
+ {isDeployMode
+ ? "No ClawHub skills selected. You can still continue and deploy the agent without any."
+ : "No ClawHub skills selected yet. Pick one or more cards to queue installs."}
+
+ )}
+ {installError ?
{installError}
: null}
+
+
+
+ {isDeployMode && onBack ? (
+
+
+ Back
+
+ ) : null}
+
+
+
+ {isDeployMode
+ ? deploying
+ ? "Deploying..."
+ : "Deploy Agent & Open Validation"
+ : installLabel || "Install Selected Skills"}
+
+
+
+
+ );
+}
diff --git a/frontend-dashboard/lib/clawhubDeploy.ts b/frontend-dashboard/lib/clawhubDeploy.ts
new file mode 100644
index 0000000..dbdd4c1
--- /dev/null
+++ b/frontend-dashboard/lib/clawhubDeploy.ts
@@ -0,0 +1,90 @@
+export const DEPLOY_DRAFT_STORAGE_KEY = "nora.deployDraft.v1";
+
+export type DeployClawHubSkill = {
+ source: "clawhub";
+ installSlug: string;
+ author: string;
+ pagePath: string;
+ installedAt: string;
+ name?: string;
+ description?: string;
+};
+
+export type DeployDraft = {
+ name: string;
+ containerName: string;
+ runtimeFamily: string;
+ deployTarget: string;
+ sandboxProfile: string;
+ model: string;
+ deploymentMode: string;
+ migrationMethod: string;
+ migrationDraft: any;
+ migrationSource: any;
+ vcpu: number;
+ ramMb: number;
+ diskGb: number;
+ clawhubSkills: DeployClawHubSkill[];
+};
+
+type DraftResourceOptions = {
+ defaultVcpu?: number;
+ defaultRamMb?: number;
+ defaultDiskGb?: number;
+ maxVcpu?: number;
+ maxRamMb?: number;
+ maxDiskGb?: number;
+};
+
+function canUseStorage() {
+ return typeof window !== "undefined" && typeof window.sessionStorage !== "undefined";
+}
+
+export function loadDeployDraft(): DeployDraft | null {
+ if (!canUseStorage()) return null;
+
+ try {
+ const raw = window.sessionStorage.getItem(DEPLOY_DRAFT_STORAGE_KEY);
+ if (!raw) return null;
+ return JSON.parse(raw);
+ } catch {
+ return null;
+ }
+}
+
+export function saveDeployDraft(draft: DeployDraft) {
+ if (!canUseStorage()) return;
+ window.sessionStorage.setItem(DEPLOY_DRAFT_STORAGE_KEY, JSON.stringify(draft));
+}
+
+export function clearDeployDraft() {
+ if (!canUseStorage()) return;
+ window.sessionStorage.removeItem(DEPLOY_DRAFT_STORAGE_KEY);
+}
+
+function normalizeInteger(value: unknown, fallback: number) {
+ const parsed = Number.parseInt(String(value ?? ""), 10);
+ return Number.isFinite(parsed) ? parsed : fallback;
+}
+
+function clamp(value: number, min: number, max: number) {
+ return Math.min(max, Math.max(min, value));
+}
+
+export function normalizeDeployDraftResources(
+ draft: DeployDraft | null,
+ {
+ defaultVcpu = 1,
+ defaultRamMb = 1024,
+ defaultDiskGb = 10,
+ maxVcpu = 16,
+ maxRamMb = 32768,
+ maxDiskGb = 500,
+ }: DraftResourceOptions = {},
+) {
+ return {
+ vcpu: clamp(normalizeInteger(draft?.vcpu, defaultVcpu), 1, maxVcpu),
+ ramMb: clamp(normalizeInteger(draft?.ramMb, defaultRamMb), 512, maxRamMb),
+ diskGb: clamp(normalizeInteger(draft?.diskGb, defaultDiskGb), 10, maxDiskGb),
+ };
+}
diff --git a/frontend-dashboard/package-lock.json b/frontend-dashboard/package-lock.json
index 05dd097..08037f0 100644
--- a/frontend-dashboard/package-lock.json
+++ b/frontend-dashboard/package-lock.json
@@ -17,7 +17,9 @@
"postcss": "^8.5.10",
"react": "^19.2.5",
"react-dom": "^19.2.5",
+ "react-markdown": "^10.1.0",
"recharts": "^3.8.1",
+ "remark-gfm": "^4.0.1",
"tailwind-merge": "^3.5.0",
"tailwindcss": "^4.2.2"
},
@@ -589,9 +591,6 @@
"cpu": [
"arm"
],
- "libc": [
- "glibc"
- ],
"license": "LGPL-3.0-or-later",
"optional": true,
"os": [
@@ -608,9 +607,6 @@
"cpu": [
"arm64"
],
- "libc": [
- "glibc"
- ],
"license": "LGPL-3.0-or-later",
"optional": true,
"os": [
@@ -627,9 +623,6 @@
"cpu": [
"ppc64"
],
- "libc": [
- "glibc"
- ],
"license": "LGPL-3.0-or-later",
"optional": true,
"os": [
@@ -646,9 +639,6 @@
"cpu": [
"riscv64"
],
- "libc": [
- "glibc"
- ],
"license": "LGPL-3.0-or-later",
"optional": true,
"os": [
@@ -665,9 +655,6 @@
"cpu": [
"s390x"
],
- "libc": [
- "glibc"
- ],
"license": "LGPL-3.0-or-later",
"optional": true,
"os": [
@@ -684,9 +671,6 @@
"cpu": [
"x64"
],
- "libc": [
- "glibc"
- ],
"license": "LGPL-3.0-or-later",
"optional": true,
"os": [
@@ -703,9 +687,6 @@
"cpu": [
"arm64"
],
- "libc": [
- "musl"
- ],
"license": "LGPL-3.0-or-later",
"optional": true,
"os": [
@@ -722,9 +703,6 @@
"cpu": [
"x64"
],
- "libc": [
- "musl"
- ],
"license": "LGPL-3.0-or-later",
"optional": true,
"os": [
@@ -741,9 +719,6 @@
"cpu": [
"arm"
],
- "libc": [
- "glibc"
- ],
"license": "Apache-2.0",
"optional": true,
"os": [
@@ -766,9 +741,6 @@
"cpu": [
"arm64"
],
- "libc": [
- "glibc"
- ],
"license": "Apache-2.0",
"optional": true,
"os": [
@@ -791,9 +763,6 @@
"cpu": [
"ppc64"
],
- "libc": [
- "glibc"
- ],
"license": "Apache-2.0",
"optional": true,
"os": [
@@ -816,9 +785,6 @@
"cpu": [
"riscv64"
],
- "libc": [
- "glibc"
- ],
"license": "Apache-2.0",
"optional": true,
"os": [
@@ -841,9 +807,6 @@
"cpu": [
"s390x"
],
- "libc": [
- "glibc"
- ],
"license": "Apache-2.0",
"optional": true,
"os": [
@@ -866,9 +829,6 @@
"cpu": [
"x64"
],
- "libc": [
- "glibc"
- ],
"license": "Apache-2.0",
"optional": true,
"os": [
@@ -891,9 +851,6 @@
"cpu": [
"arm64"
],
- "libc": [
- "musl"
- ],
"license": "Apache-2.0",
"optional": true,
"os": [
@@ -916,9 +873,6 @@
"cpu": [
"x64"
],
- "libc": [
- "musl"
- ],
"license": "Apache-2.0",
"optional": true,
"os": [
@@ -1100,9 +1054,6 @@
"cpu": [
"arm64"
],
- "libc": [
- "glibc"
- ],
"license": "MIT",
"optional": true,
"os": [
@@ -1119,9 +1070,6 @@
"cpu": [
"arm64"
],
- "libc": [
- "musl"
- ],
"license": "MIT",
"optional": true,
"os": [
@@ -1138,9 +1086,6 @@
"cpu": [
"x64"
],
- "libc": [
- "glibc"
- ],
"license": "MIT",
"optional": true,
"os": [
@@ -1157,9 +1102,6 @@
"cpu": [
"x64"
],
- "libc": [
- "musl"
- ],
"license": "MIT",
"optional": true,
"os": [
@@ -1383,9 +1325,6 @@
"cpu": [
"arm64"
],
- "libc": [
- "glibc"
- ],
"license": "MIT",
"optional": true,
"os": [
@@ -1402,9 +1341,6 @@
"cpu": [
"arm64"
],
- "libc": [
- "musl"
- ],
"license": "MIT",
"optional": true,
"os": [
@@ -1421,9 +1357,6 @@
"cpu": [
"x64"
],
- "libc": [
- "glibc"
- ],
"license": "MIT",
"optional": true,
"os": [
@@ -1440,9 +1373,6 @@
"cpu": [
"x64"
],
- "libc": [
- "musl"
- ],
"license": "MIT",
"optional": true,
"os": [
@@ -1589,6 +1519,54 @@
"integrity": "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==",
"license": "MIT"
},
+ "node_modules/@types/debug": {
+ "version": "4.1.13",
+ "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.13.tgz",
+ "integrity": "sha512-KSVgmQmzMwPlmtljOomayoR89W4FynCAi3E8PPs7vmDVPe84hT+vGPKkJfThkmXs0x0jAaa9U8uW8bbfyS2fWw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/ms": "*"
+ }
+ },
+ "node_modules/@types/estree": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz",
+ "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==",
+ "license": "MIT"
+ },
+ "node_modules/@types/estree-jsx": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.5.tgz",
+ "integrity": "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "*"
+ }
+ },
+ "node_modules/@types/hast": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz",
+ "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "*"
+ }
+ },
+ "node_modules/@types/mdast": {
+ "version": "4.0.4",
+ "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz",
+ "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "*"
+ }
+ },
+ "node_modules/@types/ms": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/@types/ms/-/ms-2.1.0.tgz",
+ "integrity": "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==",
+ "license": "MIT"
+ },
"node_modules/@types/node": {
"version": "25.6.0",
"resolved": "https://registry.npmjs.org/@types/node/-/node-25.6.0.tgz",
@@ -1603,7 +1581,6 @@
"version": "19.2.14",
"resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.14.tgz",
"integrity": "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w==",
- "devOptional": true,
"license": "MIT",
"dependencies": {
"csstype": "^3.2.2"
@@ -1619,12 +1596,24 @@
"@types/react": "^19.2.0"
}
},
+ "node_modules/@types/unist": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz",
+ "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==",
+ "license": "MIT"
+ },
"node_modules/@types/use-sync-external-store": {
"version": "0.0.6",
"resolved": "https://registry.npmjs.org/@types/use-sync-external-store/-/use-sync-external-store-0.0.6.tgz",
"integrity": "sha512-zFDAD+tlpf2r4asuHEj0XH6pY6i0g5NeAHPn+15wk3BV6JA69eERFXC1gyGThDkVa1zCyKr5jox1+2LbV/AMLg==",
"license": "MIT"
},
+ "node_modules/@ungap/structured-clone": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz",
+ "integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==",
+ "license": "ISC"
+ },
"node_modules/@xterm/addon-fit": {
"version": "0.11.0",
"resolved": "https://registry.npmjs.org/@xterm/addon-fit/-/addon-fit-0.11.0.tgz",
@@ -1682,6 +1671,16 @@
"postcss": "^8.1.0"
}
},
+ "node_modules/bail": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
+ "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
"node_modules/baseline-browser-mapping": {
"version": "2.10.20",
"resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.10.20.tgz",
@@ -1747,6 +1746,56 @@
],
"license": "CC-BY-4.0"
},
+ "node_modules/ccount": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz",
+ "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/character-entities": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz",
+ "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/character-entities-html4": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz",
+ "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/character-entities-legacy": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz",
+ "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/character-reference-invalid": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz",
+ "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
"node_modules/client-only": {
"version": "0.0.1",
"resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz",
@@ -1761,11 +1810,20 @@
"node": ">=6"
}
},
+ "node_modules/comma-separated-tokens": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz",
+ "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
"node_modules/csstype": {
"version": "3.2.3",
"resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz",
"integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==",
- "devOptional": true,
"license": "MIT"
},
"node_modules/d3-array": {
@@ -1889,12 +1947,51 @@
"node": ">=12"
}
},
+ "node_modules/debug": {
+ "version": "4.4.3",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz",
+ "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
+ "license": "MIT",
+ "dependencies": {
+ "ms": "^2.1.3"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
"node_modules/decimal.js-light": {
"version": "2.5.1",
"resolved": "https://registry.npmjs.org/decimal.js-light/-/decimal.js-light-2.5.1.tgz",
"integrity": "sha512-qIMFpTMZmny+MMIitAB6D7iVPEorVw6YQRWkvarTkT4tBeSLLiHzcwj6q0MmYSFCiVpiqPJTJEYIrpcPzVEIvg==",
"license": "MIT"
},
+ "node_modules/decode-named-character-reference": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.3.0.tgz",
+ "integrity": "sha512-GtpQYB283KrPp6nRw50q3U9/VfOutZOe103qlN7BPP6Ad27xYnOIWv4lPzo8HCAL+mMZofJ9KEy30fq6MfaK6Q==",
+ "license": "MIT",
+ "dependencies": {
+ "character-entities": "^2.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/dequal": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz",
+ "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
"node_modules/detect-libc": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.1.2.tgz",
@@ -1904,6 +2001,19 @@
"node": ">=8"
}
},
+ "node_modules/devlop": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz",
+ "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==",
+ "license": "MIT",
+ "dependencies": {
+ "dequal": "^2.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
"node_modules/electron-to-chromium": {
"version": "1.5.340",
"resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.340.tgz",
@@ -1984,12 +2094,40 @@
"node": ">=6"
}
},
+ "node_modules/escape-string-regexp": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz",
+ "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/estree-util-is-identifier-name": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz",
+ "integrity": "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==",
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
"node_modules/eventemitter3": {
"version": "5.0.4",
"resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-5.0.4.tgz",
"integrity": "sha512-mlsTRyGaPBjPedk6Bvw+aqbsXDtoAyAzm5MO7JgU+yVRyMQ5O8bD4Kcci7BS85f93veegeCPkL8R4GLClnjLFw==",
"license": "MIT"
},
+ "node_modules/extend": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
+ "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==",
+ "license": "MIT"
+ },
"node_modules/fraction.js": {
"version": "5.3.4",
"resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-5.3.4.tgz",
@@ -2036,6 +2174,56 @@
"integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==",
"license": "ISC"
},
+ "node_modules/hast-util-to-jsx-runtime": {
+ "version": "2.3.6",
+ "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.6.tgz",
+ "integrity": "sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "^1.0.0",
+ "@types/hast": "^3.0.0",
+ "@types/unist": "^3.0.0",
+ "comma-separated-tokens": "^2.0.0",
+ "devlop": "^1.0.0",
+ "estree-util-is-identifier-name": "^3.0.0",
+ "hast-util-whitespace": "^3.0.0",
+ "mdast-util-mdx-expression": "^2.0.0",
+ "mdast-util-mdx-jsx": "^3.0.0",
+ "mdast-util-mdxjs-esm": "^2.0.0",
+ "property-information": "^7.0.0",
+ "space-separated-tokens": "^2.0.0",
+ "style-to-js": "^1.0.0",
+ "unist-util-position": "^5.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-whitespace": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz",
+ "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/html-url-attributes": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/html-url-attributes/-/html-url-attributes-3.0.1.tgz",
+ "integrity": "sha512-ol6UPyBWqsrO6EJySPz2O7ZSr856WDrEzM5zMqp+FJJLGMW35cLYmmZnl0vztAZxRUoNZJFTCohfjuIJ8I4QBQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
"node_modules/immer": {
"version": "10.2.0",
"resolved": "https://registry.npmjs.org/immer/-/immer-10.2.0.tgz",
@@ -2046,6 +2234,12 @@
"url": "https://opencollective.com/immer"
}
},
+ "node_modules/inline-style-parser": {
+ "version": "0.2.7",
+ "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.7.tgz",
+ "integrity": "sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA==",
+ "license": "MIT"
+ },
"node_modules/internmap": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz",
@@ -2055,6 +2249,62 @@
"node": ">=12"
}
},
+ "node_modules/is-alphabetical": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz",
+ "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-alphanumerical": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz",
+ "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==",
+ "license": "MIT",
+ "dependencies": {
+ "is-alphabetical": "^2.0.0",
+ "is-decimal": "^2.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-decimal": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz",
+ "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-hexadecimal": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz",
+ "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-plain-obj": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
+ "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
"node_modules/jiti": {
"version": "2.6.1",
"resolved": "https://registry.npmjs.org/jiti/-/jiti-2.6.1.tgz",
@@ -2200,9 +2450,6 @@
"cpu": [
"arm64"
],
- "libc": [
- "glibc"
- ],
"license": "MPL-2.0",
"optional": true,
"os": [
@@ -2223,9 +2470,6 @@
"cpu": [
"arm64"
],
- "libc": [
- "musl"
- ],
"license": "MPL-2.0",
"optional": true,
"os": [
@@ -2246,9 +2490,6 @@
"cpu": [
"x64"
],
- "libc": [
- "glibc"
- ],
"license": "MPL-2.0",
"optional": true,
"os": [
@@ -2269,9 +2510,6 @@
"cpu": [
"x64"
],
- "libc": [
- "musl"
- ],
"license": "MPL-2.0",
"optional": true,
"os": [
@@ -2325,6 +2563,16 @@
"url": "https://opencollective.com/parcel"
}
},
+ "node_modules/longest-streak": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz",
+ "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
"node_modules/lucide-react": {
"version": "1.8.0",
"resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-1.8.0.tgz",
@@ -2343,52 +2591,901 @@
"@jridgewell/sourcemap-codec": "^1.5.5"
}
},
- "node_modules/nanoid": {
- "version": "3.3.11",
- "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz",
- "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==",
- "funding": [
- {
- "type": "github",
- "url": "https://github.com/sponsors/ai"
- }
- ],
+ "node_modules/markdown-table": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.4.tgz",
+ "integrity": "sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==",
"license": "MIT",
- "bin": {
- "nanoid": "bin/nanoid.cjs"
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/mdast-util-find-and-replace": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.2.tgz",
+ "integrity": "sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "escape-string-regexp": "^5.0.0",
+ "unist-util-is": "^6.0.0",
+ "unist-util-visit-parents": "^6.0.0"
},
- "engines": {
- "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
}
},
- "node_modules/next": {
- "version": "16.2.4",
- "resolved": "https://registry.npmjs.org/next/-/next-16.2.4.tgz",
- "integrity": "sha512-kPvz56wF5frc+FxlHI5qnklCzbq53HTwORaWBGdT0vNoKh1Aya9XC8aPauH4NJxqtzbWsS5mAbctm4cr+EkQ2Q==",
+ "node_modules/mdast-util-from-markdown": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.3.tgz",
+ "integrity": "sha512-W4mAWTvSlKvf8L6J+VN9yLSqQ9AOAAvHuoDAmPkz4dHf553m5gVj2ejadHJhoJmcmxEnOv6Pa8XJhpxE93kb8Q==",
"license": "MIT",
"dependencies": {
- "@next/env": "16.2.4",
- "@swc/helpers": "0.5.15",
- "baseline-browser-mapping": "^2.9.19",
- "caniuse-lite": "^1.0.30001579",
- "postcss": "8.4.31",
- "styled-jsx": "5.1.6"
+ "@types/mdast": "^4.0.0",
+ "@types/unist": "^3.0.0",
+ "decode-named-character-reference": "^1.0.0",
+ "devlop": "^1.0.0",
+ "mdast-util-to-string": "^4.0.0",
+ "micromark": "^4.0.0",
+ "micromark-util-decode-numeric-character-reference": "^2.0.0",
+ "micromark-util-decode-string": "^2.0.0",
+ "micromark-util-normalize-identifier": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0",
+ "unist-util-stringify-position": "^4.0.0"
},
- "bin": {
- "next": "dist/bin/next"
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-gfm": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.1.0.tgz",
+ "integrity": "sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ==",
+ "license": "MIT",
+ "dependencies": {
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-gfm-autolink-literal": "^2.0.0",
+ "mdast-util-gfm-footnote": "^2.0.0",
+ "mdast-util-gfm-strikethrough": "^2.0.0",
+ "mdast-util-gfm-table": "^2.0.0",
+ "mdast-util-gfm-task-list-item": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0"
},
- "engines": {
- "node": ">=20.9.0"
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-gfm-autolink-literal": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz",
+ "integrity": "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "ccount": "^2.0.0",
+ "devlop": "^1.0.0",
+ "mdast-util-find-and-replace": "^3.0.0",
+ "micromark-util-character": "^2.0.0"
},
- "optionalDependencies": {
- "@next/swc-darwin-arm64": "16.2.4",
- "@next/swc-darwin-x64": "16.2.4",
- "@next/swc-linux-arm64-gnu": "16.2.4",
- "@next/swc-linux-arm64-musl": "16.2.4",
- "@next/swc-linux-x64-gnu": "16.2.4",
- "@next/swc-linux-x64-musl": "16.2.4",
- "@next/swc-win32-arm64-msvc": "16.2.4",
- "@next/swc-win32-x64-msvc": "16.2.4",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-gfm-footnote": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.1.0.tgz",
+ "integrity": "sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "devlop": "^1.1.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0",
+ "micromark-util-normalize-identifier": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-gfm-strikethrough": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz",
+ "integrity": "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-gfm-table": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz",
+ "integrity": "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "devlop": "^1.0.0",
+ "markdown-table": "^3.0.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-gfm-task-list-item": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz",
+ "integrity": "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "devlop": "^1.0.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-mdx-expression": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.1.tgz",
+ "integrity": "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "devlop": "^1.0.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-mdx-jsx": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.2.0.tgz",
+ "integrity": "sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "@types/unist": "^3.0.0",
+ "ccount": "^2.0.0",
+ "devlop": "^1.1.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0",
+ "parse-entities": "^4.0.0",
+ "stringify-entities": "^4.0.0",
+ "unist-util-stringify-position": "^4.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-mdxjs-esm": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz",
+ "integrity": "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "devlop": "^1.0.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-phrasing": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz",
+ "integrity": "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "unist-util-is": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-to-hast": {
+ "version": "13.2.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.1.tgz",
+ "integrity": "sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "@ungap/structured-clone": "^1.0.0",
+ "devlop": "^1.0.0",
+ "micromark-util-sanitize-uri": "^2.0.0",
+ "trim-lines": "^3.0.0",
+ "unist-util-position": "^5.0.0",
+ "unist-util-visit": "^5.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-to-markdown": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.2.tgz",
+ "integrity": "sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "@types/unist": "^3.0.0",
+ "longest-streak": "^3.0.0",
+ "mdast-util-phrasing": "^4.0.0",
+ "mdast-util-to-string": "^4.0.0",
+ "micromark-util-classify-character": "^2.0.0",
+ "micromark-util-decode-string": "^2.0.0",
+ "unist-util-visit": "^5.0.0",
+ "zwitch": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-to-string": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz",
+ "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.2.tgz",
+ "integrity": "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "@types/debug": "^4.0.0",
+ "debug": "^4.0.0",
+ "decode-named-character-reference": "^1.0.0",
+ "devlop": "^1.0.0",
+ "micromark-core-commonmark": "^2.0.0",
+ "micromark-factory-space": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-chunked": "^2.0.0",
+ "micromark-util-combine-extensions": "^2.0.0",
+ "micromark-util-decode-numeric-character-reference": "^2.0.0",
+ "micromark-util-encode": "^2.0.0",
+ "micromark-util-normalize-identifier": "^2.0.0",
+ "micromark-util-resolve-all": "^2.0.0",
+ "micromark-util-sanitize-uri": "^2.0.0",
+ "micromark-util-subtokenize": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-core-commonmark": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.3.tgz",
+ "integrity": "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "decode-named-character-reference": "^1.0.0",
+ "devlop": "^1.0.0",
+ "micromark-factory-destination": "^2.0.0",
+ "micromark-factory-label": "^2.0.0",
+ "micromark-factory-space": "^2.0.0",
+ "micromark-factory-title": "^2.0.0",
+ "micromark-factory-whitespace": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-chunked": "^2.0.0",
+ "micromark-util-classify-character": "^2.0.0",
+ "micromark-util-html-tag-name": "^2.0.0",
+ "micromark-util-normalize-identifier": "^2.0.0",
+ "micromark-util-resolve-all": "^2.0.0",
+ "micromark-util-subtokenize": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-extension-gfm": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz",
+ "integrity": "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==",
+ "license": "MIT",
+ "dependencies": {
+ "micromark-extension-gfm-autolink-literal": "^2.0.0",
+ "micromark-extension-gfm-footnote": "^2.0.0",
+ "micromark-extension-gfm-strikethrough": "^2.0.0",
+ "micromark-extension-gfm-table": "^2.0.0",
+ "micromark-extension-gfm-tagfilter": "^2.0.0",
+ "micromark-extension-gfm-task-list-item": "^2.0.0",
+ "micromark-util-combine-extensions": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-gfm-autolink-literal": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz",
+ "integrity": "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==",
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-sanitize-uri": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-gfm-footnote": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz",
+ "integrity": "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==",
+ "license": "MIT",
+ "dependencies": {
+ "devlop": "^1.0.0",
+ "micromark-core-commonmark": "^2.0.0",
+ "micromark-factory-space": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-normalize-identifier": "^2.0.0",
+ "micromark-util-sanitize-uri": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-gfm-strikethrough": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.1.0.tgz",
+ "integrity": "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==",
+ "license": "MIT",
+ "dependencies": {
+ "devlop": "^1.0.0",
+ "micromark-util-chunked": "^2.0.0",
+ "micromark-util-classify-character": "^2.0.0",
+ "micromark-util-resolve-all": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-gfm-table": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.1.tgz",
+ "integrity": "sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg==",
+ "license": "MIT",
+ "dependencies": {
+ "devlop": "^1.0.0",
+ "micromark-factory-space": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-gfm-tagfilter": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-2.0.0.tgz",
+ "integrity": "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==",
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-types": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-gfm-task-list-item": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.1.0.tgz",
+ "integrity": "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==",
+ "license": "MIT",
+ "dependencies": {
+ "devlop": "^1.0.0",
+ "micromark-factory-space": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-factory-destination": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.1.tgz",
+ "integrity": "sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-factory-label": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.1.tgz",
+ "integrity": "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "devlop": "^1.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-factory-space": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz",
+ "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-factory-title": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.1.tgz",
+ "integrity": "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-factory-space": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-factory-whitespace": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.1.tgz",
+ "integrity": "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-factory-space": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-character": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz",
+ "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-chunked": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.1.tgz",
+ "integrity": "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-symbol": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-classify-character": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.1.tgz",
+ "integrity": "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-combine-extensions": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.1.tgz",
+ "integrity": "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-chunked": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-decode-numeric-character-reference": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.2.tgz",
+ "integrity": "sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-symbol": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-decode-string": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.1.tgz",
+ "integrity": "sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "decode-named-character-reference": "^1.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-decode-numeric-character-reference": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-encode": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.1.tgz",
+ "integrity": "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/micromark-util-html-tag-name": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.1.tgz",
+ "integrity": "sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/micromark-util-normalize-identifier": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.1.tgz",
+ "integrity": "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-symbol": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-resolve-all": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.1.tgz",
+ "integrity": "sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-sanitize-uri": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.1.tgz",
+ "integrity": "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-encode": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-subtokenize": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.1.0.tgz",
+ "integrity": "sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "devlop": "^1.0.0",
+ "micromark-util-chunked": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-symbol": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz",
+ "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/micromark-util-types": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.2.tgz",
+ "integrity": "sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
+ "license": "MIT"
+ },
+ "node_modules/nanoid": {
+ "version": "3.3.11",
+ "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz",
+ "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "bin": {
+ "nanoid": "bin/nanoid.cjs"
+ },
+ "engines": {
+ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
+ }
+ },
+ "node_modules/next": {
+ "version": "16.2.4",
+ "resolved": "https://registry.npmjs.org/next/-/next-16.2.4.tgz",
+ "integrity": "sha512-kPvz56wF5frc+FxlHI5qnklCzbq53HTwORaWBGdT0vNoKh1Aya9XC8aPauH4NJxqtzbWsS5mAbctm4cr+EkQ2Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@next/env": "16.2.4",
+ "@swc/helpers": "0.5.15",
+ "baseline-browser-mapping": "^2.9.19",
+ "caniuse-lite": "^1.0.30001579",
+ "postcss": "8.4.31",
+ "styled-jsx": "5.1.6"
+ },
+ "bin": {
+ "next": "dist/bin/next"
+ },
+ "engines": {
+ "node": ">=20.9.0"
+ },
+ "optionalDependencies": {
+ "@next/swc-darwin-arm64": "16.2.4",
+ "@next/swc-darwin-x64": "16.2.4",
+ "@next/swc-linux-arm64-gnu": "16.2.4",
+ "@next/swc-linux-arm64-musl": "16.2.4",
+ "@next/swc-linux-x64-gnu": "16.2.4",
+ "@next/swc-linux-x64-musl": "16.2.4",
+ "@next/swc-win32-arm64-msvc": "16.2.4",
+ "@next/swc-win32-x64-msvc": "16.2.4",
"sharp": "^0.34.5"
},
"peerDependencies": {
@@ -2447,6 +3544,31 @@
"integrity": "sha512-1h5gKZCF+pO/o3Iqt5Jp7wc9rH3eJJ0+nh/CIoiRwjRxde/hAHyLPXYN4V3CqKAbiZPSeJFSWHmJsbkicta0Eg==",
"license": "MIT"
},
+ "node_modules/parse-entities": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.2.tgz",
+ "integrity": "sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "character-entities-legacy": "^3.0.0",
+ "character-reference-invalid": "^2.0.0",
+ "decode-named-character-reference": "^1.0.0",
+ "is-alphanumerical": "^2.0.0",
+ "is-decimal": "^2.0.0",
+ "is-hexadecimal": "^2.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/parse-entities/node_modules/@types/unist": {
+ "version": "2.0.11",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz",
+ "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==",
+ "license": "MIT"
+ },
"node_modules/picocolors": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
@@ -2486,6 +3608,16 @@
"resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz",
"integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ=="
},
+ "node_modules/property-information": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz",
+ "integrity": "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
"node_modules/react": {
"version": "19.2.5",
"resolved": "https://registry.npmjs.org/react/-/react-19.2.5.tgz",
@@ -2514,6 +3646,33 @@
"license": "MIT",
"peer": true
},
+ "node_modules/react-markdown": {
+ "version": "10.1.0",
+ "resolved": "https://registry.npmjs.org/react-markdown/-/react-markdown-10.1.0.tgz",
+ "integrity": "sha512-qKxVopLT/TyA6BX3Ue5NwabOsAzm0Q7kAPwq6L+wWDwisYs7R8vZ0nRXqq6rkueboxpkjvLGU9fWifiX/ZZFxQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "devlop": "^1.0.0",
+ "hast-util-to-jsx-runtime": "^2.0.0",
+ "html-url-attributes": "^3.0.0",
+ "mdast-util-to-hast": "^13.0.0",
+ "remark-parse": "^11.0.0",
+ "remark-rehype": "^11.0.0",
+ "unified": "^11.0.0",
+ "unist-util-visit": "^5.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ },
+ "peerDependencies": {
+ "@types/react": ">=18",
+ "react": ">=18"
+ }
+ },
"node_modules/react-redux": {
"version": "9.2.0",
"resolved": "https://registry.npmjs.org/react-redux/-/react-redux-9.2.0.tgz",
@@ -2582,6 +3741,72 @@
"redux": "^5.0.0"
}
},
+ "node_modules/remark-gfm": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.1.tgz",
+ "integrity": "sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "mdast-util-gfm": "^3.0.0",
+ "micromark-extension-gfm": "^3.0.0",
+ "remark-parse": "^11.0.0",
+ "remark-stringify": "^11.0.0",
+ "unified": "^11.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-parse": {
+ "version": "11.0.0",
+ "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz",
+ "integrity": "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "micromark-util-types": "^2.0.0",
+ "unified": "^11.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-rehype": {
+ "version": "11.1.2",
+ "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.1.2.tgz",
+ "integrity": "sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "mdast-util-to-hast": "^13.0.0",
+ "unified": "^11.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-stringify": {
+ "version": "11.0.0",
+ "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-11.0.0.tgz",
+ "integrity": "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "mdast-util-to-markdown": "^2.0.0",
+ "unified": "^11.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
"node_modules/reselect": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/reselect/-/reselect-5.1.1.tgz",
@@ -2671,6 +3896,48 @@
"node": ">=0.10.0"
}
},
+ "node_modules/space-separated-tokens": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz",
+ "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/stringify-entities": {
+ "version": "4.0.4",
+ "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz",
+ "integrity": "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==",
+ "license": "MIT",
+ "dependencies": {
+ "character-entities-html4": "^2.0.0",
+ "character-entities-legacy": "^3.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/style-to-js": {
+ "version": "1.1.21",
+ "resolved": "https://registry.npmjs.org/style-to-js/-/style-to-js-1.1.21.tgz",
+ "integrity": "sha512-RjQetxJrrUJLQPHbLku6U/ocGtzyjbJMP9lCNK7Ag0CNh690nSH8woqWH9u16nMjYBAok+i7JO1NP2pOy8IsPQ==",
+ "license": "MIT",
+ "dependencies": {
+ "style-to-object": "1.0.14"
+ }
+ },
+ "node_modules/style-to-object": {
+ "version": "1.0.14",
+ "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.14.tgz",
+ "integrity": "sha512-LIN7rULI0jBscWQYaSswptyderlarFkjQ+t79nzty8tcIAceVomEVlLzH5VP4Cmsv6MtKhs7qaAiwlcp+Mgaxw==",
+ "license": "MIT",
+ "dependencies": {
+ "inline-style-parser": "0.2.7"
+ }
+ },
"node_modules/styled-jsx": {
"version": "5.1.6",
"resolved": "https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.1.6.tgz",
@@ -2729,6 +3996,26 @@
"integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==",
"license": "MIT"
},
+ "node_modules/trim-lines": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz",
+ "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/trough": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz",
+ "integrity": "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
"node_modules/tslib": {
"version": "2.8.1",
"resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz",
@@ -2776,6 +4063,93 @@
"dev": true,
"license": "MIT"
},
+ "node_modules/unified": {
+ "version": "11.0.5",
+ "resolved": "https://registry.npmjs.org/unified/-/unified-11.0.5.tgz",
+ "integrity": "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "bail": "^2.0.0",
+ "devlop": "^1.0.0",
+ "extend": "^3.0.0",
+ "is-plain-obj": "^4.0.0",
+ "trough": "^2.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-is": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.1.tgz",
+ "integrity": "sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-position": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz",
+ "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-stringify-position": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz",
+ "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-visit": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.1.0.tgz",
+ "integrity": "sha512-m+vIdyeCOpdr/QeQCu2EzxX/ohgS8KbnPDgFni4dQsfSCtpz8UqDyY5GjRru8PDKuYn7Fq19j1CQ+nJSsGKOzg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "unist-util-is": "^6.0.0",
+ "unist-util-visit-parents": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-visit-parents": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.2.tgz",
+ "integrity": "sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "unist-util-is": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
"node_modules/update-browserslist-db": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz",
@@ -2815,6 +4189,34 @@
"react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
}
},
+ "node_modules/vfile": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz",
+ "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/vfile-message": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.3.tgz",
+ "integrity": "sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "unist-util-stringify-position": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
"node_modules/victory-vendor": {
"version": "37.3.6",
"resolved": "https://registry.npmjs.org/victory-vendor/-/victory-vendor-37.3.6.tgz",
@@ -2836,6 +4238,16 @@
"d3-time": "^3.0.0",
"d3-timer": "^3.0.1"
}
+ },
+ "node_modules/zwitch": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz",
+ "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
}
}
}
diff --git a/frontend-dashboard/package.json b/frontend-dashboard/package.json
index 6fa2447..4722ec3 100644
--- a/frontend-dashboard/package.json
+++ b/frontend-dashboard/package.json
@@ -19,10 +19,12 @@
"clsx": "^2.1.0",
"lucide-react": "^1.8.0",
"next": "^16.2.4",
+ "react-markdown": "^10.1.0",
"postcss": "^8.5.10",
"react": "^19.2.5",
"react-dom": "^19.2.5",
"recharts": "^3.8.1",
+ "remark-gfm": "^4.0.1",
"tailwind-merge": "^3.5.0",
"tailwindcss": "^4.2.2"
},
diff --git a/frontend-dashboard/pages/agents/[id].tsx b/frontend-dashboard/pages/agents/[id].tsx
index a78821d..bdc442e 100644
--- a/frontend-dashboard/pages/agents/[id].tsx
+++ b/frontend-dashboard/pages/agents/[id].tsx
@@ -65,6 +65,7 @@ export default function AgentDetail() {
const [publishDescription, setPublishDescription] = useState("");
const [publishCategory, setPublishCategory] = useState("General");
const [publishIssues, setPublishIssues] = useState([]);
+ const [showRestartBanner, setShowRestartBanner] = useState(false);
const [backendConfig, setBackendConfig] = useState(null);
const [viewerRole, setViewerRole] = useState("user");
const toast = useToast();
@@ -128,7 +129,9 @@ export default function AgentDetail() {
// Refresh immediately when tab becomes visible (e.g. after using Docker Desktop)
useEffect(() => {
- const onVisible = () => { if (document.visibilityState === "visible" && id) refreshAgent(); };
+ const onVisible = () => {
+ if (document.visibilityState === "visible" && id) refreshAgent();
+ };
document.addEventListener("visibilitychange", onVisible);
return () => document.removeEventListener("visibilitychange", onVisible);
}, [id]);
@@ -138,7 +141,7 @@ export default function AgentDetail() {
setDuplicateName(`${agent.name} Copy`);
setPublishName(agent.name);
setPublishDescription(
- `Shared template built from ${agent.name}. Review the included instructions before installing.`
+ `Shared template built from ${agent.name}. Review the included instructions before installing.`,
);
setPublishCategory("General");
}
@@ -164,7 +167,7 @@ export default function AgentDetail() {
setPublishIssues([]);
setPublishName(agent?.name || "Untitled Template");
setPublishDescription(
- `Shared template built from ${agent?.name || "this agent"}. Review the included instructions before installing.`
+ `Shared template built from ${agent?.name || "this agent"}. Review the included instructions before installing.`,
);
setPublishCategory("General");
setShowPublishDialog(true);
@@ -174,22 +177,34 @@ export default function AgentDetail() {
setActionLoading(action);
try {
const endpoint =
- action === "start" ? `/api/agents/${id}/start` :
- action === "stop" ? `/api/agents/${id}/stop` :
- action === "restart" ? `/api/agents/${id}/restart` :
- action === "redeploy" ? `/api/agents/${id}/redeploy` : null;
+ action === "start"
+ ? `/api/agents/${id}/start`
+ : action === "stop"
+ ? `/api/agents/${id}/stop`
+ : action === "restart"
+ ? `/api/agents/${id}/restart`
+ : action === "redeploy"
+ ? `/api/agents/${id}/redeploy`
+ : null;
if (!endpoint) return;
const res = await fetchWithAuth(endpoint, { method: "POST" });
if (res.ok) {
- const statusMap = { start: "running", stop: "stopped", restart: "running", redeploy: "queued" };
+ const statusMap = {
+ start: "running",
+ stop: "stopped",
+ restart: "running",
+ redeploy: "queued",
+ };
setAgent((a) => ({ ...a, status: statusMap[action] || a.status }));
- toast.success(`Agent ${action === "redeploy" ? "re-queued" : action + (action.endsWith("e") ? "d" : "ed")}`);
+ toast.success(
+ `Agent ${action === "redeploy" ? "re-queued" : action + (action.endsWith("e") ? "d" : "ed")}`,
+ );
// Refresh to get authoritative state from server
setTimeout(refreshAgent, 2000);
} else {
const data = await res.json();
- const ref = data.correlationId ? ` (ref: ${data.correlationId.slice(0, 8)})` : '';
+ const ref = data.correlationId ? ` (ref: ${data.correlationId.slice(0, 8)})` : "";
toast.error((data.error || `Failed to ${action} agent`) + ref);
}
} catch (err) {
@@ -250,9 +265,7 @@ export default function AgentDetail() {
name: trimmedName,
clone_mode: duplicateCloneMode,
runtime_family:
- duplicateRuntimeFamily ||
- runtimeFamilyFromConfig(backendConfig)?.id ||
- "openclaw",
+ duplicateRuntimeFamily || runtimeFamilyFromConfig(backendConfig)?.id || "openclaw",
deploy_target: duplicateExecutionTarget,
sandbox_profile: duplicateSandboxProfile || "standard",
}),
@@ -288,9 +301,7 @@ export default function AgentDetail() {
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
runtime_family:
- redeployRuntimeFamily ||
- runtimeFamilyFromConfig(backendConfig)?.id ||
- "openclaw",
+ redeployRuntimeFamily || runtimeFamilyFromConfig(backendConfig)?.id || "openclaw",
deploy_target: redeployExecutionTarget,
sandbox_profile: redeploySandboxProfile || "standard",
}),
@@ -298,8 +309,7 @@ export default function AgentDetail() {
if (res.ok) {
const nextSandboxProfile = redeploySandboxProfile || "standard";
- const nextExecutionTarget =
- redeployExecutionTarget || resolveAgentExecutionTarget(agent);
+ const nextExecutionTarget = redeployExecutionTarget || resolveAgentExecutionTarget(agent);
setShowRedeployDialog(false);
setAgent((current) =>
current
@@ -314,16 +324,13 @@ export default function AgentDetail() {
deploy_target: nextExecutionTarget,
sandbox_profile: nextSandboxProfile,
backend_type: resolveBackendTypeForSelection({
- runtimeFamily:
- redeployRuntimeFamily ||
- current.runtime_family ||
- "openclaw",
+ runtimeFamily: redeployRuntimeFamily || current.runtime_family || "openclaw",
deployTarget: nextExecutionTarget,
sandboxProfile: nextSandboxProfile,
}),
sandbox_type: nextSandboxProfile,
}
- : current
+ : current,
);
toast.success("Agent re-queued");
setTimeout(refreshAgent, 2000);
@@ -331,9 +338,7 @@ export default function AgentDetail() {
}
const data = await res.json().catch(() => ({}));
- const ref = data.correlationId
- ? ` (ref: ${data.correlationId.slice(0, 8)})`
- : "";
+ const ref = data.correlationId ? ` (ref: ${data.correlationId.slice(0, 8)})` : "";
toast.error((data.error || "Failed to redeploy agent") + ref);
} catch (err) {
console.error(err);
@@ -442,10 +447,7 @@ export default function AgentDetail() {
const supportsGateway = runtimeSupportsGateway(runtimeFamily);
useEffect(() => {
- if (
- runtimeFamily === "hermes" &&
- (activeTab === "openclaw" || activeTab === "nemoclaw")
- ) {
+ if (runtimeFamily === "hermes" && (activeTab === "openclaw" || activeTab === "nemoclaw")) {
setActiveTab("overview");
return;
}
@@ -470,7 +472,10 @@ export default function AgentDetail() {
@@ -478,43 +483,42 @@ export default function AgentDetail() {
);
}
- const executionTargetLabel = formatExecutionTargetLabel(
- resolveAgentExecutionTarget(agent)
- );
+ const executionTargetLabel = formatExecutionTargetLabel(resolveAgentExecutionTarget(agent));
const sandboxProfile = resolveAgentSandboxProfile(agent);
const sandboxLabel = formatSandboxProfileLabel(sandboxProfile);
const duplicateActiveExecutionTarget = activeExecutionTargetFromConfig(
backendConfig,
duplicateRuntimeFamily,
- duplicateExecutionTarget
+ duplicateExecutionTarget,
);
const duplicateActiveSandboxOption = activeSandboxOptionFromTarget(
duplicateActiveExecutionTarget,
- duplicateSandboxProfile
+ duplicateSandboxProfile,
);
const redeployActiveExecutionTarget = activeExecutionTargetFromConfig(
backendConfig,
redeployRuntimeFamily,
- redeployExecutionTarget
+ redeployExecutionTarget,
);
const redeployActiveSandboxOption = activeSandboxOptionFromTarget(
redeployActiveExecutionTarget,
- redeploySandboxProfile
- );
- const canDuplicate = Boolean(
- backendConfig && duplicateActiveSandboxOption?.available
- );
- const canRedeploy = Boolean(
- backendConfig && redeployActiveSandboxOption?.available
+ redeploySandboxProfile,
);
+ const canDuplicate = Boolean(backendConfig && duplicateActiveSandboxOption?.available);
+ const canRedeploy = Boolean(backendConfig && redeployActiveSandboxOption?.available);
return (
-
+
{/* Header Bar */}
-
+
@@ -522,7 +526,9 @@ export default function AgentDetail() {
-
{agent.name}
+
+ {agent.name}
+
{agent.id.slice(0, 8)}
@@ -539,15 +545,23 @@ export default function AgentDetail() {
-
+
-
Step 3 of 3 — Validate
+
+ Step 3 of 3 — Validate
+
{agent.status === "running" || agent.status === "warning"
? "Use this agent detail view to prove the runtime works end-to-end."
: "This agent still needs to finish starting before the full validation pass."}
-
+
{agent.status === "running" || agent.status === "warning"
? supportsGateway
? "Check chat, logs, terminal, and the OpenClaw surface from this page before scaling the fleet."
@@ -559,43 +573,71 @@ export default function AgentDetail() {
{supportsGateway ? (
- setActiveTab("openclaw")} className="inline-flex items-center gap-2 px-3 py-2 bg-white border border-slate-200 rounded-xl text-sm font-bold text-slate-800 hover:bg-slate-50 transition-all">
+ setActiveTab("openclaw")}
+ className="inline-flex items-center gap-2 px-3 py-2 bg-white border border-slate-200 rounded-xl text-sm font-bold text-slate-800 hover:bg-slate-50 transition-all"
+ >
OpenClaw
) : (
- setActiveTab("hermes-webui")} className="inline-flex items-center gap-2 px-3 py-2 bg-white border border-slate-200 rounded-xl text-sm font-bold text-slate-800 hover:bg-slate-50 transition-all">
+ setActiveTab("hermes-webui")}
+ className="inline-flex items-center gap-2 px-3 py-2 bg-white border border-slate-200 rounded-xl text-sm font-bold text-slate-800 hover:bg-slate-50 transition-all"
+ >
Hermes WebUI
)}
- setActiveTab("logs")} className="inline-flex items-center gap-2 px-3 py-2 bg-white border border-slate-200 rounded-xl text-sm font-bold text-slate-800 hover:bg-slate-50 transition-all">
+ setActiveTab("logs")}
+ className="inline-flex items-center gap-2 px-3 py-2 bg-white border border-slate-200 rounded-xl text-sm font-bold text-slate-800 hover:bg-slate-50 transition-all"
+ >
Logs
- setActiveTab("files")} className="inline-flex items-center gap-2 px-3 py-2 bg-white border border-slate-200 rounded-xl text-sm font-bold text-slate-800 hover:bg-slate-50 transition-all">
+ setActiveTab("files")}
+ className="inline-flex items-center gap-2 px-3 py-2 bg-white border border-slate-200 rounded-xl text-sm font-bold text-slate-800 hover:bg-slate-50 transition-all"
+ >
Files
setActiveTab(
- agent.status === "running"
- ? "terminal"
- : supportsGateway
- ? "openclaw"
- : "logs"
+ agent.status === "running" ? "terminal" : supportsGateway ? "openclaw" : "logs",
)
}
className="inline-flex items-center gap-2 px-3 py-2 bg-white border border-slate-200 rounded-xl text-sm font-bold text-slate-800 hover:bg-slate-50 transition-all"
>
- {agent.status === "running" ? : supportsGateway ? : }
+ {agent.status === "running" ? (
+
+ ) : supportsGateway ? (
+
+ ) : (
+
+ )}
{agent.status === "running" ? "Terminal" : supportsGateway ? "Chat" : "Logs"}
{/* Tab Bar */}
+ {showRestartBanner ? (
+
+
+ ClawHub Install Complete
+
+
+ New skills were installed for this agent.
+
+
+ Restart your agent session to activate them in the next OpenClaw session.
+
+
+ ) : null}
+
{/* Tab Content */}
-
+
{activeTab === "overview" && (
}
- {activeTab === "files" && (
-
- )}
+ {activeTab === "files" &&
}
{/* Terminal — always mounted when agent is running, hidden via CSS when not active */}
{agent.status === "running" ? (
@@ -647,7 +689,8 @@ export default function AgentDetail() {
- Terminal available when agent is running
+ Terminal available when agent is{" "}
+ running
Agent is currently {agent.status}
@@ -665,15 +708,16 @@ export default function AgentDetail() {
visibility: activeTab === "logs" ? "visible" : "hidden",
}}
>
-
+
{activeTab === "openclaw" && supportsGateway && (
-
+
setShowRestartBanner(true)}
+ />
)}
{activeTab === "hermes-webui" && runtimeFamily === "hermes" && (
@@ -767,7 +811,8 @@ export default function AgentDetail() {
const CLONE_MODE_COPY = {
files_only: "Copies only the OpenClaw agent files.",
files_plus_memory: "Copies the agent files plus OpenClaw workspace and session memory.",
- full_clone: "Copies files, memory, and Nora wiring structure. Secrets are stripped and must be reconnected.",
+ full_clone:
+ "Copies files, memory, and Nora wiring structure. Secrets are stripped and must be reconnected.",
};
function DuplicateAgentDialog({
@@ -806,17 +851,20 @@ function DuplicateAgentDialog({
-
+
Duplicate Agent
- Create a new agent from {sourceName} . Wiring structure can be copied, but secrets stay disconnected.
+ Create a new agent from{" "}
+ {sourceName} . Wiring structure
+ can be copied, but secrets stay disconnected.
-
+
@@ -921,10 +969,15 @@ function RedeployAgentDialog({
Redeploy Agent
- Re-queue {agentName} and choose the runtime path it should use next.
+ Re-queue {agentName} and choose
+ the runtime path it should use next.
-
+
@@ -993,24 +1046,29 @@ function PublishMarketplaceDialog({
-
+
Publish to Marketplace
- Share {sourceName} as a community template. Nora publishes only the template files and runs a secret scan before submission.
+ Share {sourceName} as a
+ community template. Nora publishes only the template files and runs a secret scan
+ before submission.
-
+
{issues.length > 0 && (
-
Publish blocked
+
+ Publish blocked
+
{issues.map((issue, index) => (
@@ -1070,7 +1128,9 @@ function PublishMarketplaceDialog({
- Credentials, session memory, integrations, and channels are not published. If Nora detects `.env`, token-like values, or private keys, the submission is blocked until you remove them.
+ Credentials, session memory, integrations, and channels are not published. If Nora detects
+ `.env`, token-like values, or private keys, the submission is blocked until you remove
+ them.
diff --git a/frontend-dashboard/pages/clawhub/index.tsx b/frontend-dashboard/pages/clawhub/index.tsx
new file mode 100644
index 0000000..ff7881b
--- /dev/null
+++ b/frontend-dashboard/pages/clawhub/index.tsx
@@ -0,0 +1,435 @@
+import { useEffect, useMemo, useRef, useState } from "react";
+import { useRouter } from "next/router";
+import { Boxes, RefreshCw } from "lucide-react";
+import Layout from "../../components/layout/Layout";
+import { useToast } from "../../components/Toast";
+import { fetchWithAuth } from "../../lib/api";
+import {
+ clearDeployDraft,
+ DeployClawHubSkill,
+ DeployDraft,
+ loadDeployDraft,
+ normalizeDeployDraftResources,
+ saveDeployDraft,
+} from "../../lib/clawhubDeploy";
+import SkillDetailPanel, {
+ SkillDetail,
+ SkillDetailActionState,
+} from "../../components/agents/openclaw/SkillDetailPanel";
+import SkillGrid from "../../components/agents/openclaw/SkillGrid";
+import SkillSearchBar from "../../components/agents/openclaw/SkillSearchBar";
+import SkillSelectionTray from "../../components/agents/openclaw/SkillSelectionTray";
+import { SkillSummary } from "../../components/agents/openclaw/SkillCard";
+
+type SkillListResponse = {
+ skills?: SkillSummary[];
+ cursor?: string | null;
+ error?: string;
+ message?: string;
+};
+
+function buildSelectedSkill(detail: SkillDetail): DeployClawHubSkill {
+ return {
+ source: "clawhub",
+ installSlug: detail.slug,
+ author: detail.author || "",
+ pagePath: detail.pagePath || (detail.author ? `${detail.author}/${detail.slug}` : detail.slug),
+ installedAt: new Date().toISOString(),
+ name: detail.name,
+ description: detail.description,
+ };
+}
+
+export default function ClawHubDeployPage() {
+ const router = useRouter();
+ const toast = useToast();
+ const [draft, setDraft] = useState
(null);
+ const [query, setQuery] = useState("");
+ const [skills, setSkills] = useState([]);
+ const [loading, setLoading] = useState(true);
+ const [error, setError] = useState(null);
+ const [selectedSkill, setSelectedSkill] = useState(null);
+ const [selectedSkillDetail, setSelectedSkillDetail] = useState(null);
+ const [detailLoading, setDetailLoading] = useState(false);
+ const [detailError, setDetailError] = useState(null);
+ const [selectedSkills, setSelectedSkills] = useState([]);
+ const [selectionBusySlug, setSelectionBusySlug] = useState(null);
+ const [deploying, setDeploying] = useState(false);
+ const requestIdRef = useRef(0);
+ const detailCacheRef = useRef>({});
+
+ const showingDefaultBrowseEmptyState = !query.trim() && !loading && !error && skills.length === 0;
+ const selectedSkillKeys = useMemo(
+ () => new Set(selectedSkills.map((skill) => `${skill.author}:${skill.installSlug}`)),
+ [selectedSkills],
+ );
+ const selectedSkillSlugs = useMemo(
+ () => new Set(selectedSkills.map((skill) => skill.installSlug)),
+ [selectedSkills],
+ );
+ const selectedCurrentSkill = selectedSkillDetail
+ ? selectedSkillKeys.has(`${selectedSkillDetail.author || ""}:${selectedSkillDetail.slug}`)
+ : false;
+
+ useEffect(() => {
+ const nextDraft = loadDeployDraft();
+ if (!nextDraft) {
+ toast.error("Start from the deploy page before choosing ClawHub skills.");
+ router.replace("/deploy");
+ return;
+ }
+
+ setDraft(nextDraft);
+ setSelectedSkills(Array.isArray(nextDraft.clawhubSkills) ? nextDraft.clawhubSkills : []);
+ }, [router, toast]);
+
+ useEffect(() => {
+ if (!draft) return;
+ saveDeployDraft({
+ ...draft,
+ clawhubSkills: selectedSkills,
+ });
+ }, [draft, selectedSkills]);
+
+ async function loadBrowseResults() {
+ const requestId = ++requestIdRef.current;
+ setLoading(true);
+ setError(null);
+
+ try {
+ const res = await fetchWithAuth("/api/clawhub/skills");
+ const data: SkillListResponse = await res.json();
+ if (requestId !== requestIdRef.current) return;
+
+ if (!res.ok) {
+ throw new Error(
+ data.message || data.error || "Could not load skills. ClawHub may be unavailable.",
+ );
+ }
+
+ setSkills(Array.isArray(data.skills) ? data.skills : []);
+ } catch (err: any) {
+ if (requestId !== requestIdRef.current) return;
+ setSkills([]);
+ setError(err?.message || "Could not load skills. ClawHub may be unavailable.");
+ } finally {
+ if (requestId === requestIdRef.current) {
+ setLoading(false);
+ }
+ }
+ }
+
+ async function searchSkills() {
+ const trimmed = query.trim();
+ if (!trimmed) {
+ loadBrowseResults();
+ return;
+ }
+
+ const requestId = ++requestIdRef.current;
+ setLoading(true);
+ setError(null);
+
+ try {
+ const res = await fetchWithAuth(
+ `/api/clawhub/skills/search?q=${encodeURIComponent(trimmed)}`,
+ );
+ const data: SkillListResponse = await res.json();
+ if (requestId !== requestIdRef.current) return;
+
+ if (!res.ok) {
+ throw new Error(
+ data.message || data.error || "Could not load skills. ClawHub may be unavailable.",
+ );
+ }
+
+ setSkills(Array.isArray(data.skills) ? data.skills : []);
+ } catch (err: any) {
+ if (requestId !== requestIdRef.current) return;
+ setSkills([]);
+ setError(err?.message || "Could not load skills. ClawHub may be unavailable.");
+ } finally {
+ if (requestId === requestIdRef.current) {
+ setLoading(false);
+ }
+ }
+ }
+
+ async function fetchSkillDetail(skill: SkillSummary) {
+ const cached = detailCacheRef.current[skill.slug];
+ if (cached) {
+ return cached;
+ }
+
+ const res = await fetchWithAuth(`/api/clawhub/skills/${encodeURIComponent(skill.slug)}`);
+ const data = await res.json();
+
+ if (!res.ok) {
+ throw new Error(data.message || data.error || "Could not load skill details.");
+ }
+
+ detailCacheRef.current[skill.slug] = data;
+ return data as SkillDetail;
+ }
+
+ async function loadSkillDetail(skill: SkillSummary) {
+ setSelectedSkill(skill);
+ setSelectedSkillDetail(detailCacheRef.current[skill.slug] || null);
+ setDetailError(null);
+ setDetailLoading(true);
+
+ try {
+ const detail = await fetchSkillDetail(skill);
+ setSelectedSkillDetail(detail);
+ } catch (err: any) {
+ setDetailError(err?.message || "Could not load skill details.");
+ } finally {
+ setDetailLoading(false);
+ }
+ }
+
+ function addSelectedSkill(detail: SkillDetail) {
+ const nextSkill = buildSelectedSkill(detail);
+ const nextKey = `${nextSkill.author}:${nextSkill.installSlug}`;
+ setSelectedSkills((current) => {
+ if (current.some((skill) => `${skill.author}:${skill.installSlug}` === nextKey)) {
+ return current;
+ }
+ return [...current, nextSkill];
+ });
+ }
+
+ function removeSelectedSkill(skill: SkillSummary | DeployClawHubSkill | SkillDetail) {
+ const installSlug = "installSlug" in skill ? skill.installSlug : skill.slug;
+ const author = "author" in skill ? skill.author || "" : "";
+ setSelectedSkills((current) =>
+ current.filter((entry) => !(entry.installSlug === installSlug && entry.author === author)),
+ );
+ }
+
+ function clearSelectedSkills() {
+ setSelectedSkills([]);
+ }
+
+ async function toggleSkillSelection(skill: SkillSummary) {
+ const cached = detailCacheRef.current[skill.slug];
+ const cachedKey = `${cached?.author || ""}:${skill.slug}`;
+ if (cached && selectedSkillKeys.has(cachedKey)) {
+ removeSelectedSkill(cached);
+ return;
+ }
+
+ setSelectionBusySlug(skill.slug);
+ try {
+ const detail = cached || (await fetchSkillDetail(skill));
+ const detailKey = `${detail.author || ""}:${detail.slug}`;
+ if (selectedSkillKeys.has(detailKey)) {
+ removeSelectedSkill(detail);
+ } else {
+ addSelectedSkill(detail);
+ }
+ } catch (err: any) {
+ toast.error(err?.message || "Could not select that skill.");
+ } finally {
+ setSelectionBusySlug(null);
+ }
+ }
+
+ function handleQueryChange(value: string) {
+ setQuery(value);
+ if (!value.trim()) {
+ setSelectedSkill(null);
+ setSelectedSkillDetail(null);
+ setDetailError(null);
+ loadBrowseResults();
+ }
+ }
+
+ function handleClearSearch() {
+ setQuery("");
+ setSelectedSkill(null);
+ setSelectedSkillDetail(null);
+ setDetailError(null);
+ loadBrowseResults();
+ }
+
+ async function handleDeploy() {
+ if (!draft) return;
+
+ const normalizedResources = normalizeDeployDraftResources(draft);
+
+ setDeploying(true);
+ try {
+ const res = await fetchWithAuth("/api/agents/deploy", {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({
+ name: draft.name,
+ runtime_family: draft.runtimeFamily,
+ deploy_target: draft.deployTarget,
+ sandbox_profile: draft.sandboxProfile || "standard",
+ ...(draft.containerName.trim() ? { container_name: draft.containerName.trim() } : {}),
+ ...(draft.model ? { model: draft.model } : {}),
+ ...(draft.deploymentMode === "migrate" && draft.migrationDraft?.id
+ ? { migration_draft_id: draft.migrationDraft.id }
+ : {}),
+ ...(draft.vcpu ? { vcpu: normalizedResources.vcpu } : {}),
+ ...(draft.ramMb ? { ram_mb: normalizedResources.ramMb } : {}),
+ ...(draft.diskGb ? { disk_gb: normalizedResources.diskGb } : {}),
+ clawhub_skills: selectedSkills.map((skill) => ({
+ source: "clawhub",
+ installSlug: skill.installSlug,
+ author: skill.author,
+ pagePath: skill.pagePath,
+ installedAt: skill.installedAt,
+ })),
+ }),
+ });
+
+ if (res.ok) {
+ const data = await res.json();
+ clearDeployDraft();
+ window.location.href = data?.id ? `/app/agents/${data.id}` : "/app/agents";
+ return;
+ }
+
+ if (res.status === 402) {
+ toast.error("You've reached your plan's agent limit. Please upgrade.");
+ } else {
+ const data = await res.json().catch(() => ({}));
+ toast.error(data.error || "Deployment failed. Please try again.");
+ }
+ } catch (err) {
+ console.error(err);
+ toast.error("Network error during deployment.");
+ } finally {
+ setDeploying(false);
+ }
+ }
+
+ function handleBack() {
+ if (!draft) {
+ router.push("/deploy");
+ return;
+ }
+
+ saveDeployDraft({
+ ...draft,
+ clawhubSkills: selectedSkills,
+ });
+ router.push("/deploy");
+ }
+
+ useEffect(() => {
+ if (!draft) return;
+ loadBrowseResults();
+ }, [draft]);
+
+ const detailActionState: SkillDetailActionState | undefined = selectedSkillDetail
+ ? {
+ label: selectedCurrentSkill ? "Remove from selection" : "Add to selection",
+ disabled: Boolean(selectionBusySlug && selectionBusySlug !== selectedSkillDetail.slug),
+ loading: selectionBusySlug === selectedSkillDetail.slug,
+ onClick: () => {
+ if (selectedCurrentSkill) {
+ removeSelectedSkill(selectedSkillDetail);
+ return;
+ }
+ addSelectedSkill(selectedSkillDetail);
+ },
+ }
+ : undefined;
+
+ return (
+
+
+
+
+
+
+
+ ClawHub Selection
+
+
+ Choose skills for this new agent
+
+
+ Search ClawHub, inspect each skill’s README and requirements, and attach only the
+ skills you want saved on this agent at deploy time.
+
+
+
+
+
+ Refresh
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {
+ setSelectedSkill(null);
+ setSelectedSkillDetail(null);
+ setDetailError(null);
+ setDetailLoading(false);
+ }}
+ />
+
+
+
+
+ );
+}
diff --git a/frontend-dashboard/pages/deploy/index.tsx b/frontend-dashboard/pages/deploy/index.tsx
index 33eeedf..44f6fee 100644
--- a/frontend-dashboard/pages/deploy/index.tsx
+++ b/frontend-dashboard/pages/deploy/index.tsx
@@ -1,5 +1,6 @@
import Layout from "../../components/layout/Layout";
import { useState, useEffect, useMemo, useRef } from "react";
+import { useRouter } from "next/router";
import {
Rocket,
Server,
@@ -35,6 +36,11 @@ import {
visibleExecutionTargetsFromConfig,
visibleRuntimeFamiliesFromConfig,
} from "../../lib/runtime";
+import {
+ loadDeployDraft,
+ normalizeDeployDraftResources,
+ saveDeployDraft,
+} from "../../lib/clawhubDeploy";
function slugifyName(value) {
return value
@@ -62,7 +68,7 @@ function MaturityBadge({ maturityTier = "ga", maturityLabel = "GA" }) {
return (
{maturityLabel}
@@ -95,13 +101,16 @@ function formatDateTime(value) {
}
function formatMigrationTransportLabel(value) {
- const normalized = String(value || "").trim().toLowerCase();
+ const normalized = String(value || "")
+ .trim()
+ .toLowerCase();
if (normalized === "ssh") return "SSH";
if (normalized === "docker") return "Docker";
return "Bundle";
}
export default function Deploy() {
+ const router = useRouter();
const [name, setName] = useState("");
const [containerName, setContainerName] = useState("");
const [loading, setLoading] = useState(false);
@@ -116,19 +125,41 @@ export default function Deploy() {
const [migrationMethod, setMigrationMethod] = useState("upload");
const [migrationDraft, setMigrationDraft] = useState(null);
const [migrationBusyAction, setMigrationBusyAction] = useState("");
- const [migrationSource, setMigrationSource] = useState(() =>
- createEmptyMigrationSource()
- );
+ const [migrationSource, setMigrationSource] = useState(() => createEmptyMigrationSource());
const [platformConfig, setPlatformConfig] = useState(null);
const [viewerRole, setViewerRole] = useState("user");
const migrationUploadInputRef = useRef(null);
const [selVcpu, setSelVcpu] = useState(1);
const [selRam, setSelRam] = useState(1024);
const [selDisk, setSelDisk] = useState(10);
+ const deployDraftHydratedRef = useRef(false);
+ const deployDraftRef = useRef(null);
const resourceDefaultsInitializedRef = useRef(false);
const resourceSelectionDirtyRef = useRef(false);
const toast = useToast();
+ useEffect(() => {
+ if (deployDraftHydratedRef.current) return;
+ const draft = loadDeployDraft();
+ if (!draft) {
+ deployDraftHydratedRef.current = true;
+ return;
+ }
+
+ deployDraftRef.current = draft;
+ setName(draft.name || "");
+ setContainerName(draft.containerName || "");
+ setSelectedRuntimeFamily(draft.runtimeFamily || "");
+ setSelectedExecutionTarget(draft.deployTarget || "");
+ setSelectedSandboxProfile(draft.sandboxProfile || "");
+ setSelectedModel(draft.model || "");
+ setDeploymentMode(draft.deploymentMode || "blank");
+ setMigrationMethod(draft.migrationMethod || "upload");
+ setMigrationDraft(draft.migrationDraft || null);
+ setMigrationSource(draft.migrationSource || createEmptyMigrationSource());
+ deployDraftHydratedRef.current = true;
+ }, []);
+
useEffect(() => {
fetchWithAuth("/api/billing/subscription")
.then((r) => r.json())
@@ -159,48 +190,63 @@ export default function Deploy() {
};
useEffect(() => {
- if (
- !platformConfig?.deploymentDefaults ||
- resourceDefaultsInitializedRef.current ||
- resourceSelectionDirtyRef.current
- ) {
+ if (!platformConfig?.deploymentDefaults || resourceDefaultsInitializedRef.current) {
return;
}
- setSelVcpu(deploymentDefaults.vcpu);
- setSelRam(deploymentDefaults.ram_mb);
- setSelDisk(deploymentDefaults.disk_gb);
+ if (deployDraftRef.current) {
+ const normalizedResources = normalizeDeployDraftResources(deployDraftRef.current, {
+ defaultVcpu: deploymentDefaults.vcpu,
+ defaultRamMb: deploymentDefaults.ram_mb,
+ defaultDiskGb: deploymentDefaults.disk_gb,
+ maxVcpu: platformConfig?.selfhosted?.max_vcpu || 16,
+ maxRamMb: platformConfig?.selfhosted?.max_ram_mb || 32768,
+ maxDiskGb: platformConfig?.selfhosted?.max_disk_gb || 500,
+ });
+
+ setSelVcpu(normalizedResources.vcpu);
+ setSelRam(normalizedResources.ramMb);
+ setSelDisk(normalizedResources.diskGb);
+ resourceSelectionDirtyRef.current = true;
+ } else {
+ setSelVcpu(deploymentDefaults.vcpu);
+ setSelRam(deploymentDefaults.ram_mb);
+ setSelDisk(deploymentDefaults.disk_gb);
+ }
+
resourceDefaultsInitializedRef.current = true;
}, [deploymentDefaults, platformConfig?.deploymentDefaults]);
const isSelfHosted = platformConfig?.mode !== "paas";
const plan = sub?.plan || "free";
const planLabel = isSelfHosted ? "Self-hosted" : plan.charAt(0).toUpperCase() + plan.slice(1);
- const limit = isSelfHosted ? (platformConfig?.selfhosted?.max_agents || 50) : (sub?.agent_limit || 3);
+ const limit = isSelfHosted ? platformConfig?.selfhosted?.max_agents || 50 : sub?.agent_limit || 3;
const atLimit = agentCount >= limit;
const isAdmin = viewerRole === "admin";
const runtimeFamilyLocked =
deploymentMode === "migrate"
- ? String(migrationDraft?.runtimeFamily || "").trim().toLowerCase()
+ ? String(migrationDraft?.runtimeFamily || "")
+ .trim()
+ .toLowerCase()
: "";
const defaultRuntimeFamily = useMemo(
() => runtimeFamilyFromConfig(backendConfig),
- [backendConfig]
+ [backendConfig],
);
const activeRuntimeFamily = useMemo(
() => runtimeFamilyFromConfig(backendConfig, selectedRuntimeFamily),
- [backendConfig, selectedRuntimeFamily]
+ [backendConfig, selectedRuntimeFamily],
);
const visibleRuntimeFamilies = useMemo(
() => visibleRuntimeFamiliesFromConfig(backendConfig, viewerRole),
- [backendConfig, viewerRole]
+ [backendConfig, viewerRole],
);
const visibleExecutionTargets = useMemo(
() =>
visibleExecutionTargetsFromConfig(
backendConfig,
viewerRole,
- runtimeFamilyLocked || activeRuntimeFamily?.id || selectedRuntimeFamily
+ runtimeFamilyLocked || activeRuntimeFamily?.id || selectedRuntimeFamily,
),
[
backendConfig,
@@ -208,14 +254,14 @@ export default function Deploy() {
runtimeFamilyLocked,
activeRuntimeFamily?.id,
selectedRuntimeFamily,
- ]
+ ],
);
const activeExecutionTarget = useMemo(
() =>
activeExecutionTargetFromConfig(
backendConfig,
runtimeFamilyLocked || activeRuntimeFamily?.id || selectedRuntimeFamily,
- selectedExecutionTarget
+ selectedExecutionTarget,
),
[
backendConfig,
@@ -223,7 +269,7 @@ export default function Deploy() {
activeRuntimeFamily?.id,
selectedRuntimeFamily,
selectedExecutionTarget,
- ]
+ ],
);
const visibleSandboxOptions = useMemo(() => {
const sandboxProfiles = activeExecutionTarget?.sandboxProfiles || [];
@@ -236,18 +282,18 @@ export default function Deploy() {
const activeSandboxOption = useMemo(
() =>
(activeExecutionTarget?.sandboxProfiles || []).find(
- (profile) => profile.id === selectedSandboxProfile
+ (profile) => profile.id === selectedSandboxProfile,
) || null,
- [activeExecutionTarget, selectedSandboxProfile]
+ [activeExecutionTarget, selectedSandboxProfile],
);
const ramOptions = useMemo(() => {
const maxRam = platformConfig?.selfhosted?.max_ram_mb || 32768;
return Array.from(
new Set(
[selRam, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536].filter(
- (value) => value <= maxRam || value === selRam
- )
- )
+ (value) => value <= maxRam || value === selRam,
+ ),
+ ),
).sort((left, right) => left - right);
}, [platformConfig?.selfhosted?.max_ram_mb, selRam]);
const diskOptions = useMemo(() => {
@@ -255,9 +301,9 @@ export default function Deploy() {
return Array.from(
new Set(
[selDisk, 10, 20, 50, 100, 200, 500, 1000].filter(
- (value) => value <= maxDisk || value === selDisk
- )
- )
+ (value) => value <= maxDisk || value === selDisk,
+ ),
+ ),
).sort((left, right) => left - right);
}, [platformConfig?.selfhosted?.max_disk_gb, selDisk]);
const canDeployExecutionTarget = Boolean(activeSandboxOption?.available);
@@ -274,20 +320,11 @@ export default function Deploy() {
const suggestedContainerName = useMemo(() => {
const slug = slugifyName(name);
const prefix = containerNamePrefixForSelection({
- runtimeFamily:
- effectiveRuntimeFamily,
- sandboxProfile:
- selectedSandboxProfile ||
- activeSandboxOption?.id ||
- "standard",
+ runtimeFamily: effectiveRuntimeFamily,
+ sandboxProfile: selectedSandboxProfile || activeSandboxOption?.id || "standard",
});
return slug ? `${prefix}-${slug}` : `${prefix}-my-first-agent`;
- }, [
- activeSandboxOption?.id,
- effectiveRuntimeFamily,
- name,
- selectedSandboxProfile,
- ]);
+ }, [activeSandboxOption?.id, effectiveRuntimeFamily, name, selectedSandboxProfile]);
useEffect(() => {
if (!runtimeFamilyLocked) return;
@@ -307,7 +344,7 @@ export default function Deploy() {
const nextRuntimeFamily = pickRuntimeFamilySelection(
backendConfig,
viewerRole,
- selectedRuntimeFamily
+ selectedRuntimeFamily,
);
if (nextRuntimeFamily && nextRuntimeFamily !== selectedRuntimeFamily) {
setSelectedRuntimeFamily(nextRuntimeFamily);
@@ -320,7 +357,7 @@ export default function Deploy() {
backendConfig,
viewerRole,
selectedExecutionTarget,
- runtimeFamilyLocked || activeRuntimeFamily?.id || selectedRuntimeFamily
+ runtimeFamilyLocked || activeRuntimeFamily?.id || selectedRuntimeFamily,
);
if (nextTarget && nextTarget !== selectedExecutionTarget) {
setSelectedExecutionTarget(nextTarget);
@@ -336,28 +373,21 @@ export default function Deploy() {
useEffect(() => {
const candidateSandboxProfiles = isAdmin
- ? (activeExecutionTarget?.sandboxProfiles || []).filter(
- (profile) => profile.enabled
- )
+ ? (activeExecutionTarget?.sandboxProfiles || []).filter((profile) => profile.enabled)
: visibleSandboxOptions;
if (!candidateSandboxProfiles.length) return;
const current = candidateSandboxProfiles.find(
- (profile) => profile.id === selectedSandboxProfile
+ (profile) => profile.id === selectedSandboxProfile,
);
const nextSandboxProfile =
current ||
- candidateSandboxProfiles.find(
- (profile) => profile.available && profile.isDefault
- ) ||
+ candidateSandboxProfiles.find((profile) => profile.available && profile.isDefault) ||
candidateSandboxProfiles.find((profile) => profile.available) ||
candidateSandboxProfiles[0] ||
null;
- if (
- nextSandboxProfile &&
- nextSandboxProfile.id !== selectedSandboxProfile
- ) {
+ if (nextSandboxProfile && nextSandboxProfile.id !== selectedSandboxProfile) {
setSelectedSandboxProfile(nextSandboxProfile.id);
}
@@ -376,44 +406,45 @@ export default function Deploy() {
visibleSandboxOptions,
]);
- async function deploy() {
+ function goToClawHubSelection() {
if (atLimit) return;
if (deploymentMode === "migrate" && !migrationDraft?.id) {
toast.error("Prepare a migration draft before deploying.");
return;
}
- setLoading(true);
- try {
- const res = await fetchWithAuth("/api/agents/deploy", {
- method: "POST",
- headers: { "Content-Type": "application/json" },
- body: JSON.stringify({
- name,
- runtime_family: effectiveRuntimeFamily,
- deploy_target: selectedExecutionTarget,
- sandbox_profile: selectedSandboxProfile || "standard",
- ...(containerName.trim() ? { container_name: containerName.trim() } : {}),
- ...(isNemoClaw && selectedModel ? { model: selectedModel } : {}),
- ...(deploymentMode === "migrate" && migrationDraft?.id
- ? { migration_draft_id: migrationDraft.id }
- : {}),
- ...(isSelfHosted ? { vcpu: selVcpu, ram_mb: selRam, disk_gb: selDisk } : {}),
- }),
- });
- if (res.ok) {
- const data = await res.json();
- window.location.href = data?.id ? `/app/agents/${data.id}` : "/app/agents";
- } else if (res.status === 402) {
- toast.error("You've reached your plan's agent limit. Please upgrade.");
- } else {
- const data = await res.json().catch(() => ({}));
- toast.error(data.error || "Deployment failed. Please try again.");
- }
- } catch (err) {
- console.error(err);
- toast.error("Network error during deployment.");
- }
- setLoading(false);
+ const normalizedResources = normalizeDeployDraftResources(
+ {
+ vcpu: selVcpu,
+ ramMb: selRam,
+ diskGb: selDisk,
+ } as any,
+ {
+ defaultVcpu: deploymentDefaults.vcpu,
+ defaultRamMb: deploymentDefaults.ram_mb,
+ defaultDiskGb: deploymentDefaults.disk_gb,
+ maxVcpu: platformConfig?.selfhosted?.max_vcpu || 16,
+ maxRamMb: platformConfig?.selfhosted?.max_ram_mb || 32768,
+ maxDiskGb: platformConfig?.selfhosted?.max_disk_gb || 500,
+ },
+ );
+
+ saveDeployDraft({
+ name,
+ containerName,
+ runtimeFamily: effectiveRuntimeFamily,
+ deployTarget: selectedExecutionTarget,
+ sandboxProfile: selectedSandboxProfile || "standard",
+ model: isNemoClaw && selectedModel ? selectedModel : "",
+ deploymentMode,
+ migrationMethod,
+ migrationDraft,
+ migrationSource,
+ vcpu: isSelfHosted ? normalizedResources.vcpu : 0,
+ ramMb: isSelfHosted ? normalizedResources.ramMb : 0,
+ diskGb: isSelfHosted ? normalizedResources.diskGb : 0,
+ clawhubSkills: loadDeployDraft()?.clawhubSkills || [],
+ });
+ router.push("/clawhub");
}
async function uploadMigrationFile(file) {
@@ -454,7 +485,9 @@ export default function Deploy() {
}
async function inspectLiveMigrationSource() {
- const transport = String(migrationSource.transport || "").trim().toLowerCase();
+ const transport = String(migrationSource.transport || "")
+ .trim()
+ .toLowerCase();
const runtimeFamily = runtimeFamilyLocked || effectiveRuntimeFamily;
if (transport === "docker" && !migrationSource.container.trim()) {
@@ -536,10 +569,9 @@ export default function Deploy() {
setMigrationBusyAction("discard");
try {
- const res = await fetchWithAuth(
- `/api/agent-migrations/${migrationDraft.id}`,
- { method: "DELETE" }
- );
+ const res = await fetchWithAuth(`/api/agent-migrations/${migrationDraft.id}`, {
+ method: "DELETE",
+ });
const data = await res.json().catch(() => ({}));
if (!res.ok) {
throw new Error(data.error || "Failed to discard migration draft");
@@ -603,9 +635,7 @@ export default function Deploy() {
- {deploymentMode === "migrate"
- ? "Migrate Existing Agent"
- : "Deploy New Agent"}
+ {deploymentMode === "migrate" ? "Migrate Existing Agent" : "Deploy New Agent"}
{deploymentMode === "migrate"
@@ -618,7 +648,9 @@ export default function Deploy() {
-
Fast path to activation
+
+ Fast path to activation
+
{deploymentMode === "migrate"
? "This flow does not adopt the old runtime in place. Nora inspects the source, stores a migration draft, then recreates the workload as a Nora-managed agent so files, managed secrets, and runtime validation all land in one control surface."
@@ -629,16 +661,24 @@ export default function Deploy() {
-
+
- {atLimit ?
:
}
+ {atLimit ? (
+
+ ) : (
+
+ )}
{planLabel} Plan — {agentCount}/{limit} agents used
{atLimit
- ? (isSelfHosted ? "Contact your administrator to increase the limit." : "Upgrade your plan to deploy more agents.")
+ ? isSelfHosted
+ ? "Contact your administrator to increase the limit."
+ : "Upgrade your plan to deploy more agents."
: `${limit - agentCount} deployment slot${limit - agentCount !== 1 ? "s" : ""} remaining.`}
@@ -676,9 +716,9 @@ export default function Deploy() {
Start clean or recreate an existing runtime under Nora.
- Blank deploy provisions a fresh agent. Migrate existing inspects an
- OpenClaw or Hermes runtime, previews the import surface, then deploys
- a new Nora-managed agent from that draft.
+ Blank deploy provisions a fresh agent. Migrate existing inspects an OpenClaw
+ or Hermes runtime, previews the import surface, then deploys a new
+ Nora-managed agent from that draft.
{migrationDraft ? (
@@ -810,10 +850,10 @@ export default function Deploy() {
Import an existing Nora bundle or OpenClaw template snapshot.
- Upload Nora migration bundles, Nora legacy template JSON, or
- previous exports from another Nora control plane. Nora will
- parse the package, summarize the managed state, and keep the
- source runtime family aligned for deploy.
+ Upload Nora migration bundles, Nora legacy template JSON, or previous
+ exports from another Nora control plane. Nora will parse the package,
+ summarize the managed state, and keep the source runtime family aligned
+ for deploy.
- {activeRuntimeFamily?.label ||
- formatRuntimeFamilyLabel(effectiveRuntimeFamily)}
+ {activeRuntimeFamily?.label || formatRuntimeFamilyLabel(effectiveRuntimeFamily)}
{runtimeFamilyLocked
@@ -1155,9 +1194,7 @@ export default function Deploy() {
disabled={!isAvailable}
>
-
- {family.label}
-
+ {family.label}
{family.contractStatusLabel}
@@ -1176,7 +1213,10 @@ export default function Deploy() {
) : null}
- Container Name (optional)
+ Container Name{" "}
+
+ (optional)
+
-
Execution Target
-
2 ? "md:grid-cols-2" : "md:grid-cols-2"} gap-3`}>
+
+ Execution Target
+
+
2 ? "md:grid-cols-2" : "md:grid-cols-2"} gap-3`}
+ >
{visibleExecutionTargets.map((target) => {
const Icon = executionTargetIcon(target.id);
const isSelected = selectedExecutionTarget === target.id;
@@ -1215,18 +1259,14 @@ export default function Deploy() {
size={16}
className={!isAvailable ? "text-slate-400" : "text-blue-600"}
/>
-
- {target.label}
-
+ {target.label}
-
- {target.summary}
-
+
{target.summary}
{target.runtimeFamilyLabel || "OpenClaw"}
@@ -1242,7 +1282,9 @@ export default function Deploy() {
)}
{!isAvailable && target.issue ? (
-
{target.issue}
+
+ {target.issue}
+
) : null}
);
@@ -1259,7 +1301,9 @@ export default function Deploy() {
{showSandboxSelection && (
-
Sandbox
+
+ Sandbox
+
{visibleSandboxOptions.map((profile) => {
const Icon = sandboxIcon(profile.id);
@@ -1322,7 +1366,9 @@ export default function Deploy() {
{isNemoClaw && activeSandboxOption?.models?.length > 0 && (
-
Nemotron Model
+
+ Nemotron Model
+
{activeSandboxOption.models.map((model) => (
- {model.replace("nvidia/", "")}
+
+ {model.replace("nvidia/", "")}
+
))}
- Deny-by-default network
- Capability-restricted
+
+ Deny-by-default network
+
+
+ Capability-restricted
+
)}
@@ -1357,8 +1409,13 @@ export default function Deploy() {
}}
className="text-xl font-black text-slate-900 bg-transparent outline-none"
>
- {Array.from({ length: platformConfig?.selfhosted?.max_vcpu || 16 }, (_, i) => i + 1).map((v) => (
-
{v}
+ {Array.from(
+ { length: platformConfig?.selfhosted?.max_vcpu || 16 },
+ (_, i) => i + 1,
+ ).map((v) => (
+
+ {v}
+
))}
) : (
@@ -1425,7 +1482,7 @@ export default function Deploy() {
- {loading ? : }
+ {loading ? (
+
+ ) : (
+
+ )}
{atLimit
? "Agent Limit Reached"
: deploymentMode === "migrate" && !migrationDraft?.id
? "Prepare Migration Draft First"
- : !canDeployExecutionTarget
- ? "Selected Runtime Path Unavailable"
- : deploymentMode === "migrate"
- ? "Recreate Agent In Nora & Open Validation"
- : "Deploy Agent & Open Validation"}
+ : !canDeployExecutionTarget
+ ? "Selected Runtime Path Unavailable"
+ : deploymentMode === "migrate"
+ ? "Next: Choose Skills"
+ : "Next: Choose Skills"}
-
- {isNemoClaw ?
:
}
+
+ {isNemoClaw ? (
+
+ ) : (
+
+ )}
-
+
{deploymentMode === "migrate"
? "Destination Runtime Summary"
: "Runtime Path Summary"}
-
+
{activeSandboxOption?.detail ||
activeExecutionTarget?.detail ||
"Select an enabled execution target to see the runtime summary."}
-
- {(activeExecutionTarget?.runtimeFamilyLabel || activeRuntimeFamily?.label || defaultRuntimeFamily?.label || "OpenClaw") +
+
+ {(activeExecutionTarget?.runtimeFamilyLabel ||
+ activeRuntimeFamily?.label ||
+ defaultRuntimeFamily?.label ||
+ "OpenClaw") +
" runtime" +
" • " +
(activeExecutionTarget?.label || "Docker") +
@@ -1473,25 +1552,35 @@ export default function Deploy() {
((activeSandboxOption?.label || "Standard") + " sandbox")}
{isAdmin && activeExecutionTarget?.maturityTier === "blocked" ? (
- Blocked targets stay visible to admins for release awareness, but they remain disabled for onboarding and deployment.
+ Blocked targets stay visible to admins for release awareness, but they remain
+ disabled for onboarding and deployment.
) : null}
{deploymentMode === "migrate" && migrationDraft ? (
- Source draft: {migrationDraft?.source?.label || migrationDraft.name}
+ Source draft:{" "}
+
+ {migrationDraft?.source?.label || migrationDraft.name}
+
) : null}
-
What happens next
+
+ What happens next
+
@@ -1538,7 +1627,9 @@ export default function Deploy() {
-
Operator checklist
+
+ Operator checklist
+
{checklist.map((item) => (
@@ -1562,13 +1653,10 @@ function MigrationDraftPreview({ draft, busyAction, onDiscard }) {
Migration Preview
-
- No draft prepared yet.
-
+
No draft prepared yet.
- Upload a Nora export bundle or inspect a live Docker or SSH source to
- preview files, imported channels, provider keys, warnings, and the
- runtime family Nora will recreate.
+ Upload a Nora export bundle or inspect a live Docker or SSH source to preview files,
+ imported channels, provider keys, warnings, and the runtime family Nora will recreate.
);
@@ -1576,8 +1664,7 @@ function MigrationDraftPreview({ draft, busyAction, onDiscard }) {
const isHermesDraft = draft.runtimeFamily === "hermes";
const managedWiringCount =
- Number(draft.summary?.integrationCount || 0) +
- Number(draft.summary?.channelCount || 0);
+ Number(draft.summary?.integrationCount || 0) + Number(draft.summary?.channelCount || 0);
const sourceKindLabel =
draft?.source?.kind === "docker" || draft?.source?.kind === "ssh"
? "Live source"
@@ -1594,10 +1681,7 @@ function MigrationDraftPreview({ draft, busyAction, onDiscard }) {
},
{
label: "Hermes Channels",
- value:
- draft.hermes?.channels?.length ||
- draft.summary?.hermesChannelCount ||
- 0,
+ value: draft.hermes?.channels?.length || draft.summary?.hermesChannelCount || 0,
},
{
label: "LLM Providers",
@@ -1615,10 +1699,7 @@ function MigrationDraftPreview({ draft, busyAction, onDiscard }) {
},
{
label: "Session Memory",
- value:
- draft.openclaw?.memoryFileCount ||
- draft.summary?.memoryFileCount ||
- 0,
+ value: draft.openclaw?.memoryFileCount || draft.summary?.memoryFileCount || 0,
},
{
label: "LLM Providers",
@@ -1637,9 +1718,7 @@ function MigrationDraftPreview({ draft, busyAction, onDiscard }) {
Migration Preview
-
- {draft.name}
-
+
{draft.name}
{formatRuntimeFamilyLabel(draft.runtimeFamily)} from{" "}
@@ -1664,23 +1743,17 @@ function MigrationDraftPreview({ draft, busyAction, onDiscard }) {
-
- Source
-
+
Source
{formatMigrationTransportLabel(draft?.source?.transport)}
-
- {sourceKindLabel}
-
+
{sourceKindLabel}
Draft Expires
-
- {formatDateTime(draft.expiresAt)}
-
+
{formatDateTime(draft.expiresAt)}
Deploy attaches this draft to the new agent and clears the expiry.
@@ -1745,8 +1818,8 @@ function MigrationDraftPreview({ draft, busyAction, onDiscard }) {
!draft.managed?.channels?.length &&
!draft.managed?.agentSecretOverrides?.length ? (
- No Nora-managed records were detected in this source. Nora will
- still import files and any supported runtime state it can see.
+ No Nora-managed records were detected in this source. Nora will still import files
+ and any supported runtime state it can see.
) : null}
@@ -1793,9 +1866,9 @@ function MigrationDraftPreview({ draft, busyAction, onDiscard }) {
OpenClaw Import Surface
- Nora imports the agent files, workspace contents, session memory, and
- supported provider material from the source runtime. Deploy target and
- sandbox profile remain operator-controlled on this screen.
+ Nora imports the agent files, workspace contents, session memory, and supported
+ provider material from the source runtime. Deploy target and sandbox profile remain
+ operator-controlled on this screen.
)}
@@ -1810,11 +1883,7 @@ function MigrationDraftPreview({ draft, busyAction, onDiscard }) {
0
- ? "text-amber-600"
- : "text-emerald-600"
- }
+ className={(draft.warnings || []).length > 0 ? "text-amber-600" : "text-emerald-600"}
/>
Warnings
@@ -1822,9 +1891,7 @@ function MigrationDraftPreview({ draft, busyAction, onDiscard }) {
{draft.warnings.map((warning, index) => (
-
- {warning.path ? `${warning.path}: ` : ""}
-
+ {warning.path ? `${warning.path}: ` : ""}
{warning.message}
))}
diff --git a/plans/clawhub_integration/clawhub-integration-plan.md b/plans/clawhub_integration/clawhub-integration-plan.md
new file mode 100644
index 0000000..3cd624b
--- /dev/null
+++ b/plans/clawhub_integration/clawhub-integration-plan.md
@@ -0,0 +1,401 @@
+# ClawHub Integration Plan
+
+This plan breaks the ClawHub feature into small, testable phases. Each phase is designed so a backend worker can define the API contract first, then a frontend worker can build against it. Earlier phases unblock later ones. Phase 0 is required scaffolding.
+
+All file references use the current TypeScript extensions (`.ts` / `.tsx`). The full API contract and persistence shape are defined in `clawhub_integrations_manifest.md` in this directory — this plan references that document as the source of truth for shapes, error codes, and field names.
+
+---
+
+## Phase 0: Schema And Routing Scaffolding
+### Goal
+Create the minimal backend and frontend plumbing needed so later phases can add ClawHub behavior without blocking on missing tables, routes, or shared types.
+
+### Backend (Worker 1)
+Files to create/modify:
+- `backend-api/db_schema.sql`
+- `backend-api/server.ts`
+- `backend-api/redisQueue.ts`
+- `backend-api/package.json`
+- `backend-api/routes/clawhub.ts` — stub router only, no business logic
+
+Tasks:
+- Apply the DB migration: `ALTER TABLE agents ADD COLUMN clawhub_skills JSONB DEFAULT '[]';`
+- Register `/api/clawhub` in `server.ts` pointing at the stub router so the route exists and the app compiles
+- Add the `clawhubInstalls` BullMQ queue definition in `redisQueue.ts` alongside the existing `deployments` queue — queue plumbing only, no job handlers yet
+- Add the YAML frontmatter parser dependency to `backend-api/package.json`
+
+Do NOT touch:
+- ClawHub catalog fetch or parse logic
+- Install job worker behavior
+- Any agent ownership or runtime validation
+
+### Frontend (Worker 2)
+Files to create/modify:
+- `frontend-dashboard/pages/agents/[id].tsx` — add the `ClawHub` tab entry to the tab list; render a placeholder panel for now
+- `frontend-dashboard/components/agents/OpenClawTab.tsx` — add `ClawHub` to the subtab list
+- `frontend-dashboard/components/agents/openclaw/ClawHubTab.tsx` — stub component, renders a "Coming soon" placeholder
+
+Do NOT touch:
+- Search result rendering
+- Detail panels
+- Selection or install flows
+- Any install polling logic
+
+### Acceptance Criteria
+- [ ] DB: `agents.clawhub_skills JSONB DEFAULT '[]'` exists on the agents table and the column is queryable.
+- [ ] Backend: the `/api/clawhub` route is mounted and returns a non-404 response (even if empty).
+- [ ] Backend: the `clawhubInstalls` BullMQ queue is defined in `redisQueue.ts` without crashing the worker on startup.
+- [ ] Frontend: the agent detail page renders a visible `ClawHub` tab that loads without a runtime error.
+- [ ] End-to-end: the app starts with no missing-route or missing-column errors related to the ClawHub scaffolding.
+
+### ✅ Gate
+Do not proceed to Phase 1 until all acceptance criteria pass and both workers have reported completion.
+
+---
+
+## Phase 1: ClawHub Catalog Discovery
+### Goal
+Allow Nora to proxy ClawHub browse, search, and detail requests so the UI can render a real catalog without talking to ClawHub directly.
+
+### Backend (Worker 1)
+Files to create/modify:
+- `backend-api/clawhubClient.ts` — all fetch, SKILL.md download, frontmatter parse, and normalization logic
+- `backend-api/routes/clawhub.ts` — thin route handlers for browse, search, detail
+- `backend-api/__tests__/clawhub.test.ts`
+
+API contract (exact shapes — see manifest §Shared API Contract for full detail):
+
+`GET /api/clawhub/skills?limit=20&cursor=`
+- `200`
+ ```json
+ { "skills": [{ "slug": "github", "name": "GitHub", "description": "...", "downloads": 0, "stars": 0, "updatedAt": "2026-04-01T12:00:00Z" }], "cursor": null }
+ ```
+- `502`
+ ```json
+ { "error": "clawhub_unavailable", "message": "Could not reach ClawHub registry." }
+ ```
+
+`GET /api/clawhub/skills/search?q=&limit=20`
+- `200` — same shape as browse
+- `400`
+ ```json
+ { "error": "missing_query", "message": "q is required." }
+ ```
+- `502`
+ ```json
+ { "error": "clawhub_unavailable", "message": "Could not reach ClawHub registry." }
+ ```
+
+`GET /api/clawhub/skills/:slug`
+- `200`
+ ```json
+ {
+ "slug": "github",
+ "name": "GitHub",
+ "description": "...",
+ "downloads": 0,
+ "stars": 0,
+ "updatedAt": "2026-04-01T12:00:00Z",
+ "readme": "# GitHub Skill\n...",
+ "requirements": {
+ "bins": ["gh"],
+ "env": ["GITHUB_TOKEN"],
+ "config": [],
+ "install": [{ "kind": "node", "name": "@github/gh-cli" }]
+ }
+ }
+ ```
+ Note: `requirements` is `null` when SKILL.md has no `metadata.openclaw` block. When non-null, all four array fields are always present (may be empty). The `kind` field is the normalized form of `package` from SKILL.md frontmatter.
+- `404`
+ ```json
+ { "error": "skill_not_found", "message": "No skill found with slug: github" }
+ ```
+
+Do NOT touch:
+- Deploy-time persistence
+- Install or job polling routes
+- Frontend install flow
+
+### Frontend (Worker 2)
+Files to create/modify:
+- `frontend-dashboard/components/agents/openclaw/ClawHubTab.tsx` — browse/search state, coordinates child components
+- `frontend-dashboard/components/agents/openclaw/SkillSearchBar.tsx`
+- `frontend-dashboard/components/agents/openclaw/SkillGrid.tsx`
+- `frontend-dashboard/components/agents/openclaw/SkillCard.tsx`
+- `frontend-dashboard/components/agents/openclaw/SkillDetailPanel.tsx` — read-only detail view; renders readme and requirements; no install action yet
+
+Depends on:
+- `GET /api/clawhub/skills`
+- `GET /api/clawhub/skills/search`
+- `GET /api/clawhub/skills/:slug`
+
+Implementation notes:
+- `SkillDetailPanel` is created in this phase as a read-only panel — it shows readme, requirements blocks, and install state, but the install/add-to-selection action is a disabled stub until Phase 2/3
+- Null-check `requirements` before rendering requirement blocks (may be `null` per manifest)
+- Add markdown rendering dependency to `frontend-dashboard/package.json`
+
+Do NOT touch:
+- Install buttons (leave disabled/hidden)
+- Batch-selection persistence
+- Deploy flow navigation
+- Job polling or success banners
+
+### Acceptance Criteria
+- [ ] Backend: browse, search, and detail endpoints return normalized Nora response shapes and handle registry unavailability with `clawhub_unavailable`.
+- [ ] Backend: the detail endpoint parses SKILL.md frontmatter correctly, normalizes `package` → `kind`, and returns `requirements: null` for skills with no `metadata.openclaw` block.
+- [ ] Frontend: the ClawHub tab loads browse results on mount, submits search on Enter, clears back to browse results when the query is emptied, and shows loading/empty/error states.
+- [ ] Frontend: clicking a skill card opens `SkillDetailPanel` showing readme and parsed requirements; the install action is visible but disabled.
+- [ ] End-to-end: a user can open the ClawHub tab on a running agent and see real catalog data with skill details.
+
+### ✅ Gate
+Do not proceed to Phase 2 until all acceptance criteria pass and both workers have reported completion.
+
+---
+
+## Phase 2: Deploy-Time Skill Selection
+### Goal
+Let a user pick ClawHub skills during agent creation and persist those selections only when they click the deploy action.
+
+### Backend (Worker 1)
+Files to create/modify:
+- `backend-api/routes/agents.ts` — accept `clawhub_skills` in the deploy request body; persist to `agents.clawhub_skills` on INSERT; pass through `addDeploymentJob()` payload
+
+Deploy request body extension:
+```json
+{
+ "clawhub_skills": [
+ {
+ "source": "clawhub",
+ "installSlug": "github",
+ "author": "steipete",
+ "pagePath": "steipete/github",
+ "installedAt": "2026-04-19T00:00:00Z"
+ }
+ ]
+}
+```
+- `clawhub_skills` is optional; omitting it or passing `[]` is valid
+- Persist only the durable identifier fields (`source`, `installSlug`, `author`, `pagePath`, `installedAt`); do not persist catalog metadata (stars, downloads, description, readme)
+- The deploy response shape is unchanged — do not add new fields to the deploy response
+
+Do NOT touch:
+- Running-agent install routes
+- Job polling
+- Catalog parsing
+- Frontend install polling state
+
+### Frontend (Worker 2)
+Files to create/modify:
+- `frontend-dashboard/pages/deploy/index.tsx` — change primary button to "Next: Choose Skills"; navigate to the ClawHub selection page carrying agent name and infra context
+- `frontend-dashboard/pages/clawhub/index.tsx` — deploy-time skill selection page; catalog + search; bottom actions are only "Deploy Agent & Open Validation" and "Back"
+- `frontend-dashboard/components/agents/openclaw/SkillSelectionTray.tsx` — **deploy-flow mode only in this phase**: shows selected skill count and names; primary action is "Deploy Agent & Open Validation"; install CTA is a disabled stub
+- `frontend-dashboard/components/agents/openclaw/SkillDetailPanel.tsx` — enable the "Add to selection" action (not install) for the deploy flow context
+
+Depends on:
+- `GET /api/clawhub/skills`
+- `GET /api/clawhub/skills/search`
+- `GET /api/clawhub/skills/:slug`
+- Deploy route accepting `clawhub_skills` in the request body
+
+Do NOT touch:
+- Runtime install polling
+- Running-agent install actions in `SkillSelectionTray` (leave as disabled stub)
+- Running-agent success banners
+- Reconciliation logic
+
+### Acceptance Criteria
+- [ ] Backend: `POST /api/agents/deploy` with a `clawhub_skills` array persists those entries to `agents.clawhub_skills` using the durable identifier shape from the manifest.
+- [ ] Backend: deploying without `clawhub_skills` (or with `[]`) succeeds unchanged.
+- [ ] Frontend: the deploy page button routes to the ClawHub selection page with agent context carried forward.
+- [ ] Frontend: the user can browse/search/select skills and click "Deploy Agent & Open Validation" to deploy with skills attached.
+- [ ] Frontend: `SkillSelectionTray` shows selected skills and deploy CTA; install CTA is not yet active.
+- [ ] End-to-end: a newly deployed agent has the selected skills recorded in `agents.clawhub_skills` and visible in the DB.
+
+### ✅ Gate
+Do not proceed to Phase 3 until all acceptance criteria pass and both workers have reported completion.
+
+---
+
+## Phase 3: Running-Agent Install Jobs
+### Goal
+Allow a user to install one or more ClawHub skills on an already running agent and poll for completion.
+
+### Backend (Worker 1)
+Files to create/modify:
+- `backend-api/routes/clawhub.ts` — add agent-scoped routes: installed-skills read, install trigger, job polling
+- `backend-api/redisQueue.ts` — add enqueue and poll helpers for `clawhubInstalls` queue
+- `workers/provisioner/worker.ts` — add the ClawHub install job handler
+- `backend-api/middleware/ownership.ts` — extend the agent SELECT to include `backend_type`, `runtime_family`, `container_id`, `status` if not already present
+- `backend-api/__tests__/clawhub.test.ts`
+
+API contract:
+
+`GET /api/clawhub/agents/:agentId/skills`
+- `200`
+ ```json
+ { "skills": [{ "slug": "github", "version": "2.1.0" }] }
+ ```
+ Reads from the lockfile at `/root/.openclaw/workspace/.clawhub/lock.json` inside the container; lockfile shape: `{ "version": 1, "skills": { "github": { "version": "2.1.0", "installedAt": 1700000000000 } } }` — normalize to array by iterating keys.
+
+`POST /api/clawhub/agents/:agentId/skills/:slug/install`
+- `202`
+ ```json
+ { "jobId": "uuid", "agentId": "uuid", "slug": "github", "status": "pending" }
+ ```
+- `404` `{ "error": "agent_not_found" }`
+- `409` `{ "error": "container_not_running", "message": "Start the agent before installing skills." }`
+- `409` `{ "error": "unsupported_runtime", "message": "ClawHub installs are only available for Docker-backed OpenClaw agents." }`
+- `422` `{ "error": "npm_unavailable", "message": "The clawhub CLI could not be installed. Ensure Node.js is in your base image." }`
+
+ Validation: `backend_type` must be `"docker"`, `runtime_family` must be `"openclaw"`, `status` must be `"running"`.
+ Bootstrap: if `clawhub` CLI is missing, run `npm install -g clawhub` first; if `npm` is also missing, return 422.
+ Persistence: append to `agents.clawhub_skills` **only** after the job completes successfully.
+
+`GET /api/clawhub/jobs/:jobId`
+- `200`
+ ```json
+ { "jobId": "uuid", "agentId": "uuid", "slug": "github", "status": "pending|running|success|failed", "error": null, "completedAt": null }
+ ```
+ BullMQ state mapping: `waiting`/`delayed` → `pending`, `active` → `running`, `completed` → `success`, `failed` → `failed`.
+
+Worker install handler (in `workers/provisioner/worker.ts`):
+- Receives `agentId`, `slug`, and skill metadata from job payload
+- Re-fetches the agent row before execution to confirm container is still running
+- Runs `clawhub install --no-input` in the container via `runContainerCommand(agent, cmd, { timeout })`
+- Treats non-zero exit code as job failure; captures output as the error payload
+- On success: appends the saved entry shape to `agents.clawhub_skills`
+
+Do NOT touch:
+- Deploy-time selection writes
+- Catalog browse/search responses
+- Frontend deploy flow
+
+### Frontend (Worker 2)
+Files to create/modify:
+- `frontend-dashboard/components/agents/openclaw/ClawHubTab.tsx` — add install trigger logic and `jobStatuses` state
+- `frontend-dashboard/components/agents/openclaw/SkillDetailPanel.tsx` — enable install and retry actions for running-agent context
+- `frontend-dashboard/components/agents/openclaw/SkillSelectionTray.tsx` — enable "Install X Skills" CTA for running-agent context; show per-skill installing/success/failed states
+- `frontend-dashboard/components/Toast.tsx` — add install success and restart-session feedback
+- `frontend-dashboard/pages/agents/[id].tsx` — own `showRestartBanner` state at the page level; render restart banner outside any tab panel so it survives tab switches
+
+Polling behavior:
+- Call install endpoint once per selected slug; store returned job ids
+- Poll `GET /api/clawhub/jobs/:jobId` every 2 seconds per active job
+- On `success`: refresh `installedSlugs`, mark skill installed in local state, set `showRestartBanner = true`
+- On `failed`: surface error from job record, keep skill available for retry
+- Stop polling when panel closes or selection is cleared
+
+Depends on:
+- `GET /api/clawhub/agents/:agentId/skills`
+- `POST /api/clawhub/agents/:agentId/skills/:slug/install`
+- `GET /api/clawhub/jobs/:jobId`
+
+Do NOT touch:
+- Deploy-time selection page
+- Catalog browsing endpoints
+- Reconciliation on redeploy
+
+### Acceptance Criteria
+- [ ] Backend: install endpoint validates agent ownership, runtime type, and container status correctly, returning the right error codes per the manifest.
+- [ ] Backend: install jobs are enqueued; the worker runs `clawhub install --no-input` inside the container; job polling returns correct normalized statuses.
+- [ ] Backend: `agents.clawhub_skills` is updated only after a successful install; failed installs leave the row unchanged.
+- [ ] Frontend: a running agent can trigger install on one or more selected skills; each skill shows installing/success/failed state independently.
+- [ ] Frontend: on success, the restart-session banner appears on the agent detail page and persists when the user switches to another tab.
+- [ ] End-to-end: a selected skill is installed into the running container, the lockfile is updated, and the UI reflects the new installed state.
+
+### ✅ Gate
+Do not proceed to Phase 4 until all acceptance criteria pass and both workers have reported completion.
+
+---
+
+## Phase 4: Reconciliation On Deploy Or Recreate
+### Goal
+Ensure future deploys and redeploys reinstall only the missing saved skills from `agents.clawhub_skills` into the new container.
+
+### Backend (Worker 1)
+Files to create/modify:
+- `workers/provisioner/worker.ts` — add reconciliation helper called after `provisioner.create()` succeeds and the agent row is updated to `status = "running"` (after the block around lines 478–516)
+
+Reconciliation logic:
+1. Read `agents.clawhub_skills` for the agent
+2. Read the installed-skill lockfile from the container (`/root/.openclaw/workspace/.clawhub/lock.json`)
+3. Compute the set difference: saved `installSlug` values not present as lockfile keys
+4. For each missing skill: run `clawhub install --no-input` via `runContainerCommand(...)`
+5. Log success/failure per skill; do not fail the entire deploy if one skill fails to reconcile
+
+No new public API routes are required for this phase.
+
+Do NOT touch:
+- Browse/search catalog endpoints
+- Running-agent install API
+- Frontend selection UI
+
+### Frontend (Worker 2)
+Files to create/modify:
+- `frontend-dashboard/pages/agents/[id].tsx` — verify `showRestartBanner` state survives a full page navigation back to the agent after redeploy
+- `frontend-dashboard/components/agents/openclaw/ClawHubTab.tsx` — after a redeploy, re-fetch `GET /api/clawhub/agents/:agentId/skills` so the installed state refreshes correctly
+
+Depends on:
+- Persisted `agents.clawhub_skills` from Phase 2
+- `GET /api/clawhub/agents/:agentId/skills` from Phase 3
+
+Do NOT touch:
+- Catalog browse/search layout
+- Deploy-time selection page
+- Any new API routes
+
+### Acceptance Criteria
+- [ ] Backend: redeploying an agent triggers reconciliation; only skills missing from the container lockfile are installed.
+- [ ] Backend: skills already present in the lockfile are not reinstalled.
+- [ ] Backend: a single reconciliation failure does not abort the deploy or other reconciliation installs.
+- [ ] Frontend: after redeploy, the ClawHub tab reflects the reconciled installed state when the user opens the agent.
+- [ ] End-to-end: a redeployed agent comes back with the expected ClawHub skills present in the container lockfile.
+
+### ✅ Gate
+Do not proceed to Phase 5 until all acceptance criteria pass and both workers have reported completion.
+
+---
+
+## Phase 5: Tests And Cleanup
+### Goal
+Lock in the feature with complete test coverage so future engineers can maintain it safely.
+
+### Backend (Worker 1)
+Files to create/modify:
+- `backend-api/__tests__/clawhub.test.ts`
+- Any route or worker file needing cleanup from earlier phases
+
+Required test coverage (each behavior should have at least one test):
+- **Catalog**: browse returns normalized skill list; registry unavailability returns `clawhub_unavailable`
+- **Search**: valid query returns results; missing `q` returns `missing_query`; registry down returns `clawhub_unavailable`
+- **Detail**: slug found returns full shape including `requirements`; slug not found returns `skill_not_found`; skill with no `metadata.openclaw` block returns `requirements: null`
+- **SKILL.md parsing**: `package` field in frontmatter is normalized to `kind` in the response; all four requirement arrays default to `[]` when absent
+- **Installed skills read**: lockfile is read and normalized to `{ skills: [{ slug, version }] }`
+- **Install route**: non-owned agent returns 404; non-Docker agent returns `unsupported_runtime` 409; stopped container returns `container_not_running` 409; missing npm returns `npm_unavailable` 422; valid agent enqueues job and returns 202 with `pending` status
+- **Job polling**: BullMQ states map correctly to `pending`/`running`/`success`/`failed`
+- **Persistence**: `agents.clawhub_skills` is updated only on success; failed job leaves row unchanged
+- **Reconciliation**: diff logic installs only missing skills; already-installed skills are skipped
+
+Do NOT touch:
+- User-facing behavior
+- Schema semantics
+- Route names or API shapes
+
+### Frontend (Worker 2)
+Files to create/modify:
+- Any cleanup in `frontend-dashboard/components/agents/openclaw/*`
+- `frontend-dashboard/pages/clawhub/index.tsx` if any edge cases need polish
+
+Do NOT touch:
+- API shapes
+- Database schema
+- Worker job orchestration
+
+### Acceptance Criteria
+- [ ] Backend: all behaviors listed in the required test coverage above have passing tests.
+- [ ] Backend: no test imports or mocks bypass the Nora error/response shape conventions.
+- [ ] Frontend: no console errors or unhandled promise rejections in the ClawHub flows.
+- [ ] End-to-end: a new engineer can follow the manifest and this plan to understand the full feature without guessing at any step.
+
+### ✅ Gate
+Do not proceed past this phase until all acceptance criteria pass and both workers have reported completion.
diff --git a/plans/clawhub_integration/clawhub_integrations_manifest.md b/plans/clawhub_integration/clawhub_integrations_manifest.md
new file mode 100644
index 0000000..e0c66f0
--- /dev/null
+++ b/plans/clawhub_integration/clawhub_integrations_manifest.md
@@ -0,0 +1,663 @@
+# ClawHub Integration Manifest
+
+## Overview
+ClawHub integration in Nora has two user-facing flows and one shared backend state model.
+
+- For new agents, the user chooses ClawHub skills during the deploy flow, and Nora saves those selections on the agent record when the user clicks `Deploy Agent & Open Validation`.
+- For already running agents, the user opens a `ClawHub` tab inside the agent detail page, searches the catalog, inspects a skill, and installs one or more selected skills immediately.
+- In both flows, Nora stores the intended skill list in `agents.clawhub_skills`, while the running container remains the source of truth for what is actually installed right now.
+
+## Backend
+
+### Goals
+- Proxy all ClawHub discovery and install traffic through Nora so the frontend never talks to ClawHub directly.
+- Support the three backend responsibilities we need for v1:
+ - catalog discovery and detail lookup
+ - deploy-time persistence of selected skills onto the agent record
+ - runtime install, job polling, and reconciliation against running containers
+- Reuse Nora's existing ownership, container exec, and BullMQ patterns instead of introducing a new persistence or job system.
+
+### Existing Nora Patterns To Reuse
+- Express routers under `backend-api/routes`
+- Route mounting in `backend-api/server.js`
+- Ownership checks via `requireOwnedAgent(...)`
+- Container exec through `runContainerCommand(...)` in `backend-api/authSync.js`
+- Async jobs through BullMQ in `backend-api/redisQueue.js` and `workers/provisioner/worker.js`
+
+### Runtime And Container Facts
+- Canonical OpenClaw workspace path: `/root/.openclaw/workspace`
+- Legacy mirrored agent path: `/root/.openclaw/agents/main/agent`
+- V1 installed-skills lockfile path: `/root/.openclaw/workspace/.clawhub/lock.json`
+- Existing Docker-backed OpenClaw agents already support command execution through Nora's backend abstractions
+
+### Locked Backend Decisions
+- Add a dedicated `clawhub_skills` column to the `agents` table instead of storing ClawHub selections inside `template_payload`
+- Persist only successful ClawHub installs in `agents.clawhub_skills`
+- Failed installs are never saved to the `agents` row
+- Existing running agents support immediate install attempts from the Nora UI
+- New deploys and later container recreations reconcile from `agents.clawhub_skills`
+- Reconciliation installs only saved skills that are missing from the container; it does not blindly reinstall all saved skills
+- Nora's deployment worker owns reconciliation, while the actual install commands run inside the container
+
+### Agent Skill Persistence Model
+- `agents.clawhub_skills` is the durable source of truth for which ClawHub skills an agent should keep across future deploys/recreates
+- The column should store only the minimum durable identifiers needed to reapply a skill and link it back to the catalog
+- Recommended saved entry shape:
+```json
+{
+ "source": "clawhub",
+ "installSlug": "sonoscli",
+ "author": "steipete",
+ "pagePath": "steipete/sonoscli",
+ "installedAt": "2026-04-17T15:48:45Z"
+}
+```
+- Do not persist volatile catalog metadata like stars, downloads, description, readme, or parsed requirements on the `agents` row
+
+### Backend Feature Areas
+
+#### 1. ClawHub Data Access
+This layer is responsible for everything Nora needs to show the ClawHub catalog in the UI without exposing the frontend to ClawHub directly.
+
+Responsibilities:
+- Discover the registry base URL through `GET https://clawhub.ai/.well-known/clawhub.json`
+- Fetch the browse list, search results, and detail payloads from ClawHub
+- Fetch the raw `SKILL.md` file for a selected skill
+- Parse `SKILL.md` frontmatter and extract:
+ - `metadata.openclaw.requires`
+ - `metadata.openclaw.install`
+- Return `requirements: null` when no `metadata.openclaw` block exists
+- Normalize all upstream responses into a stable Nora shape that the frontend can render consistently
+
+Routes powered by this layer:
+- `GET /api/clawhub/skills`
+ Browse default skills with Nora-owned pagination shape
+- `GET /api/clawhub/skills/search`
+ Search skills by query with Nora-owned validation and error responses
+- `GET /api/clawhub/skills/:slug`
+ Return normalized skill metadata plus raw `SKILL.md` content and parsed requirements
+
+Implementation touchpoints:
+- Modify `backend-api/server.js` to mount the ClawHub route
+- Modify `backend-api/package.json` to add a frontmatter/YAML parser dependency
+- Create `backend-api/routes/clawhub.js`
+- Create `backend-api/clawhubClient.js`
+- Create `backend-api/__tests__/clawhub.test.js`
+
+Primary implementation focus:
+- `backend-api/clawhubClient.js`
+- read-only route handlers in `backend-api/routes/clawhub.js`
+- route registration in `backend-api/server.js`
+- shared JSON/error handling conventions in `backend-api/routes/marketplace.js`
+- request/response wrapper patterns in `backend-api/routes/integrations.js`
+
+Implementation details:
+- Create a small client module that knows how to:
+ - discover the registry base URL from `/.well-known/clawhub.json`
+ - call browse/search/detail endpoints
+ - fetch raw `SKILL.md`
+ - parse frontmatter and return a normalized skill object
+- Keep the route handlers thin:
+ - validate query params
+ - call the client
+ - translate client/network failures into Nora errors
+ - always return the Nora response shape expected by the frontend
+- Follow the same flat `res.status(...).json({ error, message })` style already used in `backend-api/routes/marketplace.js` and `backend-api/routes/integrations.js`
+- Add any ClawHub-specific helpers in `backend-api/clawhubClient.js` rather than embedding fetch/parse logic directly in the route file
+
+#### 2. Install Preparation And Download Orchestration
+This layer is responsible for deciding whether Nora can install a skill for a specific agent, when to save the selected skill list, and how to prepare the running container before the actual install command runs.
+
+Responsibilities:
+- Confirm the agent exists, belongs to the current user, and is a Docker-backed OpenClaw agent
+- Confirm the target container is currently running before attempting an install
+- Distinguish between two cases:
+ - an existing running agent, where Nora should install immediately
+ - a new deploy or redeploy, where Nora should only save the desired skills and reconcile them later
+- Read the currently installed skills from `/root/.openclaw/workspace/.clawhub/lock.json`
+- Check whether the `clawhub` CLI exists in the container
+- If `clawhub` is missing, bootstrap it with `npm install -g clawhub`
+- If `npm` is also missing, return `422`
+- Enqueue install work and return a pollable job id instead of blocking the request
+- Persist a skill into `agents.clawhub_skills` only after the install succeeds
+- Surface normalized job status values to the frontend: `pending`, `running`, `success`, and `failed`
+
+Deployment-time persistence responsibilities:
+- Accept a selected-skill list during agent creation or redeploy flows
+- Store the selected skills on `agents.clawhub_skills` as the desired state for that agent
+- Keep that write path separate from the runtime install job so creation does not depend on the container already existing
+- Reuse the same minimum durable identifier shape used by running-agent installs
+
+Persistence semantics:
+- For a running agent:
+ - attempt the install first
+ - only append to `agents.clawhub_skills` after success
+- For a new deploy:
+ - skills selected during agent creation can be written to `agents.clawhub_skills` as deploy-time desired state
+ - later deployment reconciliation installs them into the container
+- Failed installs never create or append saved skill entries
+
+Routes powered by this layer:
+- `GET /api/clawhub/agents/:agentId/skills`
+ Read only the installed skills from the lockfile inside the agent container
+- `POST /api/clawhub/agents/:agentId/skills/:slug/install`
+ Validate the agent, bootstrap `clawhub` if needed, enqueue install work, and return a pollable job ID
+- `GET /api/clawhub/jobs/:jobId`
+ Return Nora-owned async install status
+- Agent creation / deploy routes in `backend-api/routes/agents.js` or the existing deploy flow route
+ Persist the selected skills onto `agents.clawhub_skills` when the user clicks `Deploy Agent & Open Validation`
+
+Implementation touchpoints:
+- Modify `backend-api/routes/agents.js` or the existing deploy flow route to persist selected skills on deploy
+- Modify `backend-api/redisQueue.js` to add the ClawHub install queue plumbing
+- Modify `backend-api/middleware/ownership.js` or reuse its lookup pattern for agent scoping
+- Modify `backend-api/routes/clawhub.js` to expose installed-skill reads, installs, and polling
+- Modify `workers/provisioner/worker.js` to execute installs and reconciliation
+- Extend `backend-api/routes/clawhub.js` (created in Feature Area 1)
+- Extend `backend-api/clawhubClient.js` (created in Feature Area 1)
+- Extend `backend-api/__tests__/clawhub.test.js` (created in Feature Area 1)
+
+Primary implementation focus:
+- agent-aware route handlers in `backend-api/routes/clawhub.js`
+- queue definitions in `backend-api/redisQueue.js`
+- existing agent ownership checks in `backend-api/middleware/ownership.js`
+- agent lifecycle patterns in `backend-api/routes/agents.js`
+- runtime/service lookup patterns in `backend-api/routes/integrations.js`
+- agent creation and deploy persistence in `backend-api/routes/agents.js` and `backend-api/routes/marketplace.js`
+
+Implementation details:
+- In `backend-api/routes/clawhub.js`, implement agent lookup and validation using the same style as `backend-api/routes/agents.js` and `backend-api/routes/integrations.js`
+- For running-agent installs:
+ - load the owned agent row
+ - confirm `backend_type`, `runtime_family`, `deploy_target`, `container_id`, and `status`
+ - reject non-Docker or non-OpenClaw agents early with a clear 409 response
+- For installed-skill reads:
+ - inspect the running container
+ - read `/root/.openclaw/workspace/.clawhub/lock.json`
+ - normalize the result into `{ skills: [{ slug, version }] }`
+- For install requests:
+ - enqueue the job instead of executing directly in the request
+ - return a job identifier immediately
+ - only persist to `agents.clawhub_skills` after the job completes successfully
+- Mirror the agent ownership lookup shape already used by `requireOwnedAgent(...)`, but extend it with the extra columns needed for container/runtime checks
+
+#### 3. Container Injection And Runtime Execution
+This layer is responsible for the side-effectful work that happens after the API accepts an install request.
+
+Responsibilities:
+- Execute `clawhub install --no-input` from the OpenClaw workspace context
+- Ensure the install runs against `/root/.openclaw/workspace`
+- Let the `clawhub` CLI download the skill directly inside the container workspace rather than downloading artifacts onto the Nora host
+- Capture command output and map failures into job error payloads
+- Re-read `.clawhub/lock.json` after install if needed to confirm the resulting installed state
+- Mark the async job as `success` or `failed` for frontend polling
+- Treat session restart as a post-install activation requirement, not part of the install itself
+
+Reconciliation semantics:
+- For deploy/start/recreate flows, the worker reads `agents.clawhub_skills`
+- It compares the saved entries against the container's installed-skill state
+- It installs only the saved skills that are missing
+- It does not reinstall saved skills that are already present
+
+Primary implementation focus:
+- BullMQ worker path in `workers/provisioner/worker.js`
+- existing container command execution path via `runContainerCommand(...)`
+- worker/deployment flow in `workers/provisioner/worker.js`
+- container bootstrap and workspace layout in `agent-runtime/lib/runtimeBootstrap.js`
+- existing Docker exec/install helpers in `workers/provisioner/backends/docker.js`
+- agent runtime conventions in `agent-runtime/lib/server.js`
+
+Implementation touchpoints:
+- Modify `workers/provisioner/worker.js` to run install and reconciliation jobs
+- Modify `backend-api/redisQueue.js` to enqueue and poll ClawHub install jobs
+- Modify `workers/provisioner/backends/docker.js` only if the existing exec helper cannot express the install flow
+- Modify `agent-runtime/lib/runtimeBootstrap.js` only if workspace layout details need to be surfaced more explicitly
+- Modify `agent-runtime/lib/server.js` only if runtime conventions need to expose install state more directly
+
+Implementation details:
+- Add a worker-side install handler that:
+ - receives the agent id, slug, and the skill metadata the route stored in the job payload
+ - resolves the current agent row again before execution
+ - verifies the container is still present and running
+ - runs `clawhub install --no-input` inside the container
+ - treats a non-zero exit as a job failure and captures the error text
+- Add a reconciliation helper for startup/redeploy flows that:
+ - reads `agents.clawhub_skills`
+ - reads the installed skill lockfile from the container
+ - computes the set difference of saved vs installed skills
+ - installs only the missing entries
+- Keep install/reconciliation logic in the worker rather than the route so requests stay fast and the job can be polled
+- Use `runContainerCommand(...)` if the implementation can reuse the existing shell/exec wrapper; otherwise add the smallest new helper that still follows the same error/timeout conventions
+
+#### Async Job Model
+- Use BullMQ for v1 instead of adding a new SQL job table
+- Provide a Nora-normalized polling endpoint:
+ - `pending`
+ - `running`
+ - `success`
+ - `failed`
+- Map BullMQ states to that simplified API contract
+
+#### Backend Error Model
+- `clawhub_unavailable`
+- `missing_query`
+- `skill_not_found`
+- `agent_not_found`
+- `container_not_running`
+- `unsupported_runtime`
+- `npm_unavailable`
+
+## Frontend
+
+### Goals
+- Support two operator flows:
+ - a deploy-time ClawHub selection step for new agents
+ - an existing-agent `ClawHub` tab for browsing and installing skills on a running agent
+- Keep the UI agent-scoped so it is always clear which agent receives the skill
+- Let operators search, inspect, multi-select, batch install, and then see restart guidance after a successful install
+
+### Existing Nora Patterns To Reuse
+- Agent detail page at `frontend-dashboard/pages/agents/[id].js`
+- OpenClaw subtab composition in `frontend-dashboard/components/agents/OpenClawTab.js`
+- Local component state with `useState` / `useEffect`
+- API access through `fetchWithAuth`
+- Toast feedback through `useToast`
+
+### Frontend Feature Areas
+
+#### 0. Agent Creation Skill Selection Page
+This is the page that appears after the user fills in agent name and infrastructure specs and clicks `Next: Choose Skills` from the deploy flow.
+It lets the user decide which ClawHub skills should be attached to the new agent before the agent is actually deployed.
+
+Responsibilities:
+- Act as the continuation of the agent-initiation flow
+- Show the ClawHub catalog before the agent is deployed
+- Let the user select one or more skills to save onto the new agent
+- Return the user to the deploy/validation action from this page when ready
+
+Primary implementation focus:
+- `frontend-dashboard/pages/deploy/index.js`
+- `frontend-dashboard/pages/clawhub/index.js` or the chosen routed equivalent
+- `frontend-dashboard/components/agents/openclaw/ClawHubTab.js`
+- `frontend-dashboard/components/agents/openclaw/SkillDetailPanel.js`
+
+Implementation details:
+- Change the deploy page primary button to navigate into the ClawHub selection page instead of immediately deploying
+- Carry forward the new agent's name and infrastructure context into the ClawHub page
+- On the ClawHub page, show the catalog, let the user search and select skills, and keep the bottom actions to only `Deploy Agent & Open Validation` and `Back`
+- Persist the selected skills when the user clicks `Deploy Agent & Open Validation`
+- Pass the selected skills back into the deploy request so the backend can save them on `agents.clawhub_skills`
+
+Implementation touchpoints:
+- Modify `frontend-dashboard/pages/deploy/index.js` to route into the ClawHub selection page
+- Modify `frontend-dashboard/pages/agents/[id].js` only if deploy flow context needs to be preserved across navigation
+- Create `frontend-dashboard/pages/clawhub/index.js` or the chosen routed equivalent
+- Create `frontend-dashboard/components/agents/openclaw/ClawHubTab.js`
+- Create `frontend-dashboard/components/agents/openclaw/SkillDetailPanel.js`
+- Create `frontend-dashboard/components/agents/openclaw/SkillSearchBar.js`
+- Create `frontend-dashboard/components/agents/openclaw/SkillGrid.js`
+- Create `frontend-dashboard/components/agents/openclaw/SkillCard.js`
+- Create `frontend-dashboard/components/agents/openclaw/SkillSelectionTray.js`
+
+Page layout decisions:
+- Use a card grid similar to the rest of Nora rather than a wizard layout
+- Submit search only when the user presses `Enter`
+- Present selected skills in a sticky summary panel so the user can always see what will be deployed
+- Let users select skills directly from cards and also from the detail panel
+- Keep the overall page feeling like a normal Nora operator page, not a marketing marketplace clone
+
+#### 1. Existing-Agent ClawHub Tab
+This is the top-level UI surface for browsing and installing skills on an already running agent.
+It should feel like part of the agent detail page, not a separate marketplace site.
+
+Responsibilities:
+- Add a visible `ClawHub` tab on the agent detail page
+- Pass the current `agentId` into the skills experience
+- Keep the browse experience scoped to the currently viewed agent
+- Preserve agent-level post-install messaging outside the panel so it survives subtab changes
+- Support selecting multiple skills before install
+- Allow batch install from the detail popup or selected-card tray
+
+Primary implementation focus:
+- `frontend-dashboard/pages/agents/[id].js`
+- `frontend-dashboard/components/agents/OpenClawTab.js`
+- `frontend-dashboard/components/agents/openclaw/ClawHubTab.js`
+- `frontend-dashboard/components/agents/openclaw/SkillSelectionTray.js`
+
+Implementation details:
+- Add a new `ClawHub` subtab to the agent detail tab navigation
+- Mount a dedicated ClawHub panel from that tab
+- Pass down the agent id and any installed-skill state the panel needs
+- Keep the restart banner at the agent detail page level, not buried inside the browser panel
+- Let users select skills from the grid and from the detail popup
+- Show the current selection count and a clear install action for multiple selected skills
+- Keep install actions scoped to the selected agent only
+
+Implementation touchpoints:
+- Modify `frontend-dashboard/pages/agents/[id].js` to own the tab state and restart banner state
+- Modify `frontend-dashboard/components/agents/OpenClawTab.js` to add the `ClawHub` tab
+- Create `frontend-dashboard/components/agents/openclaw/ClawHubTab.js`
+- Create `frontend-dashboard/components/agents/openclaw/SkillSelectionTray.js`
+
+#### 2. Skill Discovery And Search
+This part of the UI lets the user search the ClawHub catalog and browse popular skills.
+It is shared by both the deploy-time selection page and the existing-agent `ClawHub` tab.
+
+Responsibilities:
+- Load default browse results on mount
+- Let the user search ClawHub skills
+- Show loading, empty, and unavailable states
+- Mark already-installed skills in the results
+
+Primary implementation focus:
+- `frontend-dashboard/components/agents/openclaw/ClawHubTab.js`
+- `frontend-dashboard/components/agents/openclaw/SkillSearchBar.js`
+- `frontend-dashboard/components/agents/openclaw/SkillGrid.js`
+- `frontend-dashboard/components/agents/openclaw/SkillCard.js`
+
+Implementation details:
+- Keep the search input controlled in React state
+- Submit search only when the user presses `Enter`
+- Reset to browse results when the query is cleared
+- Render cards using the Nora response shape from `/api/clawhub/skills` and `/api/clawhub/skills/search`
+- Show a clear empty-state message when search returns no matches
+- Show a clear error state when the registry is unavailable
+- Annotate cards as already installed by comparing returned slugs against the agent's installed-skill state
+
+Implementation touchpoints:
+- Create `frontend-dashboard/components/agents/openclaw/SkillSearchBar.js`
+- Create `frontend-dashboard/components/agents/openclaw/SkillGrid.js`
+- Create `frontend-dashboard/components/agents/openclaw/SkillCard.js`
+- Modify `frontend-dashboard/components/agents/openclaw/ClawHubTab.js` to coordinate browse/search state
+
+#### 3. Skill Detail And Requirements
+This is the part of the UI that shows one skill's full details and the install requirements extracted from `SKILL.md`.
+Users should be able to inspect a skill before deciding whether to add it to the current batch selection or install it immediately on a running agent.
+
+Responsibilities:
+- Open a detail panel or modal for a selected skill
+- Render the returned `readme`
+- Show parsed requirement details
+- Show install state for the selected skill
+- Allow the current selection to be added to the batch install set from inside the panel
+
+Primary implementation focus:
+- `frontend-dashboard/components/agents/openclaw/SkillDetailPanel.js`
+- `frontend-dashboard/components/agents/openclaw/ClawHubTab.js`
+
+Implementation details:
+- Fetch full skill detail when the user selects a card
+- Render markdown for `readme` in a readable, scrollable panel
+- Present the parsed requirements in separate blocks:
+ - required binaries
+ - required environment variables
+ - config entries if present
+ - install method if present
+- Keep the detail panel aware of whether the skill is already installed on the current agent
+- Disable the install action if the skill is already present
+
+Implementation touchpoints:
+- Create `frontend-dashboard/components/agents/openclaw/SkillDetailPanel.js`
+- Modify `frontend-dashboard/components/agents/openclaw/ClawHubTab.js` to open and coordinate the detail panel
+- Modify `frontend-dashboard/package.json` to add markdown rendering support
+
+#### 4. Batch Install And Polling UX
+This is the interaction loop for starting install jobs on an already running agent and waiting for the backend to report success or failure.
+Because the install happens inside the running container, the UI should show progress, success, or failure per selected skill.
+
+Responsibilities:
+- Trigger install through Nora backend only
+- Queue one job per selected skill
+- Poll each job status every 2 seconds
+- Update installed state only after success
+- Show retry affordance for failed items
+- Show success/failure feedback with a clear next action
+
+Primary implementation focus:
+- `frontend-dashboard/components/agents/openclaw/ClawHubTab.js`
+- `frontend-dashboard/components/agents/openclaw/SkillDetailPanel.js`
+- `frontend-dashboard/components/agents/openclaw/SkillSelectionTray.js`
+- `frontend-dashboard/components/Toast.js`
+
+Implementation details:
+- When the user clicks install on a running agent:
+ - call the Nora install endpoint once for each selected skill
+ - store the returned job ids
+ - switch the selected skills into an installing state
+- Poll each job endpoint until it returns `success` or `failed`
+- On success:
+ - refresh the installed-skill list
+ - mark the skill as installed in local state
+ - show the restart-session toast
+- On failure:
+ - surface the error from the job record
+ - keep the skill available for retry
+- Keep polling bounded to the active selection so it stops when the user changes skills, closes the panel, or clears the batch selection
+
+Implementation touchpoints:
+- Modify `frontend-dashboard/components/agents/openclaw/ClawHubTab.js` to trigger installs and track job ids
+- Modify `frontend-dashboard/components/agents/openclaw/SkillDetailPanel.js` to expose install and retry actions
+- Create `frontend-dashboard/components/agents/openclaw/SkillSelectionTray.js`
+- Modify `frontend-dashboard/components/Toast.js` to show install and restart feedback
+- Modify `frontend-dashboard/pages/agents/[id].js` to keep the banner state alive across tab changes
+
+#### 5. Post-Install Banner State
+This is the agent-level reminder that a successful install still needs a session restart before OpenClaw picks it up.
+The reminder should stay visible on the agent page even if the user switches tabs inside that page.
+
+Responsibilities:
+- Show a persistent agent-level banner reminding the operator to restart the session
+- Keep that banner visible on the agent detail page after a successful install
+- Make the banner survive subtab switches and modal closes
+
+Primary implementation focus:
+- `frontend-dashboard/pages/agents/[id].js`
+- `frontend-dashboard/components/Toast.js`
+
+Implementation details:
+- Update the agent detail page state after a successful install so it can render a persistent reminder
+- Phrase the banner as an action reminder rather than an error state
+- Keep the banner count/wording simple enough for future extension if multiple skills are added at once
+
+### Frontend State Model
+- `query`
+- `skills`
+- `loading`
+- `error`
+- `selectedSkill`
+- `installedSlugs`
+- install job status for the active skill on running-agent installs
+- page-level restart-session banner state
+
+## Shared API Contract Between Frontend And Backend
+
+### Contract Principle
+- The frontend talks only to Nora
+- The backend talks to ClawHub
+- The frontend should never depend on raw ClawHub response shapes, pagination quirks, or discovery behavior
+- The backend is responsible for normalization
+
+### Discovery Contract
+
+#### `GET /api/clawhub/skills?limit=20&cursor=`
+Success response:
+```json
+{
+ "skills": [
+ {
+ "slug": "github",
+ "name": "GitHub",
+ "description": "Manage issues, PRs, and repos via the gh CLI.",
+ "downloads": 94200,
+ "stars": 1200,
+ "updatedAt": "2026-04-01T12:00:00Z"
+ }
+ ],
+ "cursor": "next-or-null"
+}
+```
+
+Error response:
+```json
+{
+ "error": "clawhub_unavailable",
+ "message": "Could not reach ClawHub registry."
+}
+```
+
+#### `GET /api/clawhub/skills/search?q=&limit=20`
+Success response:
+- Same shape as browse
+
+Validation error:
+```json
+{
+ "error": "missing_query",
+ "message": "q is required."
+}
+```
+
+Unavailable error:
+```json
+{
+ "error": "clawhub_unavailable",
+ "message": "Could not reach ClawHub registry."
+}
+```
+
+#### `GET /api/clawhub/skills/:slug`
+Success response:
+```json
+{
+ "slug": "github",
+ "name": "GitHub",
+ "description": "Manage issues, PRs, and repos via the gh CLI.",
+ "downloads": 94200,
+ "stars": 1200,
+ "updatedAt": "2026-04-01T12:00:00Z",
+ "readme": "# GitHub Skill\n...",
+ "requirements": {
+ "bins": ["gh"],
+ "env": ["GITHUB_TOKEN"],
+ "config": [],
+ "install": [
+ {
+ "kind": "node",
+ "package": "@github/gh-cli"
+ }
+ ]
+ }
+}
+```
+
+Not found error:
+```json
+{
+ "error": "skill_not_found",
+ "message": "No skill found with slug: github"
+}
+```
+
+### Agent-Scoped Contract
+
+#### `GET /api/clawhub/agents/:agentId/skills`
+Success response:
+```json
+{
+ "skills": [
+ { "slug": "github", "version": "2.1.0" },
+ { "slug": "gog", "version": "1.0.4" }
+ ]
+}
+```
+
+#### `POST /api/clawhub/agents/:agentId/skills/:slug/install`
+Accepted response:
+```json
+{
+ "jobId": "uuid-or-bullmq-id",
+ "agentId": "uuid",
+ "slug": "github",
+ "status": "pending"
+}
+```
+
+Error responses:
+```json
+{ "error": "agent_not_found" }
+```
+
+```json
+{
+ "error": "container_not_running",
+ "message": "Start the agent before installing skills."
+}
+```
+
+```json
+{
+ "error": "unsupported_runtime",
+ "message": "ClawHub installs are only available for Docker-backed OpenClaw agents."
+}
+```
+
+```json
+{
+ "error": "npm_unavailable",
+ "message": "The clawhub CLI could not be installed. Ensure Node.js is in your base image."
+}
+```
+
+Behavior notes:
+- This route attempts an immediate runtime-local install for an existing running agent
+- The selected skill is appended to `agents.clawhub_skills` only after the install succeeds
+- If the install fails, the agent record remains unchanged
+- For batch install, the frontend calls this endpoint once per selected slug
+
+### Job Polling Contract
+
+#### `GET /api/clawhub/jobs/:jobId`
+Success response:
+```json
+{
+ "jobId": "uuid-or-bullmq-id",
+ "agentId": "uuid",
+ "slug": "github",
+ "status": "pending | running | success | failed",
+ "error": null,
+ "completedAt": null
+}
+```
+
+State mapping:
+- BullMQ `waiting` / `delayed` -> `pending`
+- BullMQ `active` -> `running`
+- BullMQ `completed` -> `success`
+- BullMQ `failed` -> `failed`
+
+### Frontend Expectations
+- All calls go through `fetchWithAuth`
+- All non-2xx responses include a flat `error`
+- Include `message` when the UI should display human-readable detail
+- `agentId` comes from `router.query.id` in `frontend-dashboard/pages/agents/[id].js`
+
+## Scope Decisions
+
+### Included In V1
+- Browse skills
+- Search skills
+- Skill detail view
+- Installed skill listing
+- Async install with polling
+- Docker-backed OpenClaw agents only
+- Immediate install for existing running agents
+- Saved successful installs in `agents.clawhub_skills`
+- Deploy/start reconciliation that installs only missing saved skills
+
+### Excluded From V1
+- Uninstall
+- Version pinning
+- Streaming install logs
+- Auto-restarting the session
+- Compatibility pre-checks
+- K8s, Proxmox, Hermes, and other non-Docker runtime paths
diff --git a/workers/provisioner/worker.ts b/workers/provisioner/worker.ts
index 0f63fe8..db0d3d2 100644
--- a/workers/provisioner/worker.ts
+++ b/workers/provisioner/worker.ts
@@ -24,6 +24,10 @@ const {
const { waitForAgentReadiness } = require("./healthChecks");
const { buildReadinessWarningDetail, persistReadinessWarning } = require("./readinessWarning");
const { shellSingleQuote } = require("../../agent-runtime/lib/containerCommand");
+const {
+ computeMissingSavedSkills,
+ normalizeSavedSkillEntry: normalizeSavedClawhubSkillEntry,
+} = require("../../agent-runtime/lib/clawhubReconciliation");
// ── Connections ──────────────────────────────────────────
const connection = new IORedis({
@@ -51,6 +55,15 @@ function parsePositiveInteger(rawValue, fallbackValue, { min = 1, max = 32 } = {
return Math.min(max, Math.max(min, parsed));
}
// Canonical OpenClaw workspace inside the agent container; clawhub installs
// run relative to this path.
const OPENCLAW_WORKSPACE_PATH = "/root/.openclaw/workspace";
// Lockfile the clawhub CLI maintains; read to determine currently installed skills.
const CLAWHUB_LOCKFILE_PATH = `${OPENCLAW_WORKSPACE_PATH}/.clawhub/lock.json`;
// Max wall time for one install exec (env-overridable; defaults to 300000 ms = 5 min).
const CLAWHUB_INSTALL_TIMEOUT_MS = parseTimeoutMs(process.env.CLAWHUB_INSTALL_TIMEOUT_MS, 300000);
// Job lock outlives the install timeout by at least 2 min, with a 7 min floor,
// so a long install is not treated as stalled mid-run.
// NOTE(review): presumably the BullMQ worker lock duration — confirm against worker options.
const CLAWHUB_INSTALL_LOCK_DURATION_MS = Math.max(CLAWHUB_INSTALL_TIMEOUT_MS + 120000, 420000);
// Renew the lock at half its duration, clamped into the 30s..120s range.
const CLAWHUB_INSTALL_LOCK_RENEW_MS = Math.max(
  Math.min(Math.floor(CLAWHUB_INSTALL_LOCK_DURATION_MS / 2), 120000),
  30000,
);
+
const PROVIDER_ENV_MAP = Object.freeze({
anthropic: "ANTHROPIC_API_KEY",
openai: "OPENAI_API_KEY",
@@ -426,16 +439,55 @@ async function runRuntimeCommand(agent, command, { timeout = 30000 } = {}) {
return payload;
}
/**
 * Append `chunk` to the rolling `chunks` list while retaining only the
 * trailing `maxBytes` bytes across all stored chunks (a bounded output tail).
 *
 * Mutates `chunks` and `state.totalBytes` in place.
 *
 * @param {Buffer[]} chunks - Accumulated buffer pieces, oldest first.
 * @param {Buffer|string} chunk - Incoming data; falsy values are ignored.
 * @param {{totalBytes: number}} state - Running byte count across `chunks`.
 * @param {number} maxBytes - Tail window size; non-positive disables capture.
 */
function appendChunkTail(chunks, chunk, state, maxBytes) {
  if (!chunk || maxBytes <= 0) return;

  const incoming = Buffer.isBuffer(chunk) ? chunk : Buffer.from(String(chunk));

  if (incoming.length >= maxBytes) {
    // The new chunk alone fills the whole window: discard everything else
    // and keep only its trailing `maxBytes` bytes.
    chunks.splice(0, chunks.length, incoming.subarray(incoming.length - maxBytes));
    state.totalBytes = maxBytes;
    return;
  }

  chunks.push(incoming);
  state.totalBytes += incoming.length;

  // Evict bytes from the front until the total fits the window again.
  while (state.totalBytes > maxBytes && chunks.length > 0) {
    const excess = state.totalBytes - maxBytes;
    const head = chunks[0];
    if (head.length <= excess) {
      // Whole head chunk is outside the window.
      chunks.shift();
      state.totalBytes -= head.length;
    } else {
      // Trim only the leading `excess` bytes of the head chunk.
      chunks[0] = head.subarray(excess);
      state.totalBytes -= excess;
    }
  }
}
+
// Strips ANSI CSI escape sequences (colors, cursor movement) from terminal output.
// eslint-disable-next-line no-control-regex
const ANSI_ESCAPE_RE = new RegExp("\\u001b\\[[0-9;?]*[ -/]*[@-~]", "g");
// Matches anything outside tab, LF, and printable ASCII so binary noise is dropped.
// eslint-disable-next-line no-control-regex
const NON_PRINTABLE_RE = new RegExp("[^\\x09\\x0a\\x20-\\x7e]", "g");

/**
 * Normalize raw container exec output into clean, printable text.
 *
 * Removes ANSI escape codes, converts CRLF and lone CR line endings to LF,
 * strips non-printable bytes, and trims surrounding whitespace.
 *
 * @param {string} [output=""] Raw captured exec output.
 * @returns {string} Sanitized text safe to surface in job error payloads.
 */
function sanitizeExecOutput(output = "") {
  return String(output)
    .replace(ANSI_ESCAPE_RE, "")
    // Collapse CRLF to a single LF; the previous `\r` -> `\n` mapping turned
    // every CRLF into a doubled newline. Lone CRs still become LFs.
    .replace(/\r\n?/g, "\n")
    .replace(NON_PRINTABLE_RE, "")
    .trim();
}
async function runProvisionerExecCommand(
provisioner,
containerId,
command,
- { timeout = 30000 } = {},
+ { timeout = 30000, maxOutputBytes = 65536, tty = false, env = [] } = {},
) {
const execResult = await provisioner.exec(containerId, {
cmd: ["/bin/sh", "-lc", command],
- tty: true,
- env: [],
+ tty,
+ env,
});
if (!execResult?.exec || !execResult?.stream) {
throw new Error("Container exec unavailable");
@@ -443,10 +495,13 @@ async function runProvisionerExecCommand(
const output = await new Promise((resolve, reject) => {
const chunks = [];
+ const state = { totalBytes: 0 };
let settled = false;
+ let inspectInterval = null;
const timer = setTimeout(() => {
if (settled) return;
settled = true;
+ if (inspectInterval) clearInterval(inspectInterval);
try {
execResult.stream.destroy();
} catch {
@@ -459,11 +514,12 @@ async function runProvisionerExecCommand(
if (settled) return;
settled = true;
clearTimeout(timer);
- resolve(Buffer.concat(chunks).toString("utf8"));
+ if (inspectInterval) clearInterval(inspectInterval);
+ resolve(sanitizeExecOutput(Buffer.concat(chunks).toString("utf8")));
};
execResult.stream.on("data", (chunk) => {
- chunks.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(String(chunk)));
+ appendChunkTail(chunks, chunk, state, maxOutputBytes);
});
execResult.stream.on("end", finish);
execResult.stream.on("close", finish);
@@ -471,12 +527,32 @@ async function runProvisionerExecCommand(
if (settled) return;
settled = true;
clearTimeout(timer);
+ if (inspectInterval) clearInterval(inspectInterval);
reject(error);
});
+
+ inspectInterval = setInterval(async () => {
+ if (settled) return;
+ try {
+ const status = await execResult.exec.inspect();
+ if (status && status.Running === false && status.ExitCode != null) {
+ finish();
+ }
+ } catch (error) {
+ if (settled) return;
+ settled = true;
+ clearTimeout(timer);
+ if (inspectInterval) clearInterval(inspectInterval);
+ reject(error);
+ }
+ }, 500);
});
const inspectResult = await execResult.exec.inspect();
const exitCode = inspectResult?.ExitCode ?? 0;
+ if (exitCode === 124) {
+ throw new Error(`Container command timed out after ${timeout}ms`);
+ }
if (exitCode !== 0) {
throw new Error(output.trim() || `Container command exited with code ${exitCode}`);
}
@@ -484,6 +560,28 @@ async function runProvisionerExecCommand(
return { exitCode, output };
}
+function wrapCommandWithContainerTimeout(command, timeoutMs) {
+ const timeoutSeconds = Math.max(1, Math.ceil(timeoutMs / 1000));
+ return [
+ "if command -v timeout >/dev/null 2>&1; then",
+ ` exec timeout -k 5s ${timeoutSeconds}s /bin/sh -lc ${JSON.stringify(command)};`,
+ "fi;",
+ `exec /bin/sh -lc ${JSON.stringify(command)};`,
+ ].join(" ");
+}
+
+function createClawhubInstallLogger({ jobId, agentId, slug }) {
+ const startedAt = Date.now();
+
+ return (step, message, extra = null) => {
+ const elapsedMs = Date.now() - startedAt;
+ const suffix = extra ? ` ${JSON.stringify(extra)}` : "";
+ console.log(
+ `[clawhub-installs] job=${jobId} agent=${agentId} slug=${slug} step=${step} elapsedMs=${elapsedMs} ${message}${suffix}`,
+ );
+ };
+}
+
async function reconcileRuntimeLlmAuth({
agentId,
userId,
@@ -631,6 +729,193 @@ async function markDeploymentLifecycle(db, agentId, status) {
await db.query("UPDATE deployments SET status = $2 WHERE agent_id = $1", [agentId, status]);
}
+function normalizeInstalledSkillsLockfile(parsed = {}) {
+ const skills = parsed?.skills;
+ if (!skills || typeof skills !== "object" || Array.isArray(skills)) return [];
+
+ return Object.entries(skills)
+ .map(([slug, entry]) => ({
+ slug,
+ version:
+ entry && typeof entry === "object" && typeof entry.version === "string"
+ ? entry.version
+ : "",
+ }))
+ .filter((entry) => entry.slug);
+}
+
+function sleep(ms) {
+ return new Promise((resolve) => setTimeout(resolve, ms));
+}
+
+async function readInstalledClawhubSkills(provisioner, containerId) {
+ const readCommand =
+ `if [ -f ${JSON.stringify(CLAWHUB_LOCKFILE_PATH)} ]; then ` +
+ `base64 < ${JSON.stringify(CLAWHUB_LOCKFILE_PATH)} | tr -d '\\n'; ` +
+ `else printf 'eyJ2ZXJzaW9uIjoxLCJza2lsbHMiOnt9fQ=='; fi`;
+
+ let lastError = null;
+ for (let attempt = 1; attempt <= 5; attempt += 1) {
+ const { output } = await runProvisionerExecCommand(provisioner, containerId, readCommand, {
+ // Use a TTY here so Docker does not prepend multiplexed stream framing bytes
+ // to the lockfile payload. We additionally base64-wrap the file contents so
+ // JSON parsing only happens after the transport output is normalized.
+ tty: true,
+ env: ["TERM=dumb", "CI=1", "NO_COLOR=1", "CLICOLOR=0"],
+ });
+
+ try {
+ const decoded = Buffer.from(
+ String(output || "eyJ2ZXJzaW9uIjoxLCJza2lsbHMiOnt9fQ==").trim(),
+ "base64",
+ ).toString("utf8");
+ return normalizeInstalledSkillsLockfile(JSON.parse(decoded || '{"version":1,"skills":{}}'));
+ } catch (error) {
+ lastError = error;
+ if (attempt < 5) {
+ await sleep(250 * attempt);
+ }
+ }
+ }
+
+ throw new Error(`Failed to parse ClawHub lockfile: ${lastError?.message || "unknown error"}`);
+}
+
+async function ensureClawhubCli(provisioner, containerId) {
+ try {
+ await runProvisionerExecCommand(
+ provisioner,
+ containerId,
+ wrapCommandWithContainerTimeout(
+ "if command -v clawhub >/dev/null 2>&1; then exit 0; fi; " +
+ "if ! command -v npm >/dev/null 2>&1; then exit 42; fi; " +
+ "npm install -g clawhub",
+ CLAWHUB_INSTALL_TIMEOUT_MS,
+ ),
+ {
+ timeout: CLAWHUB_INSTALL_TIMEOUT_MS + 10000,
+ env: ["TERM=dumb", "CI=1", "NO_COLOR=1", "CLICOLOR=0"],
+ },
+ );
+ } catch (error) {
+ if (String(error?.message || "").includes("code 42")) {
+ const npmError = new Error(
+ "The clawhub CLI could not be installed. Ensure Node.js is in your base image.",
+ );
+ npmError.code = "npm_unavailable";
+ throw npmError;
+ }
+ throw error;
+ }
+}
+
+async function appendSavedClawhubSkill(agentId, slug, skillEntry) {
+ const normalizedEntry = normalizeSavedClawhubSkillEntry(slug, skillEntry);
+ if (!normalizedEntry) return;
+
+ const result = await db.query("SELECT clawhub_skills FROM agents WHERE id = $1 LIMIT 1", [
+ agentId,
+ ]);
+ const current = Array.isArray(result.rows[0]?.clawhub_skills)
+ ? result.rows[0].clawhub_skills
+ : [];
+ const exists = current.some((entry) => {
+ const savedSlug = String(entry?.installSlug || entry?.slug || "").trim();
+ const savedAuthor = String(entry?.author || "").trim();
+ return savedSlug === normalizedEntry.installSlug && savedAuthor === normalizedEntry.author;
+ });
+ if (exists) return;
+
+ await db.query("UPDATE agents SET clawhub_skills = $2::jsonb WHERE id = $1", [
+ agentId,
+ JSON.stringify([...current, normalizedEntry]),
+ ]);
+}
+
+async function reconcileSavedClawhubSkills({
+ agentId,
+ containerId,
+ provisioner,
+ logPrefix = "[clawhub-reconcile]",
+}) {
+ const result = await db.query(
+ "SELECT clawhub_skills, backend_type, runtime_family FROM agents WHERE id = $1 LIMIT 1",
+ [agentId],
+ );
+ const agent = result.rows[0];
+ if (!agent) {
+ console.warn(`${logPrefix} agent=${agentId} Agent row not found; skipping reconciliation`);
+ return;
+ }
+
+ if (agent.backend_type !== "docker" || agent.runtime_family !== "openclaw") {
+ return;
+ }
+
+ const savedSkills = Array.isArray(agent.clawhub_skills) ? agent.clawhub_skills : [];
+
+ if (!savedSkills.length) {
+ console.log(`${logPrefix} agent=${agentId} No saved ClawHub skills to reconcile`);
+ return;
+ }
+
+ let installedSkills = [];
+ try {
+ installedSkills = await readInstalledClawhubSkills(provisioner, containerId);
+ } catch (error) {
+ console.warn(
+ `${logPrefix} agent=${agentId} Failed to read installed skills before reconciliation: ${error.message}`,
+ );
+ installedSkills = [];
+ }
+
+ const missingSkills = computeMissingSavedSkills(savedSkills, installedSkills);
+
+ if (!missingSkills.length) {
+ console.log(`${logPrefix} agent=${agentId} All saved ClawHub skills already present`);
+ return;
+ }
+
+ console.log(
+ `${logPrefix} agent=${agentId} Reconciling ${missingSkills.length} missing ClawHub skill(s)`,
+ );
+
+ for (const skill of missingSkills) {
+ try {
+ console.log(
+ `${logPrefix} agent=${agentId} slug=${skill.installSlug} Installing missing saved skill`,
+ );
+ await ensureClawhubCli(provisioner, containerId);
+ await runProvisionerExecCommand(
+ provisioner,
+ containerId,
+ `cd ${JSON.stringify(OPENCLAW_WORKSPACE_PATH)} && clawhub install ${JSON.stringify(
+ skill.installSlug,
+ )} --no-input`,
+ {
+ timeout: CLAWHUB_INSTALL_TIMEOUT_MS + 10000,
+ maxOutputBytes: 32768,
+ env: ["TERM=dumb", "CI=1", "NO_COLOR=1", "CLICOLOR=0"],
+ },
+ );
+ console.log(
+ `${logPrefix} agent=${agentId} slug=${skill.installSlug} Reconciliation install completed`,
+ );
+ } catch (error) {
+ const message = String(error?.message || "");
+ if (message.includes("Already installed")) {
+ console.log(
+ `${logPrefix} agent=${agentId} slug=${skill.installSlug} Skill already installed during reconciliation`,
+ );
+ continue;
+ }
+ console.warn(
+ `${logPrefix} agent=${agentId} slug=${skill.installSlug} Reconciliation install failed: ${message}`,
+ );
+ }
+ }
+}
+
// ── Pluggable Backend ────────────────────────────────────
const backendInstances = new Map();
@@ -1195,6 +1480,21 @@ const worker = new Worker(
}
}
+ if (resolvedRuntimeFields.runtime_family === "openclaw" && containerId) {
+ try {
+ await reconcileSavedClawhubSkills({
+ agentId: id,
+ containerId,
+ provisioner,
+ });
+ } catch (e) {
+ console.warn(
+ `[provisioner] Failed to reconcile saved ClawHub skills for agent ${id}:`,
+ e.message,
+ );
+ }
+ }
+
// Sync integrations to newly deployed agent container
try {
const intResult = await db.query(
@@ -1260,12 +1560,146 @@ worker.on("completed", (job) => {
console.log(`Job ${job.id} completed successfully`);
});
+const clawhubInstallWorker = new Worker(
+ "clawhub-installs",
+ async (job) => {
+ const { agentId, slug, skillEntry, persistOnSuccess = true } = job.data || {};
+ const normalizedSlug = String(slug || "").trim().replace(/[^\w@/.-]/g, "");
+ if (!agentId || !normalizedSlug) {
+ throw new Error("ClawHub install job is missing agentId or slug");
+ }
+ const logInstall = createClawhubInstallLogger({
+ jobId: job.id,
+ agentId,
+ slug: normalizedSlug,
+ });
+
+ const result = await db.query(
+ `SELECT id, name, status, container_id, backend_type, runtime_family, deploy_target,
+ sandbox_profile, clawhub_skills
+ FROM agents
+ WHERE id = $1
+ LIMIT 1`,
+ [agentId],
+ );
+ const agent = result.rows[0];
+ if (!agent) {
+ throw new Error(`Agent not found: ${agentId}`);
+ }
+ if (agent.backend_type !== "docker" || agent.runtime_family !== "openclaw") {
+ throw new Error("ClawHub installs are only available for Docker-backed OpenClaw agents.");
+ }
+ if (!agent.container_id || (agent.status !== "running" && agent.status !== "warning")) {
+ throw new Error("Start the agent before installing skills.");
+ }
+ const provisioner = loadBackend(agent.backend_type);
+
+ logInstall("start", "Starting install job");
+
+ logInstall("cli-check", "Ensuring clawhub CLI is available");
+ await ensureClawhubCli(provisioner, agent.container_id);
+ logInstall("cli-check", "Clawhub CLI is ready");
+
+ logInstall("precheck", "Reading installed skills before install");
+ const installedBefore = await readInstalledClawhubSkills(provisioner, agent.container_id);
+ logInstall("precheck", "Read installed skills before install", {
+ installedCount: installedBefore.length,
+ });
+ if (installedBefore.some((entry) => entry.slug === normalizedSlug)) {
+ logInstall("precheck", "Skill already installed before command");
+ if (persistOnSuccess) {
+ logInstall("persist", "Persisting already-installed skill to agents table");
+ await appendSavedClawhubSkill(agentId, normalizedSlug, skillEntry);
+ logInstall("persist", "Persisted already-installed skill");
+ }
+ logInstall("done", "Install job completed without running clawhub install");
+ return {
+ agentId,
+ slug: normalizedSlug,
+ installedSkills: installedBefore,
+ };
+ }
+
+ try {
+ logInstall("install", "Running clawhub install command", {
+ timeoutMs: CLAWHUB_INSTALL_TIMEOUT_MS,
+ });
+ // Keep the install invocation unwrapped. A nested in-container `timeout ... /bin/sh -lc ...`
+ // caused Nora-driven ClawHub installs to hang/time out even though the same CLI command
+ // completed quickly when run directly in the container. The outer exec timeout remains the
+ // single guardrail for this path.
+ await runProvisionerExecCommand(
+ provisioner,
+ agent.container_id,
+ `cd ${JSON.stringify(OPENCLAW_WORKSPACE_PATH)} && clawhub install ${JSON.stringify(
+ normalizedSlug,
+ )} --no-input`,
+ {
+ timeout: CLAWHUB_INSTALL_TIMEOUT_MS + 10000,
+ maxOutputBytes: 32768,
+ env: ["TERM=dumb", "CI=1", "NO_COLOR=1", "CLICOLOR=0"],
+ },
+ );
+ logInstall("install", "Clawhub install command finished");
+ } catch (error) {
+ const message = String(error?.message || "");
+ if (!message.includes("Already installed")) {
+ logInstall("install", "Clawhub install command failed", {
+ error: message,
+ });
+ throw error;
+ }
+ logInstall("install", "Clawhub reported skill already installed");
+ }
+
+ logInstall("verify", "Reading installed skills after install");
+ const installedSkills = await readInstalledClawhubSkills(provisioner, agent.container_id);
+ logInstall("verify", "Read installed skills after install", {
+ installedCount: installedSkills.length,
+ });
+ const installed = installedSkills.some((entry) => entry.slug === normalizedSlug);
+ if (!installed) {
+ logInstall("verify", "Lockfile missing expected slug after install");
+ throw new Error(`ClawHub install completed but ${normalizedSlug} was not found in lockfile`);
+ }
+
+ if (persistOnSuccess) {
+ logInstall("persist", "Persisting successful install to agents table");
+ await appendSavedClawhubSkill(agentId, normalizedSlug, skillEntry);
+ logInstall("persist", "Persisted successful install");
+ }
+
+ logInstall("done", "Install job completed successfully");
+ return {
+ agentId,
+ slug: normalizedSlug,
+ installedSkills,
+ };
+ },
+ {
+ connection,
+ concurrency: 1,
+ lockDuration: CLAWHUB_INSTALL_LOCK_DURATION_MS,
+ lockRenewTime: CLAWHUB_INSTALL_LOCK_RENEW_MS,
+ stalledInterval: 30000,
+ maxStalledCount: 1,
+ },
+);
+
+clawhubInstallWorker.on("failed", (job, err) => {
+ console.error(`[clawhub-installs] Job ${job?.id} failed: ${err.message}`);
+});
+
+clawhubInstallWorker.on("completed", (job) => {
+ console.log(`[clawhub-installs] Job ${job.id} completed successfully`);
+});
+
// ── Health Check Server ──────────────────────────────────────────
const http = require("http");
const HEALTH_PORT = parseInt(process.env.WORKER_HEALTH_PORT || "4001");
const healthServer = http.createServer((req, res) => {
if (req.url === "/health") {
- const isReady = worker.isRunning();
+ const isReady = worker.isRunning() && clawhubInstallWorker.isRunning();
res.writeHead(isReady ? 200 : 503, { "Content-Type": "application/json" });
res.end(JSON.stringify({ status: isReady ? "ok" : "not_ready", uptime: process.uptime() }));
} else {