From 9b7a05d84bdee0785e2f08e1b0d79f937355e5e8 Mon Sep 17 00:00:00 2001 From: Tomas Beran Date: Wed, 18 Mar 2026 19:33:36 +0100 Subject: [PATCH 01/22] docs: consolidate sandbox section from 19 to 8 pages with Core/Guides grouping --- docs.json | 130 +- docs/agents/amp.mdx | 4 +- docs/agents/claude-code.mdx | 2 +- docs/agents/codex.mdx | 4 +- docs/agents/openclaw.mdx | 6 +- docs/agents/opencode.mdx | 4 +- docs/billing.mdx | 4 +- docs/migration/v2.mdx | 2 +- docs/sandbox.mdx | 168 +-- docs/sandbox/auto-resume.mdx | 308 ---- docs/sandbox/{pty.mdx => commands.mdx} | 183 ++- .../{connect-bucket.mdx => configuration.mdx} | 150 +- docs/sandbox/connect.mdx | 76 - docs/sandbox/environment-variables.mdx | 119 -- docs/sandbox/lifecycle-events-api.mdx | 156 -- docs/sandbox/lifecycle-events-webhooks.mdx | 363 ----- docs/sandbox/lifecycle.mdx | 1253 +++++++++++++++++ docs/sandbox/list.mdx | 339 ----- docs/sandbox/metadata.mdx | 57 - docs/sandbox/metrics.mdx | 110 -- docs/sandbox/observability.mdx | 623 ++++++++ docs/sandbox/persistence.mdx | 282 ---- .../{ip-tunneling.mdx => proxy-tunneling.mdx} | 0 docs/sandbox/rate-limits.mdx | 143 -- docs/sandbox/secured-access.mdx | 54 - .../{internet-access.mdx => security.mdx} | 1073 ++++++++------ docs/sandbox/snapshots.mdx | 194 --- docs/sandbox/ssh-access.mdx | 156 -- docs/template/defining-template.mdx | 2 +- docs/template/start-ready-command.mdx | 2 +- docs/use-cases/coding-agents.mdx | 2 +- 31 files changed, 2949 insertions(+), 3020 deletions(-) delete mode 100644 docs/sandbox/auto-resume.mdx rename docs/sandbox/{pty.mdx => commands.mdx} (57%) rename docs/sandbox/{connect-bucket.mdx => configuration.mdx} (60%) delete mode 100644 docs/sandbox/connect.mdx delete mode 100644 docs/sandbox/environment-variables.mdx delete mode 100644 docs/sandbox/lifecycle-events-api.mdx delete mode 100644 docs/sandbox/lifecycle-events-webhooks.mdx create mode 100644 docs/sandbox/lifecycle.mdx delete mode 100644 docs/sandbox/list.mdx delete mode 100644 
docs/sandbox/metadata.mdx delete mode 100644 docs/sandbox/metrics.mdx create mode 100644 docs/sandbox/observability.mdx delete mode 100644 docs/sandbox/persistence.mdx rename docs/sandbox/{ip-tunneling.mdx => proxy-tunneling.mdx} (100%) delete mode 100644 docs/sandbox/rate-limits.mdx delete mode 100644 docs/sandbox/secured-access.mdx rename docs/sandbox/{internet-access.mdx => security.mdx} (56%) delete mode 100644 docs/sandbox/snapshots.mdx delete mode 100644 docs/sandbox/ssh-access.mdx diff --git a/docs.json b/docs.json index 345a1bfe..a26623de 100644 --- a/docs.json +++ b/docs.json @@ -104,25 +104,24 @@ "group": "Sandbox", "pages": [ "docs/sandbox", - "docs/sandbox/lifecycle-events-api", - "docs/sandbox/lifecycle-events-webhooks", - "docs/sandbox/persistence", - "docs/sandbox/snapshots", - "docs/sandbox/auto-resume", - "docs/sandbox/git-integration", - "docs/sandbox/metrics", - "docs/sandbox/metadata", - "docs/sandbox/environment-variables", - "docs/sandbox/list", - "docs/sandbox/connect", - "docs/sandbox/internet-access", - "docs/sandbox/pty", - "docs/sandbox/ssh-access", - "docs/sandbox/connect-bucket", - "docs/sandbox/rate-limits", - "docs/sandbox/secured-access", - "docs/sandbox/ip-tunneling", - "docs/sandbox/custom-domain" + { + "group": "Core", + "pages": [ + "docs/sandbox/lifecycle", + "docs/sandbox/commands", + "docs/sandbox/configuration", + "docs/sandbox/security", + "docs/sandbox/observability" + ] + }, + { + "group": "Guides", + "pages": [ + "docs/sandbox/git-integration", + "docs/sandbox/proxy-tunneling", + "docs/sandbox/custom-domain" + ] + } ] }, { @@ -3804,7 +3803,7 @@ }, { "source": "/docs/legacy/sandbox/api/envs", - "destination": "/docs/sandbox/environment-variables", + "destination": "/docs/sandbox/configuration", "permanent": true }, { @@ -3814,17 +3813,17 @@ }, { "source": "/docs/legacy/sandbox/api/reconnect", - "destination": "/docs/sandbox/connect", + "destination": "/docs/sandbox/lifecycle", "permanent": true }, { "source": 
"/docs/legacy/sandbox/api/url", - "destination": "/docs/sandbox/internet-access", + "destination": "/docs/sandbox/security", "permanent": true }, { "source": "/docs/legacy/sandbox/api/metadata", - "destination": "/docs/sandbox/metadata", + "destination": "/docs/sandbox/lifecycle", "permanent": true }, { @@ -3941,6 +3940,91 @@ "source": "/docs/git", "destination": "/docs/sandbox/git-integration", "permanent": true + }, + { + "source": "/docs/sandbox/persistence", + "destination": "/docs/sandbox/lifecycle", + "permanent": true + }, + { + "source": "/docs/sandbox/snapshots", + "destination": "/docs/sandbox/lifecycle", + "permanent": true + }, + { + "source": "/docs/sandbox/auto-resume", + "destination": "/docs/sandbox/lifecycle", + "permanent": true + }, + { + "source": "/docs/sandbox/list", + "destination": "/docs/sandbox/lifecycle", + "permanent": true + }, + { + "source": "/docs/sandbox/connect", + "destination": "/docs/sandbox/lifecycle", + "permanent": true + }, + { + "source": "/docs/sandbox/metadata", + "destination": "/docs/sandbox/lifecycle", + "permanent": true + }, + { + "source": "/docs/sandbox/lifecycle-events-api", + "destination": "/docs/sandbox/observability", + "permanent": true + }, + { + "source": "/docs/sandbox/lifecycle-events-webhooks", + "destination": "/docs/sandbox/observability", + "permanent": true + }, + { + "source": "/docs/sandbox/metrics", + "destination": "/docs/sandbox/observability", + "permanent": true + }, + { + "source": "/docs/sandbox/secured-access", + "destination": "/docs/sandbox/security", + "permanent": true + }, + { + "source": "/docs/sandbox/internet-access", + "destination": "/docs/sandbox/security", + "permanent": true + }, + { + "source": "/docs/sandbox/rate-limits", + "destination": "/docs/sandbox/security", + "permanent": true + }, + { + "source": "/docs/sandbox/environment-variables", + "destination": "/docs/sandbox/configuration", + "permanent": true + }, + { + "source": "/docs/sandbox/connect-bucket", + 
"destination": "/docs/sandbox/configuration", + "permanent": true + }, + { + "source": "/docs/sandbox/pty", + "destination": "/docs/sandbox/commands", + "permanent": true + }, + { + "source": "/docs/sandbox/ssh-access", + "destination": "/docs/sandbox/commands", + "permanent": true + }, + { + "source": "/docs/sandbox/ip-tunneling", + "destination": "/docs/sandbox/proxy-tunneling", + "permanent": true } ] } diff --git a/docs/agents/amp.mdx b/docs/agents/amp.mdx index 197b7081..b9081338 100644 --- a/docs/agents/amp.mdx +++ b/docs/agents/amp.mdx @@ -298,13 +298,13 @@ python build.py ## Related guides - + Auto-pause, resume, and manage sandbox lifecycle Clone repos, manage branches, and push changes - + Connect to the sandbox via SSH for interactive sessions diff --git a/docs/agents/claude-code.mdx b/docs/agents/claude-code.mdx index cb551604..89dc2f43 100644 --- a/docs/agents/claude-code.mdx +++ b/docs/agents/claude-code.mdx @@ -450,7 +450,7 @@ python build.py Connect Claude Code to 200+ MCP tools - + Auto-pause, resume, and manage sandbox lifecycle diff --git a/docs/agents/codex.mdx b/docs/agents/codex.mdx index 7b6f854f..b647f455 100644 --- a/docs/agents/codex.mdx +++ b/docs/agents/codex.mdx @@ -354,13 +354,13 @@ python build.py ## Related guides - + Auto-pause, resume, and manage sandbox lifecycle Clone repos, manage branches, and push changes - + Connect to the sandbox via SSH for interactive sessions diff --git a/docs/agents/openclaw.mdx b/docs/agents/openclaw.mdx index 20165972..da88841e 100644 --- a/docs/agents/openclaw.mdx +++ b/docs/agents/openclaw.mdx @@ -387,7 +387,7 @@ sandbox.kill() OpenClaw has a built-in [web UI and chat interface](https://openclaw.ai) served by its gateway. Start it inside a sandbox and connect from your browser. -This sandbox is created with a 10-minute timeout and auto-pause enabled. After 10 minutes of inactivity it pauses and can be resumed later. 
See [Sandbox Persistence](/docs/sandbox/persistence) and [Sandbox Lifecycle](/docs/sandbox) for more details. +This sandbox is created with a 10-minute timeout and auto-pause enabled. After 10 minutes of inactivity it pauses and can be resumed later. See [Sandbox Persistence](/docs/sandbox/lifecycle) and [Sandbox Lifecycle](/docs/sandbox) for more details. @@ -500,13 +500,13 @@ python build.py ## Related guides - + Auto-pause, resume, and manage sandbox lifecycle Clone repos, manage branches, and push changes - + Connect to the sandbox via SSH for interactive sessions diff --git a/docs/agents/opencode.mdx b/docs/agents/opencode.mdx index a5cee3b1..0eb70e6d 100644 --- a/docs/agents/opencode.mdx +++ b/docs/agents/opencode.mdx @@ -278,13 +278,13 @@ python build.py ## Related guides - + Auto-pause, resume, and manage sandbox lifecycle Clone repos, manage branches, and push changes - + Connect to the sandbox via SSH for interactive sessions diff --git a/docs/billing.mdx b/docs/billing.mdx index 752646e5..c665b719 100644 --- a/docs/billing.mdx +++ b/docs/billing.mdx @@ -21,7 +21,7 @@ E2B uses [usage-based pricing](#usage-based-pricing) - you pay only for what you Pro plan includes 100 concurrent sandboxes. Higher concurrency up to 1,100 is available as a purchasable [add-on](https://e2b.dev/dashboard?tab=billing). -Plans have different [API rate limits](/docs/sandbox/rate-limits). +Plans have different [API rate limits](/docs/sandbox/security#rate-limits). To upgrade your plan or purchase add-ons, visit the [dashboard billing tab](https://e2b.dev/dashboard?tab=billing). For Enterprise plans, [contact sales](mailto:enterprise@e2b.dev). 
@@ -97,7 +97,7 @@ Check your usage and costs in the [dashboard usage tab](https://e2b.dev/dashboar - **Kill sandboxes you no longer need** - Use `sbx.kill()` to stop billing and release resources permanently - **Allocate only what you need** - Start with default resources (2 vCPU, 1 GB RAM) and increase only if necessary - **Monitor actively running sandboxes** - Use the [CLI](/docs/cli/list-sandboxes) or [dashboard](https://e2b.dev/dashboard?tab=usage) to track active sandboxes - - **Use lifecycle events** - Set up [webhooks](/docs/sandbox/lifecycle-events-webhooks) to get notified when sandboxes are created + - **Use lifecycle events** - Set up [webhooks](/docs/sandbox/observability#lifecycle-webhooks) to get notified when sandboxes are created diff --git a/docs/migration/v2.mdx b/docs/migration/v2.mdx index 5d06336b..3560c644 100644 --- a/docs/migration/v2.mdx +++ b/docs/migration/v2.mdx @@ -63,7 +63,7 @@ sandbox = Sandbox.create(secure=False) # Explicitly disable ``` -Check more details in the [secured access documentation](/docs/sandbox/secured-access). +Check more details in the [secured access documentation](/docs/sandbox/security#secured-access). ### 3. File writing in Python SDK diff --git a/docs/sandbox.mdx b/docs/sandbox.mdx index 6c37ccd3..b4833636 100644 --- a/docs/sandbox.mdx +++ b/docs/sandbox.mdx @@ -1,136 +1,38 @@ --- -title: "Sandbox lifecycle" -sidebarTitle: Lifecycle +title: "Sandbox" +sidebarTitle: Overview --- -Sandboxes stay running as long as you need them. When their timeout expires, they can automatically pause to save resources — preserving their full state so you can resume at any time. You can also configure an explicit timeout or shut down a sandbox manually. - - -Sandboxes can run continuously for up to 24 hours (Pro) or 1 hour (Base). For longer workloads, use [pause and resume](/docs/sandbox/persistence) — pausing resets the runtime window, and your sandbox's full state is preserved indefinitely. 
- - - -```js JavaScript & TypeScript highlight={6} -import { Sandbox } from '@e2b/code-interpreter' - -// Create sandbox with and keep it running for 60 seconds. -// 🚨 Note: The units are milliseconds. -const sandbox = await Sandbox.create({ - timeoutMs: 60_000, -}) -``` -```python Python highlight={6} -from e2b_code_interpreter import Sandbox - -# Create sandbox with and keep it running for 60 seconds. -# 🚨 Note: The units are seconds. -sandbox = Sandbox.create( - timeout=60, -) -``` - - - -## Change sandbox timeout during runtime - -You can change the sandbox timeout when it's running by calling the `setTimeout` method in JavaScript or `set_timeout` method in Python. - -When you call the set timeout method, the sandbox timeout will be reset to the new value that you specified. - -This can be useful if you want to extend the sandbox lifetime when it's already running. -You can for example start with a sandbox with 1 minute timeout and then periodically call set timeout every time user interacts with it in your app. - - -```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' - -// Create sandbox with and keep it running for 60 seconds. -const sandbox = await Sandbox.create({ timeoutMs: 60_000 }) - -// Change the sandbox timeout to 30 seconds. -// 🚨 The new timeout will be 30 seconds from now. -await sandbox.setTimeout(30_000) -``` -```python Python -from e2b_code_interpreter import Sandbox - -# Create sandbox with and keep it running for 60 seconds. -sandbox = Sandbox.create(timeout=60) - -# Change the sandbox timeout to 30 seconds. -# 🚨 The new timeout will be 30 seconds from now. -sandbox.set_timeout(30) -``` - - -## Retrieve sandbox information - -You can retrieve sandbox information like sandbox ID, template, metadata, started at/end at date by calling the `getInfo` method in JavaScript or `get_info` method in Python. 
- - -```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' - -// Create sandbox with and keep it running for 60 seconds. -const sandbox = await Sandbox.create({ timeoutMs: 60_000 }) - -// Retrieve sandbox information. -const info = await sandbox.getInfo() - -console.log(info) - -// { -// "sandboxId": "iiny0783cype8gmoawzmx-ce30bc46", -// "templateId": "rki5dems9wqfm4r03t7g", -// "name": "base", -// "metadata": {}, -// "startedAt": "2025-03-24T15:37:58.076Z", -// "endAt": "2025-03-24T15:42:58.076Z" -// } -``` - -```python Python -from e2b_code_interpreter import Sandbox - -# Create sandbox with and keep it running for 60 seconds. -sandbox = Sandbox.create(timeout=60) - -# Retrieve sandbox information. -info = sandbox.get_info() - -print(info) - -# SandboxInfo(sandbox_id='ig6f1yt6idvxkxl562scj-419ff533', -# template_id='u7nqkmpn3jjf1tvftlsu', -# name='base', -# metadata={}, -# started_at=datetime.datetime(2025, 3, 24, 15, 42, 59, 255612, tzinfo=tzutc()), -# end_at=datetime.datetime(2025, 3, 24, 15, 47, 59, 255612, tzinfo=tzutc()) -# ) -``` - - -## Shutdown sandbox - -You can shutdown the sandbox any time even before the timeout is up by calling the `kill` method. - - -```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' - -// Create sandbox with and keep it running for 60 seconds. -const sandbox = await Sandbox.create({ timeoutMs: 60_000 }) - -// Shutdown the sandbox immediately. -await sandbox.kill() -``` -```python Python -from e2b_code_interpreter import Sandbox - -# Create sandbox with and keep it running for 60 seconds. -sandbox = Sandbox.create(timeout=60) - -# Shutdown the sandbox immediately. -sandbox.kill() -``` - +An E2B sandbox is a secure, isolated cloud environment where your AI agents can execute code, run commands, interact with the filesystem, and access the internet. Sandboxes support long-lived sessions with pause/resume, snapshots, and configurable timeouts. 
+ + + + Timeouts, pause/resume, snapshots, listing and connecting to sandboxes. + + + Interactive terminal (PTY) and SSH access. + + + Environment variables and storage bucket integration. + + + Secured access, network controls, and rate limits. + + + Metrics, lifecycle events API, and webhooks. + + + +## Guides + + + + Clone repos, manage branches, push changes. + + + Route sandbox traffic through a proxy server. + + + Set up a custom domain for sandbox access. + + diff --git a/docs/sandbox/auto-resume.mdx b/docs/sandbox/auto-resume.mdx deleted file mode 100644 index 06d04e33..00000000 --- a/docs/sandbox/auto-resume.mdx +++ /dev/null @@ -1,308 +0,0 @@ ---- -title: "AutoResume" -sidebarTitle: AutoResume ---- - -Many workloads don't need a sandbox running all the time, but when they do need it, it should just work, whether it was paused or not. - -`AutoResume` handles this automatically: a paused sandbox wakes up when activity arrives, so your code does not have to check or manage sandbox state. -Configure it through the `lifecycle` object when creating a sandbox. 
- -## Configure lifecycle on create - - -```js JavaScript & TypeScript -import { Sandbox } from 'e2b' - -const sandbox = await Sandbox.create({ - timeoutMs: 10 * 60 * 1000, - lifecycle: { - onTimeout: 'pause', - autoResume: true, // resume when activity arrives - }, -}) -``` -```python Python -from e2b import Sandbox - -sandbox = Sandbox.create( - timeout=10 * 60, - lifecycle={ - "on_timeout": "pause", - "auto_resume": True, # resume when activity arrives - }, -) -``` - - -## Lifecycle options - -- `onTimeout` / `on_timeout` - - `kill` (default): sandbox is terminated when timeout is reached - - `pause`: sandbox is paused when timeout is reached -- `autoResume` / `auto_resume` - - `false` (default): paused sandboxes do not auto-resume - - `true`: paused sandboxes auto-resume on activity - - `true` is valid only when `onTimeout`/`on_timeout` is `pause` - -## Behavior summary - -- Default behavior is equivalent to `onTimeout: "kill"` with `autoResume: false`. -- `onTimeout: "pause"` with `autoResume: false` gives auto-pause without auto-resume. -- `onTimeout: "pause"` with `autoResume: true` gives auto-pause with auto-resume. -- [`Sandbox.connect()`](/docs/sandbox/connect) can still be used to resume a paused sandbox manually. - -If you use `autoResume: false`, resume explicitly with [`Sandbox.connect()`](/docs/sandbox/connect). - -## What counts as activity - -Auto-resume is triggered by the sandbox activity - that's both HTTP traffic and controlling the sandbox from the SDK. - -That includes SDK operations like: -- `sandbox.commands.run(...)` -- `sandbox.files.read(...)` -- `sandbox.files.write(...)` -- opening a tunneled app URL or sending requests to a service running inside the sandbox - -If a sandbox is paused and `autoResume` is enabled, the next supported operation resumes it automatically. You do not need to call [`Sandbox.connect()`](/docs/sandbox/connect) first. 
- -### SDK example: pause, then read a file - - -```js JavaScript & TypeScript -import { Sandbox } from 'e2b' - -const sandbox = await Sandbox.create({ - timeoutMs: 10 * 60 * 1000, - lifecycle: { - onTimeout: 'pause', - autoResume: true, - }, -}) - -await sandbox.files.write('/home/user/hello.txt', 'hello from a paused sandbox') -await sandbox.pause() - -const content = await sandbox.files.read('/home/user/hello.txt') -console.log(content) -console.log(`State after read: ${(await sandbox.getInfo()).state}`) -``` -```python Python -from e2b import Sandbox - -sandbox = Sandbox.create( - timeout=10 * 60, - lifecycle={ - "on_timeout": "pause", - "auto_resume": True, - }, -) - -sandbox.files.write("/home/user/hello.txt", "hello from a paused sandbox") -sandbox.pause() - -content = sandbox.files.read("/home/user/hello.txt") -print(content) -print(f"State after read: {sandbox.get_info().state}") -``` - - -## Use cases - -### Web and dev/preview servers - -Use `onTimeout: "pause"` + `autoResume: true` so inbound traffic can wake a paused sandbox automatically. 
-This works for both: -- Basic web/API servers -- Dev or preview servers you open occasionally - - -```js JavaScript & TypeScript -import { Sandbox } from 'e2b' - -const sandbox = await Sandbox.create({ - timeoutMs: 10 * 60 * 1000, - lifecycle: { - onTimeout: 'pause', - autoResume: true, - }, -}) - -await sandbox.commands.run( - `python3 -m pip -q install 'flask>=2.2'` -) - -await sandbox.files.write( - '/home/user/app.py', - [ - 'from flask import Flask', - 'app = Flask(__name__)', - '@app.route("/")', - 'def hello():', - ' return "Hello, World!"', - 'app.run(host="0.0.0.0", port=3000)', - '', - ].join('\n') -) - -await sandbox.commands.run( - 'python3 -u /home/user/app.py > /home/user/flask.log 2>&1', - { background: true } -) - -await new Promise((resolve) => setTimeout(resolve, 1000)) - -const previewHost = sandbox.getHost(3000) -console.log(`Preview URL: https://${previewHost}`) - -console.log(`Status before pause: ${(await sandbox.getInfo()).state}`) -await sandbox.pause() -console.log(`Status after pause: ${(await sandbox.getInfo()).state}`) -``` -```python Python -import time - -from e2b import Sandbox - -sandbox = Sandbox.create( - timeout=10 * 60, - lifecycle={ - "on_timeout": "pause", - "auto_resume": True, - }, -) - -sandbox.commands.run("python3 -m pip -q install 'flask>=2.2'") - -sandbox.files.write( - "/home/user/app.py", - 'from flask import Flask\n' - 'app = Flask(__name__)\n' - '@app.route("/")\n' - 'def hello():\n' - ' return "Hello, World!"\n' - 'app.run(host="0.0.0.0", port=3000)\n' -) - -sandbox.commands.run( - "python3 -u /home/user/app.py > /home/user/flask.log 2>&1", - background=True, -) - -time.sleep(1) - -preview_host = sandbox.get_host(3000) -print(f"Preview URL: https://{preview_host}") - -print(f"Status before pause: {sandbox.get_info().state}") -sandbox.pause() -print(f"Status after pause: {sandbox.get_info().state}") -``` - - -### Agent/tool execution - -For queued tasks or tool calls, create once and keep using the same sandbox 
handle. If it is paused, it will auto-resume when you run the next command. - - -```js JavaScript & TypeScript -import { Sandbox } from 'e2b' - -// One-time setup -const sandbox = await Sandbox.create({ - timeoutMs: 5 * 60 * 1000, - lifecycle: { - onTimeout: 'pause', - autoResume: true, - }, -}) - -// Later: called for each agent/tool task -async function runToolTask(command) { - const result = await sandbox.commands.run(command) - return result.stdout -} - -console.log(await runToolTask('python -c "print(2 + 2)"')) -``` -```python Python -from e2b import Sandbox - -# One-time setup -sandbox = Sandbox.create( - timeout=5 * 60, - lifecycle={ - "on_timeout": "pause", - "auto_resume": True, - }, -) - -# Later: called for each agent/tool task -def run_tool_task(command: str) -> str: - result = sandbox.commands.run(command) - return result.stdout - -print(run_tool_task('python -c "print(2 + 2)"')) -``` - - -### Per-user sandboxes - -For multi-tenant apps, keep a map of sandbox IDs by user. On each request, connect to the user's existing sandbox (which auto-resumes if paused) or create a new one. 
- - -```js JavaScript & TypeScript -import { Sandbox } from 'e2b' - -const userSandboxes = new Map() // userId → Sandbox - -async function getSandbox(userId) { - let sandbox = userSandboxes.get(userId) - - if (!sandbox) { - sandbox = await Sandbox.create({ - timeoutMs: 5 * 60 * 1000, - lifecycle: { - onTimeout: 'pause', - autoResume: true, - }, - }) - userSandboxes.set(userId, sandbox) - } - - return sandbox -} - -// On each user request (auto-resumes if paused) -const sandbox = await getSandbox('user-123') -const result = await sandbox.commands.run('echo "Hello from your sandbox"') -console.log(result.stdout) -``` -```python Python -from e2b import Sandbox - -user_sandboxes: dict[str, Sandbox] = {} # user_id → Sandbox - -def get_sandbox(user_id: str) -> Sandbox: - if user_id not in user_sandboxes: - user_sandboxes[user_id] = Sandbox.create( - timeout=5 * 60, - lifecycle={ - "on_timeout": "pause", - "auto_resume": True, - }, - ) - - return user_sandboxes[user_id] - -# On each user request (auto-resumes if paused) -sandbox = get_sandbox("user-123") -result = sandbox.commands.run('echo "Hello from your sandbox"') -print(result.stdout) -``` - - -## Cleanup -Auto-resume is persistent, meaning if your sandbox resumes and later times out again, it will pause again. - -If you call `.kill()`, the sandbox is permanently deleted and cannot be resumed. diff --git a/docs/sandbox/pty.mdx b/docs/sandbox/commands.mdx similarity index 57% rename from docs/sandbox/pty.mdx rename to docs/sandbox/commands.mdx index 2c5db488..5c4984f8 100644 --- a/docs/sandbox/pty.mdx +++ b/docs/sandbox/commands.mdx @@ -1,8 +1,12 @@ --- -title: "Interactive terminal (PTY)" -sidebarTitle: Interactive terminal +title: "Commands & terminal" +sidebarTitle: Commands --- +E2B sandboxes provide multiple ways to run commands and interact with the terminal: the PTY module for interactive terminal sessions, and SSH access for remote connectivity. 
+ +## Interactive terminal (PTY) + The PTY (pseudo-terminal) module allows you to create interactive terminal sessions in the sandbox with real-time, bidirectional communication. Unlike `commands.run()` which executes a command and returns output after completion, PTY provides: @@ -11,7 +15,7 @@ Unlike `commands.run()` which executes a command and returns output after comple - **Interactive shell** - Full terminal support with ANSI colors and escape sequences - **Session persistence** - Disconnect and reconnect to running sessions -## Create a PTY session +### Create a PTY session Use `sandbox.pty.create()` to start an interactive bash shell. @@ -60,7 +64,7 @@ print('Terminal PID:', terminal.pid) The PTY runs an interactive bash shell with `TERM=xterm-256color`, which supports ANSI colors and escape sequences. -## Timeout +### Timeout PTY sessions have a configurable timeout that controls the session duration. The default is 60 seconds. For interactive or long-running sessions, set `timeoutMs: 0` (JavaScript) or `timeout=0` (Python) to keep the session open indefinitely. @@ -94,7 +98,7 @@ terminal = sandbox.pty.create( ``` -## Send input to PTY +### Send input to PTY Use `sendInput()` in JavaScript or `send_stdin()` in Python to send data to the terminal. These methods return a Promise (JavaScript) or complete synchronously (Python) - the actual output will be delivered to your `onData` callback. @@ -134,7 +138,7 @@ sandbox.pty.send_stdin(terminal.pid, b'echo "Hello from PTY"\n') ``` -## Resize the terminal +### Resize the terminal When the user's terminal window changes size, notify the PTY with `resize()`. The `cols` and `rows` parameters are measured in characters, not pixels. @@ -173,12 +177,9 @@ sandbox.pty.resize(terminal.pid, cols=120, rows=40) ``` -## Disconnect and reconnect +### Disconnect and reconnect -You can disconnect from a PTY session while keeping it running, then reconnect later with a new data handler. 
This is useful for: -- Resuming terminal sessions after network interruptions -- Sharing terminal access between multiple clients -- Implementing terminal session persistence +You can disconnect from a PTY session while keeping it running, then reconnect later with a new data handler. This is useful for resuming terminal sessions after network interruptions, sharing terminal access between multiple clients, or implementing terminal session persistence. ```js JavaScript & TypeScript @@ -249,7 +250,7 @@ reconnected.wait() ``` -## Kill the PTY +### Kill the PTY Terminate the PTY session with `kill()`. @@ -293,7 +294,7 @@ print('Killed:', killed) # True if successful ``` -## Wait for PTY to exit +### Wait for PTY to exit Use `wait()` to wait for the terminal session to end (e.g., when the user types `exit`). @@ -337,6 +338,160 @@ print('Exit code:', result.exit_code) ``` -## Interactive terminal (SSH-like) +### Interactive terminal (SSH-like) Building a fully interactive terminal (like SSH) requires handling raw mode, stdin forwarding, and terminal resize events. For a production implementation, see the [E2B CLI source code](https://github.com/e2b-dev/E2B/blob/main/packages/cli/src/terminal.ts) which uses the same `sandbox.pty` API documented above. + +## SSH access + +SSH access enables remote terminal sessions, SCP/SFTP file transfers, and integration with tools that expect SSH connectivity. 
+ +### Quickstart + + + + +Define a template with OpenSSH server and [websocat](https://github.com/vi/websocat): + + + +```typescript JavaScript & TypeScript +// template.ts +import { Template, waitForPort } from 'e2b' + +export const template = Template() + .fromUbuntuImage('25.04') + .aptInstall(['openssh-server']) + .runCmd([ + 'curl -fsSL -o /usr/local/bin/websocat https://github.com/vi/websocat/releases/latest/download/websocat.x86_64-unknown-linux-musl', + 'chmod a+x /usr/local/bin/websocat', + ], { user: 'root' }) + .setStartCmd('sudo websocat -b --exit-on-eof ws-l:0.0.0.0:8081 tcp:127.0.0.1:22', waitForPort(8081)) +``` + +```python Python +# template.py +from e2b import Template, wait_for_port + +template = ( + Template() + .from_ubuntu_image("25.04") + .apt_install(["openssh-server"]) + .run_cmd([ + "curl -fsSL -o /usr/local/bin/websocat https://github.com/vi/websocat/releases/latest/download/websocat.x86_64-unknown-linux-musl", + "chmod a+x /usr/local/bin/websocat", + ], user="root") + .set_start_cmd("sudo websocat -b --exit-on-eof ws-l:0.0.0.0:8081 tcp:127.0.0.1:22", wait_for_port(8081)) +) +``` + + + +Build the template: + + + +```typescript JavaScript & TypeScript +// build.ts +import { Template, defaultBuildLogger } from 'e2b' +import { template as sshTemplate } from './template' + +await Template.build(sshTemplate, 'ssh-ready', { + cpuCount: 2, + memoryMB: 2048, + onBuildLogs: defaultBuildLogger(), +}) +``` + +```python Python +# build.py +from e2b import Template, default_build_logger +from template import template as ssh_template + +Template.build(ssh_template, "ssh-ready", + cpu_count=2, + memory_mb=2048, + on_build_logs=default_build_logger(), +) +``` + + + + + + + + +```typescript JavaScript & TypeScript +import { Sandbox } from 'e2b' + +const sbx = await Sandbox.create('ssh-ready') +console.log(sbx.sandboxId) +``` + +```python Python +from e2b import Sandbox + +sbx = Sandbox.create("ssh-ready") +print(sbx.sandbox_id) +``` + + + + + + + + 
+```bash macOS +# Install websocat +brew install websocat + +# Connect to your sandbox +ssh -o 'ProxyCommand=websocat --binary -B 65536 - wss://8081-%h.e2b.app' user@ +``` + +```bash Linux +# Install websocat +sudo curl -fsSL -o /usr/local/bin/websocat https://github.com/vi/websocat/releases/latest/download/websocat.x86_64-unknown-linux-musl +sudo chmod a+x /usr/local/bin/websocat + +# Connect to your sandbox +ssh -o 'ProxyCommand=websocat --binary -B 65536 - wss://8081-%h.e2b.app' user@ +``` + + + + + + +--- + +### How it works + +This method uses [websocat](https://github.com/vi/websocat) to proxy SSH connections over WebSocket through the sandbox's exposed ports. + +``` +┌───────────────────────────────────────────────────────────┐ +│ Your Machine │ +│ ┌──────────┐ ProxyCommand ┌──────────────────┐ │ +│ │ SSH │ ────────────────── │ websocat │ │ +│ │ Client │ │ (WebSocket) │ │ +│ └──────────┘ └─────────┬────────┘ │ +└────────────────────────────────────────────┼──────────────┘ + │ + wss://8081-.e2b.app + │ +┌────────────────────────────────────────────┼──────────────┐ +│ E2B Sandbox ▼ │ +│ ┌──────────────────┐ │ +│ │ websocat │ │ +│ │ (WS → TCP:22) │ │ +│ └─────────┬────────┘ │ +│ │ │ +│ ┌─────────▼────────┐ │ +│ │ SSH Server │ │ +│ │ (OpenSSH) │ │ +│ └──────────────────┘ │ +└───────────────────────────────────────────────────────────┘ +``` diff --git a/docs/sandbox/connect-bucket.mdx b/docs/sandbox/configuration.mdx similarity index 60% rename from docs/sandbox/connect-bucket.mdx rename to docs/sandbox/configuration.mdx index 4b13b601..a8257241 100644 --- a/docs/sandbox/connect-bucket.mdx +++ b/docs/sandbox/configuration.mdx @@ -1,14 +1,138 @@ --- -title: "Connecting storage bucket to the sandbox" -sidebarTitle: Connecting storage bucket +title: "Configuration" +sidebarTitle: Configuration --- -To connect a bucket for storing data from the sandbox, we will use the FUSE file system to mount the bucket to the sandbox. 
+This page covers sandbox configuration options including environment variables and connecting external storage buckets. + +## Environment variables + +### Default environment variables + +Upon creating a sandbox, useful sandbox metadata is set as environment variables for commands: +- `E2B_SANDBOX` is set to `true` for processes to know if they are inside our VM. +- `E2B_SANDBOX_ID` to know the ID of the sandbox. +- `E2B_TEAM_ID` to know the team ID that created the sandbox. +- `E2B_TEMPLATE_ID` to know what template was used for the current sandbox. + +You can try it out by running the following code in the sandbox: + + +```js JavaScript & TypeScript +const sandbox = await Sandbox.create() +const result = await sandbox.commands.run('echo $E2B_SANDBOX_ID') +``` +```python Python +sandbox = Sandbox.create() +result = sandbox.commands.run("echo $E2B_SANDBOX_ID") +``` + + + +These default environment variables are only accessible via the SDK, when using the CLI you can find them in the form of dot files in the `/run/e2b/` dir: +```sh +user@e2b:~$ ls -a /run/e2b/ +.E2B_SANDBOX .E2B_SANDBOX_ID .E2B_TEAM_ID .E2B_TEMPLATE_ID +``` + + +--- + +### Setting environment variables + +There are 3 ways to set environment variables in a sandbox: +1. [Global environment variables when creating the sandbox](/docs/sandbox/configuration#1-global-environment-variables). +2. [When running code in the sandbox](/docs/sandbox/configuration#2-setting-environment-variables-when-running-code). +3. [When running commands in the sandbox](/docs/sandbox/configuration#3-setting-environment-variables-when-running-commands). + +#### 1. Global environment variables + +You can set global environment variables when creating a sandbox. + + +```js JavaScript & TypeScript highlight={2-4} +const sandbox = await Sandbox.create({ + envs: { + MY_VAR: 'my_value', + }, +}) +``` +```python Python highlight={2-4} +sandbox = Sandbox.create( + envs={ + 'MY_VAR': 'my_value', + }, +) +``` + + +#### 2. 
Setting environment variables when running code + +You can set environment variables for specific code execution call in the sandbox. + + +- These environment variables are scoped to the command but are not private in the OS. +- If you had a global environment variable with the same name, it will be overridden only for the command. + + + +```js JavaScript & TypeScript highlight={3-5} +const sandbox = await Sandbox.create() +const result = await sandbox.runCode('import os; print(os.environ.get("MY_VAR"))', { + envs: { + MY_VAR: 'my_value', + }, +}) +``` +```python Python highlight={4-6} +sandbox = Sandbox.create() +result = sandbox.run_code( + 'import os; print(os.environ.get("MY_VAR"))', + envs={ + 'MY_VAR': 'my_value' + } +) +``` + + +#### 3. Setting environment variables when running commands + +You can set environment variables for specific command execution in the sandbox. + + +- These environment variables are scoped to the command but are not private in the OS. +- If you had a global environment variable with the same name, it will be overridden only for the command. + + + +```js JavaScript & TypeScript highlight={3-5} +const sandbox = await Sandbox.create() +sandbox.commands.run('echo $MY_VAR', { + envs: { + MY_VAR: '123', + }, +}) +``` +```python Python highlight={4-6} +sandbox = Sandbox.create() +sandbox.commands.run( + 'echo $MY_VAR', + envs={ + 'MY_VAR': '123' + } +) +``` + + +## Storage buckets + +To connect a bucket for storing data from the sandbox, you can use the FUSE file system to mount the bucket inside the sandbox. You will need to create a custom sandbox template with the FUSE file system installed. The guide for building a custom sandbox template can be found [here](/docs/template/quickstart). -## Google Cloud Storage -### Prerequisites +### Google Cloud Storage + +#### Prerequisites To use Google Cloud Storage, you'll need a bucket and a service account. 
You can create a service account [here](https://console.cloud.google.com/iam-admin/serviceaccounts) and a bucket [here](https://console.cloud.google.com/storage). @@ -16,7 +140,7 @@ If you want to write to the bucket, make sure the service account has the `Stora You can find a guide on creating a service account key [here](https://cloud.google.com/iam/docs/keys-create-delete#iam-service-account-keys-create-console). -### Mounting the bucket +#### Mounting the bucket To use the Google Cloud Storage we need to install the `gcsfuse` package. There's simple [template](/docs/template/quickstart) that can be used to create a container with the `gcsfuse` installed. @@ -81,11 +205,11 @@ output = sandbox.commands.run( ``` -### Flags +#### Flags The complete list of flags is available [here](https://cloud.google.com/storage/docs/gcsfuse-cli#options). -### Allow the default user to access the files +#### Allow the default user to access the files To allow the default user to access the files, we can use the following flags: @@ -93,7 +217,7 @@ To allow the default user to access the files, we can use the following flags: -o allow_other -file-mode=777 -dir-mode=777 ``` -## Amazon S3 +### Amazon S3 To use Amazon S3, we can use the `s3fs` package. The [template](/docs/template/quickstart) setup is similar to that of Google Cloud Storage. @@ -148,11 +272,11 @@ sandbox.commands.run("sudo s3fs /home/user/bucket") ``` -### Flags +#### Flags The complete list of flags is available [here](https://manpages.ubuntu.com/manpages/xenial/man1/s3fs.1.html). -### Allow the default user to access the files +#### Allow the default user to access the files To allow the default user to access the files, add the following flag: @@ -160,7 +284,7 @@ To allow the default user to access the files, add the following flag: -o allow_other ``` -## Cloudflare R2 +### Cloudflare R2 For Cloudflare R2, we can use a setup very similar to S3. The template remains the same as for S3. 
However, the mounting differs slightly; we need to specify the endpoint for R2. @@ -196,6 +320,6 @@ sandbox.commands.run( ``` -### Flags +#### Flags It's the same as for S3. diff --git a/docs/sandbox/connect.mdx b/docs/sandbox/connect.mdx deleted file mode 100644 index e62977e4..00000000 --- a/docs/sandbox/connect.mdx +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: "Connect to running sandbox" ---- - -If you have a running sandbox, you can connect to it using the `Sandbox.connect()` method and then start controlling it with our SDK. - -This is useful if you want to, for example, reuse the same sandbox instance for the same user after a short period of inactivity. - -## 1. Get the sandbox ID -To connect to a running sandbox, you first need to retrieve its ID. You can do this by calling the `Sandbox.list()` method. - - -```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' - -const sbx = await Sandbox.create() - -// Get all running sandboxes -const paginator = await Sandbox.list({ - query: { state: ['running'] }, -}) - -const runningSandboxes = await paginator.nextItems() -if (runningSandboxes.length === 0) { - throw new Error('No running sandboxes found') -} - -// Get the ID of the sandbox you want to connect to -const sandboxId = runningSandboxes[0].sandboxId -``` - -```python Python -from e2b_code_interpreter import Sandbox - -sbx = Sandbox.create() - -# Get all running sandboxes -paginator = Sandbox.list() - -# Get the ID of the sandbox you want to connect to -running_sandboxes = paginator.next_items() -if len(running_sandboxes) == 0: - raise Exception("No running sandboxes found") - -# Get the ID of the sandbox you want to connect to -sandbox_id = running_sandboxes[0].sandbox_id -``` - - -## 2. Connect to the sandbox - -Now that you have the sandbox ID, you can connect to the sandbox using the `Sandbox.connect()` method. 
- - -```js JavaScript & TypeScript highlight={3} -import { Sandbox } from '@e2b/code-interpreter' - -const sandbox = await Sandbox.connect(sandboxId) - -// Now you can use the sandbox as usual -// ... -const result = await sandbox.commands.run("whoami") -console.log(`Running in sandbox ${sandbox.sandboxId} as "${result.stdout.trim()}"`) -``` - -```python Python highlight={3} -from e2b_code_interpreter import Sandbox - -sandbox = Sandbox.connect(sandbox_id) - -# Now you can use the sandbox as usual -# ... -r = sandbox.commands.run("whoami") -print(f"Running in sandbox {sandbox.sandbox_id} as \"{r.stdout.strip()}\"") -``` - diff --git a/docs/sandbox/environment-variables.mdx b/docs/sandbox/environment-variables.mdx deleted file mode 100644 index 0f893e30..00000000 --- a/docs/sandbox/environment-variables.mdx +++ /dev/null @@ -1,119 +0,0 @@ ---- -title: "Environment variables" ---- - -This page covers how to set and use environment variables in a sandbox, and default environment variables inside the sandbox. - -## Default environment variables -### Knowing if you are inside a sandbox - -Sometimes it's useful to know if the code is running inside a sandbox. Upon creating a sandbox, useful sandbox metadata is set as environment variables for commands: -- `E2B_SANDBOX` is set to `true` for processes to know if they are inside our VM. -- `E2B_SANDBOX_ID` to know the ID of the sandbox. -- `E2B_TEAM_ID` to know the team ID that created the sandbox. -- `E2B_TEMPLATE_ID` to know what template was used for the current sandbox. 
- -You can try it out by running the following code in the sandbox: - - -```js JavaScript & TypeScript -const sandbox = await Sandbox.create() -const result = await sandbox.commands.run('echo $E2B_SANDBOX_ID') -``` -```python Python -sandbox = Sandbox.create() -result = sandbox.commands.run("echo $E2B_SANDBOX_ID") -``` - - - -These default environment variables are only accessible via the SDK, when using the CLI you can find them in the form of dot files in the `/run/e2b/` dir: -```sh -user@e2b:~$ ls -a /run/e2b/ -.E2B_SANDBOX .E2B_SANDBOX_ID .E2B_TEAM_ID .E2B_TEMPLATE_ID -``` - - ---- - -## Setting environment variables -There are 3 ways to set environment variables in a sandbox: -1. [Global environment variables when creating the sandbox](/docs/sandbox/environment-variables#1-global-environment-variables). -2. [When running code in the sandbox](/docs/sandbox/environment-variables#2-setting-environment-variables-when-running-code). -3. [When running commands in the sandbox](/docs/sandbox/environment-variables#3-setting-environment-variables-when-running-commands). - -### 1. Global environment variables -You can set global environment variables when creating a sandbox. - - -```js JavaScript & TypeScript highlight={2-4} -const sandbox = await Sandbox.create({ - envs: { - MY_VAR: 'my_value', - }, -}) -``` -```python Python highlight={2-4} -sandbox = Sandbox.create( - envs={ - 'MY_VAR': 'my_value', - }, -) -``` - - -### 2. Setting environment variables when running code -You can set environment variables for specific code execution call in the sandbox. - - -- These environment variables are scoped to the command but are not private in the OS. -- If you had a global environment variable with the same name, it will be overridden only for the command. 
- - - -```js JavaScript & TypeScript highlight={3-5} -const sandbox = await Sandbox.create() -const result = await sandbox.runCode('import os; print(os.environ.get("MY_VAR"))', { - envs: { - MY_VAR: 'my_value', - }, -}) -``` -```python Python highlight={4-6} -sandbox = Sandbox.create() -result = sandbox.run_code( - 'import os; print(os.environ.get("MY_VAR"))', - envs={ - 'MY_VAR': 'my_value' - } -) -``` - - -### 3. Setting environment variables when running commands -You can set environment variables for specific command execution in the sandbox. - - -- These environment variables are scoped to the command but are not private in the OS. -- If you had a global environment variable with the same name, it will be overridden only for the command. - - - -```js JavaScript & TypeScript highlight={3-5} -const sandbox = await Sandbox.create() -sandbox.commands.run('echo $MY_VAR', { - envs: { - MY_VAR: '123', - }, -}) -``` -```python Python highlight={4-6} -sandbox = Sandbox.create() -sandbox.commands.run( - 'echo $MY_VAR', - envs={ - 'MY_VAR': '123' - } -) -``` - diff --git a/docs/sandbox/lifecycle-events-api.mdx b/docs/sandbox/lifecycle-events-api.mdx deleted file mode 100644 index 219bbc18..00000000 --- a/docs/sandbox/lifecycle-events-api.mdx +++ /dev/null @@ -1,156 +0,0 @@ ---- -title: "Monitor sandbox lifecycle events" -sidebarTitle: Lifecycle events API ---- - -The lifecycle API provides RESTful endpoints to request the latest sandbox lifecycle events. This allows you to track when sandboxes are created, paused, resumed, updated, snapshotted, or killed, along with metadata. -All requests require authentication using your team [API key](/docs/api-key#where-to-find-api-key). 
- -Query Parameters: -- `offset` (optional): Number of events to skip (default: 0, min: 0) -- `limit` (optional): Number of events to return (default: 10, min: 1, max: 100) -- `orderAsc` (optional): Sort order - true for ascending, false for descending (default: false) - - -```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' - -const sbx = await Sandbox.create() - -// Get the latest events for a specific sandbox -const resp1 = await fetch( - `https://api.e2b.app/events/sandboxes/${sbx.id}`, - { - method: 'GET', - headers: { - 'X-API-Key': E2B_API_KEY, - }, - } -) -const sandboxEvents = await resp1.json() - -// Get the latest 10 events for all sandboxes associated with the team -const resp2 = await fetch( - 'https://api.e2b.app/events/sandboxes?limit=10', - { - method: 'GET', - headers: { - 'X-API-Key': E2B_API_KEY, - }, - } -) -const teamSandboxEvents = await resp2.json() - -console.log(teamSandboxEvents) - -// [ -// { -// "version": "v1", -// "id": "f5911677-cb60-498f-afed-f68143b3cc59", -// "type": "sandbox.lifecycle.killed", -// "eventData": null, -// "sandboxBuildId": "a979a14b-bdcc-49e6-bc04-1189fc9fe7c2", -// "sandboxExecutionId": "1dae9e1c-9957-4ce7-a236-a99d5779aadf", -// "sandboxId": "${SANDBOX_ID}", -// "sandboxTeamId": "460355b3-4f64-48f9-9a16-4442817f79f5", -// "sandboxTemplateId": "rki5dems9wqfm4r03t7g", -// "timestamp": "2025-08-06T20:59:36Z" -// }, -// { -// "version": "v1", -// "id": "30b09e11-9ba2-42db-9cf6-d21f0f43a234", -// "type": "sandbox.lifecycle.updated", -// "eventData": { -// "set_timeout": "2025-08-06T20:59:59Z" -// }, -// "sandboxBuildId": "a979a14b-bdcc-49e6-bc04-1189fc9fe7c2", -// "sandboxExecutionId": "1dae9e1c-9957-4ce7-a236-a99d5779aadf", -// "sandboxId": "${SANDBOX_ID}", -// "sandboxTeamId": "460355b3-4f64-48f9-9a16-4442817f79f5", -// "sandboxTemplateId": "rki5dems9wqfm4r03t7g", -// "timestamp": "2025-08-06T20:59:29Z" -// }, -// [...] 
-// { -// "version": "v1", -// "id": "0568572b-a2ac-4e5f-85fa-fae90905f556", -// "type": "sandbox.lifecycle.created", -// "eventData": null, -// "sandboxBuildId": "a979a14b-bdcc-49e6-bc04-1189fc9fe7c2", -// "sandboxExecutionId": "1dae9e1c-9957-4ce7-a236-a99d5779aadf", -// "sandboxId": "${SANDBOX_ID}", -// "sandboxTeamId": "460355b3-4f64-48f9-9a16-4442817f79f5", -// "sandboxTemplateId": "rki5dems9wqfm4r03t7g", -// "timestamp": "2025-08-06T20:59:24Z" -// } -// ] -``` -```python Python -import requests -from e2b_code_interpreter import Sandbox - -sbx = Sandbox.create() - -# Get the latest events for a specific sandbox -resp1 = requests.get( - f"https://api.e2b.app/events/sandboxes/{sbx.sandbox_id}", - headers={ - "X-API-Key": E2B_API_KEY, - } -) -sandbox_events = resp1.json() - -# Get the latest 10 events for all sandboxes associated with the team -resp2 = requests.get( - "https://api.e2b.app/events/sandboxes?limit=10", - headers={ - "X-API-Key": E2B_API_KEY, - } -) -team_sandbox_events = resp2.json() - -print(team_sandbox_events) - -# [ -# { -# "version": "v1", -# "id": "0568572b-a2ac-4e5f-85fa-fae90905f556", -# "type": "sandbox.lifecycle.killed", -# "eventData": null, -# "sandboxBuildId": "a979a14b-bdcc-49e6-bc04-1189fc9fe7c2", -# "sandboxExecutionId": "1dae9e1c-9957-4ce7-a236-a99d5779aadf", -# "sandboxId": "${SANDBOX_ID}", -# "sandboxTeamId": "460355b3-4f64-48f9-9a16-4442817f79f5", -# "sandboxTemplateId": "rki5dems9wqfm4r03t7g", -# "timestamp": "2025-08-06T20:59:36Z" -# }, -# { -# "version": "v1", -# "id": "e7013704-2c51-4dd2-9f6c-388c91460149", -# "type": "sandbox.lifecycle.updated", -# "eventData": { -# "set_timeout": "2025-08-06T20:59:59Z" -# }, -# "sandboxBuildId": "a979a14b-bdcc-49e6-bc04-1189fc9fe7c2", -# "sandboxExecutionId": "1dae9e1c-9957-4ce7-a236-a99d5779aadf", -# "sandboxId": "${SANDBOX_ID}", -# "sandboxTeamId": "460355b3-4f64-48f9-9a16-4442817f79f5", -# "sandboxTemplateId": "rki5dems9wqfm4r03t7g", -# "timestamp": "2025-08-06T20:59:29Z" -# }, -# [...] 
-# { -# "version": "v1", -# "id": "f29ef778-2743-4c97-a802-7ba67f84ce24", -# "type": "sandbox.lifecycle.created", -# "eventData": null, -# "sandboxBuildId": "a979a14b-bdcc-49e6-bc04-1189fc9fe7c2", -# "sandboxExecutionId": "1dae9e1c-9957-4ce7-a236-a99d5779aadf", -# "sandboxId": "${SANDBOX_ID}", -# "sandboxTeamId": "460355b3-4f64-48f9-9a16-4442817f79f5", -# "sandboxTemplateId": "rki5dems9wqfm4r03t7g", -# "timestamp": "2025-08-06T20:59:24Z" -# } -# ] -``` - diff --git a/docs/sandbox/lifecycle-events-webhooks.mdx b/docs/sandbox/lifecycle-events-webhooks.mdx deleted file mode 100644 index a3ff0e58..00000000 --- a/docs/sandbox/lifecycle-events-webhooks.mdx +++ /dev/null @@ -1,363 +0,0 @@ ---- -title: "Sandbox lifecycle webhooks" -sidebarTitle: Lifecycle events webhooks ---- - -Webhooks provide a way for notifications to be delivered to an external web server whenever certain sandbox lifecycle events occur. -This allows you to receive real-time updates about sandbox creation, updates, and termination without having to poll the API. -All webhook requests require authentication using your team [API key](/docs/api-key#where-to-find-api-key). - -## Webhook management - -### Register webhook - -Register a new webhook to receive sandbox lifecycle events. -The webhook will be registered for the team ID associated with your API key. -All events specified during webhook creation will be sent to URL provided during registration with [following payload](#webhook-payload). 
- - -```js JavaScript & TypeScript -// Register a new webhook -const resp = await fetch( - 'https://api.e2b.app/events/webhooks', - { - method: 'POST', - headers: { - 'X-API-Key': E2B_API_KEY, - 'Content-Type': 'application/json', - }, - body: JSON.stringify({ - name: 'My Sandbox Webhook', - url: 'https://your-webhook-endpoint.com/webhook', - enabled: true, - events: ['sandbox.lifecycle.created', 'sandbox.lifecycle.updated', 'sandbox.lifecycle.killed'], - signatureSecret: 'secret-for-event-signature-verification' - }), - } -) - -if (resp.status === 201) { - console.log('Webhook registered successfully') -} -``` -```python Python -import requests - -# Register a new webhook -resp = requests.post( - "https://api.e2b.app/events/webhooks", - headers={ - "X-API-Key": E2B_API_KEY, - "Content-Type": "application/json", - }, - json={ - "name": "My Sandbox Webhook", - "url": "https://your-webhook-endpoint.com/webhook", - "enabled": true, - "events": ["sandbox.lifecycle.created", "sandbox.lifecycle.updated", "sandbox.lifecycle.killed"], - "signatureSecret": "secret-for-event-signature-verification" - } -) - -if resp.status_code == 201: - print("Webhook registered successfully") -``` - - -### List webhooks - -List all registered webhooks for your team. - - -```js JavaScript & TypeScript -// List webhooks -const resp = await fetch( - 'https://api.e2b.app/events/webhooks', - { - method: 'GET', - headers: { - 'X-API-Key': E2B_API_KEY - }, - }, -) - -if (resp.status === 200) { - console.log('Webhooks listed successfully') - console.log(await resp.json()) -} -``` -```python Python -import requests - -# List webhooks -resp = requests.get( - "https://api.e2b.app/events/webhooks", - headers={ - "X-API-Key": E2B_API_KEY - }, -) - -if resp.status_code == 200: - print("Webhooks listed successfully") - print(response.json()) -``` - - - -### Get webhook configuration - -Retrieve the current webhook configuration for your team. 
- - -```js JavaScript & TypeScript -// Get webhook configuration -const resp = await fetch( - `https://api.e2b.app/events/webhooks/${webhookID}`, - { - method: 'GET', - headers: { - 'X-API-Key': E2B_API_KEY, - }, - } -) -const webhookConfig = await resp.json() -console.log(webhookConfig) -// { -// "id": "", -// "teamID": "", -// "name": "My Sandbox Webhook", -// "createdAt": "2025-08-06T21:00:00Z", -// "enabled": true, -// "url": "https://your-webhook-endpoint.com/webhook", -// "events": ["sandbox.lifecycle.created", "sandbox.lifecycle.killed"] -// } - -``` -```python Python -import requests - -# Get webhook configuration -resp = requests.get( - "https://api.e2b.app/events/webhooks/{webhookID}", - headers={ - "X-API-Key": E2B_API_KEY, - } -) - -webhook_config = resp.json() -print(webhook_config) -# { -# "id": "", -# "teamID": "", -# "name": "My Sandbox Webhook", -# "createdAt": "2025-08-06T21:00:00Z", -# "enabled": true, -# "url": "https://your-webhook-endpoint.com/webhook", -# "events": ["sandbox.lifecycle.created", "sandbox.lifecycle.killed"] -# } -``` - - -### Update webhook configuration - -Update an existing webhook configuration. The update will replace the previous configuration fields with provided fields. 
- - -```js JavaScript & TypeScript -// Update webhook configuration -const resp = await fetch( - `https://api.e2b.app/events/webhooks/${webhookID}`, - { - method: 'PATCH', - headers: { - 'X-API-Key': E2B_API_KEY, - 'Content-Type': 'application/json', - }, - body: JSON.stringify({ - url: 'https://your-updated-webhook-endpoint.com/webhook', - enabled: false, - events: ['sandbox.lifecycle.created'] - }), - } -) - -if (resp.status === 200) { - console.log('Webhook updated successfully') -} -``` -```python Python -import requests - -# Update webhook configuration -resp = requests.patch( - "https://api.e2b.app/events/webhooks/{webhookID}", - headers={ - "X-API-Key": E2B_API_KEY, - "Content-Type": "application/json", - }, - json={ - "url": "https://your-updated-webhook-endpoint.com/webhook", - "enabled": False, - "events": ["sandbox.lifecycle.created"] - } -) - -if resp.status_code == 200: - print("Webhook updated successfully") -``` - - -### Delete webhook - -Unregister the webhook. - - -```js JavaScript & TypeScript -// Delete webhook configuration -const resp = await fetch( - `https://api.e2b.app/events/webhooks/${webhookID}`, - { - method: 'DELETE', - headers: { - 'X-API-Key': E2B_API_KEY, - }, - } -) - -if (resp.status === 200) { - console.log('Webhook deleted successfully') -} -``` -```python Python -import requests - -# Delete webhook configuration -resp = requests.delete( - "https://api.e2b.app/events/webhooks/{webhookID}", - headers={ - "X-API-Key": E2B_API_KEY, - } -) - -if resp.status_code == 200: - print("Webhook deleted successfully") -``` - - -## Webhook payload - -When a webhook is triggered, your endpoint will receive a POST request with a JSON payload containing the sandbox event data. 
-The payload structure matches the event format from the API: - -```json -{ - "version": "v2", - "id": "", - "type": "", - "eventData": { - "sandbox_metadata": { - "": "" - }, - "execution": { - "started_at": "2025-08-06T20:58:24Z", - "vcpu_count": 2, - "memory_mb": 512, - "execution_time": 1000, - } - }, - "sandboxBuildId": "", - "sandboxExecutionId": "", - "sandboxId": "", - "sandboxTeamId": "", - "sandboxTemplateId": "", - "timestamp": "2025-08-06T20:59:24Z" -} -``` - -`eventData.execution` contains sandbox execution details and is included on `sandbox.lifecycle.killed` and `sandbox.lifecycle.paused` events: - -- `started_at` - UTC RFC3339 timestamp when the current sandbox execution started -- `vcpu_count` - Number of vCPUs assigned to the sandbox -- `memory_mb` - Memory assigned to the sandbox in MB -- `execution_time` - Sandbox runtime in milliseconds - -# Webhook verification -To ensure the authenticity of webhook requests, each request includes a signature in the `e2b-signature` header. -You can verify the signature using the signature secret you provided when registering the webhook. -This confirms that the request originated from E2B and has not been tampered with. 
- - -```js JavaScript & TypeScript -function verifyWebhookSignature(secret : string, payload : string, payloadSignature : string) { - const expectedSignatureRaw = crypto.createHash('sha256').update(secret + payload).digest('base64'); - const expectedSignature = expectedSignatureRaw.replace(/=+$/, ''); - return expectedSignature == payloadSignature -} - -const payloadValid = verifyWebhookSignature(secret, webhookBodyRaw, webhookSignatureHeader) -if (payloadValid) { - console.log("Payload signature is valid") -} else { - console.log("Payload signature is INVALID") -} -``` -```python Python -import hashlib -import base64 - -def verify_webhook_signature(secret: str, payload: str, payload_signature: str) -> bool: - hash_bytes = hashlib.sha256((secret + payload).encode('utf-8')).digest() - expected_signature = base64.b64encode(hash_bytes).decode('utf-8') - expected_signature = expected_signature.rstrip('=') - - return expected_signature == payload_signature - -if verify_webhook_signature(secret, webhook_body_raw, webhook_signature_header): - print("Payload signature is valid") -else: - print("Payload signature is INVALID") -``` -```go Golang -import ( - "crypto/sha256" - "encoding/base64" - "fmt" - "strings" -) - -func verifyWebhookSignature(secret, payload, payloadSignature string) bool { - hash := sha256.Sum256([]byte(secret + payload)) - expectedSignature := base64.StdEncoding.EncodeToString(hash[:]) - - expectedSignature = strings.TrimRight(expectedSignature, "=") - - return expectedSignature == payloadSignature -} - -if verifyWebhookSignature(secret, webhookBodyString, webhookSignatureHeaderString) { - fmt.Println("Payload signature is valid") -} else { - fmt.Println("Payload signature is INVALID") -} -``` - - -## Webhook request headers -When webhooks is send, we are adding headers to help you verify the authenticity of the request and make debugging easier. 
- -- `e2b-webhook-id` - Webhook ID that triggered the event -- `e2b-delivery-id` - Unique ID for the delivery attempt -- `e2b-signature-version` - Currently always `v1`, reserved for future use -- `e2b-signature` - Signature for verifying the request authenticity` - -## Available event types - -The following event types can be subscribed to via webhooks, they are used as the `type` field in the [payload](#webhook-payload). - -- `sandbox.lifecycle.created` - Sandbox creation -- `sandbox.lifecycle.killed` - Sandbox termination -- `sandbox.lifecycle.updated` - Sandbox configuration updates -- `sandbox.lifecycle.paused` - Sandbox pausing -- `sandbox.lifecycle.resumed` - Sandbox resuming -- `sandbox.lifecycle.checkpointed` - Sandbox [snapshot](/docs/sandbox/snapshots) created - diff --git a/docs/sandbox/lifecycle.mdx b/docs/sandbox/lifecycle.mdx new file mode 100644 index 00000000..20075fd0 --- /dev/null +++ b/docs/sandbox/lifecycle.mdx @@ -0,0 +1,1253 @@ +--- +title: "Sandbox lifecycle" +sidebarTitle: Lifecycle +--- + +Sandboxes stay running as long as you need them. When their timeout expires, they can automatically pause to save resources — preserving their full state so you can resume at any time. You can also configure an explicit timeout or shut down a sandbox manually. + + +Sandboxes can run continuously for up to 24 hours (Pro) or 1 hour (Base). For longer workloads, use [pause and resume](/docs/sandbox/lifecycle#persistence) — pausing resets the runtime window, and your sandbox's full state is preserved indefinitely. + + +## Timeouts + +Every sandbox has a configurable timeout that determines how long it stays running. You set it at creation time in milliseconds (JavaScript) or seconds (Python). + + +```js JavaScript & TypeScript highlight={6} +import { Sandbox } from '@e2b/code-interpreter' + +// Create sandbox with and keep it running for 60 seconds. +// 🚨 Note: The units are milliseconds. 
+const sandbox = await Sandbox.create({
+  timeoutMs: 60_000,
+})
+```
+```python Python highlight={6}
+from e2b_code_interpreter import Sandbox
+
+# Create a sandbox and keep it running for 60 seconds.
+# 🚨 Note: The units are seconds.
+sandbox = Sandbox.create(
+  timeout=60,
+)
+```
+
+
+### Change timeout at runtime
+
+You can change the sandbox timeout when it's running by calling the `setTimeout` method in JavaScript or `set_timeout` method in Python.
+
+When you call the set timeout method, the sandbox timeout will be reset to the new value that you specified.
+
+This can be useful if you want to extend the sandbox lifetime when it's already running.
+You can, for example, start with a sandbox with a 1-minute timeout and then periodically call the set timeout method every time the user interacts with it in your app.
+
+
+```js JavaScript & TypeScript
+import { Sandbox } from '@e2b/code-interpreter'
+
+// Create a sandbox and keep it running for 60 seconds.
+const sandbox = await Sandbox.create({ timeoutMs: 60_000 })
+
+// Change the sandbox timeout to 30 seconds.
+// 🚨 The new timeout will be 30 seconds from now.
+await sandbox.setTimeout(30_000)
+```
+```python Python
+from e2b_code_interpreter import Sandbox
+
+# Create a sandbox and keep it running for 60 seconds.
+sandbox = Sandbox.create(timeout=60)
+
+# Change the sandbox timeout to 30 seconds.
+# 🚨 The new timeout will be 30 seconds from now.
+sandbox.set_timeout(30)
+```
+
+
+## Retrieve sandbox information
+
+You can retrieve sandbox information like sandbox ID, template, metadata, and started-at/end-at dates by calling the `getInfo` method in JavaScript or `get_info` method in Python.
+
+
+```js JavaScript & TypeScript
+import { Sandbox } from '@e2b/code-interpreter'
+
+// Create a sandbox and keep it running for 60 seconds.
+const sandbox = await Sandbox.create({ timeoutMs: 60_000 })
+
+// Retrieve sandbox information.
+const info = await sandbox.getInfo() + +console.log(info) + +// { +// "sandboxId": "iiny0783cype8gmoawzmx-ce30bc46", +// "templateId": "rki5dems9wqfm4r03t7g", +// "name": "base", +// "metadata": {}, +// "startedAt": "2025-03-24T15:37:58.076Z", +// "endAt": "2025-03-24T15:42:58.076Z" +// } +``` + +```python Python +from e2b_code_interpreter import Sandbox + +# Create sandbox with and keep it running for 60 seconds. +sandbox = Sandbox.create(timeout=60) + +# Retrieve sandbox information. +info = sandbox.get_info() + +print(info) + +# SandboxInfo(sandbox_id='ig6f1yt6idvxkxl562scj-419ff533', +# template_id='u7nqkmpn3jjf1tvftlsu', +# name='base', +# metadata={}, +# started_at=datetime.datetime(2025, 3, 24, 15, 42, 59, 255612, tzinfo=tzutc()), +# end_at=datetime.datetime(2025, 3, 24, 15, 47, 59, 255612, tzinfo=tzutc()) +# ) +``` + + +## Metadata + +Metadata lets you attach arbitrary key-value pairs to a sandbox. This is useful for associating sandboxes with user sessions, storing custom data, or looking up sandboxes later via `Sandbox.list()`. + +You specify metadata when creating a sandbox and can access it later through [listing sandboxes](/docs/sandbox/lifecycle#list-sandboxes). + + +```js JavaScript & TypeScript highlight={6} +import { Sandbox } from '@e2b/code-interpreter' + +// Create sandbox with metadata. +const sandbox = await Sandbox.create({ + metadata: { + userId: '123', + }, +}) + +// List running sandboxes and access metadata. +const paginator = await Sandbox.list() +const runningSandboxes = await paginator.nextItems() +// Will print: +// { +// 'userId': '123', +// } +console.log(runningSandboxes[0].metadata) +``` +```python Python highlight={6} +from e2b_code_interpreter import Sandbox + +# Create sandbox with metadata. +sandbox = Sandbox.create( + metadata={ + 'userId': '123', + }, +) + +# List running sandboxes and access metadata. 
+paginator = Sandbox.list()
+running_sandboxes = paginator.next_items()
+# Will print:
+# {
+#   'userId': '123',
+# }
+print(running_sandboxes[0].metadata)
+```
+
+
+You can also [filter sandboxes by metadata](/docs/sandbox/lifecycle#filter-sandboxes).
+
+## List sandboxes
+
+You can list sandboxes using the `Sandbox.list()` method. The method supports pagination and returns both running and paused sandboxes.
+
+
+  Once you have information about a running sandbox, you can [connect](/docs/sandbox/lifecycle#connect-to-a-sandbox) to it using the `Sandbox.connect()` method.
+
+
+
+  ```js JavaScript & TypeScript highlight={6,11,14,24}
+  import { Sandbox, SandboxInfo } from '@e2b/code-interpreter'
+
+  const sandbox = await Sandbox.create(
+    {
+      metadata: {
+        name: 'My Sandbox',
+      },
+    },
+  )
+
+  const paginator = Sandbox.list()
+
+  // Get the first page of sandboxes (running and paused)
+  const firstPage = await paginator.nextItems()
+
+  const runningSandbox = firstPage[0]
+
+  console.log('Running sandbox metadata:', runningSandbox.metadata)
+  console.log('Running sandbox id:', runningSandbox.sandboxId)
+  console.log('Running sandbox started at:', runningSandbox.startedAt)
+  console.log('Running sandbox template id:', runningSandbox.templateId)
+
+  // Get the next page of sandboxes
+  const nextPage = await paginator.nextItems()
+  ```
+  ```python Python highlight={5,9,12,22}
+  from e2b_code_interpreter import Sandbox, SandboxInfo
+
+  sandbox = Sandbox.create(
+    metadata={
+      "name": "My Sandbox",
+    },
+  )
+
+  paginator = Sandbox.list()
+
+  # Get the first page of sandboxes (running and paused)
+  first_page = paginator.next_items()
+
+  running_sandbox = first_page[0]
+
+  print('Running sandbox metadata:', running_sandbox.metadata)
+  print('Running sandbox id:', running_sandbox.sandbox_id)
+  print('Running sandbox started at:', running_sandbox.started_at)
+  print('Running sandbox template id:', running_sandbox.template_id)
+
+  # Get the next page of sandboxes
+  next_page = 
paginator.next_items()
+  ```
+
+
+### Filter sandboxes
+
+Filter sandboxes by their current state. The state parameter can contain either "**running**" for running sandboxes or "**paused**" for paused sandboxes, or both.
+
+
+  ```js JavaScript & TypeScript highlight={9,13}
+  import { Sandbox } from '@e2b/code-interpreter'
+
+  // Create a sandbox.
+  const sandbox = await Sandbox.create()
+
+  // List sandboxes that are running or paused.
+  const paginator = Sandbox.list({
+    query: {
+      state: ['running', 'paused'],
+    },
+  })
+
+  const sandboxes = await paginator.nextItems()
+  ```
+  ```python Python highlight={9,14}
+  from e2b_code_interpreter import Sandbox, SandboxQuery, SandboxState
+
+  # Create a sandbox.
+  sandbox = Sandbox.create()
+
+  # List sandboxes that are running or paused.
+  paginator = Sandbox.list(
+    query=SandboxQuery(
+      state=[SandboxState.RUNNING, SandboxState.PAUSED],
+    ),
+  )
+
+  # Get the first page of sandboxes (running and paused)
+  sandboxes = paginator.next_items()
+  ```
+
+
+You can also filter sandboxes by metadata key-value pairs specified during creation.
+
+
+  ```js JavaScript & TypeScript highlight={6-8,15,18}
+  import { Sandbox } from '@e2b/code-interpreter'
+
+  // Create sandbox with metadata.
+  const sandbox = await Sandbox.create({
+    metadata: {
+      env: 'dev',
+      app: 'my-app',
+      userId: '123',
+    },
+  })
+
+  // List all sandboxes that have a `userId` key with value `123` and an `env` key with value `dev`.
+  const paginator = Sandbox.list({
+    query: {
+      metadata: { userId: '123', env: 'dev' },
+    },
+  })
+
+  const sandboxes = await paginator.nextItems()
+  ```
+  ```python Python highlight={6-8,16-17}
+  from e2b_code_interpreter import Sandbox, SandboxQuery, SandboxState
+
+  # Create sandbox with metadata.
+  sandbox = Sandbox.create(
+    metadata={
+      "env": "dev",
+      "app": "my-app",
+      "userId": "123",
+    },
+  )
+
+  # List running sandboxes that have a `userId` key with value `123` and an `env` key with value `dev`.
+ paginator = Sandbox.list( + query=SandboxQuery( + metadata={ + "userId": "123", + "env": "dev", + } + ), + ) + + # Get the first page of sandboxes (running and paused) + sandboxes = paginator.next_items() + ``` + + +### Advanced pagination + +For more granular pagination, you can set a custom per-page item limit (default and maximum is **100**) and specify an offset parameter (`nextToken` or `next_token`) to start paginating from. + + + ```js JavaScript & TypeScript highlight={4-5,16} + import { Sandbox } from '@e2b/code-interpreter' + + const paginator = Sandbox.list({ + limit: 100, + nextToken: '', + }) + + // Additional paginator properties + // Whether there is a next page + paginator.hasNext + + // Next page token + paginator.nextToken + + // Fetch the next page + await paginator.nextItems() + ``` + ```python Python highlight={5-6,13} + from e2b_code_interpreter import Sandbox + + # List sandboxes with a custom page size and an optional offset token. + paginator = Sandbox.list( + limit=100, + next_token="", + ) + + paginator.has_next # Whether there is a next page + paginator.next_token # Next page token + + # Fetch the next page + paginator.next_items() + ``` + + +You can fetch all pages by looping through the paginator while checking if there is a next page (using `hasNext` or `has_next` property) and fetching until there are no more pages left to fetch: + + + ```js JavaScript & TypeScript highlight={7} + import { Sandbox, SandboxInfo } from '@e2b/code-interpreter' + + const paginator = Sandbox.list() + + // Loop through all pages + const sandboxes: SandboxInfo[] = [] + while (paginator.hasNext) { + const items = await paginator.nextItems() + sandboxes.push(...items) + } + ``` + ```python Python highlight={7} + from e2b_code_interpreter import Sandbox, SandboxInfo + + paginator = Sandbox.list() + + # Loop through all pages + sandboxes: list[SandboxInfo] = [] + while paginator.has_next: + items = paginator.next_items() + sandboxes.extend(items) + 
``` + + +## Connect to a sandbox + +If you have a running sandbox, you can connect to it using the `Sandbox.connect()` method and start controlling it with the SDK. This is useful for reusing the same sandbox instance after a period of inactivity, or for resuming a paused sandbox. + +### 1. Get the sandbox ID + +To connect to a running sandbox, you first need to retrieve its ID. You can do this by calling the `Sandbox.list()` method. + + +```js JavaScript & TypeScript +import { Sandbox } from '@e2b/code-interpreter' + +const sbx = await Sandbox.create() + +// Get all running sandboxes +const paginator = Sandbox.list({ + query: { state: ['running'] }, +}) + +const runningSandboxes = await paginator.nextItems() +if (runningSandboxes.length === 0) { + throw new Error('No running sandboxes found') +} + +// Get the ID of the sandbox you want to connect to +const sandboxId = runningSandboxes[0].sandboxId +``` + +```python Python +from e2b_code_interpreter import Sandbox, SandboxQuery, SandboxState + +sbx = Sandbox.create() + +# Get all running sandboxes +paginator = Sandbox.list(query=SandboxQuery(state=[SandboxState.RUNNING])) + +# Get the first page of running sandboxes +running_sandboxes = paginator.next_items() +if len(running_sandboxes) == 0: + raise Exception("No running sandboxes found") + +# Get the ID of the sandbox you want to connect to +sandbox_id = running_sandboxes[0].sandbox_id +``` + + +### 2. Connect to the sandbox + +Now that you have the sandbox ID, you can connect to the sandbox using the `Sandbox.connect()` method. + + +```js JavaScript & TypeScript highlight={3} +import { Sandbox } from '@e2b/code-interpreter' + +const sandbox = await Sandbox.connect(sandboxId) + +// Now you can use the sandbox as usual +// ...
+const result = await sandbox.commands.run("whoami") +console.log(`Running in sandbox ${sandbox.sandboxId} as "${result.stdout.trim()}"`) +``` + +```python Python highlight={3} +from e2b_code_interpreter import Sandbox + +sandbox = Sandbox.connect(sandbox_id) + +# Now you can use the sandbox as usual +# ... +r = sandbox.commands.run("whoami") +print(f"Running in sandbox {sandbox.sandbox_id} as \"{r.stdout.strip()}\"") +``` + + +## Persistence + +The sandbox persistence allows you to pause your sandbox and resume it later from the same state it was in when you paused it. This includes not only state of the sandbox's filesystem but also the sandbox's memory — all running processes, loaded variables, data, etc. + +### Sandbox state transitions + +Understanding how sandboxes transition between different states is crucial for managing their lifecycle effectively. Here's a diagram showing the possible state transitions: + +```mermaid actions={false} +flowchart TD + start(( )) -->|Sandbox.create| Running + + Running["Running
• Active execution
• Consumes resources"] + Paused["Paused
• Preserves memory and files
• Cannot execute code"] + Snapshotting["Snapshotting
• Creates persistent snapshot
• Briefly pauses execution"] + Killed["Killed
• Resources released
• Cannot be resumed"] + + Running -->|pause| Paused + Running -->|createSnapshot| Snapshotting + Paused -->|connect| Running + Snapshotting -->|snapshot complete| Running + Running -->|kill| Killed + Paused -->|kill| Killed +``` + +#### State descriptions + +- **Running**: The sandbox is actively running and can execute code. This is the initial state after creation. +- **Paused**: The sandbox execution is suspended but its state is preserved. +- **Snapshotting**: The sandbox is briefly paused while a persistent snapshot is being created. It automatically returns to Running. See [Snapshots](/docs/sandbox/lifecycle#snapshots). +- **Killed**: The sandbox is terminated and all resources are released. This is a terminal state. + +#### Changing sandbox state + + +```js JavaScript & TypeScript +import { Sandbox } from '@e2b/code-interpreter' + +const sandbox = await Sandbox.create() // Starts in Running state + +// Pause the sandbox +await sandbox.pause() // Running → Paused + +// Resume the sandbox +await sandbox.connect() // Running/Paused → Running + +// Kill the sandbox (from any state) +await sandbox.kill() // Running/Paused → Killed +``` + +```python Python +from e2b_code_interpreter import Sandbox + +sandbox = Sandbox.create() # Starts in Running state + +# Pause the sandbox +sandbox.pause() # Running → Paused + +# Resume the sandbox +sandbox.connect() # Running/Paused → Running + +# Kill the sandbox (from any state) +sandbox.kill() # Running/Paused → Killed +``` + + +### Pausing sandbox + +When you pause a sandbox, both the sandbox's filesystem and memory state will be saved. This includes all the files in the sandbox's filesystem and all the running processes, loaded variables, data, etc. 
+ + +```js JavaScript & TypeScript highlight={8-9} +import { Sandbox } from '@e2b/code-interpreter' + +const sbx = await Sandbox.create() +console.log('Sandbox created', sbx.sandboxId) + +// Pause the sandbox +// You can save the sandbox ID in your database to resume the sandbox later +await sbx.pause() +console.log('Sandbox paused', sbx.sandboxId) +``` +```python Python highlight={8-9} +from e2b_code_interpreter import Sandbox + +sbx = Sandbox.create() +print('Sandbox created', sbx.sandbox_id) + +# Pause the sandbox +# You can save the sandbox ID in your database to resume the sandbox later +sbx.pause() +print('Sandbox paused', sbx.sandbox_id) +``` + + + +### Resuming sandbox + +When you resume a sandbox, it will be in the same state it was in when you paused it. +This means that all the files in the sandbox's filesystem will be restored and all the running processes, loaded variables, data, etc. will be restored. + + +```js JavaScript & TypeScript highlight={12-13} +import { Sandbox } from '@e2b/code-interpreter' + +const sbx = await Sandbox.create() +console.log('Sandbox created', sbx.sandboxId) + +// Pause the sandbox +// You can save the sandbox ID in your database to resume the sandbox later +await sbx.pause() +console.log('Sandbox paused', sbx.sandboxId) + +// Connect to the sandbox (it will automatically resume the sandbox, if paused) +const sameSbx = await sbx.connect() +console.log('Connected to the sandbox', sameSbx.sandboxId) +``` +```python Python highlight={12-13} +from e2b_code_interpreter import Sandbox + +sbx = Sandbox.create() +print('Sandbox created', sbx.sandbox_id) + +# Pause the sandbox +# You can save the sandbox ID in your database to resume the sandbox later +sbx.pause() +print('Sandbox paused', sbx.sandbox_id) + +# Connect to the sandbox (it will automatically resume the sandbox, if paused) +same_sbx = sbx.connect() +print('Connected to the sandbox', same_sbx.sandbox_id) +``` + + +### Listing paused sandboxes + +You can list all paused 
sandboxes by calling the `Sandbox.list` method and supplying the `state` query parameter. + + +```js JavaScript & TypeScript highlight={4,7} +import { Sandbox, SandboxInfo } from '@e2b/code-interpreter' + +// List all paused sandboxes +const paginator = Sandbox.list({ query: { state: ['paused'] } }) + +// Get the first page of paused sandboxes +const sandboxes = await paginator.nextItems() + +// Get all paused sandboxes +while (paginator.hasNext) { + const items = await paginator.nextItems() + sandboxes.push(...items) +} +``` +```python Python highlight={4,7} +from e2b_code_interpreter import Sandbox, SandboxQuery, SandboxState + +# List all paused sandboxes +paginator = Sandbox.list(query=SandboxQuery(state=[SandboxState.PAUSED])) + +# Get the first page of paused sandboxes +sandboxes = paginator.next_items() + +# Get all paused sandboxes +while paginator.has_next: + items = paginator.next_items() + sandboxes.extend(items) +``` + + +### Removing paused sandboxes + +You can remove paused sandboxes by calling the `kill` method on the Sandbox instance. + + +```js JavaScript & TypeScript highlight={11,14} +import { Sandbox } from '@e2b/code-interpreter' + +const sbx = await Sandbox.create() +console.log('Sandbox created', sbx.sandboxId) + +// Pause the sandbox +// You can save the sandbox ID in your database to resume the sandbox later +await sbx.pause() + +// Remove the sandbox +await sbx.kill() + +// Alternatively, remove the sandbox by ID +await Sandbox.kill(sbx.sandboxId) +``` +```python Python highlight={9,12} +from e2b_code_interpreter import Sandbox + +sbx = Sandbox.create() + +# Pause the sandbox +sbx.pause() + +# Remove the sandbox +sbx.kill() + +# Alternatively, remove the sandbox by ID +Sandbox.kill(sbx.sandbox_id) +``` + + +### Timeout on connect + +When you connect to a sandbox, the inactivity timeout resets.
The default is 5 minutes, but you can pass a custom timeout to the `Sandbox.connect()` method: + + +```js JavaScript & TypeScript +import { Sandbox } from '@e2b/code-interpreter' + +const sbx = await Sandbox.connect(sandboxId, { timeoutMs: 60 * 1000 }) // 60 seconds +``` +```python Python +from e2b_code_interpreter import Sandbox + +sbx = Sandbox.connect(sandbox_id, timeout=60) # 60 seconds +``` + + +### Auto-pause + +Auto-pause is configured in the sandbox lifecycle on create. Set `onTimeout`/`on_timeout` to `pause`. + + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +const sandbox = await Sandbox.create({ + timeoutMs: 10 * 60 * 1000, // Optional: change default timeout (10 minutes) + lifecycle: { + onTimeout: 'pause', + autoResume: false, // Optional (default is false) + }, +}) +``` +```python Python +from e2b import Sandbox + +sandbox = Sandbox.create( + timeout=10 * 60, # Optional: change default timeout (10 minutes) + lifecycle={ + "on_timeout": "pause", # Auto-pause after the sandbox times out + "auto_resume": False, # Optional (default is False) + }, +) +``` + + +Auto-pause is persistent, meaning if your sandbox resumes and later times out again, it will pause again. + +If you call `.kill()`, the sandbox is permanently deleted and cannot be resumed. + +### Network behavior during pause + +If you have a service (for example a server) running inside your sandbox and you pause the sandbox, the service won't be accessible from the outside and all the clients will be disconnected. +If you resume the sandbox, the service will be accessible again but you need to connect clients again. 
+ +### Limitations + +#### Pause and resume performance +- Pausing a sandbox takes approximately **4 seconds per 1 GiB of RAM** +- Resuming a sandbox takes approximately **1 second** + +#### Paused sandbox retention +- Paused sandboxes are kept **indefinitely** — there is no automatic deletion or time-to-live limit +- You can resume a paused sandbox at any time + +#### Continuous runtime limits +- A sandbox can remain running (without being paused) for: + - **24 hours** on the **Pro tier** + - **1 hour** on the **Base tier** +- After a sandbox is paused and resumed, the continuous runtime limit is **reset** + +## AutoResume + +Many workloads don't need a sandbox running all the time, but when they do need it, it should just work, whether it was paused or not. + +`AutoResume` handles this automatically: a paused sandbox wakes up when activity arrives, so your code does not have to check or manage sandbox state. +Configure it through the `lifecycle` object when creating a sandbox. + +### Configure lifecycle on create + + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +const sandbox = await Sandbox.create({ + timeoutMs: 10 * 60 * 1000, + lifecycle: { + onTimeout: 'pause', + autoResume: true, // resume when activity arrives + }, +}) +``` +```python Python +from e2b import Sandbox + +sandbox = Sandbox.create( + timeout=10 * 60, + lifecycle={ + "on_timeout": "pause", + "auto_resume": True, # resume when activity arrives + }, +) +``` + + +### Lifecycle options + +- `onTimeout` / `on_timeout` + - `kill` (default): sandbox is terminated when timeout is reached + - `pause`: sandbox is paused when timeout is reached +- `autoResume` / `auto_resume` + - `false` (default): paused sandboxes do not auto-resume + - `true`: paused sandboxes auto-resume on activity + - `true` is valid only when `onTimeout`/`on_timeout` is `pause` + +### Behavior summary + +- Default behavior is equivalent to `onTimeout: "kill"` with `autoResume: false`. 
+- `onTimeout: "pause"` with `autoResume: false` gives auto-pause without auto-resume. +- `onTimeout: "pause"` with `autoResume: true` gives auto-pause with auto-resume. +- `Sandbox.connect()` can still be used to resume a paused sandbox manually. + +If you use `autoResume: false`, resume explicitly with `Sandbox.connect()`. + +### What counts as activity + +Auto-resume is triggered by the sandbox activity - that's both HTTP traffic and controlling the sandbox from the SDK. + +That includes SDK operations like: +- `sandbox.commands.run(...)` +- `sandbox.files.read(...)` +- `sandbox.files.write(...)` +- opening a tunneled app URL or sending requests to a service running inside the sandbox + +If a sandbox is paused and `autoResume` is enabled, the next supported operation resumes it automatically. You do not need to call `Sandbox.connect()` first. + +#### SDK example: pause, then read a file + + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +const sandbox = await Sandbox.create({ + timeoutMs: 10 * 60 * 1000, + lifecycle: { + onTimeout: 'pause', + autoResume: true, + }, +}) + +await sandbox.files.write('/home/user/hello.txt', 'hello from a paused sandbox') +await sandbox.pause() + +const content = await sandbox.files.read('/home/user/hello.txt') +console.log(content) +console.log(`State after read: ${(await sandbox.getInfo()).state}`) +``` +```python Python +from e2b import Sandbox + +sandbox = Sandbox.create( + timeout=10 * 60, + lifecycle={ + "on_timeout": "pause", + "auto_resume": True, + }, +) + +sandbox.files.write("/home/user/hello.txt", "hello from a paused sandbox") +sandbox.pause() + +content = sandbox.files.read("/home/user/hello.txt") +print(content) +print(f"State after read: {sandbox.get_info().state}") +``` + + +### AutoResume use cases + +#### Web and dev/preview servers + +Use `onTimeout: "pause"` + `autoResume: true` so inbound traffic can wake a paused sandbox automatically. 
+This works for both basic web/API servers and dev or preview servers you open occasionally. + + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +const sandbox = await Sandbox.create({ + timeoutMs: 10 * 60 * 1000, + lifecycle: { + onTimeout: 'pause', + autoResume: true, + }, +}) + +await sandbox.commands.run( + `python3 -m pip -q install 'flask>=2.2'` +) + +await sandbox.files.write( + '/home/user/app.py', + [ + 'from flask import Flask', + 'app = Flask(__name__)', + '@app.route("/")', + 'def hello():', + ' return "Hello, World!"', + 'app.run(host="0.0.0.0", port=3000)', + '', + ].join('\n') +) + +await sandbox.commands.run( + 'python3 -u /home/user/app.py > /home/user/flask.log 2>&1', + { background: true } +) + +await new Promise((resolve) => setTimeout(resolve, 1000)) + +const previewHost = sandbox.getHost(3000) +console.log(`Preview URL: https://${previewHost}`) + +console.log(`Status before pause: ${(await sandbox.getInfo()).state}`) +await sandbox.pause() +console.log(`Status after pause: ${(await sandbox.getInfo()).state}`) +``` +```python Python +import time + +from e2b import Sandbox + +sandbox = Sandbox.create( + timeout=10 * 60, + lifecycle={ + "on_timeout": "pause", + "auto_resume": True, + }, +) + +sandbox.commands.run("python3 -m pip -q install 'flask>=2.2'") + +sandbox.files.write( + "/home/user/app.py", + 'from flask import Flask\n' + 'app = Flask(__name__)\n' + '@app.route("/")\n' + 'def hello():\n' + ' return "Hello, World!"\n' + 'app.run(host="0.0.0.0", port=3000)\n' +) + +sandbox.commands.run( + "python3 -u /home/user/app.py > /home/user/flask.log 2>&1", + background=True, +) + +time.sleep(1) + +preview_host = sandbox.get_host(3000) +print(f"Preview URL: https://{preview_host}") + +print(f"Status before pause: {sandbox.get_info().state}") +sandbox.pause() +print(f"Status after pause: {sandbox.get_info().state}") +``` + + +#### Agent/tool execution + +For queued tasks or tool calls, create once and keep using the same sandbox 
handle. If it is paused, it will auto-resume when you run the next command. + + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +// One-time setup +const sandbox = await Sandbox.create({ + timeoutMs: 5 * 60 * 1000, + lifecycle: { + onTimeout: 'pause', + autoResume: true, + }, +}) + +// Later: called for each agent/tool task +async function runToolTask(command) { + const result = await sandbox.commands.run(command) + return result.stdout +} + +console.log(await runToolTask('python -c "print(2 + 2)"')) +``` +```python Python +from e2b import Sandbox + +# One-time setup +sandbox = Sandbox.create( + timeout=5 * 60, + lifecycle={ + "on_timeout": "pause", + "auto_resume": True, + }, +) + +# Later: called for each agent/tool task +def run_tool_task(command: str) -> str: + result = sandbox.commands.run(command) + return result.stdout + +print(run_tool_task('python -c "print(2 + 2)"')) +``` + + +#### Per-user sandboxes + +For multi-tenant apps, keep a map of sandbox IDs by user. On each request, connect to the user's existing sandbox (which auto-resumes if paused) or create a new one. 
+ + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +const userSandboxes = new Map() // userId → Sandbox + +async function getSandbox(userId) { + let sandbox = userSandboxes.get(userId) + + if (!sandbox) { + sandbox = await Sandbox.create({ + timeoutMs: 5 * 60 * 1000, + lifecycle: { + onTimeout: 'pause', + autoResume: true, + }, + }) + userSandboxes.set(userId, sandbox) + } + + return sandbox +} + +// On each user request (auto-resumes if paused) +const sandbox = await getSandbox('user-123') +const result = await sandbox.commands.run('echo "Hello from your sandbox"') +console.log(result.stdout) +``` +```python Python +from e2b import Sandbox + +user_sandboxes: dict[str, Sandbox] = {} # user_id → Sandbox + +def get_sandbox(user_id: str) -> Sandbox: + if user_id not in user_sandboxes: + user_sandboxes[user_id] = Sandbox.create( + timeout=5 * 60, + lifecycle={ + "on_timeout": "pause", + "auto_resume": True, + }, + ) + + return user_sandboxes[user_id] + +# On each user request (auto-resumes if paused) +sandbox = get_sandbox("user-123") +result = sandbox.commands.run('echo "Hello from your sandbox"') +print(result.stdout) +``` + + +### AutoResume cleanup + +Auto-resume is persistent, meaning if your sandbox resumes and later times out again, it will pause again. + +If you call `.kill()`, the sandbox is permanently deleted and cannot be resumed. + +## Snapshots + +Snapshots let you create a persistent point-in-time capture of a running sandbox, including both its filesystem and memory state. +You can then use a snapshot to spawn new sandboxes that start from the exact same state. + +The original sandbox continues running after the snapshot is created, and a single snapshot can be used to create many new sandboxes. + +### Prerequisites + +Snapshots require templates with envd version `v0.5.0` or above. If you are using a custom template created before envd `v0.5.0`, you need to rebuild it. 
+ +You can check the template envd version using the `e2b template list` command or by viewing the templates list on the dashboard. + +### Snapshots vs. Pause/Resume + +| | Pause/Resume | Snapshots | +|---|---|---| +| Effect on original sandbox | Pauses (stops) the sandbox | Sandbox briefly pauses, then continues running | +| Relationship | One-to-one — resume restores the same sandbox | One-to-many — snapshot can spawn many new sandboxes | +| Use case | Suspend and resume a single sandbox | Create a reusable checkpoint | + +### Snapshot flow + +```mermaid actions={false} +graph LR + A[Running Sandbox] -->|createSnapshot| B[Snapshotting] + B --> C[Snapshot Created] + B --> A + C -->|Sandbox.create| D[New Sandbox 1] + C -->|Sandbox.create| E[New Sandbox 2] + C -->|Sandbox.create| F[New Sandbox N] +``` + +The sandbox is briefly paused during the snapshot process but automatically returns to running state. The sandbox ID stays the same after the snapshot completes. + + +During the snapshot, the sandbox is temporarily paused and resumed. This causes all active connections (e.g. WebSocket, PTY, command streams) to be dropped. Make sure your client handles reconnection properly. + + +### Create a snapshot + +You can create a snapshot from a running sandbox instance. + + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +const sandbox = await Sandbox.create() + +// Create a snapshot from a running sandbox +const snapshot = await sandbox.createSnapshot() +console.log('Snapshot ID:', snapshot.snapshotId) +``` +```python Python +from e2b import Sandbox + +sandbox = Sandbox.create() + +# Create a snapshot from a running sandbox +snapshot = sandbox.create_snapshot() +print('Snapshot ID:', snapshot.snapshot_id) +``` + + +You can also create a snapshot by sandbox ID using the static method. 
+ + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +// Create a snapshot by sandbox ID +const snapshot = await Sandbox.createSnapshot(sandboxId) +console.log('Snapshot ID:', snapshot.snapshotId) +``` +```python Python +from e2b import Sandbox + +# Create a snapshot by sandbox ID +snapshot = Sandbox.create_snapshot(sandbox_id) +print('Snapshot ID:', snapshot.snapshot_id) +``` + + +### Create a sandbox from a snapshot + +The snapshot ID can be used directly with `Sandbox.create()` to spawn a new sandbox from the snapshot. The new sandbox starts with the exact filesystem and memory state captured in the snapshot. + + +```js JavaScript & TypeScript highlight={5} +import { Sandbox } from 'e2b' + +const snapshot = await sandbox.createSnapshot() + +// Create a new sandbox from the snapshot +const newSandbox = await Sandbox.create(snapshot.snapshotId) +``` +```python Python highlight={5} +from e2b import Sandbox + +snapshot = sandbox.create_snapshot() + +# Create a new sandbox from the snapshot +new_sandbox = Sandbox.create(snapshot.snapshot_id) +``` + + +### List snapshots + +You can list all snapshots. The method returns a paginator for iterating through results. + + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +const paginator = Sandbox.listSnapshots() + +const snapshots = [] +while (paginator.hasNext) { + const items = await paginator.nextItems() + snapshots.push(...items) +} +``` +```python Python +from e2b import Sandbox + +paginator = Sandbox.list_snapshots() + +snapshots = [] +while paginator.has_next: + items = paginator.next_items() + snapshots.extend(items) +``` + + +#### Filter by sandbox + +You can filter snapshots created from a specific sandbox. 
+ + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +const paginator = Sandbox.listSnapshots({ sandboxId: 'your-sandbox-id' }) +const snapshots = await paginator.nextItems() +``` +```python Python +from e2b import Sandbox + +paginator = Sandbox.list_snapshots(sandbox_id="your-sandbox-id") +snapshots = paginator.next_items() +``` + + +### Delete a snapshot + + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +// Returns true if deleted, false if the snapshot was not found +const deleted = await Sandbox.deleteSnapshot(snapshot.snapshotId) +``` +```python Python +from e2b import Sandbox + +Sandbox.delete_snapshot(snapshot.snapshot_id) +``` + + +### Snapshots vs. Templates + +Both snapshots and [templates](/docs/template/quickstart) create reusable starting points for sandboxes, but they solve different problems. + +| | Templates | Snapshots | +|---|---|---| +| Defined by | Declarative code (Template builder) | Capturing a running sandbox | +| Reproducibility | Same definition produces the same sandbox every time | Captures whatever state exists at that moment | +| Best for | Repeatable base environments | Checkpointing, rollback, forking runtime state | + +Use templates when every sandbox should start from an identical, known state — pre-installed tools, fixed configurations, consistent environments. +Use snapshots when you need to capture or fork live runtime state that depends on what happened during execution. + +### Snapshot use cases + +- **Checkpointing agent work** — an AI agent has loaded data and produced partial results in memory. Snapshot it so you can resume or fork from that point later. +- **Rollback points** — snapshot before a risky or expensive operation (running untrusted code, applying a migration, refactoring a web app). If it fails, rollback - spawn a fresh sandbox from the snapshot before the operation happened. 
+- **Forking workflows** — spawn multiple sandboxes from the same snapshot to explore different approaches in parallel. +- **Cached sandboxes** — avoid repeating expensive setup by snapshotting a sandbox that has already loaded a large dataset or started a long-running process. +- **Sharing state** — one user or agent configures an environment interactively, snapshots it, and others start from that exact state. + +## Shutdown sandbox + +You can shutdown the sandbox any time even before the timeout is up by calling the `kill` method. + + +```js JavaScript & TypeScript +import { Sandbox } from '@e2b/code-interpreter' + +// Create sandbox with and keep it running for 60 seconds. +const sandbox = await Sandbox.create({ timeoutMs: 60_000 }) + +// Shutdown the sandbox immediately. +await sandbox.kill() +``` +```python Python +from e2b_code_interpreter import Sandbox + +# Create sandbox with and keep it running for 60 seconds. +sandbox = Sandbox.create(timeout=60) + +# Shutdown the sandbox immediately. +sandbox.kill() +``` + diff --git a/docs/sandbox/list.mdx b/docs/sandbox/list.mdx deleted file mode 100644 index 4d836e8f..00000000 --- a/docs/sandbox/list.mdx +++ /dev/null @@ -1,339 +0,0 @@ ---- -title: "List sandboxes" ---- - -You can list sandboxes using the `Sandbox.list()` method. - - - Once you have information about running sandbox, you can [connect](/docs/sandbox/connect) to it using the `Sandbox.connect()` method. - - -### Listing sandboxes - -The `Sandbox.list()` method supports pagination. In the [advanced pagination](/docs/sandbox/list#advanced-pagination) section, you can find more information about pagination techniques using the updated method. 
- - - ```js JavaScript & TypeScript highlight={6,11,14,24} - import { Sandbox, SandboxInfo } from '@e2b/code-interpreter' - - const sandbox = await Sandbox.create( - { - metadata: { - name: 'My Sandbox', - }, - }, - ) - - const paginator = Sandbox.list() - - // Get the first page of sandboxes (running and paused) - const firstPage = await paginator.nextItems() - - const runningSandbox = firstPage[0] - - console.log('Running sandbox metadata:', runningSandbox.metadata) - console.log('Running sandbox id:', runningSandbox.sandboxId) - console.log('Running sandbox started at:', runningSandbox.startedAt) - console.log('Running sandbox template id:', runningSandbox.templateId) - - // Get the next page of sandboxes - const nextPage = await paginator.nextItems() - ``` - ```python Python highlight={5,9,12,22} - from e2b_code_interpreter import Sandbox, SandboxInfo - - sandbox = Sandbox.create( - metadata={ - "name": "My Sandbox", - }, - ) - - paginator = Sandbox.list() - - # Get the first page of sandboxes (running and paused) - firstPage = paginator.next_items() - - running_sandbox = firstPage[0] - - print('Running sandbox metadata:', running_sandbox.metadata) - print('Running sandbox id:', running_sandbox.sandbox_id) - print('Running sandbox started at:', running_sandbox.started_at) - print('Running sandbox template id:', running_sandbox.template_id) - - # Get the next page of sandboxes - nextPage = paginator.next_items() - ``` - -The code above will output something like this: - - ```bash JavaScript & TypeScript - Running sandbox metadata: {name: "My Sandbox"} - Running sandbox id: ixjj3iankaishgcge4jwn-b0b684e9 - Running sandbox started at: 2024-10-15T21:13:07.311Z - Running sandbox template id: 3e4rngfa34txe0gxc1zf - ``` - ```bash Python - Running sandbox metadata: {'name': 'My Sandbox'} - Running sandbox id: ixjj3iankaishgcge4jwn-b0b684e9 - Running sandbox started at: 2024-10-15 21:13:07.311861+00:00 - Running sandbox template id: 3e4rngfa34txe0gxc1zf - ``` - - - -### 
Filtering sandboxes - -Filter sandboxes by their current state. The state parameter can contain either "**running**" for running sandboxes or "**paused**" for paused sandboxes, or both. - - - ```js JavaScript & TypeScript highlight={9,13} - import { Sandbox } from '@e2b/code-interpreter' - - // Create a sandbox. - const sandbox = await Sandbox.create() - - // List sandboxes that are running or paused. - const paginator = Sandbox.list({ - query: { - state: ['running', 'paused'], - }, - }) - - const sandboxes = await paginator.nextItems() - ``` - ```python Python highlight={9,14} - from e2b_code_interpreter import Sandbox, SandboxQuery, SandboxState - - # Create a sandbox with metadata. - sandbox = Sandbox.create() - - # List sandboxes that are running or paused. - paginator = Sandbox.list( - query=SandboxQuery( - state=[SandboxState.RUNNING, SandboxState.PAUSED], - ), - ) - - # Get the first page of sandboxes (running and paused) - sandboxes = paginator.next_items() - ``` - - -Filter sandboxes by the metadata key value pairs specified during Sandbox creation. - - - ```js JavaScript & TypeScript highlight={6-8,15,18} - import { Sandbox } from '@e2b/code-interpreter' - - // Create sandbox with metadata. - const sandbox = await Sandbox.create({ - metadata: { - env: 'dev', - app: 'my-app', - userId: '123', - }, - }) - - // List all sandboxes that has `userId` key with value `123` and `env` key with value `dev`. - const paginator = Sandbox.list({ - query: { - metadata: { userId: '123', env: 'dev' }, - }, - }) - - const sandboxes = await paginator.nextItems() - ``` - ```python Python highlight={6-8,16-17} - from e2b_code_interpreter import Sandbox, SandboxQuery, SandboxState - - # Create sandbox with metadata. - sandbox = Sandbox.create( - metadata={ - "env": "dev", - "app": "my-app", - "user_id": "123", - }, - ) - - # List running sandboxes that has `userId` key with value `123` and `env` key with value `dev`. 
- paginator = Sandbox.list( - query=SandboxQuery( - metadata={ - "userId": "123", - "env": "dev", - } - ), - ) - - # Get the first page of sandboxes (running and paused) - sandboxes = paginator.next_items() - ``` - - -### Advanced pagination - -For more granular pagination, you can set custom per-page item limit (default and maximum is **100**) and specify an offset parameter (`nextToken` or `next_token`) to start paginating from. - - - ```js JavaScript & TypeScript highlight={4-5,16} - import { Sandbox } from '@e2b/code-interpreter' - - const paginator = Sandbox.list({ - limit: 100, - nextToken: '', - }) - - // Additional paginator properties - // Whether there is a next page - paginator.hasNext - - // Next page token - paginator.nextToken - - // Fetch the next page - await paginator.nextItems() - ``` - ```python Python highlight={5-6,13} - from e2b_code_interpreter import Sandbox - - # List running sandboxes that has `userId` key with value `123` and `env` key with value `dev`. - paginator = Sandbox.list( - limit=100, - next_token="", - ) - - paginator.has_next # Whether there is a next page - paginator.next_token # Next page token - - # Fetch the next page - paginator.next_items() - ``` - - -You can fetch all pages by looping through the paginator while checking if there is a next page (using `hasNext` or `has_next` property) and fetching until there are no more pages left to fetch: - - - ```js JavaScript & TypeScript highlight={7} - import { Sandbox } from '@e2b/code-interpreter' - - const paginator = Sandbox.list() - - // Loop through all pages - const sandboxes: SandboxInfo[] = [] - while (paginator.hasNext) { - const items = await paginator.nextItems() - sandboxes.push(...items) - } - ``` - ```python Python highlight={7} - from e2b_code_interpreter import Sandbox, SandboxQuery - - paginator = Sandbox.list() - - # Loop through all pages - sandboxes: list[SandboxInfo] = [] - while paginator.has_next: - items = paginator.next_items() - sandboxes.extend(items) - 
``` - - -## Old SDK (v1.x.y) - - - If you're using SDK with version lower than `2.0.0`, the `Sandbox.list()` method behaves differently. - - - - - ```js JavaScript & TypeScript highlight={11} - import { Sandbox } from '@e2b/code-interpreter' - - // Create a sandbox. - const sandbox = await Sandbox.create({ - metadata: { - name: 'My Sandbox', - }, - }) - - // List all running sandboxes. - const runningSandboxes = await Sandbox.list() - const runningSandbox = runningSandboxes[0] - - console.log('Running sandbox metadata:', runningSandbox.metadata) - console.log('Running sandbox id:', runningSandbox.sandboxId) - console.log('Running sandbox started at:', runningSandbox.startedAt) - console.log('Running sandbox template id:', runningSandbox.templateId) - ``` - ```python Python highlight={11} - from e2b_code_interpreter import Sandbox - - # Create a sandbox. - sandbox = Sandbox.create( - metadata: { - name: 'My Sandbox', - }, - ) - - # List all running sandboxes. - running_sandboxes = Sandbox.list() - running_sandbox = running_sandboxes[0] - - print('Running sandbox metadata:', running_sandbox.metadata) - print('Running sandbox id:', running_sandbox.sandbox_id) - print('Running sandbox started at:', running_sandbox.started_at) - print('Running sandbox template id:', running_sandbox.template_id) - ``` - - -## Filtering sandboxes - -You can filter sandboxes by specifying [Metadata](/docs/sandbox/metadata) key value pairs. -Specifying multiple key value pairs will return sandboxes that match all of them. - -This can be useful when you have a large number of sandboxes and want to find only specific ones. The filtering is performed on the server. - - - ```js JavaScript & TypeScript highlight={6-8,15} - import { Sandbox } from '@e2b/code-interpreter' - - // Create sandbox with metadata. 
- const sandbox = await Sandbox.create({ - metadata: { - env: 'dev', - app: 'my-app', - userId: '123', - }, - }) - - // List running sandboxes that has `userId` key with value `123` and `env` key with value `dev`. - const runningSandboxes = await Sandbox.list({ - query: { - metadata: { userId: '123', env: 'dev' }, - }, - }) - ``` - ```python Python highlight={7-9,17-18} - from e2b_code_interpreter import Sandbox - from e2b.sandbox.sandbox_api import SandboxQuery - - # Create sandbox with metadata. - sandbox = Sandbox.create( - metadata={ - "env": "dev", - "app": "my-app", - "user_id": "123", - }, - ) - - # List running sandboxes that has `userId` key with value `123` and `env` key with value `dev`. - running_sandboxes = Sandbox.list( - query=SandboxQuery( - metadata={ - "userId": "123", - "env": "dev", - } - ), - ) - ``` - diff --git a/docs/sandbox/metadata.mdx b/docs/sandbox/metadata.mdx deleted file mode 100644 index 026c4362..00000000 --- a/docs/sandbox/metadata.mdx +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: "Sandbox metadata" -sidebarTitle: Metadata ---- - -Metadata is a way to attach arbitrary key-value pairs for a sandbox. - -This is useful in various scenarios, for example: -- Associate a sandbox with a user session. -- Store custom user data for a sandbox like API keys. -- Associate a sandbox with a user ID and [connect to it later](/docs/sandbox/connect). - -You specify metadata when creating a sandbox and can access it later through listing running sandboxes with `Sandbox.list()` method. - - -```js JavaScript & TypeScript highlight={6} -import { Sandbox } from '@e2b/code-interpreter' - -// Create sandbox with metadata. -const sandbox = await Sandbox.create({ - metadata: { - userId: '123', - }, -}) - -// List running sandboxes and access metadata. 
-const paginator = await Sandbox.list() -const runningSandboxes = await paginator.nextItems() -// Will print: -// { -// 'userId': '123', -// } -console.log(runningSandboxes[0].metadata) -``` -```python Python highlight={6} -from e2b_code_interpreter import Sandbox - -# Create sandbox with metadata. -sandbox = Sandbox.create( - metadata={ - 'userId': '123', - }, -) - -# List running sandboxes and access metadata. -paginator = Sandbox.list() -running_sandboxes = paginator.next_items() -# Will print: -# { -# 'userId': '123', -# } -print(running_sandboxes[0].metadata) -``` - - -## Filtering sandboxes by metadata -You can also filter sandboxes by metadata, you can find more about it [here](/docs/sandbox/list#filtering-sandboxes). \ No newline at end of file diff --git a/docs/sandbox/metrics.mdx b/docs/sandbox/metrics.mdx deleted file mode 100644 index 99769f47..00000000 --- a/docs/sandbox/metrics.mdx +++ /dev/null @@ -1,110 +0,0 @@ ---- -title: "Sandbox metrics" -sidebarTitle: Metrics ---- - -The sandbox metrics allows you to get information about the sandbox's CPU, memory and disk usage. - -## Getting sandbox metrics -Getting the metrics of a sandbox returns an array of timestamped metrics containing CPU, memory and disk usage information. -The metrics are collected every 5 seconds. 
- -### Getting sandbox metrics using the SDKs - - -```js JavaScript & TypeScript highlight={9} -import { Sandbox } from '@e2b/code-interpreter' - -const sbx = await Sandbox.create() -console.log('Sandbox created', sbx.sandboxId) - -// Wait for a few seconds to collect some metrics -await new Promise((resolve) => setTimeout(resolve, 10_000)) - -const metrics = await sbx.getMetrics() - -// You can also get the metrics by sandbox ID: -// const metrics = await Sandbox.getMetrics(sbx.sandboxId) - -console.log('Sandbox metrics:', metrics) - -// Sandbox metrics: -// [ -// { -// timestamp: 2025-07-28T08:04:05.000Z, -// cpuUsedPct: 20.33, -// cpuCount: 2, -// memUsed: 32681984, // in bytes -// memTotal: 507592704, // in bytes -// diskUsed: 1514856448, // in bytes -// diskTotal: 2573185024 // in bytes -// }, -// { -// timestamp: 2025-07-28T08:04:10.000Z, -// cpuUsedPct: 0.2, -// cpuCount: 2, -// memUsed: 33316864, // in bytes -// memTotal: 507592704, // in bytes -// diskUsed: 1514856448, // in bytes -// diskTotal: 2573185024 // in bytes -// } -// ] -``` -```python Python highlight={10} -from time import sleep -from e2b_code_interpreter import Sandbox - -sbx = Sandbox.create() -print('Sandbox created', sbx.sandbox_id) - -# Wait for a few seconds to collect some metrics -sleep(10) - -metrics = sbx.get_metrics() - -# You can also get the metrics by sandbox ID: -# metrics = Sandbox.get_metrics(sbx.sandbox_id) - -print('Sandbox metrics', metrics) - -# Sandbox metrics -# [ -# SandboxMetric( -# cpu_count=2, -# cpu_used_pct=13.97, -# disk_total=2573185024, # in bytes -# disk_used=1514856448, # in bytes -# mem_total=507592704, # in bytes -# mem_used=30588928, # in bytes -# timestamp=datetime.datetime(2025, 7, 28, 8, 8, 15, tzinfo=tzutc()), -# ), -# SandboxMetric( -# cpu_count=2, -# cpu_used_pct=0.1, -# disk_total=2573185024, # in bytes -# disk_used=1514856448, # in bytes -# mem_total=507592704, # in bytes -# mem_used=31084544, # in bytes -# timestamp=datetime.datetime(2025, 7, 28, 8, 
8, 20, tzinfo=tzutc()), -# ), -# ] -``` - - -### Getting sandbox metrics using the CLI - -```bash Terminal highlight={1} -e2b sandbox metrics - -# Metrics for sandbox -# -# [2025-07-25 14:05:55Z] CPU: 8.27% / 2 Cores | Memory: 31 / 484 MiB | Disk: 1445 / 2453 MiB -# [2025-07-25 14:06:00Z] CPU: 0.5% / 2 Cores | Memory: 32 / 484 MiB | Disk: 1445 / 2453 MiB -# [2025-07-25 14:06:05Z] CPU: 0.1% / 2 Cores | Memory: 32 / 484 MiB | Disk: 1445 / 2453 MiB -# [2025-07-25 14:06:10Z] CPU: 0.3% / 2 Cores | Memory: 32 / 484 MiB | Disk: 1445 / 2453 MiB -``` - - - - It may take a second or more to get the first metrics after the sandbox is created. Until the first metrics are collected from the sandbox, you will get an empty array. - diff --git a/docs/sandbox/observability.mdx b/docs/sandbox/observability.mdx new file mode 100644 index 00000000..57a65416 --- /dev/null +++ b/docs/sandbox/observability.mdx @@ -0,0 +1,623 @@ +--- +title: "Observability" +sidebarTitle: Observability +--- + +E2B provides several ways to monitor and track your sandboxes: resource metrics via the SDK and CLI, a REST API for lifecycle event history, and webhooks for real-time notifications. + +## Metrics + +Sandbox metrics let you monitor CPU, memory, and disk usage of running sandboxes. Metrics are collected every 5 seconds. 
+ +### Getting metrics using the SDKs + + +```js JavaScript & TypeScript highlight={9} +import { Sandbox } from '@e2b/code-interpreter' + +const sbx = await Sandbox.create() +console.log('Sandbox created', sbx.sandboxId) + +// Wait for a few seconds to collect some metrics +await new Promise((resolve) => setTimeout(resolve, 10_000)) + +const metrics = await sbx.getMetrics() + +// You can also get the metrics by sandbox ID: +// const metrics = await Sandbox.getMetrics(sbx.sandboxId) + +console.log('Sandbox metrics:', metrics) + +// Sandbox metrics: +// [ +// { +// timestamp: 2025-07-28T08:04:05.000Z, +// cpuUsedPct: 20.33, +// cpuCount: 2, +// memUsed: 32681984, // in bytes +// memTotal: 507592704, // in bytes +// diskUsed: 1514856448, // in bytes +// diskTotal: 2573185024 // in bytes +// }, +// { +// timestamp: 2025-07-28T08:04:10.000Z, +// cpuUsedPct: 0.2, +// cpuCount: 2, +// memUsed: 33316864, // in bytes +// memTotal: 507592704, // in bytes +// diskUsed: 1514856448, // in bytes +// diskTotal: 2573185024 // in bytes +// } +// ] +``` +```python Python highlight={10} +from time import sleep +from e2b_code_interpreter import Sandbox + +sbx = Sandbox.create() +print('Sandbox created', sbx.sandbox_id) + +# Wait for a few seconds to collect some metrics +sleep(10) + +metrics = sbx.get_metrics() + +# You can also get the metrics by sandbox ID: +# metrics = Sandbox.get_metrics(sbx.sandbox_id) + +print('Sandbox metrics', metrics) + +# Sandbox metrics +# [ +# SandboxMetric( +# cpu_count=2, +# cpu_used_pct=13.97, +# disk_total=2573185024, # in bytes +# disk_used=1514856448, # in bytes +# mem_total=507592704, # in bytes +# mem_used=30588928, # in bytes +# timestamp=datetime.datetime(2025, 7, 28, 8, 8, 15, tzinfo=tzutc()), +# ), +# SandboxMetric( +# cpu_count=2, +# cpu_used_pct=0.1, +# disk_total=2573185024, # in bytes +# disk_used=1514856448, # in bytes +# mem_total=507592704, # in bytes +# mem_used=31084544, # in bytes +# timestamp=datetime.datetime(2025, 7, 28, 8, 8, 20, 
tzinfo=tzutc()), +# ), +# ] +``` + + +### Getting metrics using the CLI + + +```bash Terminal highlight={1} +e2b sandbox metrics + +# Metrics for sandbox +# +# [2025-07-25 14:05:55Z] CPU: 8.27% / 2 Cores | Memory: 31 / 484 MiB | Disk: 1445 / 2453 MiB +# [2025-07-25 14:06:00Z] CPU: 0.5% / 2 Cores | Memory: 32 / 484 MiB | Disk: 1445 / 2453 MiB +# [2025-07-25 14:06:05Z] CPU: 0.1% / 2 Cores | Memory: 32 / 484 MiB | Disk: 1445 / 2453 MiB +# [2025-07-25 14:06:10Z] CPU: 0.3% / 2 Cores | Memory: 32 / 484 MiB | Disk: 1445 / 2453 MiB +``` + + + + It may take a second or more to get the first metrics after the sandbox is created. Until the first metrics are collected from the sandbox, you will get an empty array. + + +## Lifecycle events API + +The lifecycle API provides RESTful endpoints to request the latest sandbox lifecycle events. This allows you to track when sandboxes are created, paused, resumed, updated, snapshotted, or killed, along with metadata. +All requests require authentication using your team [API key](/docs/api-key#where-to-find-api-key). 
+
+Query Parameters:
+- `offset` (optional): Number of events to skip (default: 0, min: 0)
+- `limit` (optional): Number of events to return (default: 10, min: 1, max: 100)
+- `orderAsc` (optional): Sort order - true for ascending, false for descending (default: false)
+
+<CodeGroup>
+```js JavaScript & TypeScript
+import { Sandbox } from '@e2b/code-interpreter'
+
+const sbx = await Sandbox.create()
+
+// Get the latest events for a specific sandbox
+const resp1 = await fetch(
+  `https://api.e2b.app/events/sandboxes/${sbx.sandboxId}`,
+  {
+    method: 'GET',
+    headers: {
+      'X-API-Key': E2B_API_KEY,
+    },
+  }
+)
+const sandboxEvents = await resp1.json()
+
+// Get the latest 10 events for all sandboxes associated with the team
+const resp2 = await fetch(
+  'https://api.e2b.app/events/sandboxes?limit=10',
+  {
+    method: 'GET',
+    headers: {
+      'X-API-Key': E2B_API_KEY,
+    },
+  }
+)
+const teamSandboxEvents = await resp2.json()
+
+console.log(teamSandboxEvents)
+
+// [
+//   {
+//     "version": "v1",
+//     "id": "f5911677-cb60-498f-afed-f68143b3cc59",
+//     "type": "sandbox.lifecycle.killed",
+//     "eventData": null,
+//     "sandboxBuildId": "a979a14b-bdcc-49e6-bc04-1189fc9fe7c2",
+//     "sandboxExecutionId": "1dae9e1c-9957-4ce7-a236-a99d5779aadf",
+//     "sandboxId": "${SANDBOX_ID}",
+//     "sandboxTeamId": "460355b3-4f64-48f9-9a16-4442817f79f5",
+//     "sandboxTemplateId": "rki5dems9wqfm4r03t7g",
+//     "timestamp": "2025-08-06T20:59:36Z"
+//   },
+//   {
+//     "version": "v1",
+//     "id": "30b09e11-9ba2-42db-9cf6-d21f0f43a234",
+//     "type": "sandbox.lifecycle.updated",
+//     "eventData": {
+//       "set_timeout": "2025-08-06T20:59:59Z"
+//     },
+//     "sandboxBuildId": "a979a14b-bdcc-49e6-bc04-1189fc9fe7c2",
+//     "sandboxExecutionId": "1dae9e1c-9957-4ce7-a236-a99d5779aadf",
+//     "sandboxId": "${SANDBOX_ID}",
+//     "sandboxTeamId": "460355b3-4f64-48f9-9a16-4442817f79f5",
+//     "sandboxTemplateId": "rki5dems9wqfm4r03t7g",
+//     "timestamp": "2025-08-06T20:59:29Z"
+//   },
+//   [...]
+// { +// "version": "v1", +// "id": "0568572b-a2ac-4e5f-85fa-fae90905f556", +// "type": "sandbox.lifecycle.created", +// "eventData": null, +// "sandboxBuildId": "a979a14b-bdcc-49e6-bc04-1189fc9fe7c2", +// "sandboxExecutionId": "1dae9e1c-9957-4ce7-a236-a99d5779aadf", +// "sandboxId": "${SANDBOX_ID}", +// "sandboxTeamId": "460355b3-4f64-48f9-9a16-4442817f79f5", +// "sandboxTemplateId": "rki5dems9wqfm4r03t7g", +// "timestamp": "2025-08-06T20:59:24Z" +// } +// ] +``` +```python Python +import requests +from e2b_code_interpreter import Sandbox + +sbx = Sandbox.create() + +# Get the latest events for a specific sandbox +resp1 = requests.get( + f"https://api.e2b.app/events/sandboxes/{sbx.sandbox_id}", + headers={ + "X-API-Key": E2B_API_KEY, + } +) +sandbox_events = resp1.json() + +# Get the latest 10 events for all sandboxes associated with the team +resp2 = requests.get( + "https://api.e2b.app/events/sandboxes?limit=10", + headers={ + "X-API-Key": E2B_API_KEY, + } +) +team_sandbox_events = resp2.json() + +print(team_sandbox_events) + +# [ +# { +# "version": "v1", +# "id": "0568572b-a2ac-4e5f-85fa-fae90905f556", +# "type": "sandbox.lifecycle.killed", +# "eventData": null, +# "sandboxBuildId": "a979a14b-bdcc-49e6-bc04-1189fc9fe7c2", +# "sandboxExecutionId": "1dae9e1c-9957-4ce7-a236-a99d5779aadf", +# "sandboxId": "${SANDBOX_ID}", +# "sandboxTeamId": "460355b3-4f64-48f9-9a16-4442817f79f5", +# "sandboxTemplateId": "rki5dems9wqfm4r03t7g", +# "timestamp": "2025-08-06T20:59:36Z" +# }, +# { +# "version": "v1", +# "id": "e7013704-2c51-4dd2-9f6c-388c91460149", +# "type": "sandbox.lifecycle.updated", +# "eventData": { +# "set_timeout": "2025-08-06T20:59:59Z" +# }, +# "sandboxBuildId": "a979a14b-bdcc-49e6-bc04-1189fc9fe7c2", +# "sandboxExecutionId": "1dae9e1c-9957-4ce7-a236-a99d5779aadf", +# "sandboxId": "${SANDBOX_ID}", +# "sandboxTeamId": "460355b3-4f64-48f9-9a16-4442817f79f5", +# "sandboxTemplateId": "rki5dems9wqfm4r03t7g", +# "timestamp": "2025-08-06T20:59:29Z" +# }, +# [...] 
+# { +# "version": "v1", +# "id": "f29ef778-2743-4c97-a802-7ba67f84ce24", +# "type": "sandbox.lifecycle.created", +# "eventData": null, +# "sandboxBuildId": "a979a14b-bdcc-49e6-bc04-1189fc9fe7c2", +# "sandboxExecutionId": "1dae9e1c-9957-4ce7-a236-a99d5779aadf", +# "sandboxId": "${SANDBOX_ID}", +# "sandboxTeamId": "460355b3-4f64-48f9-9a16-4442817f79f5", +# "sandboxTemplateId": "rki5dems9wqfm4r03t7g", +# "timestamp": "2025-08-06T20:59:24Z" +# } +# ] +``` + + +## Lifecycle webhooks + +Webhooks provide a way for notifications to be delivered to an external web server whenever certain sandbox lifecycle events occur. +This allows you to receive real-time updates about sandbox creation, updates, and termination without having to poll the API. +All webhook requests require authentication using your team [API key](/docs/api-key#where-to-find-api-key). + +### Register webhook + +Register a new webhook to receive sandbox lifecycle events. The webhook will be registered for the team ID associated with your API key. All events specified during webhook creation will be sent to URL provided during registration with the [webhook payload](#webhook-payload). 
+
+<CodeGroup>
+```js JavaScript & TypeScript
+// Register a new webhook
+const resp = await fetch(
+  'https://api.e2b.app/events/webhooks',
+  {
+    method: 'POST',
+    headers: {
+      'X-API-Key': E2B_API_KEY,
+      'Content-Type': 'application/json',
+    },
+    body: JSON.stringify({
+      name: 'My Sandbox Webhook',
+      url: 'https://your-webhook-endpoint.com/webhook',
+      enabled: true,
+      events: ['sandbox.lifecycle.created', 'sandbox.lifecycle.updated', 'sandbox.lifecycle.killed'],
+      signatureSecret: 'secret-for-event-signature-verification'
+    }),
+  }
+)
+
+if (resp.status === 201) {
+  console.log('Webhook registered successfully')
+}
+```
+```python Python
+import requests
+
+# Register a new webhook
+resp = requests.post(
+    "https://api.e2b.app/events/webhooks",
+    headers={
+        "X-API-Key": E2B_API_KEY,
+        "Content-Type": "application/json",
+    },
+    json={
+        "name": "My Sandbox Webhook",
+        "url": "https://your-webhook-endpoint.com/webhook",
+        "enabled": True,
+        "events": ["sandbox.lifecycle.created", "sandbox.lifecycle.updated", "sandbox.lifecycle.killed"],
+        "signatureSecret": "secret-for-event-signature-verification"
+    }
+)
+
+if resp.status_code == 201:
+    print("Webhook registered successfully")
+```
+</CodeGroup>
+
+### List webhooks
+
+List all registered webhooks for your team.
+
+<CodeGroup>
+```js JavaScript & TypeScript
+// List webhooks
+const resp = await fetch(
+  'https://api.e2b.app/events/webhooks',
+  {
+    method: 'GET',
+    headers: {
+      'X-API-Key': E2B_API_KEY
+    },
+  },
+)
+
+if (resp.status === 200) {
+  console.log('Webhooks listed successfully')
+  console.log(await resp.json())
+}
+```
+```python Python
+import requests
+
+# List webhooks
+resp = requests.get(
+    "https://api.e2b.app/events/webhooks",
+    headers={
+        "X-API-Key": E2B_API_KEY
+    },
+)
+
+if resp.status_code == 200:
+    print("Webhooks listed successfully")
+    print(resp.json())
+```
+</CodeGroup>
+
+
+### Get webhook configuration
+
+Retrieve the current webhook configuration for your team.
+
+<CodeGroup>
+```js JavaScript & TypeScript
+// Get webhook configuration
+const resp = await fetch(
+  `https://api.e2b.app/events/webhooks/${webhookID}`,
+  {
+    method: 'GET',
+    headers: {
+      'X-API-Key': E2B_API_KEY,
+    },
+  }
+)
+const webhookConfig = await resp.json()
+console.log(webhookConfig)
+// {
+//   "id": "<webhook-id>",
+//   "teamID": "<team-id>",
+//   "name": "My Sandbox Webhook",
+//   "createdAt": "2025-08-06T21:00:00Z",
+//   "enabled": true,
+//   "url": "https://your-webhook-endpoint.com/webhook",
+//   "events": ["sandbox.lifecycle.created", "sandbox.lifecycle.killed"]
+// }
+
+```
+```python Python
+import requests
+
+# Get webhook configuration
+resp = requests.get(
+    f"https://api.e2b.app/events/webhooks/{webhookID}",
+    headers={
+        "X-API-Key": E2B_API_KEY,
+    }
+)
+
+webhook_config = resp.json()
+print(webhook_config)
+# {
+#   "id": "<webhook-id>",
+#   "teamID": "<team-id>",
+#   "name": "My Sandbox Webhook",
+#   "createdAt": "2025-08-06T21:00:00Z",
+#   "enabled": true,
+#   "url": "https://your-webhook-endpoint.com/webhook",
+#   "events": ["sandbox.lifecycle.created", "sandbox.lifecycle.killed"]
+# }
+```
+</CodeGroup>
+
+### Update webhook configuration
+
+Update an existing webhook configuration. The update will replace the previous configuration fields with provided fields.
+ + +```js JavaScript & TypeScript +// Update webhook configuration +const resp = await fetch( + `https://api.e2b.app/events/webhooks/${webhookID}`, + { + method: 'PATCH', + headers: { + 'X-API-Key': E2B_API_KEY, + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + url: 'https://your-updated-webhook-endpoint.com/webhook', + enabled: false, + events: ['sandbox.lifecycle.created'] + }), + } +) + +if (resp.status === 200) { + console.log('Webhook updated successfully') +} +``` +```python Python +import requests + +# Update webhook configuration +resp = requests.patch( + "https://api.e2b.app/events/webhooks/{webhookID}", + headers={ + "X-API-Key": E2B_API_KEY, + "Content-Type": "application/json", + }, + json={ + "url": "https://your-updated-webhook-endpoint.com/webhook", + "enabled": False, + "events": ["sandbox.lifecycle.created"] + } +) + +if resp.status_code == 200: + print("Webhook updated successfully") +``` + + +### Delete webhook + +Unregister the webhook. + + +```js JavaScript & TypeScript +// Delete webhook configuration +const resp = await fetch( + `https://api.e2b.app/events/webhooks/${webhookID}`, + { + method: 'DELETE', + headers: { + 'X-API-Key': E2B_API_KEY, + }, + } +) + +if (resp.status === 200) { + console.log('Webhook deleted successfully') +} +``` +```python Python +import requests + +# Delete webhook configuration +resp = requests.delete( + "https://api.e2b.app/events/webhooks/{webhookID}", + headers={ + "X-API-Key": E2B_API_KEY, + } +) + +if resp.status_code == 200: + print("Webhook deleted successfully") +``` + + +### Webhook payload + +When a webhook is triggered, your endpoint will receive a POST request with a JSON payload containing the sandbox event data. 
+The payload structure matches the event format from the API:
+
+```json
+{
+  "version": "v2",
+  "id": "<event-id>",
+  "type": "<event-type>",
+  "eventData": {
+    "sandbox_metadata": {
+      "<key>": "<value>"
+    },
+    "execution": {
+      "started_at": "2025-08-06T20:58:24Z",
+      "vcpu_count": 2,
+      "memory_mb": 512,
+      "execution_time": 1000
+    }
+  },
+  "sandboxBuildId": "<build-id>",
+  "sandboxExecutionId": "<execution-id>",
+  "sandboxId": "<sandbox-id>",
+  "sandboxTeamId": "<team-id>",
+  "sandboxTemplateId": "<template-id>",
+  "timestamp": "2025-08-06T20:59:24Z"
+}
+```
+
+`eventData.execution` contains sandbox execution details and is included on `sandbox.lifecycle.killed` and `sandbox.lifecycle.paused` events:
+
+- `started_at` - UTC RFC3339 timestamp when the current sandbox execution started
+- `vcpu_count` - Number of vCPUs assigned to the sandbox
+- `memory_mb` - Memory assigned to the sandbox in MB
+- `execution_time` - Sandbox runtime in milliseconds
+
+### Webhook verification
+
+To ensure the authenticity of webhook requests, each request includes a signature in the `e2b-signature` header.
+You can verify the signature using the signature secret you provided when registering the webhook.
+This confirms that the request originated from E2B and has not been tampered with.
+ + +```js JavaScript & TypeScript +function verifyWebhookSignature(secret : string, payload : string, payloadSignature : string) { + const expectedSignatureRaw = crypto.createHash('sha256').update(secret + payload).digest('base64'); + const expectedSignature = expectedSignatureRaw.replace(/=+$/, ''); + return expectedSignature == payloadSignature +} + +const payloadValid = verifyWebhookSignature(secret, webhookBodyRaw, webhookSignatureHeader) +if (payloadValid) { + console.log("Payload signature is valid") +} else { + console.log("Payload signature is INVALID") +} +``` +```python Python +import hashlib +import base64 + +def verify_webhook_signature(secret: str, payload: str, payload_signature: str) -> bool: + hash_bytes = hashlib.sha256((secret + payload).encode('utf-8')).digest() + expected_signature = base64.b64encode(hash_bytes).decode('utf-8') + expected_signature = expected_signature.rstrip('=') + + return expected_signature == payload_signature + +if verify_webhook_signature(secret, webhook_body_raw, webhook_signature_header): + print("Payload signature is valid") +else: + print("Payload signature is INVALID") +``` +```go Golang +import ( + "crypto/sha256" + "encoding/base64" + "fmt" + "strings" +) + +func verifyWebhookSignature(secret, payload, payloadSignature string) bool { + hash := sha256.Sum256([]byte(secret + payload)) + expectedSignature := base64.StdEncoding.EncodeToString(hash[:]) + + expectedSignature = strings.TrimRight(expectedSignature, "=") + + return expectedSignature == payloadSignature +} + +if verifyWebhookSignature(secret, webhookBodyString, webhookSignatureHeaderString) { + fmt.Println("Payload signature is valid") +} else { + fmt.Println("Payload signature is INVALID") +} +``` + + +### Webhook request headers + +When a webhook is sent, E2B adds headers to help you verify authenticity and debug delivery: + +- `e2b-webhook-id` - Webhook ID that triggered the event +- `e2b-delivery-id` - Unique ID for the delivery attempt +- 
`e2b-signature-version` - Currently always `v1`, reserved for future use +- `e2b-signature` - Signature for verifying the request authenticity + +### Available event types + +The following event types can be subscribed to via webhooks, they are used as the `type` field in the [payload](#webhook-payload). + +- `sandbox.lifecycle.created` - Sandbox creation +- `sandbox.lifecycle.killed` - Sandbox termination +- `sandbox.lifecycle.updated` - Sandbox configuration updates +- `sandbox.lifecycle.paused` - Sandbox pausing +- `sandbox.lifecycle.resumed` - Sandbox resuming +- `sandbox.lifecycle.checkpointed` - Sandbox [snapshot](/docs/sandbox/lifecycle#snapshots) created diff --git a/docs/sandbox/persistence.mdx b/docs/sandbox/persistence.mdx deleted file mode 100644 index 3042df07..00000000 --- a/docs/sandbox/persistence.mdx +++ /dev/null @@ -1,282 +0,0 @@ ---- -title: "Sandbox persistence" -sidebarTitle: Persistence ---- - -The sandbox persistence allows you to pause your sandbox and resume it later from the same state it was in when you paused it. - -This includes not only state of the sandbox's filesystem but also the sandbox's memory. This means all running processes, loaded variables, data, etc. - -## Sandbox state transitions - -Understanding how sandboxes transition between different states is crucial for managing their lifecycle effectively. Here's a diagram showing the possible state transitions: - -```mermaid actions={false} -flowchart TD - start(( )) -->|Sandbox.create| Running - - Running["Running
• Active execution
• Consumes resources"] - Paused["Paused
• Preserves memory and files
• Cannot execute code"] - Snapshotting["Snapshotting
• Creates persistent snapshot
• Briefly pauses execution"] - Killed["Killed
• Resources released
• Cannot be resumed"] - - Running -->|pause| Paused - Running -->|createSnapshot| Snapshotting - Paused -->|connect| Running - Snapshotting -->|snapshot complete| Running - Running -->|kill| Killed - Paused -->|kill| Killed -``` - -### State descriptions - -- **Running**: The sandbox is actively running and can execute code. This is the initial state after creation. -- **Paused**: The sandbox execution is suspended but its state is preserved. -- **Snapshotting**: The sandbox is briefly paused while a persistent snapshot is being created. It automatically returns to Running. See [Snapshots](/docs/sandbox/snapshots). -- **Killed**: The sandbox is terminated and all resources are released. This is a terminal state. - -### Changing sandbox's state - - -```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' - -const sandbox = await Sandbox.create() // Starts in Running state - -// Pause the sandbox -await sandbox.pause() // Running → Paused - -// Resume the sandbox -await sandbox.connect() // Running/Paused → Running - -// Kill the sandbox (from any state) -await sandbox.kill() // Running/Paused → Killed -``` - -```python Python -from e2b_code_interpreter import Sandbox - -sandbox = Sandbox.create() # Starts in Running state - -# Pause the sandbox -sandbox.pause() # Running → Paused - -# Resume the sandbox -sandbox.connect() # Running/Paused → Running - -# Kill the sandbox (from any state) -sandbox.kill() # Running/Paused → Killed -``` - - -## Pausing sandbox -When you pause a sandbox, both the sandbox's filesystem and memory state will be saved. This includes all the files in the sandbox's filesystem and all the running processes, loaded variables, data, etc. 
- - -```js JavaScript & TypeScript highlight={8-9} -import { Sandbox } from '@e2b/code-interpreter' - -const sbx = await Sandbox.create() -console.log('Sandbox created', sbx.sandboxId) - -// Pause the sandbox -// You can save the sandbox ID in your database to resume the sandbox later -await sbx.pause() -console.log('Sandbox paused', sbx.sandboxId) -``` -```python Python highlight={8-9} -from e2b_code_interpreter import Sandbox - -sbx = Sandbox.create() -print('Sandbox created', sbx.sandbox_id) - -# Pause the sandbox -# You can save the sandbox ID in your database to resume the sandbox later -sbx.pause() -print('Sandbox paused', sbx.sandbox_id) -``` - - - -## Resuming sandbox -When you resume a sandbox, it will be in the same state it was in when you paused it. -This means that all the files in the sandbox's filesystem will be restored and all the running processes, loaded variables, data, etc. will be restored. - - -```js JavaScript & TypeScript highlight={12-13} -import { Sandbox } from '@e2b/code-interpreter' - -const sbx = await Sandbox.create() -console.log('Sandbox created', sbx.sandboxId) - -// Pause the sandbox -// You can save the sandbox ID in your database to resume the sandbox later -await sbx.pause() -console.log('Sandbox paused', sbx.sandboxId) - -// Connect to the sandbox (it will automatically resume the sandbox, if paused) -const sameSbx = await sbx.connect() -console.log('Connected to the sandbox', sameSbx.sandboxId) -``` -```python Python highlight={12-13} -from e2b_code_interpreter import Sandbox - -sbx = Sandbox.create() -print('Sandbox created', sbx.sandbox_id) - -# Pause the sandbox -# You can save the sandbox ID in your database to resume the sandbox later -sbx.pause() -print('Sandbox paused', sbx.sandbox_id) - -# Connect to the sandbox (it will automatically resume the sandbox, if paused) -same_sbx = sbx.connect() -print('Connected to the sandbox', same_sbx.sandbox_id) -``` - - -## Listing paused sandboxes -You can list all paused sandboxes 
by calling the `Sandbox.list` method and supplying the `state` query parameter. -More information about using the method can be found in [List Sandboxes](/docs/sandbox/list). - - -```js JavaScript & TypeScript highlight={4,7} -import { Sandbox, SandboxInfo } from '@e2b/code-interpreter' - -// List all paused sandboxes -const paginator = Sandbox.list({ query: { state: ['paused'] } }) - -// Get the first page of paused sandboxes -const sandboxes = await paginator.nextItems() - -// Get all paused sandboxes -while (paginator.hasNext) { - const items = await paginator.nextItems() - sandboxes.push(...items) -} -``` -```python Python highlight={4,7} -# List all paused sandboxes -from e2b_code_interpreter import Sandbox, SandboxQuery, SandboxState - -paginator = Sandbox.list(SandboxQuery(state=[SandboxState.PAUSED])) - -# Get the first page of paused sandboxes -sandboxes = paginator.next_items() - -# Get all paused sandboxes -while paginator.has_next: - items = paginator.next_items() - sandboxes.extend(items) -``` - - -## Removing paused sandboxes - -You can remove paused sandboxes by calling the `kill` method on the Sandbox instance. - - -```js JavaScript & TypeScript highlight={11,14} -import { Sandbox } from '@e2b/code-interpreter' - -const sbx = await Sandbox.create() -console.log('Sandbox created', sbx.sandboxId) - -// Pause the sandbox -// You can save the sandbox ID in your database to resume the sandbox later -await sbx.pause() - -// Remove the sandbox -await sbx.kill() - -// Remove sandbox by id -await Sandbox.kill(sbx.sandboxId) -``` -```python Python highlight={9,12} -from e2b_code_interpreter import Sandbox - -sbx = Sandbox.create() - -# Pause the sandbox -sbx.pause() - -# Remove the sandbox -sbx.kill() - -# Remove sandbox by id -Sandbox.kill(sbx.sandbox_id) -``` - - -## Sandbox's timeout -When you connect to a sandbox, the inactivity timeout resets. 
The default is 5 minutes, but you can pass a custom timeout to the `Sandbox.connect()` method: - - -```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' - -const sbx = await Sandbox.connect(sandboxId, { timeoutMs: 60 * 1000 }) // 60 seconds -``` -```python Python -from e2b_code_interpreter import Sandbox - -sbx = Sandbox.connect(sandbox_id, timeout=60) # 60 seconds -``` - - - -### Auto-pause - -Auto-pause is configured in the sandbox lifecycle on create. Set `onTimeout`/`on_timeout` to `pause`. - - -```js JavaScript & TypeScript -import { Sandbox } from 'e2b' - -const sandbox = await Sandbox.create({ - timeoutMs: 10 * 60 * 1000, // Optional: change default timeout (10 minutes) - lifecycle: { - onTimeout: 'pause', - autoResume: false, // Optional (default is false) - }, -}) -``` -```python Python -from e2b import Sandbox - -sandbox = Sandbox.create( - timeout=10 * 60, # Optional: change default timeout (10 minutes) - lifecycle={ - "on_timeout": "pause", # Auto-pause after the sandbox times out - "auto_resume": False, # Optional (default is False) - }, -) -``` - - -Auto-pause is persistent, meaning if your sandbox resumes and later times out again, it will pause again. - -If you call `.kill()`, the sandbox is permanently deleted and cannot be resumed. - -For auto-resume behavior, see [AutoResume](/docs/sandbox/auto-resume). - -## Network -If you have a service (for example a server) running inside your sandbox and you pause the sandbox, the service won't be accessible from the outside and all the clients will be disconnected. -If you resume the sandbox, the service will be accessible again but you need to connect clients again. 
- - -## Limitations - -### Pause and resume performance -- Pausing a sandbox takes approximately **4 seconds per 1 GiB of RAM** -- Resuming a sandbox takes approximately **1 second** - -### Paused sandbox retention -- Paused sandboxes are kept **indefinitely** — there is no automatic deletion or time-to-live limit -- You can resume a paused sandbox at any time - -### Continuous runtime limits -- A sandbox can remain running (without being paused) for: - - **24 hours** on the **Pro tier** - - **1 hour** on the **Base tier** -- After a sandbox is paused and resumed, the continuous runtime limit is **reset** diff --git a/docs/sandbox/ip-tunneling.mdx b/docs/sandbox/proxy-tunneling.mdx similarity index 100% rename from docs/sandbox/ip-tunneling.mdx rename to docs/sandbox/proxy-tunneling.mdx diff --git a/docs/sandbox/rate-limits.mdx b/docs/sandbox/rate-limits.mdx deleted file mode 100644 index 2a1618f0..00000000 --- a/docs/sandbox/rate-limits.mdx +++ /dev/null @@ -1,143 +0,0 @@ ---- -title: "Rate limits" -sidebarTitle: "Rate limits" ---- - -This page describes API and sandbox rate limits of the E2B platform. - -## Table of Contents - -- [Sandbox lifecycle and management API](#sandbox-lifecycle-and-management-api) -- [Sandbox operations and requests to sandboxes](#sandbox-operations-and-requests-to-sandboxes) -- [Concurrent Sandboxes](#concurrent-sandboxes) -- [Sandbox creation rate](#sandbox-creation-rate) -- [Egress connections](#egress-connections) -- [Reaching limits](#reaching-limits) -- [Increasing and avoiding rate limits](#increasing-and-avoiding-rate-limits) - -Here's a limits breakdown table based on the plan: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
PlanHobbyProEnterprise
Sandbox lifecycle & management API20,000 / 30s20,000 / 30sCustom
Sandbox operations40,000 / 60s per IP40,000 / 60s per IPCustom
Concurrent sandboxes20100 - 1,100*Custom
Sandbox creation rate1 / sec5 / secCustom
Egress connections per sandbox2,5002,500Custom
- - - *Pro plan default is 100 concurrent sandboxes. Higher concurrency of up to 1,100 is available as a separate purchasable add-on. - - ---- - -## Sandbox lifecycle and management API - -**20,000 requests per 30 seconds** - -This rate limit applies to lifecycle and management operations such as sandbox create, kill, update, list, and other. - ---- - -## Sandbox operations and requests to sandboxes - -**40,000 requests per 60 seconds** - -This rate limits applies to operations to/within running sandboxes such as running code, listing files, running commands, etc. -This also includes requests made to custom ports in the sandbox. - -
This limit is enforced globally across all sandbox operations from a single IP address. - ---- - -## Concurrent sandboxes - -Number of concurrent sandboxes differs based on the pricing tier. - -### Hobby tier -**Up to 20 concurrent sandboxes** - -### Pro tier -**Starts at 100 concurrent sandboxes** - - - Can go up to 1,100 with additional [add-ons available for purchase](https://e2b.dev/dashboard/?tab=billing). - - - -### Enterprise tier -**Custom limit - 1,100+ concurrent sandboxes** - ---- - -## Sandbox creation rate - -This limit controls how quickly you can create new sandboxes. - -### Hobby tier -**1 sandbox per second** - - -### Pro tier -**5 sandboxes per second** - -### Enterprise tier -**Custom limit - 5+ sandboxes per second** - ---- - -## Egress connections - -**2,500 connections per sandbox** - -This limit controls the maximum number of outbound (egress) network connections that can be established from a single sandbox. - -## Reaching limits - -When you reach the limits of your plan, subsequent requests/function calls will be effectively dropped and return the following: -- the `429 Too Many Requests` HTTP status code (when calling API/sandbox ports directly) -- `RateLimitError` in the JS/TS SDK -- `RateLimitException` in the Python SDK - -For example, if you're on the Pro tier (without any concurrency add-ons), you can create up to 100 sandboxes running concurrently. -If the 100 sandboxes are still running, requests for creating new sandboxes from the SDKs (`Sandbox.create()` in JS/TS or `Sandbox.create()` in Python) will fail and return `RateLimitError` or `RateLimitException` respectively. -Once the number of running sandboxes drops below 100, or you purchase a concurrency add-on, you’ll be able to create new sandboxes again. 
- -## Increasing and avoiding rate limits - -You can avoid rate limits by [upgrading your plan](https://e2b.dev/dashboard/?tab=billing) or contacting our sales team to discuss your specific requirements and we can provide tailored limits to meet your needs at [enterprise@e2b.dev](mailto:enterprise@e2b.dev). diff --git a/docs/sandbox/secured-access.mdx b/docs/sandbox/secured-access.mdx deleted file mode 100644 index d01ce3db..00000000 --- a/docs/sandbox/secured-access.mdx +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: "Secured access" -sidebarTitle: Secured access ---- - -Secure access authenticates communication between SDK and sandbox controller. - -Sandbox controller runs in sandbox itself and exposes APIs for work with file system, run commands, and generally control the sandbox via our SDK. -Without secure access, anyone with a sandbox ID can access the controller APIs and control the sandbox from inside. - - - SDKs version `v2.0.0` and above are using secure access by default when creating sandbox. This may not be compatible with older custom templates and you may need to rebuild them. - - -## Migration path - -When using custom templates created before envd `v0.2.0`, you need to rebuild the templates to enable secure access. -Temporarily, you can disable secure access by setting `secure` to `false` during sandbox creation, but this is not recommended for production use because it increases security risks. - -You can check the template envd version using the `e2b template list` command or by viewing the templates list on the dashboard. - -## Supported versions - -All sandboxes based on templates with envd version at least `v0.2.0` already support secure access without any additional changes. - -The secure access flag was introduced in `1.5.0` for JavaScript and Python SDKs to be used optionally. -Starting with SDK version `v2.0.0`, sandboxes are created with secure access enabled by default. 
- -## Access sandbox API directly - -In some cases, you might want to access sandbox controller APIs directly through its URL, such as when you are not using SDKs. -When secure access is enabled, you must provide an authentication token that was returned during sandbox creation. - -Each call to the sandbox controller must include an additional header `X-Access-Token` with the access token value returned during sandbox creation. - -For sandbox [upload](/docs/filesystem/upload#upload-with-pre-signed-url) and [download](/docs/filesystem/download#download-with-pre-signed-url) URLs, you need to generate pre-signed URLs. We are advising to use SDK for generating presigned URLs. - -## Disable secure access - -Disabling secured access is discouraged because it creates security vulnerabilities. - - - ```js JavaScript & TypeScript - import { Sandbox } from '@e2b/code-interpreter' - - const sandbox = await Sandbox.create({ secure: false }) // Explicitly disable - ``` - - ```python Python - from e2b_code_interpreter import Sandbox - - sandbox = Sandbox.create(secure=False) # Explicitly disable - ``` - diff --git a/docs/sandbox/internet-access.mdx b/docs/sandbox/security.mdx similarity index 56% rename from docs/sandbox/internet-access.mdx rename to docs/sandbox/security.mdx index ae9f2b93..e8557296 100644 --- a/docs/sandbox/internet-access.mdx +++ b/docs/sandbox/security.mdx @@ -1,464 +1,609 @@ ---- -title: "Internet access" ---- - -Every sandbox has access to the internet and can be reached by a public URL. - -## Controlling internet access - -You can control whether a sandbox has access to the internet by using the `allowInternetAccess` parameter when creating a sandbox. By default, internet access is enabled (`true`), but you can disable it for security-sensitive workloads. 
- - -```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' - -// Create sandbox with internet access enabled (default) -const sandbox = await Sandbox.create({ allowInternetAccess: true }) - -// Create sandbox without internet access -const isolatedSandbox = await Sandbox.create({ allowInternetAccess: false }) -``` -```python Python -from e2b_code_interpreter import Sandbox - -# Create sandbox with internet access enabled (default) -sandbox = Sandbox.create(allow_internet_access=True) - -# Create sandbox without internet access -isolated_sandbox = Sandbox.create(allow_internet_access=False) -``` - - -When internet access is disabled, the sandbox cannot make outbound network connections, which provides an additional layer of security for sensitive code execution. - - -Setting `allowInternetAccess` to `false` is equivalent to setting `network.denyOut` to `['0.0.0.0/0']` (denying all traffic). - - -## Fine-grained network control - -For more granular control over network access, you can use the `network` configuration option to specify allow and deny lists for outbound traffic. 
- -### Allow and deny lists - -You can specify IP addresses, CIDR blocks, or domain names that the sandbox is allowed to use: - - -```js JavaScript & TypeScript -import { Sandbox, ALL_TRAFFIC } from '@e2b/code-interpreter' - -// Deny all traffic except specific IPs -const sandbox = await Sandbox.create({ - network: { - denyOut: [ALL_TRAFFIC], - allowOut: ['1.1.1.1', '8.8.8.0/24'] - } -}) - -// Deny specific IPs only -const restrictedSandbox = await Sandbox.create({ - network: { - denyOut: ['8.8.8.8'] - } -}) -``` -```python Python -from e2b_code_interpreter import Sandbox, ALL_TRAFFIC - -# Deny all traffic except specific IPs -sandbox = Sandbox.create( - network={ - "deny_out": [ALL_TRAFFIC], - "allow_out": ["1.1.1.1", "8.8.8.0/24"] - } -) - -# Deny specific IPs only -restricted_sandbox = Sandbox.create( - network={ - "deny_out": ["8.8.8.8"] - } -) -``` - - -### Domain-based filtering - -You can allow traffic to specific domains by specifying hostnames in `allow out`. When using domain-based filtering, you must include `ALL_TRAFFIC` in `deny out` to block all other traffic. Domains are not supported in the `deny out` list. - - -```js JavaScript & TypeScript -import { Sandbox, ALL_TRAFFIC } from '@e2b/code-interpreter' - -// Allow only traffic to google.com -const sandbox = await Sandbox.create({ - network: { - allowOut: ['google.com'], - denyOut: [ALL_TRAFFIC] - } -}) -``` -```python Python -from e2b_code_interpreter import Sandbox, ALL_TRAFFIC - -# Allow only traffic to google.com -sandbox = Sandbox.create( - network={ - "allow_out": ["google.com"], - "deny_out": [ALL_TRAFFIC] - } -) -``` - - - -When any domain is used, the default nameserver `8.8.8.8` is automatically allowed to ensure proper DNS resolution. 
- - -You can also use wildcards to allow all subdomains of a domain: - - -```js JavaScript & TypeScript -import { Sandbox, ALL_TRAFFIC } from '@e2b/code-interpreter' - -// Allow traffic to any subdomain of mydomain.com -const sandbox = await Sandbox.create({ - network: { - allowOut: ['*.mydomain.com'], - denyOut: [ALL_TRAFFIC] - } -}) -``` -```python Python -from e2b_code_interpreter import Sandbox, ALL_TRAFFIC - -# Allow traffic to any subdomain of mydomain.com -sandbox = Sandbox.create( - network={ - "allow_out": ["*.mydomain.com"], - "deny_out": [ALL_TRAFFIC] - } -) -``` - - -You can combine domain names with IP addresses and CIDR blocks: - - -```js JavaScript & TypeScript -import { Sandbox, ALL_TRAFFIC } from '@e2b/code-interpreter' - -// Allow traffic to specific domains and IPs -const sandbox = await Sandbox.create({ - network: { - allowOut: ['api.example.com', '*.github.com', '8.8.8.8'], - denyOut: [ALL_TRAFFIC] - } -}) -``` -```python Python -from e2b_code_interpreter import Sandbox, ALL_TRAFFIC - -# Allow traffic to specific domains and IPs -sandbox = Sandbox.create( - network={ - "allow_out": ["api.example.com", "*.github.com", "8.8.8.8"], - "deny_out": [ALL_TRAFFIC] - } -) -``` - - - -Domain-based filtering only works for HTTP traffic on port 80 (via Host header inspection) and TLS traffic on port 443 (via SNI inspection). Traffic on other ports uses CIDR-based filtering only. UDP-based protocols like QUIC/HTTP3 are not supported for domain filtering. - - -### Priority rules - -When both `allow out` and `deny out` are specified, **allow rules always take precedence** over deny rules. This means if an IP address is in both lists, it will be allowed. 
- - -```js JavaScript & TypeScript -import { Sandbox, ALL_TRAFFIC } from '@e2b/code-interpreter' - -// Even though ALL_TRAFFIC is denied, 1.1.1.1 and 8.8.8.8 are explicitly allowed -const sandbox = await Sandbox.create({ - network: { - denyOut: [ALL_TRAFFIC], - allowOut: ['1.1.1.1', '8.8.8.8'] - } -}) -``` -```python Python -from e2b_code_interpreter import Sandbox, ALL_TRAFFIC - -# Even though ALL_TRAFFIC is denied, 1.1.1.1 and 8.8.8.8 are explicitly allowed -sandbox = Sandbox.create( - network={ - "deny_out": [ALL_TRAFFIC], - "allow_out": ["1.1.1.1", "8.8.8.8"] - } -) -``` - - -### ALL_TRAFFIC helper - -The `ALL_TRAFFIC` constant represents the CIDR range `0.0.0.0/0`, which matches all IP addresses. Use it to easily deny or allow all network traffic: - - -```js JavaScript & TypeScript -import { Sandbox, ALL_TRAFFIC } from '@e2b/code-interpreter' - -// Deny all outbound traffic -const sandbox = await Sandbox.create({ - network: { - denyOut: [ALL_TRAFFIC] - } -}) -``` -```python Python -from e2b_code_interpreter import Sandbox, ALL_TRAFFIC - -# Deny all outbound traffic -sandbox = Sandbox.create( - network={ - "deny_out": [ALL_TRAFFIC] - } -) -``` - - -## Sandbox public URL -Every sandbox has a public URL that can be used to access running services inside the sandbox. 
- - -```js JavaScript & TypeScript highlight={6} -import { Sandbox } from '@e2b/code-interpreter' - -const sandbox = await Sandbox.create() - -// You need to always pass a port number to get the host -const host = sandbox.getHost(3000) -console.log(`https://${host}`) -``` -```python Python highlight={6} -from e2b_code_interpreter import Sandbox - -sandbox = Sandbox.create() - -# You need to always pass a port number to get the host -host = sandbox.get_host(3000) -print(f'https://{host}') -``` - - -The code above will print something like this: - - -```bash JavaScript & TypeScript -https://3000-i62mff4ahtrdfdkyn2esc.e2b.app -``` -```bash Python -https://3000-i62mff4ahtrdfdkyn2esc.e2b.app -``` - - -The first leftmost part of the host is the port number we passed to the method. - -## Restricting public access to sandbox URLs - -By default, sandbox URLs are publicly accessible. You can restrict access to require authentication using the `allowPublicTraffic` option: - - -```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' - -// Create sandbox with restricted public access -const sandbox = await Sandbox.create({ - network: { - allowPublicTraffic: false - } -}) - -// The sandbox has a traffic access token -console.log(sandbox.trafficAccessToken) - -// Start a server inside the sandbox -await sandbox.commands.run('python -m http.server 8080', { background: true }) - -const host = sandbox.getHost(8080) -const url = `https://${host}` - -// Request without token will fail with 403 -const response1 = await fetch(url) -console.log(response1.status) // 403 - -// Request with token will succeed -const response2 = await fetch(url, { - headers: { - 'e2b-traffic-access-token': sandbox.trafficAccessToken - } -}) -console.log(response2.status) // 200 -``` -```python Python -import requests -from e2b_code_interpreter import Sandbox - -# Create sandbox with restricted public access -sandbox = Sandbox.create( - network={ - "allow_public_traffic": False - } -) - 
-# The sandbox has a traffic access token -print(sandbox.traffic_access_token) - -# Start a server inside the sandbox -sandbox.commands.run("python -m http.server 8080", background=True) - -host = sandbox.get_host(8080) -url = f"https://{host}" - -# Request without token will fail with 403 -response1 = requests.get(url) -print(response1.status_code) # 403 - -# Request with token will succeed -response2 = requests.get(url, headers={ - 'e2b-traffic-access-token': sandbox.traffic_access_token -}) -print(response2.status_code) # 200 -``` - - -When `allowPublicTraffic` is set to `false`, all requests to the sandbox's public URLs must include the `e2b-traffic-access-token` header with the value from `sandbox.trafficAccessToken`. - -## Connecting to a server running inside the sandbox -You can start a server inside the sandbox and connect to it using the approach above. - -In this example we will start a simple HTTP server that listens on port 3000 and responds with the content of the directory where the server is started. - - -```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' - -const sandbox = await Sandbox.create() - -// Start a simple HTTP server inside the sandbox. -const process = await sandbox.commands.run('python -m http.server 3000', { background: true }) -const host = sandbox.getHost(3000) -const url = `https://${host}` -console.log('Server started at:', url) - -// Fetch data from the server inside the sandbox. -const response = await fetch(url); -const data = await response.text(); -console.log('Response from server inside sandbox:', data); - -// Kill the server process inside the sandbox. -await process.kill() -``` -```python Python -import requests -from e2b_code_interpreter import Sandbox - -sandbox = Sandbox.create() - -# Start a simple HTTP server inside the sandbox. 
-process = sandbox.commands.run("python -m http.server 3000", background=True) -host = sandbox.get_host(3000) -url = f"https://{host}" -print('Server started at:', url) - -# Fetch data from the server inside the sandbox. -response = requests.get(url) -data = response.text -print('Response from server inside sandbox:', data) - -# Kill the server process inside the sandbox. -process.kill() -``` - - -This output will look like this: - -```bash JavaScript & TypeScript -Server started at: https://3000-ip3nfrvajtqu5ktoxugc7.e2b.app -Response from server inside sandbox: - - - -Directory listing for / - - -

Directory listing for /

-
- -
- - -``` -```bash Python -Server started at: https://3000-ip3nfrvajtqu5ktoxugc7.e2b.app -Response from server inside sandbox: - - - -Directory listing for / - - -

Directory listing for /

-
- -
- - -``` -
- - -## Masking request host headers - -You can customize the `Host` header that gets sent to services running inside the sandbox using the `maskRequestHost` option. This is useful when your application expects a specific host format. - - -```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' - -// Create sandbox with custom host masking -const sandbox = await Sandbox.create({ - network: { - maskRequestHost: 'localhost:${PORT}' - } -}) - -// The ${PORT} variable will be replaced with the actual port number -// Requests to the sandbox will have Host header set to for example: localhost:8080 -``` -```python Python -from e2b_code_interpreter import Sandbox - -# Create sandbox with custom host masking -sandbox = Sandbox.create( - network={ - "mask_request_host": "localhost:${PORT}" - } -) - -# The ${PORT} variable will be replaced with the actual port number -# Requests to the sandbox will have Host header set to for example: localhost:8080 -``` - - -The `${PORT}` variable in the mask will be automatically replaced with the actual port number of the requested service. +--- +title: "Security" +sidebarTitle: Security +--- + +E2B sandboxes provide multiple layers of security: authenticated access to the sandbox controller, fine-grained network controls, and rate limits to protect your infrastructure. + +## Secured access + +Secure access authenticates communication between the SDK and the sandbox controller. + +The sandbox controller runs in the sandbox itself and exposes APIs to work with the file system, run commands, and generally control the sandbox via our SDK. +Without secure access, anyone with a sandbox ID can access the controller APIs and control the sandbox from inside. + + + SDK versions `v2.0.0` and above use secure access by default when creating a sandbox. This may not be compatible with older custom templates and you may need to rebuild them. 
+ + +### Migration path + +When using custom templates created before envd `v0.2.0`, you need to rebuild the templates to enable secure access. +Temporarily, you can disable secure access by setting `secure` to `false` during sandbox creation, but this is not recommended for production use because it increases security risks. + +You can check the template envd version using the `e2b template list` command or by viewing the templates list on the dashboard. + +### Supported versions + +All sandboxes based on templates with envd version at least `v0.2.0` already support secure access without any additional changes. + +The secure access flag was introduced in `1.5.0` for JavaScript and Python SDKs to be used optionally. +Starting with SDK version `v2.0.0`, sandboxes are created with secure access enabled by default. + +### Access sandbox API directly + +In some cases, you might want to access sandbox controller APIs directly through its URL, such as when you are not using SDKs. +When secure access is enabled, you must provide an authentication token that was returned during sandbox creation. + +Each call to the sandbox controller must include an additional header `X-Access-Token` with the access token value returned during sandbox creation. + +For sandbox [upload](/docs/filesystem/upload#upload-with-pre-signed-url) and [download](/docs/filesystem/download#download-with-pre-signed-url) URLs, you need to generate pre-signed URLs. We advise using the SDK to generate pre-signed URLs. + +### Disable secure access + +Disabling secure access is discouraged because it creates security vulnerabilities. 
+ + + ```js JavaScript & TypeScript + import { Sandbox } from '@e2b/code-interpreter' + + const sandbox = await Sandbox.create({ secure: false }) // Explicitly disable + ``` + + ```python Python + from e2b_code_interpreter import Sandbox + + sandbox = Sandbox.create(secure=False) # Explicitly disable + ``` + + +## Internet access + +Every sandbox has access to the internet and can be reached by a public URL. + +### Controlling internet access + +You can control whether a sandbox has access to the internet by using the `allowInternetAccess` parameter when creating a sandbox. By default, internet access is enabled (`true`), but you can disable it for security-sensitive workloads. + + +```js JavaScript & TypeScript +import { Sandbox } from '@e2b/code-interpreter' + +// Create sandbox with internet access enabled (default) +const sandbox = await Sandbox.create({ allowInternetAccess: true }) + +// Create sandbox without internet access +const isolatedSandbox = await Sandbox.create({ allowInternetAccess: false }) +``` +```python Python +from e2b_code_interpreter import Sandbox + +# Create sandbox with internet access enabled (default) +sandbox = Sandbox.create(allow_internet_access=True) + +# Create sandbox without internet access +isolated_sandbox = Sandbox.create(allow_internet_access=False) +``` + + +When internet access is disabled, the sandbox cannot make outbound network connections, which provides an additional layer of security for sensitive code execution. + + +Setting `allowInternetAccess` to `false` is equivalent to setting `network.denyOut` to `['0.0.0.0/0']` (denying all traffic). + + +### Fine-grained network control + +For more granular control over network access, you can use the `network` configuration option to specify allow and deny lists for outbound traffic. 
+ +#### Allow and deny lists + +You can specify IP addresses, CIDR blocks, or domain names that the sandbox is allowed to use: + + +```js JavaScript & TypeScript +import { Sandbox, ALL_TRAFFIC } from '@e2b/code-interpreter' + +// Deny all traffic except specific IPs +const sandbox = await Sandbox.create({ + network: { + denyOut: [ALL_TRAFFIC], + allowOut: ['1.1.1.1', '8.8.8.0/24'] + } +}) + +// Deny specific IPs only +const restrictedSandbox = await Sandbox.create({ + network: { + denyOut: ['8.8.8.8'] + } +}) +``` +```python Python +from e2b_code_interpreter import Sandbox, ALL_TRAFFIC + +# Deny all traffic except specific IPs +sandbox = Sandbox.create( + network={ + "deny_out": [ALL_TRAFFIC], + "allow_out": ["1.1.1.1", "8.8.8.0/24"] + } +) + +# Deny specific IPs only +restricted_sandbox = Sandbox.create( + network={ + "deny_out": ["8.8.8.8"] + } +) +``` + + +#### Domain-based filtering + +You can allow traffic to specific domains by specifying hostnames in `allow out`. When using domain-based filtering, you must include `ALL_TRAFFIC` in `deny out` to block all other traffic. Domains are not supported in the `deny out` list. + + +```js JavaScript & TypeScript +import { Sandbox, ALL_TRAFFIC } from '@e2b/code-interpreter' + +// Allow only traffic to google.com +const sandbox = await Sandbox.create({ + network: { + allowOut: ['google.com'], + denyOut: [ALL_TRAFFIC] + } +}) +``` +```python Python +from e2b_code_interpreter import Sandbox, ALL_TRAFFIC + +# Allow only traffic to google.com +sandbox = Sandbox.create( + network={ + "allow_out": ["google.com"], + "deny_out": [ALL_TRAFFIC] + } +) +``` + + + +When any domain is used, the default nameserver `8.8.8.8` is automatically allowed to ensure proper DNS resolution. 
+ + +You can also use wildcards to allow all subdomains of a domain: + + +```js JavaScript & TypeScript +import { Sandbox, ALL_TRAFFIC } from '@e2b/code-interpreter' + +// Allow traffic to any subdomain of mydomain.com +const sandbox = await Sandbox.create({ + network: { + allowOut: ['*.mydomain.com'], + denyOut: [ALL_TRAFFIC] + } +}) +``` +```python Python +from e2b_code_interpreter import Sandbox, ALL_TRAFFIC + +# Allow traffic to any subdomain of mydomain.com +sandbox = Sandbox.create( + network={ + "allow_out": ["*.mydomain.com"], + "deny_out": [ALL_TRAFFIC] + } +) +``` + + +You can combine domain names with IP addresses and CIDR blocks: + + +```js JavaScript & TypeScript +import { Sandbox, ALL_TRAFFIC } from '@e2b/code-interpreter' + +// Allow traffic to specific domains and IPs +const sandbox = await Sandbox.create({ + network: { + allowOut: ['api.example.com', '*.github.com', '8.8.8.8'], + denyOut: [ALL_TRAFFIC] + } +}) +``` +```python Python +from e2b_code_interpreter import Sandbox, ALL_TRAFFIC + +# Allow traffic to specific domains and IPs +sandbox = Sandbox.create( + network={ + "allow_out": ["api.example.com", "*.github.com", "8.8.8.8"], + "deny_out": [ALL_TRAFFIC] + } +) +``` + + + +Domain-based filtering only works for HTTP traffic on port 80 (via Host header inspection) and TLS traffic on port 443 (via SNI inspection). Traffic on other ports uses CIDR-based filtering only. UDP-based protocols like QUIC/HTTP3 are not supported for domain filtering. + + +#### Priority rules + +When both `allow out` and `deny out` are specified, **allow rules always take precedence** over deny rules. This means if an IP address is in both lists, it will be allowed. 
+ + +```js JavaScript & TypeScript +import { Sandbox, ALL_TRAFFIC } from '@e2b/code-interpreter' + +// Even though ALL_TRAFFIC is denied, 1.1.1.1 and 8.8.8.8 are explicitly allowed +const sandbox = await Sandbox.create({ + network: { + denyOut: [ALL_TRAFFIC], + allowOut: ['1.1.1.1', '8.8.8.8'] + } +}) +``` +```python Python +from e2b_code_interpreter import Sandbox, ALL_TRAFFIC + +# Even though ALL_TRAFFIC is denied, 1.1.1.1 and 8.8.8.8 are explicitly allowed +sandbox = Sandbox.create( + network={ + "deny_out": [ALL_TRAFFIC], + "allow_out": ["1.1.1.1", "8.8.8.8"] + } +) +``` + + +#### ALL_TRAFFIC helper + +The `ALL_TRAFFIC` constant represents the CIDR range `0.0.0.0/0`, which matches all IP addresses. Use it to easily deny or allow all network traffic: + + +```js JavaScript & TypeScript +import { Sandbox, ALL_TRAFFIC } from '@e2b/code-interpreter' + +// Deny all outbound traffic +const sandbox = await Sandbox.create({ + network: { + denyOut: [ALL_TRAFFIC] + } +}) +``` +```python Python +from e2b_code_interpreter import Sandbox, ALL_TRAFFIC + +# Deny all outbound traffic +sandbox = Sandbox.create( + network={ + "deny_out": [ALL_TRAFFIC] + } +) +``` + + +### Sandbox public URL + +Every sandbox has a public URL that can be used to access running services inside the sandbox. 
+ + +```js JavaScript & TypeScript highlight={6} +import { Sandbox } from '@e2b/code-interpreter' + +const sandbox = await Sandbox.create() + +// You need to always pass a port number to get the host +const host = sandbox.getHost(3000) +console.log(`https://${host}`) +``` +```python Python highlight={6} +from e2b_code_interpreter import Sandbox + +sandbox = Sandbox.create() + +# You need to always pass a port number to get the host +host = sandbox.get_host(3000) +print(f'https://{host}') +``` + + +The code above will print something like this: + + +```bash JavaScript & TypeScript +https://3000-i62mff4ahtrdfdkyn2esc.e2b.app +``` +```bash Python +https://3000-i62mff4ahtrdfdkyn2esc.e2b.app +``` + + +The first leftmost part of the host is the port number we passed to the method. + +### Restricting public access to sandbox URLs + +By default, sandbox URLs are publicly accessible. You can restrict access to require authentication using the `allowPublicTraffic` option: + + +```js JavaScript & TypeScript +import { Sandbox } from '@e2b/code-interpreter' + +// Create sandbox with restricted public access +const sandbox = await Sandbox.create({ + network: { + allowPublicTraffic: false + } +}) + +// The sandbox has a traffic access token +console.log(sandbox.trafficAccessToken) + +// Start a server inside the sandbox +await sandbox.commands.run('python -m http.server 8080', { background: true }) + +const host = sandbox.getHost(8080) +const url = `https://${host}` + +// Request without token will fail with 403 +const response1 = await fetch(url) +console.log(response1.status) // 403 + +// Request with token will succeed +const response2 = await fetch(url, { + headers: { + 'e2b-traffic-access-token': sandbox.trafficAccessToken + } +}) +console.log(response2.status) // 200 +``` +```python Python +import requests +from e2b_code_interpreter import Sandbox + +# Create sandbox with restricted public access +sandbox = Sandbox.create( + network={ + "allow_public_traffic": False + } +) 
+ +# The sandbox has a traffic access token +print(sandbox.traffic_access_token) + +# Start a server inside the sandbox +sandbox.commands.run("python -m http.server 8080", background=True) + +host = sandbox.get_host(8080) +url = f"https://{host}" + +# Request without token will fail with 403 +response1 = requests.get(url) +print(response1.status_code) # 403 + +# Request with token will succeed +response2 = requests.get(url, headers={ + 'e2b-traffic-access-token': sandbox.traffic_access_token +}) +print(response2.status_code) # 200 +``` + + +When `allowPublicTraffic` is set to `false`, all requests to the sandbox's public URLs must include the `e2b-traffic-access-token` header with the value from `sandbox.trafficAccessToken`. + +### Connecting to a server running inside the sandbox + +You can start a server inside the sandbox and connect to it using the public URL approach described above. + +In this example we will start a simple HTTP server that listens on port 3000 and responds with the content of the directory where the server is started. + + +```js JavaScript & TypeScript +import { Sandbox } from '@e2b/code-interpreter' + +const sandbox = await Sandbox.create() + +// Start a simple HTTP server inside the sandbox. +const process = await sandbox.commands.run('python -m http.server 3000', { background: true }) +const host = sandbox.getHost(3000) +const url = `https://${host}` +console.log('Server started at:', url) + +// Fetch data from the server inside the sandbox. +const response = await fetch(url); +const data = await response.text(); +console.log('Response from server inside sandbox:', data); + +// Kill the server process inside the sandbox. +await process.kill() +``` +```python Python +import requests +from e2b_code_interpreter import Sandbox + +sandbox = Sandbox.create() + +# Start a simple HTTP server inside the sandbox. 
+process = sandbox.commands.run("python -m http.server 3000", background=True) +host = sandbox.get_host(3000) +url = f"https://{host}" +print('Server started at:', url) + +# Fetch data from the server inside the sandbox. +response = requests.get(url) +data = response.text +print('Response from server inside sandbox:', data) + +# Kill the server process inside the sandbox. +process.kill() +``` + + +### Masking request host headers + +You can customize the `Host` header that gets sent to services running inside the sandbox using the `maskRequestHost` option. This is useful when your application expects a specific host format. + + +```js JavaScript & TypeScript +import { Sandbox } from '@e2b/code-interpreter' + +// Create sandbox with custom host masking +const sandbox = await Sandbox.create({ + network: { + maskRequestHost: 'localhost:${PORT}' + } +}) + +// The ${PORT} variable will be replaced with the actual port number +// Requests to the sandbox will have Host header set to for example: localhost:8080 +``` +```python Python +from e2b_code_interpreter import Sandbox + +# Create sandbox with custom host masking +sandbox = Sandbox.create( + network={ + "mask_request_host": "localhost:${PORT}" + } +) + +# The ${PORT} variable will be replaced with the actual port number +# Requests to the sandbox will have Host header set to for example: localhost:8080 +``` + + +The `${PORT}` variable in the mask will be automatically replaced with the actual port number of the requested service. + +## Rate limits + +This section describes API and sandbox rate limits of the E2B platform. + +Here's a limits breakdown table based on the plan: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PlanHobbyProEnterprise
Sandbox lifecycle & management API20,000 / 30s20,000 / 30sCustom
Sandbox operations40,000 / 60s per IP40,000 / 60s per IPCustom
Concurrent sandboxes20100 - 1,100*Custom
Sandbox creation rate1 / sec5 / secCustom
Egress connections per sandbox2,5002,500Custom
+ + + *Pro plan default is 100 concurrent sandboxes. Higher concurrency of up to 1,100 is available as a separate purchasable add-on. + + +--- + +### Sandbox lifecycle and management API + +**20,000 requests per 30 seconds** + +This rate limit applies to lifecycle and management operations such as sandbox create, kill, update, list, and other. + +--- + +### Sandbox operations and requests to sandboxes + +**40,000 requests per 60 seconds** + +This rate limits applies to operations to/within running sandboxes such as running code, listing files, running commands, etc. +This also includes requests made to custom ports in the sandbox. + +
This limit is enforced globally across all sandbox operations from a single IP address. + +--- + +### Concurrent sandboxes + +Number of concurrent sandboxes differs based on the pricing tier. + +#### Hobby tier +**Up to 20 concurrent sandboxes** + +#### Pro tier +**Starts at 100 concurrent sandboxes** + + + Can go up to 1,100 with additional [add-ons available for purchase](https://e2b.dev/dashboard/?tab=billing). + + + +#### Enterprise tier +**Custom limit - 1,100+ concurrent sandboxes** + +--- + +### Sandbox creation rate + +This limit controls how quickly you can create new sandboxes. + +#### Hobby tier +**1 sandbox per second** + + +#### Pro tier +**5 sandboxes per second** + +#### Enterprise tier +**Custom limit - 5+ sandboxes per second** + +--- + +### Egress connections + +**2,500 connections per sandbox** + +This limit controls the maximum number of outbound (egress) network connections that can be established from a single sandbox. + +### Reaching limits + +When you reach the limits of your plan, subsequent requests/function calls will be effectively dropped and return the following: +- the `429 Too Many Requests` HTTP status code (when calling API/sandbox ports directly) +- `RateLimitError` in the JS/TS SDK +- `RateLimitException` in the Python SDK + +For example, if you're on the Pro tier (without any concurrency add-ons), you can create up to 100 sandboxes running concurrently. +If the 100 sandboxes are still running, requests for creating new sandboxes from the SDKs (`Sandbox.create()` in JS/TS or `Sandbox.create()` in Python) will fail and return `RateLimitError` or `RateLimitException` respectively. +Once the number of running sandboxes drops below 100, or you purchase a concurrency add-on, you'll be able to create new sandboxes again. 
+ +### Increasing and avoiding rate limits + +You can avoid rate limits by [upgrading your plan](https://e2b.dev/dashboard/?tab=billing) or contacting our sales team to discuss your specific requirements and we can provide tailored limits to meet your needs at [enterprise@e2b.dev](mailto:enterprise@e2b.dev). diff --git a/docs/sandbox/snapshots.mdx b/docs/sandbox/snapshots.mdx deleted file mode 100644 index 8e28303d..00000000 --- a/docs/sandbox/snapshots.mdx +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: "Sandbox snapshots" -sidebarTitle: Snapshots ---- - -Snapshots let you create a persistent point-in-time capture of a running sandbox, including both its filesystem and memory state. -You can then use a snapshot to spawn new sandboxes that start from the exact same state. - -The original sandbox continues running after the snapshot is created, and a single snapshot can be used to create many new sandboxes. - -## Prerequisites - -Snapshots require templates with envd version `v0.5.0` or above. If you are using a custom template created before envd `v0.5.0`, you need to rebuild it. - -You can check the template envd version using the `e2b template list` command or by viewing the templates list on the dashboard. - -## Snapshots vs. Pause/Resume - -| | Pause/Resume | Snapshots | -|---|---|---| -| Effect on original sandbox | Pauses (stops) the sandbox | Sandbox briefly pauses, then continues running | -| Relationship | One-to-one — resume restores the same sandbox | One-to-many — snapshot can spawn many new sandboxes | -| Use case | Suspend and resume a single sandbox | Create a reusable checkpoint | - -For pause/resume functionality, see [Persistence](/docs/sandbox/persistence). 
- -## Snapshot flow - -```mermaid actions={false} -graph LR - A[Running Sandbox] -->|createSnapshot| B[Snapshotting] - B --> C[Snapshot Created] - B --> A - C -->|Sandbox.create| D[New Sandbox 1] - C -->|Sandbox.create| E[New Sandbox 2] - C -->|Sandbox.create| F[New Sandbox N] -``` - -The sandbox is briefly paused during the snapshot process but automatically returns to running state. The sandbox ID stays the same after the snapshot completes. - - -During the snapshot, the sandbox is temporarily paused and resumed. This causes all active connections (e.g. WebSocket, PTY, command streams) to be dropped. Make sure your client handles reconnection properly. - - -## Create a snapshot - -You can create a snapshot from a running sandbox instance. - - -```js JavaScript & TypeScript -import { Sandbox } from 'e2b' - -const sandbox = await Sandbox.create() - -// Create a snapshot from a running sandbox -const snapshot = await sandbox.createSnapshot() -console.log('Snapshot ID:', snapshot.snapshotId) -``` -```python Python -from e2b import Sandbox - -sandbox = Sandbox.create() - -# Create a snapshot from a running sandbox -snapshot = sandbox.create_snapshot() -print('Snapshot ID:', snapshot.snapshot_id) -``` - - -You can also create a snapshot by sandbox ID using the static method. - - -```js JavaScript & TypeScript -import { Sandbox } from 'e2b' - -// Create a snapshot by sandbox ID -const snapshot = await Sandbox.createSnapshot(sandboxId) -console.log('Snapshot ID:', snapshot.snapshotId) -``` -```python Python -from e2b import Sandbox - -# Create a snapshot by sandbox ID -snapshot = Sandbox.create_snapshot(sandbox_id) -print('Snapshot ID:', snapshot.snapshot_id) -``` - - -## Create a sandbox from a snapshot - -The snapshot ID can be used directly with `Sandbox.create()` to spawn a new sandbox from the snapshot. The new sandbox starts with the exact filesystem and memory state captured in the snapshot. 
- - -```js JavaScript & TypeScript highlight={5} -import { Sandbox } from 'e2b' - -const snapshot = await sandbox.createSnapshot() - -// Create a new sandbox from the snapshot -const newSandbox = await Sandbox.create(snapshot.snapshotId) -``` -```python Python highlight={5} -from e2b import Sandbox - -snapshot = sandbox.create_snapshot() - -# Create a new sandbox from the snapshot -new_sandbox = Sandbox.create(snapshot.snapshot_id) -``` - - -## List snapshots - -You can list all snapshots. The method returns a paginator for iterating through results. - - -```js JavaScript & TypeScript -import { Sandbox } from 'e2b' - -const paginator = Sandbox.listSnapshots() - -const snapshots = [] -while (paginator.hasNext) { - const items = await paginator.nextItems() - snapshots.push(...items) -} -``` -```python Python -from e2b import Sandbox - -paginator = Sandbox.list_snapshots() - -snapshots = [] -while paginator.has_next: - items = paginator.next_items() - snapshots.extend(items) -``` - - -### Filter by sandbox - -You can filter snapshots created from a specific sandbox. - - -```js JavaScript & TypeScript -import { Sandbox } from 'e2b' - -const paginator = Sandbox.listSnapshots({ sandboxId: 'your-sandbox-id' }) -const snapshots = await paginator.nextItems() -``` -```python Python -from e2b import Sandbox - -paginator = Sandbox.list_snapshots(sandbox_id="your-sandbox-id") -snapshots = paginator.next_items() -``` - - -## Delete a snapshot - - -```js JavaScript & TypeScript -import { Sandbox } from 'e2b' - -// Returns true if deleted, false if the snapshot was not found -const deleted = await Sandbox.deleteSnapshot(snapshot.snapshotId) -``` -```python Python -from e2b import Sandbox - -Sandbox.delete_snapshot(snapshot.snapshot_id) -``` - - -## Snapshots vs. Templates - -Both snapshots and [templates](/docs/template/quickstart) create reusable starting points for sandboxes, but they solve different problems. 
- -| | Templates | Snapshots | -|---|---|---| -| Defined by | Declarative code (Template builder) | Capturing a running sandbox | -| Reproducibility | Same definition produces the same sandbox every time | Captures whatever state exists at that moment | -| Best for | Repeatable base environments | Checkpointing, rollback, forking runtime state | - -Use templates when every sandbox should start from an identical, known state — pre-installed tools, fixed configurations, consistent environments. -Use snapshots when you need to capture or fork live runtime state that depends on what happened during execution. - -## Use cases - -- **Checkpointing agent work** — an AI agent has loaded data and produced partial results in memory. Snapshot it so you can resume or fork from that point later. -- **Rollback points** — snapshot before a risky or expensive operation (running untrusted code, applying a migration, refactoring a web app). If it fails, rollback - spawn a fresh sandbox from the snapshot before the operation happened. -- **Forking workflows** — spawn multiple sandboxes from the same snapshot to explore different approaches in parallel. -- **Cached sandboxes** — avoid repeating expensive setup by snapshotting a sandbox that has already loaded a large dataset or started a long-running process. -- **Sharing state** — one user or agent configures an environment interactively, snapshots it, and others start from that exact state. diff --git a/docs/sandbox/ssh-access.mdx b/docs/sandbox/ssh-access.mdx deleted file mode 100644 index 40f8ded0..00000000 --- a/docs/sandbox/ssh-access.mdx +++ /dev/null @@ -1,156 +0,0 @@ ---- -title: "SSH access" -description: "Connect to your sandbox via SSH using a WebSocket proxy" ---- - -SSH access enables remote terminal sessions, SCP/SFTP file transfers, and integration with tools that expect SSH connectivity. 
- -## Quickstart - - - - -Define a template with OpenSSH server and [websocat](https://github.com/vi/websocat): - - - -```typescript JavaScript & TypeScript -// template.ts -import { Template, waitForPort } from 'e2b' - -export const template = Template() - .fromUbuntuImage('25.04') - .aptInstall(['openssh-server']) - .runCmd([ - 'curl -fsSL -o /usr/local/bin/websocat https://github.com/vi/websocat/releases/latest/download/websocat.x86_64-unknown-linux-musl', - 'chmod a+x /usr/local/bin/websocat', - ], { user: 'root' }) - .setStartCmd('sudo websocat -b --exit-on-eof ws-l:0.0.0.0:8081 tcp:127.0.0.1:22', waitForPort(8081)) -``` - -```python Python -# template.py -from e2b import Template, wait_for_port - -template = ( - Template() - .from_ubuntu_image("25.04") - .apt_install(["openssh-server"]) - .run_cmd([ - "curl -fsSL -o /usr/local/bin/websocat https://github.com/vi/websocat/releases/latest/download/websocat.x86_64-unknown-linux-musl", - "chmod a+x /usr/local/bin/websocat", - ], user="root") - .set_start_cmd("sudo websocat -b --exit-on-eof ws-l:0.0.0.0:8081 tcp:127.0.0.1:22", wait_for_port(8081)) -) -``` - - - -Build the template: - - - -```typescript JavaScript & TypeScript -// build.ts -import { Template, defaultBuildLogger } from 'e2b' -import { template as sshTemplate } from './template' - -await Template.build(sshTemplate, 'ssh-ready', { - cpuCount: 2, - memoryMB: 2048, - onBuildLogs: defaultBuildLogger(), -}) -``` - -```python Python -# build.py -from e2b import Template, default_build_logger -from template import template as ssh_template - -Template.build(ssh_template, "ssh-ready", - cpu_count=2, - memory_mb=2048, - on_build_logs=default_build_logger(), -) -``` - - - - - - - - -```typescript JavaScript & TypeScript -import { Sandbox } from 'e2b' - -const sbx = await Sandbox.create('ssh-ready') -console.log(sbx.sandboxId) -``` - -```python Python -from e2b import Sandbox - -sbx = Sandbox.create("ssh-ready") -print(sbx.sandbox_id) -``` - - - - - - - - 
-```bash macOS -# Install websocat -brew install websocat - -# Connect to your sandbox -ssh -o 'ProxyCommand=websocat --binary -B 65536 - wss://8081-%h.e2b.app' user@ -``` - -```bash Linux -# Install websocat -sudo curl -fsSL -o /usr/local/bin/websocat https://github.com/vi/websocat/releases/latest/download/websocat.x86_64-unknown-linux-musl -sudo chmod a+x /usr/local/bin/websocat - -# Connect to your sandbox -ssh -o 'ProxyCommand=websocat --binary -B 65536 - wss://8081-%h.e2b.app' user@ -``` - - - - - - ---- - -## How it works - -This method uses [websocat](https://github.com/vi/websocat) to proxy SSH connections over WebSocket through the sandbox's exposed ports. - -``` -┌───────────────────────────────────────────────────────────┐ -│ Your Machine │ -│ ┌──────────┐ ProxyCommand ┌──────────────────┐ │ -│ │ SSH │ ────────────────── │ websocat │ │ -│ │ Client │ │ (WebSocket) │ │ -│ └──────────┘ └─────────┬────────┘ │ -└────────────────────────────────────────────┼──────────────┘ - │ - wss://8081-.e2b.app - │ -┌────────────────────────────────────────────┼──────────────┐ -│ E2B Sandbox ▼ │ -│ ┌──────────────────┐ │ -│ │ websocat │ │ -│ │ (WS → TCP:22) │ │ -│ └─────────┬────────┘ │ -│ │ │ -│ ┌─────────▼────────┐ │ -│ │ SSH Server │ │ -│ │ (OpenSSH) │ │ -│ └──────────────────┘ │ -└───────────────────────────────────────────────────────────┘ -``` diff --git a/docs/template/defining-template.mdx b/docs/template/defining-template.mdx index c4671ac4..4e8df920 100644 --- a/docs/template/defining-template.mdx +++ b/docs/template/defining-template.mdx @@ -245,7 +245,7 @@ template.git_clone("https://github.com/user/repo.git", "/app/repo", depth=1) Environment variables set in template definition are only available during template build. 
-[How to setup environment variables in sandbox?](/docs/sandbox/environment-variables) +[How to setup environment variables in sandbox?](/docs/sandbox/configuration#environment-variables) Set environment variables in the template: diff --git a/docs/template/start-ready-command.mdx b/docs/template/start-ready-command.mdx index 7920c4ec..6ab2b84e 100644 --- a/docs/template/start-ready-command.mdx +++ b/docs/template/start-ready-command.mdx @@ -13,7 +13,7 @@ This is how you get servers, seeded databases, or any long-running process avail The start command runs **once during template build** and is captured in a snapshot. It does not re-execute each time you create a sandbox. If you need to run a command every time a sandbox is created, use `sandbox.commands.run()` after creating the sandbox instead. - This also means that [environment variables passed to `Sandbox.create()`](/docs/sandbox/environment-variables#1-global-environment-variables) are **not available** to the start command process — it already ran during the build. If your start command needs environment variables, set them in the template definition using `setEnvs()` / `set_envs()`. + This also means that [environment variables passed to `Sandbox.create()`](/docs/sandbox/configuration#environment-variables) are **not available** to the start command process — it already ran during the build. If your start command needs environment variables, set them in the template definition using `setEnvs()` / `set_envs()`. You can see the full build process [here](/docs/template/how-it-works). 
diff --git a/docs/use-cases/coding-agents.mdx b/docs/use-cases/coding-agents.mdx index bdf3f304..3fe384b3 100644 --- a/docs/use-cases/coding-agents.mdx +++ b/docs/use-cases/coding-agents.mdx @@ -47,7 +47,7 @@ Since each sandbox is a full Linux environment, you can run any coding agent — Clone repos, manage branches, and push changes from sandboxes - + Pause and resume sandboxes to preserve state From 243e116c99ab2d0c4ac1a2793ce545d0c8275411 Mon Sep 17 00:00:00 2001 From: Tomas Beran Date: Wed, 18 Mar 2026 20:35:00 +0100 Subject: [PATCH 02/22] docs: rename nav groups and reorder Code Interpreting section Rename nested Core/Guides subgroups to flat top-level "Sandbox core" and "Sandbox guides" groups. Move Code Interpreting section between MCP gateway and CLI in the sidebar. --- docs.json | 93 ++++++++++++++++++++++++++----------------------------- 1 file changed, 44 insertions(+), 49 deletions(-) diff --git a/docs.json b/docs.json index a26623de..68fd3621 100644 --- a/docs.json +++ b/docs.json @@ -68,60 +68,22 @@ ] }, { - "group": "Code interpreting", + "group": "Sandbox core", "pages": [ - { - "group": "Analyze data with AI", - "pages": [ - "docs/code-interpreting/analyze-data-with-ai", - "docs/code-interpreting/analyze-data-with-ai/pre-installed-libraries" - ] - }, - { - "group": "Charts & visualizations", - "pages": [ - "docs/code-interpreting/create-charts-visualizations", - "docs/code-interpreting/create-charts-visualizations/static-charts", - "docs/code-interpreting/create-charts-visualizations/interactive-charts" - ] - }, - "docs/code-interpreting/streaming", - "docs/code-interpreting/contexts", - { - "group": "Supported languages", - "pages": [ - "docs/code-interpreting/supported-languages", - "docs/code-interpreting/supported-languages/python", - "docs/code-interpreting/supported-languages/javascript", - "docs/code-interpreting/supported-languages/r", - "docs/code-interpreting/supported-languages/java", - "docs/code-interpreting/supported-languages/bash" - 
] - } + "docs/sandbox", + "docs/sandbox/lifecycle", + "docs/sandbox/commands", + "docs/sandbox/configuration", + "docs/sandbox/security", + "docs/sandbox/observability" ] }, { - "group": "Sandbox", + "group": "Sandbox guides", "pages": [ - "docs/sandbox", - { - "group": "Core", - "pages": [ - "docs/sandbox/lifecycle", - "docs/sandbox/commands", - "docs/sandbox/configuration", - "docs/sandbox/security", - "docs/sandbox/observability" - ] - }, - { - "group": "Guides", - "pages": [ - "docs/sandbox/git-integration", - "docs/sandbox/proxy-tunneling", - "docs/sandbox/custom-domain" - ] - } + "docs/sandbox/git-integration", + "docs/sandbox/proxy-tunneling", + "docs/sandbox/custom-domain" ] }, { @@ -184,6 +146,39 @@ "docs/mcp/examples" ] }, + { + "group": "Code interpreting", + "pages": [ + { + "group": "Analyze data with AI", + "pages": [ + "docs/code-interpreting/analyze-data-with-ai", + "docs/code-interpreting/analyze-data-with-ai/pre-installed-libraries" + ] + }, + { + "group": "Charts & visualizations", + "pages": [ + "docs/code-interpreting/create-charts-visualizations", + "docs/code-interpreting/create-charts-visualizations/static-charts", + "docs/code-interpreting/create-charts-visualizations/interactive-charts" + ] + }, + "docs/code-interpreting/streaming", + "docs/code-interpreting/contexts", + { + "group": "Supported languages", + "pages": [ + "docs/code-interpreting/supported-languages", + "docs/code-interpreting/supported-languages/python", + "docs/code-interpreting/supported-languages/javascript", + "docs/code-interpreting/supported-languages/r", + "docs/code-interpreting/supported-languages/java", + "docs/code-interpreting/supported-languages/bash" + ] + } + ] + }, { "group": "CLI", "pages": [ From 7e9cbf254db623fb88658d3808565a8d951d59f5 Mon Sep 17 00:00:00 2001 From: Tomas Beran Date: Wed, 18 Mar 2026 20:40:38 +0100 Subject: [PATCH 03/22] docs: rewrite sandbox overview with persistence-first framing, split lifecycle into lifecycle + persistence - Rewrite 
sandbox.mdx overview with state diagram, key characteristics, and quick setup code showing persistent sandbox configuration - Extract persistence content (pause/resume, auto-pause, auto-resume, snapshots) from lifecycle.mdx into new persistence.mdx page - Add description frontmatter to overview, lifecycle, and persistence pages - Update docs.json navigation to include persistence page - Update redirects: snapshots and auto-resume now point to persistence - Update cross-references in 7 agent/use-case pages to link to persistence --- docs.json | 10 +- docs/agents/amp.mdx | 2 +- docs/agents/claude-code.mdx | 2 +- docs/agents/codex.mdx | 2 +- docs/agents/openclaw.mdx | 4 +- docs/agents/opencode.mdx | 2 +- docs/sandbox.mdx | 64 ++- docs/sandbox/lifecycle.mdx | 778 +------------------------------ docs/sandbox/observability.mdx | 2 +- docs/sandbox/persistence.mdx | 776 ++++++++++++++++++++++++++++++ docs/use-cases/coding-agents.mdx | 2 +- 11 files changed, 852 insertions(+), 792 deletions(-) create mode 100644 docs/sandbox/persistence.mdx diff --git a/docs.json b/docs.json index 68fd3621..83b53434 100644 --- a/docs.json +++ b/docs.json @@ -72,6 +72,7 @@ "pages": [ "docs/sandbox", "docs/sandbox/lifecycle", + "docs/sandbox/persistence", "docs/sandbox/commands", "docs/sandbox/configuration", "docs/sandbox/security", @@ -3936,19 +3937,14 @@ "destination": "/docs/sandbox/git-integration", "permanent": true }, - { - "source": "/docs/sandbox/persistence", - "destination": "/docs/sandbox/lifecycle", - "permanent": true - }, { "source": "/docs/sandbox/snapshots", - "destination": "/docs/sandbox/lifecycle", + "destination": "/docs/sandbox/persistence", "permanent": true }, { "source": "/docs/sandbox/auto-resume", - "destination": "/docs/sandbox/lifecycle", + "destination": "/docs/sandbox/persistence", "permanent": true }, { diff --git a/docs/agents/amp.mdx b/docs/agents/amp.mdx index b9081338..80e7c16d 100644 --- a/docs/agents/amp.mdx +++ b/docs/agents/amp.mdx @@ -298,7 +298,7 @@ 
python build.py ## Related guides - + Auto-pause, resume, and manage sandbox lifecycle diff --git a/docs/agents/claude-code.mdx b/docs/agents/claude-code.mdx index 89dc2f43..cb551604 100644 --- a/docs/agents/claude-code.mdx +++ b/docs/agents/claude-code.mdx @@ -450,7 +450,7 @@ python build.py Connect Claude Code to 200+ MCP tools - + Auto-pause, resume, and manage sandbox lifecycle diff --git a/docs/agents/codex.mdx b/docs/agents/codex.mdx index b647f455..c569b33a 100644 --- a/docs/agents/codex.mdx +++ b/docs/agents/codex.mdx @@ -354,7 +354,7 @@ python build.py ## Related guides - + Auto-pause, resume, and manage sandbox lifecycle diff --git a/docs/agents/openclaw.mdx b/docs/agents/openclaw.mdx index da88841e..17dc1d82 100644 --- a/docs/agents/openclaw.mdx +++ b/docs/agents/openclaw.mdx @@ -387,7 +387,7 @@ sandbox.kill() OpenClaw has a built-in [web UI and chat interface](https://openclaw.ai) served by its gateway. Start it inside a sandbox and connect from your browser. -This sandbox is created with a 10-minute timeout and auto-pause enabled. After 10 minutes of inactivity it pauses and can be resumed later. See [Sandbox Persistence](/docs/sandbox/lifecycle) and [Sandbox Lifecycle](/docs/sandbox) for more details. +This sandbox is created with a 10-minute timeout and auto-pause enabled. After 10 minutes of inactivity it pauses and can be resumed later. See [Sandbox Persistence](/docs/sandbox/persistence) and [Sandbox Lifecycle](/docs/sandbox) for more details. 
@@ -500,7 +500,7 @@ python build.py ## Related guides - + Auto-pause, resume, and manage sandbox lifecycle diff --git a/docs/agents/opencode.mdx b/docs/agents/opencode.mdx index 0eb70e6d..006a7544 100644 --- a/docs/agents/opencode.mdx +++ b/docs/agents/opencode.mdx @@ -278,7 +278,7 @@ python build.py ## Related guides - + Auto-pause, resume, and manage sandbox lifecycle diff --git a/docs/sandbox.mdx b/docs/sandbox.mdx index b4833636..82b290d7 100644 --- a/docs/sandbox.mdx +++ b/docs/sandbox.mdx @@ -1,13 +1,73 @@ --- title: "Sandbox" sidebarTitle: Overview +description: "E2B sandboxes are persistent, secure cloud environments for AI agents — with pause/resume, snapshots, and auto-resume built in." --- -An E2B sandbox is a secure, isolated cloud environment where your AI agents can execute code, run commands, interact with the filesystem, and access the internet. Sandboxes support long-lived sessions with pause/resume, snapshots, and configurable timeouts. +An E2B sandbox is a persistent, secure cloud environment where your AI agents execute code, run commands, manage files, and access the internet. Every sandbox supports **pause and resume** — when a sandbox times out, it can automatically pause and preserve its full state (filesystem and memory), then resume exactly where it left off when activity arrives. + +```mermaid actions={false} +flowchart LR + start(( )) -->|create| Running + Running -->|timeout or pause| Paused + Paused -->|connect or auto‑resume| Running + Running -->|snapshot| Snapshotting + Snapshotting -->|complete| Running + Running -->|kill| Killed + Paused -->|kill| Killed +``` + +## Key characteristics + +- **Persistent by default** — configure `onTimeout: 'pause'` and sandboxes preserve their full state (filesystem + memory) indefinitely. Resume at any time. +- **Auto-resume** — paused sandboxes wake automatically when SDK calls or HTTP traffic arrive. No manual state management needed. 
+- **Snapshots** — capture a running sandbox's state and spawn multiple new sandboxes from it. Useful for checkpointing, forking, and rollback. +- **Configurable timeouts** — sandboxes run for up to 24 hours (Pro) or 1 hour (Base) continuously. Pausing resets the runtime window. +- **Isolated and secure** — each sandbox runs in its own microVM with network controls, access tokens, and rate limits. + +## Quick setup: persistent sandbox + + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +const sandbox = await Sandbox.create({ + timeoutMs: 5 * 60 * 1000, + lifecycle: { + onTimeout: 'pause', // pause instead of kill on timeout + autoResume: true, // wake on activity + }, +}) + +// Use the sandbox — it auto-pauses when idle, auto-resumes when needed +const result = await sandbox.commands.run('echo "Hello from a persistent sandbox"') +console.log(result.stdout) +``` +```python Python +from e2b import Sandbox + +sandbox = Sandbox.create( + timeout=5 * 60, + lifecycle={ + "on_timeout": "pause", # pause instead of kill on timeout + "auto_resume": True, # wake on activity + }, +) + +# Use the sandbox — it auto-pauses when idle, auto-resumes when needed +result = sandbox.commands.run('echo "Hello from a persistent sandbox"') +print(result.stdout) +``` + + +## Core - Timeouts, pause/resume, snapshots, listing and connecting to sandboxes. + Timeouts, sandbox info, metadata, listing, connecting, and shutdown. + + + Pause/resume, auto-pause, auto-resume, snapshots, and state transitions. Interactive terminal (PTY) and SSH access. diff --git a/docs/sandbox/lifecycle.mdx b/docs/sandbox/lifecycle.mdx index 20075fd0..4fb220bf 100644 --- a/docs/sandbox/lifecycle.mdx +++ b/docs/sandbox/lifecycle.mdx @@ -1,12 +1,13 @@ --- title: "Sandbox lifecycle" sidebarTitle: Lifecycle +description: "Manage E2B sandbox lifecycle — timeouts, metadata, listing, connecting, and shutdown. Sandboxes are persistent cloud environments for AI agents." 
--- -Sandboxes stay running as long as you need them. When their timeout expires, they can automatically pause to save resources — preserving their full state so you can resume at any time. You can also configure an explicit timeout or shut down a sandbox manually. +Sandboxes stay running as long as you need them. When their timeout expires, they can automatically [pause to save resources](/docs/sandbox/persistence) — preserving their full state so you can resume at any time. You can also configure an explicit timeout or shut down a sandbox manually. -Sandboxes can run continuously for up to 24 hours (Pro) or 1 hour (Base). For longer workloads, use [pause and resume](/docs/sandbox/lifecycle#persistence) — pausing resets the runtime window, and your sandbox's full state is preserved indefinitely. +Sandboxes can run continuously for up to 24 hours (Pro) or 1 hour (Base). For longer workloads, use [pause and resume](/docs/sandbox/persistence) — pausing resets the runtime window, and your sandbox's full state is preserved indefinitely. ## Timeouts @@ -454,779 +455,6 @@ print(f"Running in sandbox {sandbox.sandbox_id} as \"{r.stdout.strip()}\"") ``` -## Persistence - -The sandbox persistence allows you to pause your sandbox and resume it later from the same state it was in when you paused it. This includes not only state of the sandbox's filesystem but also the sandbox's memory — all running processes, loaded variables, data, etc. - -### Sandbox state transitions - -Understanding how sandboxes transition between different states is crucial for managing their lifecycle effectively. Here's a diagram showing the possible state transitions: - -```mermaid actions={false} -flowchart TD - start(( )) -->|Sandbox.create| Running - - Running["Running
• Active execution
• Consumes resources"] - Paused["Paused
• Preserves memory and files
• Cannot execute code"] - Snapshotting["Snapshotting
• Creates persistent snapshot
• Briefly pauses execution"] - Killed["Killed
• Resources released
• Cannot be resumed"] - - Running -->|pause| Paused - Running -->|createSnapshot| Snapshotting - Paused -->|connect| Running - Snapshotting -->|snapshot complete| Running - Running -->|kill| Killed - Paused -->|kill| Killed -``` - -#### State descriptions - -- **Running**: The sandbox is actively running and can execute code. This is the initial state after creation. -- **Paused**: The sandbox execution is suspended but its state is preserved. -- **Snapshotting**: The sandbox is briefly paused while a persistent snapshot is being created. It automatically returns to Running. See [Snapshots](/docs/sandbox/lifecycle#snapshots). -- **Killed**: The sandbox is terminated and all resources are released. This is a terminal state. - -#### Changing sandbox state - - -```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' - -const sandbox = await Sandbox.create() // Starts in Running state - -// Pause the sandbox -await sandbox.pause() // Running → Paused - -// Resume the sandbox -await sandbox.connect() // Running/Paused → Running - -// Kill the sandbox (from any state) -await sandbox.kill() // Running/Paused → Killed -``` - -```python Python -from e2b_code_interpreter import Sandbox - -sandbox = Sandbox.create() # Starts in Running state - -# Pause the sandbox -sandbox.pause() # Running → Paused - -# Resume the sandbox -sandbox.connect() # Running/Paused → Running - -# Kill the sandbox (from any state) -sandbox.kill() # Running/Paused → Killed -``` - - -### Pausing sandbox - -When you pause a sandbox, both the sandbox's filesystem and memory state will be saved. This includes all the files in the sandbox's filesystem and all the running processes, loaded variables, data, etc. 
- - -```js JavaScript & TypeScript highlight={8-9} -import { Sandbox } from '@e2b/code-interpreter' - -const sbx = await Sandbox.create() -console.log('Sandbox created', sbx.sandboxId) - -// Pause the sandbox -// You can save the sandbox ID in your database to resume the sandbox later -await sbx.pause() -console.log('Sandbox paused', sbx.sandboxId) -``` -```python Python highlight={8-9} -from e2b_code_interpreter import Sandbox - -sbx = Sandbox.create() -print('Sandbox created', sbx.sandbox_id) - -# Pause the sandbox -# You can save the sandbox ID in your database to resume the sandbox later -sbx.pause() -print('Sandbox paused', sbx.sandbox_id) -``` - - - -### Resuming sandbox - -When you resume a sandbox, it will be in the same state it was in when you paused it. -This means that all the files in the sandbox's filesystem will be restored and all the running processes, loaded variables, data, etc. will be restored. - - -```js JavaScript & TypeScript highlight={12-13} -import { Sandbox } from '@e2b/code-interpreter' - -const sbx = await Sandbox.create() -console.log('Sandbox created', sbx.sandboxId) - -// Pause the sandbox -// You can save the sandbox ID in your database to resume the sandbox later -await sbx.pause() -console.log('Sandbox paused', sbx.sandboxId) - -// Connect to the sandbox (it will automatically resume the sandbox, if paused) -const sameSbx = await sbx.connect() -console.log('Connected to the sandbox', sameSbx.sandboxId) -``` -```python Python highlight={12-13} -from e2b_code_interpreter import Sandbox - -sbx = Sandbox.create() -print('Sandbox created', sbx.sandbox_id) - -# Pause the sandbox -# You can save the sandbox ID in your database to resume the sandbox later -sbx.pause() -print('Sandbox paused', sbx.sandbox_id) - -# Connect to the sandbox (it will automatically resume the sandbox, if paused) -same_sbx = sbx.connect() -print('Connected to the sandbox', same_sbx.sandbox_id) -``` - - -### Listing paused sandboxes - -You can list all paused 
sandboxes by calling the `Sandbox.list` method and supplying the `state` query parameter. - - -```js JavaScript & TypeScript highlight={4,7} -import { Sandbox, SandboxInfo } from '@e2b/code-interpreter' - -// List all paused sandboxes -const paginator = Sandbox.list({ query: { state: ['paused'] } }) - -// Get the first page of paused sandboxes -const sandboxes = await paginator.nextItems() - -// Get all paused sandboxes -while (paginator.hasNext) { - const items = await paginator.nextItems() - sandboxes.push(...items) -} -``` -```python Python highlight={4,7} -# List all paused sandboxes -from e2b_code_interpreter import Sandbox, SandboxQuery, SandboxState - -paginator = Sandbox.list(SandboxQuery(state=[SandboxState.PAUSED])) - -# Get the first page of paused sandboxes -sandboxes = paginator.next_items() - -# Get all paused sandboxes -while paginator.has_next: - items = paginator.next_items() - sandboxes.extend(items) -``` - - -### Removing paused sandboxes - -You can remove paused sandboxes by calling the `kill` method on the Sandbox instance. - - -```js JavaScript & TypeScript highlight={11,14} -import { Sandbox } from '@e2b/code-interpreter' - -const sbx = await Sandbox.create() -console.log('Sandbox created', sbx.sandboxId) - -// Pause the sandbox -// You can save the sandbox ID in your database to resume the sandbox later -await sbx.pause() - -// Remove the sandbox -await sbx.kill() - -// Remove sandbox by id -await Sandbox.kill(sbx.sandboxId) -``` -```python Python highlight={9,12} -from e2b_code_interpreter import Sandbox - -sbx = Sandbox.create() - -# Pause the sandbox -sbx.pause() - -# Remove the sandbox -sbx.kill() - -# Remove sandbox by id -Sandbox.kill(sbx.sandbox_id) -``` - - -### Timeout on connect - -When you connect to a sandbox, the inactivity timeout resets. 
The default is 5 minutes, but you can pass a custom timeout to the `Sandbox.connect()` method: - - -```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' - -const sbx = await Sandbox.connect(sandboxId, { timeoutMs: 60 * 1000 }) // 60 seconds -``` -```python Python -from e2b_code_interpreter import Sandbox - -sbx = Sandbox.connect(sandbox_id, timeout=60) # 60 seconds -``` - - -### Auto-pause - -Auto-pause is configured in the sandbox lifecycle on create. Set `onTimeout`/`on_timeout` to `pause`. - - -```js JavaScript & TypeScript -import { Sandbox } from 'e2b' - -const sandbox = await Sandbox.create({ - timeoutMs: 10 * 60 * 1000, // Optional: change default timeout (10 minutes) - lifecycle: { - onTimeout: 'pause', - autoResume: false, // Optional (default is false) - }, -}) -``` -```python Python -from e2b import Sandbox - -sandbox = Sandbox.create( - timeout=10 * 60, # Optional: change default timeout (10 minutes) - lifecycle={ - "on_timeout": "pause", # Auto-pause after the sandbox times out - "auto_resume": False, # Optional (default is False) - }, -) -``` - - -Auto-pause is persistent, meaning if your sandbox resumes and later times out again, it will pause again. - -If you call `.kill()`, the sandbox is permanently deleted and cannot be resumed. - -### Network behavior during pause - -If you have a service (for example a server) running inside your sandbox and you pause the sandbox, the service won't be accessible from the outside and all the clients will be disconnected. -If you resume the sandbox, the service will be accessible again but you need to connect clients again. 
- -### Limitations - -#### Pause and resume performance -- Pausing a sandbox takes approximately **4 seconds per 1 GiB of RAM** -- Resuming a sandbox takes approximately **1 second** - -#### Paused sandbox retention -- Paused sandboxes are kept **indefinitely** — there is no automatic deletion or time-to-live limit -- You can resume a paused sandbox at any time - -#### Continuous runtime limits -- A sandbox can remain running (without being paused) for: - - **24 hours** on the **Pro tier** - - **1 hour** on the **Base tier** -- After a sandbox is paused and resumed, the continuous runtime limit is **reset** - -## AutoResume - -Many workloads don't need a sandbox running all the time, but when they do need it, it should just work, whether it was paused or not. - -`AutoResume` handles this automatically: a paused sandbox wakes up when activity arrives, so your code does not have to check or manage sandbox state. -Configure it through the `lifecycle` object when creating a sandbox. - -### Configure lifecycle on create - - -```js JavaScript & TypeScript -import { Sandbox } from 'e2b' - -const sandbox = await Sandbox.create({ - timeoutMs: 10 * 60 * 1000, - lifecycle: { - onTimeout: 'pause', - autoResume: true, // resume when activity arrives - }, -}) -``` -```python Python -from e2b import Sandbox - -sandbox = Sandbox.create( - timeout=10 * 60, - lifecycle={ - "on_timeout": "pause", - "auto_resume": True, # resume when activity arrives - }, -) -``` - - -### Lifecycle options - -- `onTimeout` / `on_timeout` - - `kill` (default): sandbox is terminated when timeout is reached - - `pause`: sandbox is paused when timeout is reached -- `autoResume` / `auto_resume` - - `false` (default): paused sandboxes do not auto-resume - - `true`: paused sandboxes auto-resume on activity - - `true` is valid only when `onTimeout`/`on_timeout` is `pause` - -### Behavior summary - -- Default behavior is equivalent to `onTimeout: "kill"` with `autoResume: false`. 
-- `onTimeout: "pause"` with `autoResume: false` gives auto-pause without auto-resume. -- `onTimeout: "pause"` with `autoResume: true` gives auto-pause with auto-resume. -- `Sandbox.connect()` can still be used to resume a paused sandbox manually. - -If you use `autoResume: false`, resume explicitly with `Sandbox.connect()`. - -### What counts as activity - -Auto-resume is triggered by the sandbox activity - that's both HTTP traffic and controlling the sandbox from the SDK. - -That includes SDK operations like: -- `sandbox.commands.run(...)` -- `sandbox.files.read(...)` -- `sandbox.files.write(...)` -- opening a tunneled app URL or sending requests to a service running inside the sandbox - -If a sandbox is paused and `autoResume` is enabled, the next supported operation resumes it automatically. You do not need to call `Sandbox.connect()` first. - -#### SDK example: pause, then read a file - - -```js JavaScript & TypeScript -import { Sandbox } from 'e2b' - -const sandbox = await Sandbox.create({ - timeoutMs: 10 * 60 * 1000, - lifecycle: { - onTimeout: 'pause', - autoResume: true, - }, -}) - -await sandbox.files.write('/home/user/hello.txt', 'hello from a paused sandbox') -await sandbox.pause() - -const content = await sandbox.files.read('/home/user/hello.txt') -console.log(content) -console.log(`State after read: ${(await sandbox.getInfo()).state}`) -``` -```python Python -from e2b import Sandbox - -sandbox = Sandbox.create( - timeout=10 * 60, - lifecycle={ - "on_timeout": "pause", - "auto_resume": True, - }, -) - -sandbox.files.write("/home/user/hello.txt", "hello from a paused sandbox") -sandbox.pause() - -content = sandbox.files.read("/home/user/hello.txt") -print(content) -print(f"State after read: {sandbox.get_info().state}") -``` - - -### AutoResume use cases - -#### Web and dev/preview servers - -Use `onTimeout: "pause"` + `autoResume: true` so inbound traffic can wake a paused sandbox automatically. 
-This works for both basic web/API servers and dev or preview servers you open occasionally. - - -```js JavaScript & TypeScript -import { Sandbox } from 'e2b' - -const sandbox = await Sandbox.create({ - timeoutMs: 10 * 60 * 1000, - lifecycle: { - onTimeout: 'pause', - autoResume: true, - }, -}) - -await sandbox.commands.run( - `python3 -m pip -q install 'flask>=2.2'` -) - -await sandbox.files.write( - '/home/user/app.py', - [ - 'from flask import Flask', - 'app = Flask(__name__)', - '@app.route("/")', - 'def hello():', - ' return "Hello, World!"', - 'app.run(host="0.0.0.0", port=3000)', - '', - ].join('\n') -) - -await sandbox.commands.run( - 'python3 -u /home/user/app.py > /home/user/flask.log 2>&1', - { background: true } -) - -await new Promise((resolve) => setTimeout(resolve, 1000)) - -const previewHost = sandbox.getHost(3000) -console.log(`Preview URL: https://${previewHost}`) - -console.log(`Status before pause: ${(await sandbox.getInfo()).state}`) -await sandbox.pause() -console.log(`Status after pause: ${(await sandbox.getInfo()).state}`) -``` -```python Python -import time - -from e2b import Sandbox - -sandbox = Sandbox.create( - timeout=10 * 60, - lifecycle={ - "on_timeout": "pause", - "auto_resume": True, - }, -) - -sandbox.commands.run("python3 -m pip -q install 'flask>=2.2'") - -sandbox.files.write( - "/home/user/app.py", - 'from flask import Flask\n' - 'app = Flask(__name__)\n' - '@app.route("/")\n' - 'def hello():\n' - ' return "Hello, World!"\n' - 'app.run(host="0.0.0.0", port=3000)\n' -) - -sandbox.commands.run( - "python3 -u /home/user/app.py > /home/user/flask.log 2>&1", - background=True, -) - -time.sleep(1) - -preview_host = sandbox.get_host(3000) -print(f"Preview URL: https://{preview_host}") - -print(f"Status before pause: {sandbox.get_info().state}") -sandbox.pause() -print(f"Status after pause: {sandbox.get_info().state}") -``` - - -#### Agent/tool execution - -For queued tasks or tool calls, create once and keep using the same sandbox 
handle. If it is paused, it will auto-resume when you run the next command. - - -```js JavaScript & TypeScript -import { Sandbox } from 'e2b' - -// One-time setup -const sandbox = await Sandbox.create({ - timeoutMs: 5 * 60 * 1000, - lifecycle: { - onTimeout: 'pause', - autoResume: true, - }, -}) - -// Later: called for each agent/tool task -async function runToolTask(command) { - const result = await sandbox.commands.run(command) - return result.stdout -} - -console.log(await runToolTask('python -c "print(2 + 2)"')) -``` -```python Python -from e2b import Sandbox - -# One-time setup -sandbox = Sandbox.create( - timeout=5 * 60, - lifecycle={ - "on_timeout": "pause", - "auto_resume": True, - }, -) - -# Later: called for each agent/tool task -def run_tool_task(command: str) -> str: - result = sandbox.commands.run(command) - return result.stdout - -print(run_tool_task('python -c "print(2 + 2)"')) -``` - - -#### Per-user sandboxes - -For multi-tenant apps, keep a map of sandbox IDs by user. On each request, connect to the user's existing sandbox (which auto-resumes if paused) or create a new one. 
- - -```js JavaScript & TypeScript -import { Sandbox } from 'e2b' - -const userSandboxes = new Map() // userId → Sandbox - -async function getSandbox(userId) { - let sandbox = userSandboxes.get(userId) - - if (!sandbox) { - sandbox = await Sandbox.create({ - timeoutMs: 5 * 60 * 1000, - lifecycle: { - onTimeout: 'pause', - autoResume: true, - }, - }) - userSandboxes.set(userId, sandbox) - } - - return sandbox -} - -// On each user request (auto-resumes if paused) -const sandbox = await getSandbox('user-123') -const result = await sandbox.commands.run('echo "Hello from your sandbox"') -console.log(result.stdout) -``` -```python Python -from e2b import Sandbox - -user_sandboxes: dict[str, Sandbox] = {} # user_id → Sandbox - -def get_sandbox(user_id: str) -> Sandbox: - if user_id not in user_sandboxes: - user_sandboxes[user_id] = Sandbox.create( - timeout=5 * 60, - lifecycle={ - "on_timeout": "pause", - "auto_resume": True, - }, - ) - - return user_sandboxes[user_id] - -# On each user request (auto-resumes if paused) -sandbox = get_sandbox("user-123") -result = sandbox.commands.run('echo "Hello from your sandbox"') -print(result.stdout) -``` - - -### AutoResume cleanup - -Auto-resume is persistent, meaning if your sandbox resumes and later times out again, it will pause again. - -If you call `.kill()`, the sandbox is permanently deleted and cannot be resumed. - -## Snapshots - -Snapshots let you create a persistent point-in-time capture of a running sandbox, including both its filesystem and memory state. -You can then use a snapshot to spawn new sandboxes that start from the exact same state. - -The original sandbox continues running after the snapshot is created, and a single snapshot can be used to create many new sandboxes. - -### Prerequisites - -Snapshots require templates with envd version `v0.5.0` or above. If you are using a custom template created before envd `v0.5.0`, you need to rebuild it. 
- -You can check the template envd version using the `e2b template list` command or by viewing the templates list on the dashboard. - -### Snapshots vs. Pause/Resume - -| | Pause/Resume | Snapshots | -|---|---|---| -| Effect on original sandbox | Pauses (stops) the sandbox | Sandbox briefly pauses, then continues running | -| Relationship | One-to-one — resume restores the same sandbox | One-to-many — snapshot can spawn many new sandboxes | -| Use case | Suspend and resume a single sandbox | Create a reusable checkpoint | - -### Snapshot flow - -```mermaid actions={false} -graph LR - A[Running Sandbox] -->|createSnapshot| B[Snapshotting] - B --> C[Snapshot Created] - B --> A - C -->|Sandbox.create| D[New Sandbox 1] - C -->|Sandbox.create| E[New Sandbox 2] - C -->|Sandbox.create| F[New Sandbox N] -``` - -The sandbox is briefly paused during the snapshot process but automatically returns to running state. The sandbox ID stays the same after the snapshot completes. - - -During the snapshot, the sandbox is temporarily paused and resumed. This causes all active connections (e.g. WebSocket, PTY, command streams) to be dropped. Make sure your client handles reconnection properly. - - -### Create a snapshot - -You can create a snapshot from a running sandbox instance. - - -```js JavaScript & TypeScript -import { Sandbox } from 'e2b' - -const sandbox = await Sandbox.create() - -// Create a snapshot from a running sandbox -const snapshot = await sandbox.createSnapshot() -console.log('Snapshot ID:', snapshot.snapshotId) -``` -```python Python -from e2b import Sandbox - -sandbox = Sandbox.create() - -# Create a snapshot from a running sandbox -snapshot = sandbox.create_snapshot() -print('Snapshot ID:', snapshot.snapshot_id) -``` - - -You can also create a snapshot by sandbox ID using the static method. 
- - -```js JavaScript & TypeScript -import { Sandbox } from 'e2b' - -// Create a snapshot by sandbox ID -const snapshot = await Sandbox.createSnapshot(sandboxId) -console.log('Snapshot ID:', snapshot.snapshotId) -``` -```python Python -from e2b import Sandbox - -# Create a snapshot by sandbox ID -snapshot = Sandbox.create_snapshot(sandbox_id) -print('Snapshot ID:', snapshot.snapshot_id) -``` - - -### Create a sandbox from a snapshot - -The snapshot ID can be used directly with `Sandbox.create()` to spawn a new sandbox from the snapshot. The new sandbox starts with the exact filesystem and memory state captured in the snapshot. - - -```js JavaScript & TypeScript highlight={5} -import { Sandbox } from 'e2b' - -const snapshot = await sandbox.createSnapshot() - -// Create a new sandbox from the snapshot -const newSandbox = await Sandbox.create(snapshot.snapshotId) -``` -```python Python highlight={5} -from e2b import Sandbox - -snapshot = sandbox.create_snapshot() - -# Create a new sandbox from the snapshot -new_sandbox = Sandbox.create(snapshot.snapshot_id) -``` - - -### List snapshots - -You can list all snapshots. The method returns a paginator for iterating through results. - - -```js JavaScript & TypeScript -import { Sandbox } from 'e2b' - -const paginator = Sandbox.listSnapshots() - -const snapshots = [] -while (paginator.hasNext) { - const items = await paginator.nextItems() - snapshots.push(...items) -} -``` -```python Python -from e2b import Sandbox - -paginator = Sandbox.list_snapshots() - -snapshots = [] -while paginator.has_next: - items = paginator.next_items() - snapshots.extend(items) -``` - - -#### Filter by sandbox - -You can filter snapshots created from a specific sandbox. 
- - -```js JavaScript & TypeScript -import { Sandbox } from 'e2b' - -const paginator = Sandbox.listSnapshots({ sandboxId: 'your-sandbox-id' }) -const snapshots = await paginator.nextItems() -``` -```python Python -from e2b import Sandbox - -paginator = Sandbox.list_snapshots(sandbox_id="your-sandbox-id") -snapshots = paginator.next_items() -``` - - -### Delete a snapshot - - -```js JavaScript & TypeScript -import { Sandbox } from 'e2b' - -// Returns true if deleted, false if the snapshot was not found -const deleted = await Sandbox.deleteSnapshot(snapshot.snapshotId) -``` -```python Python -from e2b import Sandbox - -Sandbox.delete_snapshot(snapshot.snapshot_id) -``` - - -### Snapshots vs. Templates - -Both snapshots and [templates](/docs/template/quickstart) create reusable starting points for sandboxes, but they solve different problems. - -| | Templates | Snapshots | -|---|---|---| -| Defined by | Declarative code (Template builder) | Capturing a running sandbox | -| Reproducibility | Same definition produces the same sandbox every time | Captures whatever state exists at that moment | -| Best for | Repeatable base environments | Checkpointing, rollback, forking runtime state | - -Use templates when every sandbox should start from an identical, known state — pre-installed tools, fixed configurations, consistent environments. -Use snapshots when you need to capture or fork live runtime state that depends on what happened during execution. - -### Snapshot use cases - -- **Checkpointing agent work** — an AI agent has loaded data and produced partial results in memory. Snapshot it so you can resume or fork from that point later. -- **Rollback points** — snapshot before a risky or expensive operation (running untrusted code, applying a migration, refactoring a web app). If it fails, rollback - spawn a fresh sandbox from the snapshot before the operation happened. 
-- **Forking workflows** — spawn multiple sandboxes from the same snapshot to explore different approaches in parallel. -- **Cached sandboxes** — avoid repeating expensive setup by snapshotting a sandbox that has already loaded a large dataset or started a long-running process. -- **Sharing state** — one user or agent configures an environment interactively, snapshots it, and others start from that exact state. - ## Shutdown sandbox You can shutdown the sandbox any time even before the timeout is up by calling the `kill` method. diff --git a/docs/sandbox/observability.mdx b/docs/sandbox/observability.mdx index 57a65416..73eef5b9 100644 --- a/docs/sandbox/observability.mdx +++ b/docs/sandbox/observability.mdx @@ -620,4 +620,4 @@ The following event types can be subscribed to via webhooks, they are used as th - `sandbox.lifecycle.updated` - Sandbox configuration updates - `sandbox.lifecycle.paused` - Sandbox pausing - `sandbox.lifecycle.resumed` - Sandbox resuming -- `sandbox.lifecycle.checkpointed` - Sandbox [snapshot](/docs/sandbox/lifecycle#snapshots) created +- `sandbox.lifecycle.checkpointed` - Sandbox [snapshot](/docs/sandbox/persistence#snapshots) created diff --git a/docs/sandbox/persistence.mdx b/docs/sandbox/persistence.mdx new file mode 100644 index 00000000..0ec1b1cd --- /dev/null +++ b/docs/sandbox/persistence.mdx @@ -0,0 +1,776 @@ +--- +title: "Pause, resume & snapshots" +sidebarTitle: "Pause, resume & snapshots" +description: "E2B sandboxes are persistent — pause and resume preserves filesystem and memory state. Use snapshots to checkpoint and fork sandbox state." +--- + +E2B sandboxes are persistent environments. When you pause a sandbox, its full state — filesystem and memory, including all running processes and loaded variables — is preserved indefinitely. Resume it at any time and pick up exactly where you left off. Snapshots let you capture that state and spawn multiple new sandboxes from it. 
+ +## Sandbox state transitions + +Understanding how sandboxes transition between states: + +```mermaid actions={false} +flowchart TD + start(( )) -->|Sandbox.create| Running + + Running["Running
• Active execution
• Consumes resources"] + Paused["Paused
• Preserves memory and files
• Cannot execute code"] + Snapshotting["Snapshotting
• Creates persistent snapshot
• Briefly pauses execution"] + Killed["Killed
• Resources released
• Cannot be resumed"] + + Running -->|pause| Paused + Running -->|createSnapshot| Snapshotting + Paused -->|connect| Running + Snapshotting -->|snapshot complete| Running + Running -->|kill| Killed + Paused -->|kill| Killed +``` + +### State descriptions + +- **Running**: The sandbox is actively running and can execute code. This is the initial state after creation. +- **Paused**: The sandbox execution is suspended but its state is preserved. +- **Snapshotting**: The sandbox is briefly paused while a persistent snapshot is being created. It automatically returns to Running. See [Snapshots](/docs/sandbox/persistence#snapshots). +- **Killed**: The sandbox is terminated and all resources are released. This is a terminal state. + +### Changing sandbox state + + +```js JavaScript & TypeScript +import { Sandbox } from '@e2b/code-interpreter' + +const sandbox = await Sandbox.create() // Starts in Running state + +// Pause the sandbox +await sandbox.pause() // Running → Paused + +// Resume the sandbox +await sandbox.connect() // Running/Paused → Running + +// Kill the sandbox (from any state) +await sandbox.kill() // Running/Paused → Killed +``` + +```python Python +from e2b_code_interpreter import Sandbox + +sandbox = Sandbox.create() # Starts in Running state + +# Pause the sandbox +sandbox.pause() # Running → Paused + +# Resume the sandbox +sandbox.connect() # Running/Paused → Running + +# Kill the sandbox (from any state) +sandbox.kill() # Running/Paused → Killed +``` + + +## Pausing sandbox + +When you pause a sandbox, both the sandbox's filesystem and memory state will be saved. This includes all the files in the sandbox's filesystem and all the running processes, loaded variables, data, etc. 
+ + +```js JavaScript & TypeScript highlight={8-9} +import { Sandbox } from '@e2b/code-interpreter' + +const sbx = await Sandbox.create() +console.log('Sandbox created', sbx.sandboxId) + +// Pause the sandbox +// You can save the sandbox ID in your database to resume the sandbox later +await sbx.pause() +console.log('Sandbox paused', sbx.sandboxId) +``` +```python Python highlight={8-9} +from e2b_code_interpreter import Sandbox + +sbx = Sandbox.create() +print('Sandbox created', sbx.sandbox_id) + +# Pause the sandbox +# You can save the sandbox ID in your database to resume the sandbox later +sbx.pause() +print('Sandbox paused', sbx.sandbox_id) +``` + + + +## Resuming sandbox + +When you resume a sandbox, it will be in the same state it was in when you paused it. +This means that all the files in the sandbox's filesystem will be restored and all the running processes, loaded variables, data, etc. will be restored. + + +```js JavaScript & TypeScript highlight={12-13} +import { Sandbox } from '@e2b/code-interpreter' + +const sbx = await Sandbox.create() +console.log('Sandbox created', sbx.sandboxId) + +// Pause the sandbox +// You can save the sandbox ID in your database to resume the sandbox later +await sbx.pause() +console.log('Sandbox paused', sbx.sandboxId) + +// Connect to the sandbox (it will automatically resume the sandbox, if paused) +const sameSbx = await sbx.connect() +console.log('Connected to the sandbox', sameSbx.sandboxId) +``` +```python Python highlight={12-13} +from e2b_code_interpreter import Sandbox + +sbx = Sandbox.create() +print('Sandbox created', sbx.sandbox_id) + +# Pause the sandbox +# You can save the sandbox ID in your database to resume the sandbox later +sbx.pause() +print('Sandbox paused', sbx.sandbox_id) + +# Connect to the sandbox (it will automatically resume the sandbox, if paused) +same_sbx = sbx.connect() +print('Connected to the sandbox', same_sbx.sandbox_id) +``` + + +## Listing paused sandboxes + +You can list all paused 
sandboxes by calling the `Sandbox.list` method and supplying the `state` query parameter. + + +```js JavaScript & TypeScript highlight={4,7} +import { Sandbox, SandboxInfo } from '@e2b/code-interpreter' + +// List all paused sandboxes +const paginator = Sandbox.list({ query: { state: ['paused'] } }) + +// Get the first page of paused sandboxes +const sandboxes = await paginator.nextItems() + +// Get all paused sandboxes +while (paginator.hasNext) { + const items = await paginator.nextItems() + sandboxes.push(...items) +} +``` +```python Python highlight={4,7} +# List all paused sandboxes +from e2b_code_interpreter import Sandbox, SandboxQuery, SandboxState + +paginator = Sandbox.list(SandboxQuery(state=[SandboxState.PAUSED])) + +# Get the first page of paused sandboxes +sandboxes = paginator.next_items() + +# Get all paused sandboxes +while paginator.has_next: + items = paginator.next_items() + sandboxes.extend(items) +``` + + +## Removing paused sandboxes + +You can remove paused sandboxes by calling the `kill` method on the Sandbox instance. + + +```js JavaScript & TypeScript highlight={11,14} +import { Sandbox } from '@e2b/code-interpreter' + +const sbx = await Sandbox.create() +console.log('Sandbox created', sbx.sandboxId) + +// Pause the sandbox +// You can save the sandbox ID in your database to resume the sandbox later +await sbx.pause() + +// Remove the sandbox +await sbx.kill() + +// Remove sandbox by id +await Sandbox.kill(sbx.sandboxId) +``` +```python Python highlight={9,12} +from e2b_code_interpreter import Sandbox + +sbx = Sandbox.create() + +# Pause the sandbox +sbx.pause() + +# Remove the sandbox +sbx.kill() + +# Remove sandbox by id +Sandbox.kill(sbx.sandbox_id) +``` + + +## Timeout on connect + +When you connect to a sandbox, the inactivity timeout resets. 
The default is 5 minutes, but you can pass a custom timeout to the `Sandbox.connect()` method: + + +```js JavaScript & TypeScript +import { Sandbox } from '@e2b/code-interpreter' + +const sbx = await Sandbox.connect(sandboxId, { timeoutMs: 60 * 1000 }) // 60 seconds +``` +```python Python +from e2b_code_interpreter import Sandbox + +sbx = Sandbox.connect(sandbox_id, timeout=60) # 60 seconds +``` + + +## Auto-pause + +Auto-pause is configured in the sandbox lifecycle on create. Set `onTimeout`/`on_timeout` to `pause`. + + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +const sandbox = await Sandbox.create({ + timeoutMs: 10 * 60 * 1000, // Optional: change default timeout (10 minutes) + lifecycle: { + onTimeout: 'pause', + autoResume: false, // Optional (default is false) + }, +}) +``` +```python Python +from e2b import Sandbox + +sandbox = Sandbox.create( + timeout=10 * 60, # Optional: change default timeout (10 minutes) + lifecycle={ + "on_timeout": "pause", # Auto-pause after the sandbox times out + "auto_resume": False, # Optional (default is False) + }, +) +``` + + +Auto-pause is persistent, meaning if your sandbox resumes and later times out again, it will pause again. + +If you call `.kill()`, the sandbox is permanently deleted and cannot be resumed. + +### Network behavior during pause + +If you have a service (for example a server) running inside your sandbox and you pause the sandbox, the service won't be accessible from the outside and all the clients will be disconnected. +If you resume the sandbox, the service will be accessible again but you need to connect clients again. 
+ +### Limitations + +#### Pause and resume performance +- Pausing a sandbox takes approximately **4 seconds per 1 GiB of RAM** +- Resuming a sandbox takes approximately **1 second** + +#### Paused sandbox retention +- Paused sandboxes are kept **indefinitely** — there is no automatic deletion or time-to-live limit +- You can resume a paused sandbox at any time + +#### Continuous runtime limits +- A sandbox can remain running (without being paused) for: + - **24 hours** on the **Pro tier** + - **1 hour** on the **Base tier** +- After a sandbox is paused and resumed, the continuous runtime limit is **reset** + +## AutoResume + +Many workloads don't need a sandbox running all the time, but when they do need it, it should just work, whether it was paused or not. + +`AutoResume` handles this automatically: a paused sandbox wakes up when activity arrives, so your code does not have to check or manage sandbox state. +Configure it through the `lifecycle` object when creating a sandbox. + +### Configure lifecycle on create + + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +const sandbox = await Sandbox.create({ + timeoutMs: 10 * 60 * 1000, + lifecycle: { + onTimeout: 'pause', + autoResume: true, // resume when activity arrives + }, +}) +``` +```python Python +from e2b import Sandbox + +sandbox = Sandbox.create( + timeout=10 * 60, + lifecycle={ + "on_timeout": "pause", + "auto_resume": True, # resume when activity arrives + }, +) +``` + + +### Lifecycle options + +- `onTimeout` / `on_timeout` + - `kill` (default): sandbox is terminated when timeout is reached + - `pause`: sandbox is paused when timeout is reached +- `autoResume` / `auto_resume` + - `false` (default): paused sandboxes do not auto-resume + - `true`: paused sandboxes auto-resume on activity + - `true` is valid only when `onTimeout`/`on_timeout` is `pause` + +### Behavior summary + +- Default behavior is equivalent to `onTimeout: "kill"` with `autoResume: false`. 
+- `onTimeout: "pause"` with `autoResume: false` gives auto-pause without auto-resume. +- `onTimeout: "pause"` with `autoResume: true` gives auto-pause with auto-resume. +- `Sandbox.connect()` can still be used to resume a paused sandbox manually. + +If you use `autoResume: false`, resume explicitly with `Sandbox.connect()`. + +### What counts as activity + +Auto-resume is triggered by sandbox activity — both HTTP traffic and controlling the sandbox from the SDK. + +That includes SDK operations like: +- `sandbox.commands.run(...)` +- `sandbox.files.read(...)` +- `sandbox.files.write(...)` +- opening a tunneled app URL or sending requests to a service running inside the sandbox + +If a sandbox is paused and `autoResume` is enabled, the next supported operation resumes it automatically. You do not need to call `Sandbox.connect()` first. + +#### SDK example: pause, then read a file + + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +const sandbox = await Sandbox.create({ + timeoutMs: 10 * 60 * 1000, + lifecycle: { + onTimeout: 'pause', + autoResume: true, + }, +}) + +await sandbox.files.write('/home/user/hello.txt', 'hello from a paused sandbox') +await sandbox.pause() + +const content = await sandbox.files.read('/home/user/hello.txt') +console.log(content) +console.log(`State after read: ${(await sandbox.getInfo()).state}`) +``` +```python Python +from e2b import Sandbox + +sandbox = Sandbox.create( + timeout=10 * 60, + lifecycle={ + "on_timeout": "pause", + "auto_resume": True, + }, +) + +sandbox.files.write("/home/user/hello.txt", "hello from a paused sandbox") +sandbox.pause() + +content = sandbox.files.read("/home/user/hello.txt") +print(content) +print(f"State after read: {sandbox.get_info().state}") +``` + + +### AutoResume use cases + +#### Web and dev/preview servers + +Use `onTimeout: "pause"` + `autoResume: true` so inbound traffic can wake a paused sandbox automatically. 
+This works for both basic web/API servers and dev or preview servers you open occasionally. + + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +const sandbox = await Sandbox.create({ + timeoutMs: 10 * 60 * 1000, + lifecycle: { + onTimeout: 'pause', + autoResume: true, + }, +}) + +await sandbox.commands.run( + `python3 -m pip -q install 'flask>=2.2'` +) + +await sandbox.files.write( + '/home/user/app.py', + [ + 'from flask import Flask', + 'app = Flask(__name__)', + '@app.route("/")', + 'def hello():', + ' return "Hello, World!"', + 'app.run(host="0.0.0.0", port=3000)', + '', + ].join('\n') +) + +await sandbox.commands.run( + 'python3 -u /home/user/app.py > /home/user/flask.log 2>&1', + { background: true } +) + +await new Promise((resolve) => setTimeout(resolve, 1000)) + +const previewHost = sandbox.getHost(3000) +console.log(`Preview URL: https://${previewHost}`) + +console.log(`Status before pause: ${(await sandbox.getInfo()).state}`) +await sandbox.pause() +console.log(`Status after pause: ${(await sandbox.getInfo()).state}`) +``` +```python Python +import time + +from e2b import Sandbox + +sandbox = Sandbox.create( + timeout=10 * 60, + lifecycle={ + "on_timeout": "pause", + "auto_resume": True, + }, +) + +sandbox.commands.run("python3 -m pip -q install 'flask>=2.2'") + +sandbox.files.write( + "/home/user/app.py", + 'from flask import Flask\n' + 'app = Flask(__name__)\n' + '@app.route("/")\n' + 'def hello():\n' + ' return "Hello, World!"\n' + 'app.run(host="0.0.0.0", port=3000)\n' +) + +sandbox.commands.run( + "python3 -u /home/user/app.py > /home/user/flask.log 2>&1", + background=True, +) + +time.sleep(1) + +preview_host = sandbox.get_host(3000) +print(f"Preview URL: https://{preview_host}") + +print(f"Status before pause: {sandbox.get_info().state}") +sandbox.pause() +print(f"Status after pause: {sandbox.get_info().state}") +``` + + +#### Agent/tool execution + +For queued tasks or tool calls, create once and keep using the same sandbox 
handle. If it is paused, it will auto-resume when you run the next command. + + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +// One-time setup +const sandbox = await Sandbox.create({ + timeoutMs: 5 * 60 * 1000, + lifecycle: { + onTimeout: 'pause', + autoResume: true, + }, +}) + +// Later: called for each agent/tool task +async function runToolTask(command) { + const result = await sandbox.commands.run(command) + return result.stdout +} + +console.log(await runToolTask('python -c "print(2 + 2)"')) +``` +```python Python +from e2b import Sandbox + +# One-time setup +sandbox = Sandbox.create( + timeout=5 * 60, + lifecycle={ + "on_timeout": "pause", + "auto_resume": True, + }, +) + +# Later: called for each agent/tool task +def run_tool_task(command: str) -> str: + result = sandbox.commands.run(command) + return result.stdout + +print(run_tool_task('python -c "print(2 + 2)"')) +``` + + +#### Per-user sandboxes + +For multi-tenant apps, keep a map of sandboxes by user. On each request, reuse the user's existing sandbox (which auto-resumes if paused) or create a new one. 
+ + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +const userSandboxes = new Map() // userId → Sandbox + +async function getSandbox(userId) { + let sandbox = userSandboxes.get(userId) + + if (!sandbox) { + sandbox = await Sandbox.create({ + timeoutMs: 5 * 60 * 1000, + lifecycle: { + onTimeout: 'pause', + autoResume: true, + }, + }) + userSandboxes.set(userId, sandbox) + } + + return sandbox +} + +// On each user request (auto-resumes if paused) +const sandbox = await getSandbox('user-123') +const result = await sandbox.commands.run('echo "Hello from your sandbox"') +console.log(result.stdout) +``` +```python Python +from e2b import Sandbox + +user_sandboxes: dict[str, Sandbox] = {} # user_id → Sandbox + +def get_sandbox(user_id: str) -> Sandbox: + if user_id not in user_sandboxes: + user_sandboxes[user_id] = Sandbox.create( + timeout=5 * 60, + lifecycle={ + "on_timeout": "pause", + "auto_resume": True, + }, + ) + + return user_sandboxes[user_id] + +# On each user request (auto-resumes if paused) +sandbox = get_sandbox("user-123") +result = sandbox.commands.run('echo "Hello from your sandbox"') +print(result.stdout) +``` + + +### AutoResume cleanup + +Auto-resume is persistent, meaning if your sandbox resumes and later times out again, it will pause again. + +If you call `.kill()`, the sandbox is permanently deleted and cannot be resumed. + +## Snapshots + +Snapshots let you create a persistent point-in-time capture of a running sandbox, including both its filesystem and memory state. +You can then use a snapshot to spawn new sandboxes that start from the exact same state. + +The original sandbox continues running after the snapshot is created, and a single snapshot can be used to create many new sandboxes. + +### Prerequisites + +Snapshots require templates with envd version `v0.5.0` or above. If you are using a custom template created before envd `v0.5.0`, you need to rebuild it. 
+ +You can check the template envd version using the `e2b template list` command or by viewing the templates list on the dashboard. + +### Snapshots vs. Pause/Resume + +| | Pause/Resume | Snapshots | +|---|---|---| +| Effect on original sandbox | Pauses (stops) the sandbox | Sandbox briefly pauses, then continues running | +| Relationship | One-to-one — resume restores the same sandbox | One-to-many — snapshot can spawn many new sandboxes | +| Use case | Suspend and resume a single sandbox | Create a reusable checkpoint | + +### Snapshot flow + +```mermaid actions={false} +graph LR + A[Running Sandbox] -->|createSnapshot| B[Snapshotting] + B --> C[Snapshot Created] + B --> A + C -->|Sandbox.create| D[New Sandbox 1] + C -->|Sandbox.create| E[New Sandbox 2] + C -->|Sandbox.create| F[New Sandbox N] +``` + +The sandbox is briefly paused during the snapshot process but automatically returns to running state. The sandbox ID stays the same after the snapshot completes. + + +During the snapshot, the sandbox is temporarily paused and resumed. This causes all active connections (e.g. WebSocket, PTY, command streams) to be dropped. Make sure your client handles reconnection properly. + + +### Create a snapshot + +You can create a snapshot from a running sandbox instance. + + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +const sandbox = await Sandbox.create() + +// Create a snapshot from a running sandbox +const snapshot = await sandbox.createSnapshot() +console.log('Snapshot ID:', snapshot.snapshotId) +``` +```python Python +from e2b import Sandbox + +sandbox = Sandbox.create() + +# Create a snapshot from a running sandbox +snapshot = sandbox.create_snapshot() +print('Snapshot ID:', snapshot.snapshot_id) +``` + + +You can also create a snapshot by sandbox ID using the static method. 
+ + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +// Create a snapshot by sandbox ID +const snapshot = await Sandbox.createSnapshot(sandboxId) +console.log('Snapshot ID:', snapshot.snapshotId) +``` +```python Python +from e2b import Sandbox + +# Create a snapshot by sandbox ID +snapshot = Sandbox.create_snapshot(sandbox_id) +print('Snapshot ID:', snapshot.snapshot_id) +``` + + +### Create a sandbox from a snapshot + +The snapshot ID can be used directly with `Sandbox.create()` to spawn a new sandbox from the snapshot. The new sandbox starts with the exact filesystem and memory state captured in the snapshot. + + +```js JavaScript & TypeScript highlight={5} +import { Sandbox } from 'e2b' + +const snapshot = await sandbox.createSnapshot() + +// Create a new sandbox from the snapshot +const newSandbox = await Sandbox.create(snapshot.snapshotId) +``` +```python Python highlight={5} +from e2b import Sandbox + +snapshot = sandbox.create_snapshot() + +# Create a new sandbox from the snapshot +new_sandbox = Sandbox.create(snapshot.snapshot_id) +``` + + +### List snapshots + +You can list all snapshots. The method returns a paginator for iterating through results. + + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +const paginator = Sandbox.listSnapshots() + +const snapshots = [] +while (paginator.hasNext) { + const items = await paginator.nextItems() + snapshots.push(...items) +} +``` +```python Python +from e2b import Sandbox + +paginator = Sandbox.list_snapshots() + +snapshots = [] +while paginator.has_next: + items = paginator.next_items() + snapshots.extend(items) +``` + + +#### Filter by sandbox + +You can filter snapshots created from a specific sandbox. 
+ + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +const paginator = Sandbox.listSnapshots({ sandboxId: 'your-sandbox-id' }) +const snapshots = await paginator.nextItems() +``` +```python Python +from e2b import Sandbox + +paginator = Sandbox.list_snapshots(sandbox_id="your-sandbox-id") +snapshots = paginator.next_items() +``` + + +### Delete a snapshot + + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +// Returns true if deleted, false if the snapshot was not found +const deleted = await Sandbox.deleteSnapshot(snapshot.snapshotId) +``` +```python Python +from e2b import Sandbox + +Sandbox.delete_snapshot(snapshot.snapshot_id) +``` + + +### Snapshots vs. Templates + +Both snapshots and [templates](/docs/template/quickstart) create reusable starting points for sandboxes, but they solve different problems. + +| | Templates | Snapshots | +|---|---|---| +| Defined by | Declarative code (Template builder) | Capturing a running sandbox | +| Reproducibility | Same definition produces the same sandbox every time | Captures whatever state exists at that moment | +| Best for | Repeatable base environments | Checkpointing, rollback, forking runtime state | + +Use templates when every sandbox should start from an identical, known state — pre-installed tools, fixed configurations, consistent environments. +Use snapshots when you need to capture or fork live runtime state that depends on what happened during execution. + +### Snapshot use cases + +- **Checkpointing agent work** — an AI agent has loaded data and produced partial results in memory. Snapshot it so you can resume or fork from that point later. +- **Rollback points** — snapshot before a risky or expensive operation (running untrusted code, applying a migration, refactoring a web app). If it fails, roll back by spawning a fresh sandbox from the snapshot taken before the operation. 
+- **Forking workflows** — spawn multiple sandboxes from the same snapshot to explore different approaches in parallel. +- **Cached sandboxes** — avoid repeating expensive setup by snapshotting a sandbox that has already loaded a large dataset or started a long-running process. +- **Sharing state** — one user or agent configures an environment interactively, snapshots it, and others start from that exact state. diff --git a/docs/use-cases/coding-agents.mdx b/docs/use-cases/coding-agents.mdx index 3fe384b3..bdf3f304 100644 --- a/docs/use-cases/coding-agents.mdx +++ b/docs/use-cases/coding-agents.mdx @@ -47,7 +47,7 @@ Since each sandbox is a full Linux environment, you can run any coding agent — Clone repos, manage branches, and push changes from sandboxes - + Pause and resume sandboxes to preserve state From 80332be0d0fac1bb841dc1b67cc6416570f6ed24 Mon Sep 17 00:00:00 2001 From: Tomas Beran Date: Wed, 18 Mar 2026 20:41:42 +0100 Subject: [PATCH 04/22] docs: add runtime customization guide for sandbox setup without templates New guide covering patterns for when you can't bake everything into a template: installing packages at runtime, uploading files, environment variables, start_cmd overrides, and combining patterns. Includes snapshot-based caching for repeated setups. 
--- docs.json | 1 + docs/sandbox.mdx | 3 + docs/sandbox/runtime-customization.mdx | 274 +++++++++++++++++++++++++ 3 files changed, 278 insertions(+) create mode 100644 docs/sandbox/runtime-customization.mdx diff --git a/docs.json b/docs.json index 83b53434..2b4de68a 100644 --- a/docs.json +++ b/docs.json @@ -82,6 +82,7 @@ { "group": "Sandbox guides", "pages": [ + "docs/sandbox/runtime-customization", "docs/sandbox/git-integration", "docs/sandbox/proxy-tunneling", "docs/sandbox/custom-domain" diff --git a/docs/sandbox.mdx b/docs/sandbox.mdx index 82b290d7..7294e857 100644 --- a/docs/sandbox.mdx +++ b/docs/sandbox.mdx @@ -86,6 +86,9 @@ print(result.stdout) ## Guides + + Install packages, upload files, and configure sandboxes dynamically. + Clone repos, manage branches, push changes. diff --git a/docs/sandbox/runtime-customization.mdx b/docs/sandbox/runtime-customization.mdx new file mode 100644 index 00000000..7f1dd455 --- /dev/null +++ b/docs/sandbox/runtime-customization.mdx @@ -0,0 +1,274 @@ +--- +title: "Customizing sandboxes at runtime" +sidebarTitle: "Runtime customization" +description: "Install packages, upload files, and configure E2B sandboxes at runtime when you can't bake everything into a template." +--- + +Templates define a sandbox's base environment, but many workloads need dynamic configuration — different packages per user, files uploaded at request time, or settings that change between runs. This guide covers patterns for customizing sandboxes at runtime without modifying your template. + +## When to use runtime customization vs. 
templates + +| Use case | Template | Runtime | +|---|---|---| +| Same packages for every sandbox | Yes | | +| Packages vary per user or request | | Yes | +| Large, slow-to-install dependencies | Yes | | +| Small, fast-to-install dependencies | | Yes | +| Static config files | Yes | | +| User-uploaded files | | Yes | +| Secrets that change per request | | Yes | + +When in doubt: put slow, stable dependencies in a [template](/docs/template/quickstart), and handle everything dynamic at runtime. + +## Install packages at runtime + +Use `sandbox.commands.run()` to install packages after the sandbox starts. This is useful when different users or requests need different dependencies. + + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +const sandbox = await Sandbox.create() + +// Install Python packages +await sandbox.commands.run('pip install pandas numpy matplotlib') + +// Install system packages +await sandbox.commands.run('sudo apt-get update && sudo apt-get install -y ffmpeg') + +// Install Node.js packages +await sandbox.commands.run('npm install express') +``` +```python Python +from e2b import Sandbox + +sandbox = Sandbox.create() + +# Install Python packages +sandbox.commands.run("pip install pandas numpy matplotlib") + +# Install system packages +sandbox.commands.run("sudo apt-get update && sudo apt-get install -y ffmpeg") + +# Install Node.js packages +sandbox.commands.run("npm install express") +``` + + + +For packages you install in every sandbox, add them to a [template](/docs/template/quickstart) instead. Template-based installs are faster because they're baked into the sandbox image. + + +### Speed up repeated installs with snapshots + +If multiple sandboxes need the same runtime setup, install once and [snapshot](/docs/sandbox/persistence#snapshots) the result. New sandboxes start from the snapshot with everything already installed. 
+ + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +// First time: install and snapshot +const sandbox = await Sandbox.create() +await sandbox.commands.run('pip install pandas numpy scikit-learn') + +const snapshot = await sandbox.createSnapshot() +console.log('Snapshot ID:', snapshot.snapshotId) + +// Later: start from snapshot — packages are already installed +const fast = await Sandbox.create(snapshot.snapshotId) +``` +```python Python +from e2b import Sandbox + +# First time: install and snapshot +sandbox = Sandbox.create() +sandbox.commands.run("pip install pandas numpy scikit-learn") + +snapshot = sandbox.create_snapshot() +print("Snapshot ID:", snapshot.snapshot_id) + +# Later: start from snapshot — packages are already installed +fast = Sandbox.create(snapshot.snapshot_id) +``` + + +## Upload files at sandbox start + +Use `sandbox.files.write()` to upload configuration files, scripts, or user data into the sandbox. + + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +const sandbox = await Sandbox.create() + +// Upload a configuration file +await sandbox.files.write('/home/user/config.json', JSON.stringify({ + apiEndpoint: 'https://api.example.com', + maxRetries: 3, +})) + +// Upload a script +await sandbox.files.write('/home/user/analyze.py', ` +import json +with open('config.json') as f: + config = json.load(f) +print(f"Connecting to {config['apiEndpoint']}") +`) + +// Run the uploaded script +const result = await sandbox.commands.run('cd /home/user && python analyze.py') +console.log(result.stdout) +``` +```python Python +import json +from e2b import Sandbox + +sandbox = Sandbox.create() + +# Upload a configuration file +sandbox.files.write("/home/user/config.json", json.dumps({ + "apiEndpoint": "https://api.example.com", + "maxRetries": 3, +})) + +# Upload a script +sandbox.files.write("/home/user/analyze.py", """ +import json +with open('config.json') as f: + config = json.load(f) +print(f"Connecting to 
{config['apiEndpoint']}") +""") + +# Run the uploaded script +result = sandbox.commands.run("cd /home/user && python analyze.py") +print(result.stdout) +``` + + +## Use environment variables + +Pass per-request configuration through [environment variables](/docs/sandbox/configuration#environment-variables). You can set them globally at sandbox creation or per command. + + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +// Set environment variables at creation +const sandbox = await Sandbox.create({ + envs: { + API_KEY: 'sk-user-123-key', + ENVIRONMENT: 'production', + }, +}) + +// Or set per command +const result = await sandbox.commands.run('echo $CUSTOM_VAR', { + envs: { CUSTOM_VAR: 'hello' }, +}) +console.log(result.stdout) // "hello" +``` +```python Python +from e2b import Sandbox + +# Set environment variables at creation +sandbox = Sandbox.create( + envs={ + "API_KEY": "sk-user-123-key", + "ENVIRONMENT": "production", + }, +) + +# Or set per command +result = sandbox.commands.run("echo $CUSTOM_VAR", envs={"CUSTOM_VAR": "hello"}) +print(result.stdout) # "hello" +``` + + +## Use `start_cmd` for initialization + +If your [template](/docs/template/quickstart) supports a [start command](/docs/template/start-ready-command), you can override it at runtime to customize sandbox initialization. + + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +const sandbox = await Sandbox.create('my-template', { + startCmd: 'python /home/user/setup.py && nginx -g "daemon off;"', +}) +``` +```python Python +from e2b import Sandbox + +sandbox = Sandbox.create("my-template", start_cmd="python /home/user/setup.py && nginx -g 'daemon off;'") +``` + + +## Combine patterns + +For complex setups, combine multiple approaches. 
Here's an example setting up a data analysis sandbox per user: + + +```js JavaScript & TypeScript +import { Sandbox } from 'e2b' + +async function createAnalysisSandbox(userId, dataUrl, packages) { + const sandbox = await Sandbox.create({ + timeoutMs: 10 * 60 * 1000, + lifecycle: { + onTimeout: 'pause', + autoResume: true, + }, + envs: { + USER_ID: userId, + DATA_URL: dataUrl, + }, + }) + + // Install user-specific packages + if (packages.length > 0) { + await sandbox.commands.run(`pip install ${packages.join(' ')}`) + } + + // Upload analysis script + await sandbox.files.write('/home/user/run.py', ` +import os, urllib.request +urllib.request.urlretrieve(os.environ['DATA_URL'], '/home/user/data.csv') +print(f"Data downloaded for user {os.environ['USER_ID']}") + `) + + await sandbox.commands.run('python /home/user/run.py') + return sandbox +} +``` +```python Python +from e2b import Sandbox + +def create_analysis_sandbox(user_id: str, data_url: str, packages: list[str]): + sandbox = Sandbox.create( + timeout=10 * 60, + lifecycle={ + "on_timeout": "pause", + "auto_resume": True, + }, + envs={ + "USER_ID": user_id, + "DATA_URL": data_url, + }, + ) + + # Install user-specific packages + if packages: + sandbox.commands.run(f"pip install {' '.join(packages)}") + + # Upload analysis script + sandbox.files.write("/home/user/run.py", """ +import os, urllib.request +urllib.request.urlretrieve(os.environ['DATA_URL'], '/home/user/data.csv') +print(f"Data downloaded for user {os.environ['USER_ID']}") + """) + + sandbox.commands.run("python /home/user/run.py") + return sandbox +``` + From c4d36732bebedd650975ca2c9c117c1e4007e562 Mon Sep 17 00:00:00 2001 From: Tomas Beran Date: Wed, 18 Mar 2026 20:42:21 +0100 Subject: [PATCH 05/22] docs: add description frontmatter to all sandbox pages for SEO and LLM discoverability --- docs/sandbox/commands.mdx | 1 + docs/sandbox/configuration.mdx | 1 + docs/sandbox/custom-domain.mdx | 1 + docs/sandbox/observability.mdx | 1 + 
docs/sandbox/security.mdx | 1 + 5 files changed, 5 insertions(+) diff --git a/docs/sandbox/commands.mdx b/docs/sandbox/commands.mdx index 5c4984f8..b368f40c 100644 --- a/docs/sandbox/commands.mdx +++ b/docs/sandbox/commands.mdx @@ -1,6 +1,7 @@ --- title: "Commands & terminal" sidebarTitle: Commands +description: "Run commands, use interactive terminals, and connect via SSH in E2B sandboxes." --- E2B sandboxes provide multiple ways to run commands and interact with the terminal: the PTY module for interactive terminal sessions, and SSH access for remote connectivity. diff --git a/docs/sandbox/configuration.mdx b/docs/sandbox/configuration.mdx index a8257241..eb45286b 100644 --- a/docs/sandbox/configuration.mdx +++ b/docs/sandbox/configuration.mdx @@ -1,6 +1,7 @@ --- title: "Configuration" sidebarTitle: Configuration +description: "Configure E2B sandboxes with environment variables and external storage buckets." --- This page covers sandbox configuration options including environment variables and connecting external storage buckets. diff --git a/docs/sandbox/custom-domain.mdx b/docs/sandbox/custom-domain.mdx index 1fa46272..bf9c1889 100644 --- a/docs/sandbox/custom-domain.mdx +++ b/docs/sandbox/custom-domain.mdx @@ -1,5 +1,6 @@ --- title: "Custom domain" +description: "Set up a custom domain to proxy requests to E2B sandboxes using Caddy and Cloudflare DNS." --- How to set up a custom domain for Sandboxes hosted on E2B. diff --git a/docs/sandbox/observability.mdx b/docs/sandbox/observability.mdx index 73eef5b9..231f0ac4 100644 --- a/docs/sandbox/observability.mdx +++ b/docs/sandbox/observability.mdx @@ -1,6 +1,7 @@ --- title: "Observability" sidebarTitle: Observability +description: "Monitor E2B sandboxes with resource metrics, lifecycle event history, and real-time webhooks." --- E2B provides several ways to monitor and track your sandboxes: resource metrics via the SDK and CLI, a REST API for lifecycle event history, and webhooks for real-time notifications. 
diff --git a/docs/sandbox/security.mdx b/docs/sandbox/security.mdx index e8557296..b2dd05a5 100644 --- a/docs/sandbox/security.mdx +++ b/docs/sandbox/security.mdx @@ -1,6 +1,7 @@ --- title: "Security" sidebarTitle: Security +description: "Learn about E2B sandbox security including authenticated access, network controls, and rate limits." --- E2B sandboxes provide multiple layers of security: authenticated access to the sandbox controller, fine-grained network controls, and rate limits to protect your infrastructure. From de8930dbeccd992fcef3d1f43a84d49df2d554bc Mon Sep 17 00:00:00 2001 From: Tomas Beran Date: Wed, 18 Mar 2026 20:57:41 +0100 Subject: [PATCH 06/22] docs: fix runtime customization table to focus on per-user vs shared --- docs/sandbox/runtime-customization.mdx | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/docs/sandbox/runtime-customization.mdx b/docs/sandbox/runtime-customization.mdx index 7f1dd455..8c580acb 100644 --- a/docs/sandbox/runtime-customization.mdx +++ b/docs/sandbox/runtime-customization.mdx @@ -12,13 +12,11 @@ Templates define a sandbox's base environment, but many workloads need dynamic c |---|---|---| | Same packages for every sandbox | Yes | | | Packages vary per user or request | | Yes | -| Large, slow-to-install dependencies | Yes | | -| Small, fast-to-install dependencies | | Yes | -| Static config files | Yes | | -| User-uploaded files | | Yes | +| Static config files, shared across sandboxes | Yes | | +| User-uploaded files or per-request data | | Yes | | Secrets that change per request | | Yes | -When in doubt: put slow, stable dependencies in a [template](/docs/template/quickstart), and handle everything dynamic at runtime. +When in doubt: if every sandbox needs it, put it in a [template](/docs/template/quickstart). If it varies per user or request, handle it at runtime. 
## Install packages at runtime From ed703887f64bf9441c358a125739e5fd45a7e9b6 Mon Sep 17 00:00:00 2001 From: Tomas Beran Date: Wed, 18 Mar 2026 21:04:02 +0100 Subject: [PATCH 07/22] docs: fix typo and stale comment in lifecycle page --- docs/sandbox/lifecycle.mdx | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/docs/sandbox/lifecycle.mdx b/docs/sandbox/lifecycle.mdx index 4fb220bf..cd146b2a 100644 --- a/docs/sandbox/lifecycle.mdx +++ b/docs/sandbox/lifecycle.mdx @@ -18,7 +18,7 @@ Every sandbox has a configurable timeout that determines how long it stays runni ```js JavaScript & TypeScript highlight={6} import { Sandbox } from '@e2b/code-interpreter' -// Create sandbox with and keep it running for 60 seconds. +// Create a sandbox and keep it running for 60 seconds. // 🚨 Note: The units are milliseconds. const sandbox = await Sandbox.create({ timeoutMs: 60_000, @@ -27,7 +27,7 @@ const sandbox = await Sandbox.create({ ```python Python highlight={6} from e2b_code_interpreter import Sandbox -# Create sandbox with and keep it running for 60 seconds. +# Create a sandbox and keep it running for 60 seconds. # 🚨 Note: The units are seconds. sandbox = Sandbox.create( timeout=60, @@ -48,7 +48,7 @@ You can for example start with a sandbox with 1 minute timeout and then periodic ```js JavaScript & TypeScript import { Sandbox } from '@e2b/code-interpreter' -// Create sandbox with and keep it running for 60 seconds. +// Create a sandbox and keep it running for 60 seconds. const sandbox = await Sandbox.create({ timeoutMs: 60_000 }) // Change the sandbox timeout to 30 seconds. @@ -58,7 +58,7 @@ await sandbox.setTimeout(30_000) ```python Python from e2b_code_interpreter import Sandbox -# Create sandbox with and keep it running for 60 seconds. +# Create a sandbox and keep it running for 60 seconds. sandbox = Sandbox.create(timeout=60) # Change the sandbox timeout to 30 seconds. 
@@ -75,7 +75,7 @@ You can retrieve sandbox information like sandbox ID, template, metadata, starte ```js JavaScript & TypeScript import { Sandbox } from '@e2b/code-interpreter' -// Create sandbox with and keep it running for 60 seconds. +// Create a sandbox and keep it running for 60 seconds. const sandbox = await Sandbox.create({ timeoutMs: 60_000 }) // Retrieve sandbox information. @@ -96,7 +96,7 @@ console.log(info) ```python Python from e2b_code_interpreter import Sandbox -# Create sandbox with and keep it running for 60 seconds. +# Create a sandbox and keep it running for 60 seconds. sandbox = Sandbox.create(timeout=60) # Retrieve sandbox information. @@ -339,7 +339,6 @@ For more granular pagination, you can set custom per-page item limit (default an ```python Python highlight={5-6,13} from e2b_code_interpreter import Sandbox - # List running sandboxes that has `userId` key with value `123` and `env` key with value `dev`. paginator = Sandbox.list( limit=100, next_token="", @@ -463,7 +462,7 @@ You can shutdown the sandbox any time even before the timeout is up by calling t ```js JavaScript & TypeScript import { Sandbox } from '@e2b/code-interpreter' -// Create sandbox with and keep it running for 60 seconds. +// Create a sandbox and keep it running for 60 seconds. const sandbox = await Sandbox.create({ timeoutMs: 60_000 }) // Shutdown the sandbox immediately. @@ -472,7 +471,7 @@ await sandbox.kill() ```python Python from e2b_code_interpreter import Sandbox -# Create sandbox with and keep it running for 60 seconds. +# Create a sandbox and keep it running for 60 seconds. sandbox = Sandbox.create(timeout=60) # Shutdown the sandbox immediately. 
From daa0d31a1b410da42f3de2ab20e973688dfc392a Mon Sep 17 00:00:00 2001 From: Tomas Beran Date: Wed, 18 Mar 2026 21:15:20 +0100 Subject: [PATCH 08/22] docs: consolidate persistence page headings to reduce TOC clutter Restructure from ~9 H2 sections to 4: - Sandbox state transitions - Pause and resume (merges pausing, resuming, listing, removing, timeout) - Auto-pause and auto-resume (merges auto-pause and autoresume sections) - Snapshots --- docs/sandbox/persistence.mdx | 69 ++++++++++-------------------------- 1 file changed, 19 insertions(+), 50 deletions(-) diff --git a/docs/sandbox/persistence.mdx b/docs/sandbox/persistence.mdx index 0ec1b1cd..d81fdb0e 100644 --- a/docs/sandbox/persistence.mdx +++ b/docs/sandbox/persistence.mdx @@ -68,7 +68,9 @@ sandbox.kill() # Running/Paused → Killed ``` -## Pausing sandbox +## Pause and resume + +### Pausing a sandbox When you pause a sandbox, both the sandbox's filesystem and memory state will be saved. This includes all the files in the sandbox's filesystem and all the running processes, loaded variables, data, etc. @@ -97,8 +99,7 @@ print('Sandbox paused', sbx.sandbox_id) ``` - -## Resuming sandbox +### Resuming a sandbox When you resume a sandbox, it will be in the same state it was in when you paused it. This means that all the files in the sandbox's filesystem will be restored and all the running processes, loaded variables, data, etc. will be restored. @@ -136,7 +137,7 @@ print('Connected to the sandbox', same_sbx.sandbox_id) ``` -## Listing paused sandboxes +### Listing paused sandboxes You can list all paused sandboxes by calling the `Sandbox.list` method and supplying the `state` query parameter. @@ -172,7 +173,7 @@ while paginator.has_next: ``` -## Removing paused sandboxes +### Removing paused sandboxes You can remove paused sandboxes by calling the `kill` method on the Sandbox instance. 
@@ -209,7 +210,7 @@ Sandbox.kill(sbx.sandbox_id) ``` -## Timeout on connect +### Timeout on connect When you connect to a sandbox, the inactivity timeout resets. The default is 5 minutes, but you can pass a custom timeout to the `Sandbox.connect()` method: @@ -226,39 +227,6 @@ sbx = Sandbox.connect(sandbox_id, timeout=60) # 60 seconds ``` -## Auto-pause - -Auto-pause is configured in the sandbox lifecycle on create. Set `onTimeout`/`on_timeout` to `pause`. - - -```js JavaScript & TypeScript -import { Sandbox } from 'e2b' - -const sandbox = await Sandbox.create({ - timeoutMs: 10 * 60 * 1000, // Optional: change default timeout (10 minutes) - lifecycle: { - onTimeout: 'pause', - autoResume: false, // Optional (default is false) - }, -}) -``` -```python Python -from e2b import Sandbox - -sandbox = Sandbox.create( - timeout=10 * 60, # Optional: change default timeout (10 minutes) - lifecycle={ - "on_timeout": "pause", # Auto-pause after the sandbox times out - "auto_resume": False, # Optional (default is False) - }, -) -``` - - -Auto-pause is persistent, meaning if your sandbox resumes and later times out again, it will pause again. - -If you call `.kill()`, the sandbox is permanently deleted and cannot be resumed. - ### Network behavior during pause If you have a service (for example a server) running inside your sandbox and you pause the sandbox, the service won't be accessible from the outside and all the clients will be disconnected. @@ -280,12 +248,9 @@ If you resume the sandbox, the service will be accessible again but you need to - **1 hour** on the **Base tier** - After a sandbox is paused and resumed, the continuous runtime limit is **reset** -## AutoResume +## Auto-pause and auto-resume -Many workloads don't need a sandbox running all the time, but when they do need it, it should just work, whether it was paused or not. 
- -`AutoResume` handles this automatically: a paused sandbox wakes up when activity arrives, so your code does not have to check or manage sandbox state. -Configure it through the `lifecycle` object when creating a sandbox. +Auto-pause and auto-resume are configured together through the `lifecycle` object when creating a sandbox. Auto-pause suspends a sandbox when its timeout expires. Auto-resume wakes it back up when activity arrives. ### Configure lifecycle on create @@ -296,8 +261,8 @@ import { Sandbox } from 'e2b' const sandbox = await Sandbox.create({ timeoutMs: 10 * 60 * 1000, lifecycle: { - onTimeout: 'pause', - autoResume: true, // resume when activity arrives + onTimeout: 'pause', // auto-pause when timeout expires + autoResume: true, // auto-resume when activity arrives }, }) ``` @@ -307,13 +272,17 @@ from e2b import Sandbox sandbox = Sandbox.create( timeout=10 * 60, lifecycle={ - "on_timeout": "pause", - "auto_resume": True, # resume when activity arrives + "on_timeout": "pause", # auto-pause when timeout expires + "auto_resume": True, # auto-resume when activity arrives }, ) ``` +Auto-pause is persistent — if your sandbox resumes and later times out again, it will pause again. + +If you call `.kill()`, the sandbox is permanently deleted and cannot be resumed. + ### Lifecycle options - `onTimeout` / `on_timeout` @@ -386,7 +355,7 @@ print(f"State after read: {sandbox.get_info().state}") ``` -### AutoResume use cases +### Use cases #### Web and dev/preview servers @@ -579,7 +548,7 @@ print(result.stdout) ``` -### AutoResume cleanup +### Cleanup Auto-resume is persistent, meaning if your sandbox resumes and later times out again, it will pause again. 
From b3f119574fc1470129e4a3b299e0334bcb7803cd Mon Sep 17 00:00:00 2001 From: Tomas Beran Date: Wed, 18 Mar 2026 22:46:10 +0100 Subject: [PATCH 09/22] docs: flatten Sandbox core and Sandbox guides into single Sandbox group --- docs.json | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/docs.json b/docs.json index 2b4de68a..3fdf8613 100644 --- a/docs.json +++ b/docs.json @@ -68,7 +68,7 @@ ] }, { - "group": "Sandbox core", + "group": "Sandbox", "pages": [ "docs/sandbox", "docs/sandbox/lifecycle", @@ -76,12 +76,7 @@ "docs/sandbox/commands", "docs/sandbox/configuration", "docs/sandbox/security", - "docs/sandbox/observability" - ] - }, - { - "group": "Sandbox guides", - "pages": [ + "docs/sandbox/observability", "docs/sandbox/runtime-customization", "docs/sandbox/git-integration", "docs/sandbox/proxy-tunneling", From cabc5f58f89c51a6c6bc3c912ba21910938d3b02 Mon Sep 17 00:00:00 2001 From: Tomas Beran Date: Wed, 18 Mar 2026 23:22:53 +0100 Subject: [PATCH 10/22] docs: add sidebar icons to all sandbox pages --- docs/sandbox.mdx | 1 + docs/sandbox/commands.mdx | 1 + docs/sandbox/configuration.mdx | 1 + docs/sandbox/custom-domain.mdx | 1 + docs/sandbox/git-integration.mdx | 1 + docs/sandbox/lifecycle.mdx | 1 + docs/sandbox/observability.mdx | 1 + docs/sandbox/persistence.mdx | 1 + docs/sandbox/proxy-tunneling.mdx | 1 + docs/sandbox/runtime-customization.mdx | 1 + docs/sandbox/security.mdx | 1 + 11 files changed, 11 insertions(+) diff --git a/docs/sandbox.mdx b/docs/sandbox.mdx index 7294e857..42ec305d 100644 --- a/docs/sandbox.mdx +++ b/docs/sandbox.mdx @@ -1,6 +1,7 @@ --- title: "Sandbox" sidebarTitle: Overview +icon: "cube" description: "E2B sandboxes are persistent, secure cloud environments for AI agents — with pause/resume, snapshots, and auto-resume built in." 
--- diff --git a/docs/sandbox/commands.mdx b/docs/sandbox/commands.mdx index b368f40c..84e20b2b 100644 --- a/docs/sandbox/commands.mdx +++ b/docs/sandbox/commands.mdx @@ -1,6 +1,7 @@ --- title: "Commands & terminal" sidebarTitle: Commands +icon: "terminal" description: "Run commands, use interactive terminals, and connect via SSH in E2B sandboxes." --- diff --git a/docs/sandbox/configuration.mdx b/docs/sandbox/configuration.mdx index eb45286b..3ff1e7c7 100644 --- a/docs/sandbox/configuration.mdx +++ b/docs/sandbox/configuration.mdx @@ -1,6 +1,7 @@ --- title: "Configuration" sidebarTitle: Configuration +icon: "sliders" description: "Configure E2B sandboxes with environment variables and external storage buckets." --- diff --git a/docs/sandbox/custom-domain.mdx b/docs/sandbox/custom-domain.mdx index bf9c1889..02898c0b 100644 --- a/docs/sandbox/custom-domain.mdx +++ b/docs/sandbox/custom-domain.mdx @@ -1,5 +1,6 @@ --- title: "Custom domain" +icon: "globe" description: "Set up a custom domain to proxy requests to E2B sandboxes using Caddy and Cloudflare DNS." --- diff --git a/docs/sandbox/git-integration.mdx b/docs/sandbox/git-integration.mdx index f4618fe7..ac5ea593 100644 --- a/docs/sandbox/git-integration.mdx +++ b/docs/sandbox/git-integration.mdx @@ -1,5 +1,6 @@ --- title: "Git integration" +icon: "code-branch" description: "Clone repositories, manage branches, and push changes using the sandbox.git methods." --- diff --git a/docs/sandbox/lifecycle.mdx b/docs/sandbox/lifecycle.mdx index cd146b2a..4d73ff76 100644 --- a/docs/sandbox/lifecycle.mdx +++ b/docs/sandbox/lifecycle.mdx @@ -1,6 +1,7 @@ --- title: "Sandbox lifecycle" sidebarTitle: Lifecycle +icon: "rotate" description: "Manage E2B sandbox lifecycle — timeouts, metadata, listing, connecting, and shutdown. Sandboxes are persistent cloud environments for AI agents." 
--- diff --git a/docs/sandbox/observability.mdx b/docs/sandbox/observability.mdx index 231f0ac4..8149927d 100644 --- a/docs/sandbox/observability.mdx +++ b/docs/sandbox/observability.mdx @@ -1,6 +1,7 @@ --- title: "Observability" sidebarTitle: Observability +icon: "chart-line" description: "Monitor E2B sandboxes with resource metrics, lifecycle event history, and real-time webhooks." --- diff --git a/docs/sandbox/persistence.mdx b/docs/sandbox/persistence.mdx index d81fdb0e..88214191 100644 --- a/docs/sandbox/persistence.mdx +++ b/docs/sandbox/persistence.mdx @@ -1,6 +1,7 @@ --- title: "Pause, resume & snapshots" sidebarTitle: "Pause, resume & snapshots" +icon: "pause" description: "E2B sandboxes are persistent — pause and resume preserves filesystem and memory state. Use snapshots to checkpoint and fork sandbox state." --- diff --git a/docs/sandbox/proxy-tunneling.mdx b/docs/sandbox/proxy-tunneling.mdx index 82927194..21cb18ca 100644 --- a/docs/sandbox/proxy-tunneling.mdx +++ b/docs/sandbox/proxy-tunneling.mdx @@ -1,5 +1,6 @@ --- title: "Proxy tunneling" +icon: "route" description: "How to tunnel Sandbox network traffic through a proxy server" --- diff --git a/docs/sandbox/runtime-customization.mdx b/docs/sandbox/runtime-customization.mdx index 8c580acb..f7c9ef9a 100644 --- a/docs/sandbox/runtime-customization.mdx +++ b/docs/sandbox/runtime-customization.mdx @@ -1,6 +1,7 @@ --- title: "Customizing sandboxes at runtime" sidebarTitle: "Runtime customization" +icon: "wrench" description: "Install packages, upload files, and configure E2B sandboxes at runtime when you can't bake everything into a template." --- diff --git a/docs/sandbox/security.mdx b/docs/sandbox/security.mdx index b2dd05a5..9976aa06 100644 --- a/docs/sandbox/security.mdx +++ b/docs/sandbox/security.mdx @@ -1,6 +1,7 @@ --- title: "Security" sidebarTitle: Security +icon: "shield" description: "Learn about E2B sandbox security including authenticated access, network controls, and rate limits." 
--- From a7771763617f2fe276002d2d254778aaef6925a5 Mon Sep 17 00:00:00 2001 From: Tomas Beran Date: Wed, 18 Mar 2026 23:28:16 +0100 Subject: [PATCH 11/22] docs: use detailed state diagram on overview and clarify auto-resume requires opt-in --- docs/sandbox.mdx | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/docs/sandbox.mdx b/docs/sandbox.mdx index 42ec305d..20d4b7de 100644 --- a/docs/sandbox.mdx +++ b/docs/sandbox.mdx @@ -8,12 +8,18 @@ description: "E2B sandboxes are persistent, secure cloud environments for AI age An E2B sandbox is a persistent, secure cloud environment where your AI agents execute code, run commands, manage files, and access the internet. Every sandbox supports **pause and resume** — when a sandbox times out, it can automatically pause and preserve its full state (filesystem and memory), then resume exactly where it left off when activity arrives. ```mermaid actions={false} -flowchart LR - start(( )) -->|create| Running - Running -->|timeout or pause| Paused - Paused -->|connect or auto‑resume| Running - Running -->|snapshot| Snapshotting - Snapshotting -->|complete| Running +flowchart TD + start(( )) -->|Sandbox.create| Running + + Running["Running
• Active execution
• Consumes resources"] + Paused["Paused
• Preserves memory and files
• Cannot execute code"] + Snapshotting["Snapshotting
• Creates persistent snapshot
• Briefly pauses execution"] + Killed["Killed
• Resources released
• Cannot be resumed"] + + Running -->|pause| Paused + Running -->|createSnapshot| Snapshotting + Paused -->|connect| Running + Snapshotting -->|snapshot complete| Running Running -->|kill| Killed Paused -->|kill| Killed ``` @@ -21,7 +27,7 @@ flowchart LR ## Key characteristics - **Persistent by default** — configure `onTimeout: 'pause'` and sandboxes preserve their full state (filesystem + memory) indefinitely. Resume at any time. -- **Auto-resume** — paused sandboxes wake automatically when SDK calls or HTTP traffic arrive. No manual state management needed. +- **Auto-resume** — when [enabled](/docs/sandbox/persistence#auto-pause-and-auto-resume), paused sandboxes wake automatically when SDK calls or HTTP traffic arrive. No manual state management needed. - **Snapshots** — capture a running sandbox's state and spawn multiple new sandboxes from it. Useful for checkpointing, forking, and rollback. - **Configurable timeouts** — sandboxes run for up to 24 hours (Pro) or 1 hour (Base) continuously. Pausing resets the runtime window. - **Isolated and secure** — each sandbox runs in its own microVM with network controls, access tokens, and rate limits. 
From 69240ccb7f56e493f5c8f7fbe23bbdfe1f6fe6fb Mon Sep 17 00:00:00 2001 From: Tomas Beran Date: Mon, 30 Mar 2026 11:36:24 +0200 Subject: [PATCH 12/22] docs: move Filesystem, Volumes, Commands, and MCP gateway into Sandbox section --- docs.json | 86 +++++++++++++++++++++++++++---------------------------- 1 file changed, 43 insertions(+), 43 deletions(-) diff --git a/docs.json b/docs.json index d97660f4..dc1105fc 100644 --- a/docs.json +++ b/docs.json @@ -80,7 +80,49 @@ "docs/sandbox/runtime-customization", "docs/sandbox/git-integration", "docs/sandbox/proxy-tunneling", - "docs/sandbox/custom-domain" + "docs/sandbox/custom-domain", + { + "group": "Filesystem", + "pages": [ + "docs/filesystem", + "docs/filesystem/read-write", + "docs/filesystem/info", + "docs/filesystem/watch", + "docs/filesystem/upload", + "docs/filesystem/download" + ] + }, + { + "group": "Volumes", + "pages": [ + "docs/volumes", + "docs/volumes/manage", + "docs/volumes/mount", + "docs/volumes/read-write", + "docs/volumes/info", + "docs/volumes/upload", + "docs/volumes/download" + ] + }, + { + "group": "Commands", + "pages": [ + "docs/commands", + "docs/commands/streaming", + "docs/commands/background" + ] + }, + { + "group": "MCP gateway", + "pages": [ + "docs/mcp", + "docs/mcp/quickstart", + "docs/mcp/available-servers", + "docs/mcp/custom-templates", + "docs/mcp/custom-servers", + "docs/mcp/examples" + ] + } ] }, { @@ -113,48 +155,6 @@ "docs/template/migration-v2" ] }, - { - "group": "Filesystem", - "pages": [ - "docs/filesystem", - "docs/filesystem/read-write", - "docs/filesystem/info", - "docs/filesystem/watch", - "docs/filesystem/upload", - "docs/filesystem/download" - ] - }, - { - "group": "Volumes", - "pages": [ - "docs/volumes", - "docs/volumes/manage", - "docs/volumes/mount", - "docs/volumes/read-write", - "docs/volumes/info", - "docs/volumes/upload", - "docs/volumes/download" - ] - }, - { - "group": "Commands", - "pages": [ - "docs/commands", - "docs/commands/streaming", - 
"docs/commands/background" - ] - }, - { - "group": "MCP gateway", - "pages": [ - "docs/mcp", - "docs/mcp/quickstart", - "docs/mcp/available-servers", - "docs/mcp/custom-templates", - "docs/mcp/custom-servers", - "docs/mcp/examples" - ] - }, { "group": "Code interpreting", "pages": [ From ae2dccd73cae1f594d0a96abd096e24d6af9e435 Mon Sep 17 00:00:00 2001 From: Tomas Beran Date: Mon, 30 Mar 2026 11:52:45 +0200 Subject: [PATCH 13/22] docs: consolidate Filesystem, Volumes, Commands, and MCP gateway into Sandbox section - Create docs/sandbox/filesystem.mdx (merged 6 pages) - Create docs/sandbox/volumes.mdx (merged 6 pages) - Create docs/sandbox/mcp.mdx (merged 5 pages, available-servers kept separate) - Merge commands.run/streaming/background into existing sandbox/commands.mdx - Add redirects for all old URLs - Update cross-references in security, claude-code pages - Add new cards to sandbox overview --- docs.json | 156 +- docs/agents/claude-code.mdx | 4 +- docs/sandbox.mdx | 11 +- docs/sandbox/commands.mdx | 95 +- docs/sandbox/filesystem.mdx | 517 ++++ docs/sandbox/mcp.mdx | 514 ++++ docs/sandbox/mcp/available-servers.mdx | 3547 ++++++++++++++++++++++++ docs/sandbox/security.mdx | 2 +- docs/sandbox/volumes.mdx | 583 ++++ 9 files changed, 5382 insertions(+), 47 deletions(-) create mode 100644 docs/sandbox/filesystem.mdx create mode 100644 docs/sandbox/mcp.mdx create mode 100644 docs/sandbox/mcp/available-servers.mdx create mode 100644 docs/sandbox/volumes.mdx diff --git a/docs.json b/docs.json index dc1105fc..c6473f0d 100644 --- a/docs.json +++ b/docs.json @@ -81,48 +81,10 @@ "docs/sandbox/git-integration", "docs/sandbox/proxy-tunneling", "docs/sandbox/custom-domain", - { - "group": "Filesystem", - "pages": [ - "docs/filesystem", - "docs/filesystem/read-write", - "docs/filesystem/info", - "docs/filesystem/watch", - "docs/filesystem/upload", - "docs/filesystem/download" - ] - }, - { - "group": "Volumes", - "pages": [ - "docs/volumes", - "docs/volumes/manage", - 
"docs/volumes/mount", - "docs/volumes/read-write", - "docs/volumes/info", - "docs/volumes/upload", - "docs/volumes/download" - ] - }, - { - "group": "Commands", - "pages": [ - "docs/commands", - "docs/commands/streaming", - "docs/commands/background" - ] - }, - { - "group": "MCP gateway", - "pages": [ - "docs/mcp", - "docs/mcp/quickstart", - "docs/mcp/available-servers", - "docs/mcp/custom-templates", - "docs/mcp/custom-servers", - "docs/mcp/examples" - ] - } + "docs/sandbox/filesystem", + "docs/sandbox/volumes", + "docs/sandbox/mcp", + "docs/sandbox/mcp/available-servers" ] }, { @@ -3703,6 +3665,116 @@ } }, "redirects": [ + { + "source": "/docs/filesystem", + "destination": "/docs/sandbox/filesystem", + "permanent": true + }, + { + "source": "/docs/filesystem/read-write", + "destination": "/docs/sandbox/filesystem", + "permanent": true + }, + { + "source": "/docs/filesystem/info", + "destination": "/docs/sandbox/filesystem", + "permanent": true + }, + { + "source": "/docs/filesystem/watch", + "destination": "/docs/sandbox/filesystem", + "permanent": true + }, + { + "source": "/docs/filesystem/upload", + "destination": "/docs/sandbox/filesystem", + "permanent": true + }, + { + "source": "/docs/filesystem/download", + "destination": "/docs/sandbox/filesystem", + "permanent": true + }, + { + "source": "/docs/volumes", + "destination": "/docs/sandbox/volumes", + "permanent": true + }, + { + "source": "/docs/volumes/manage", + "destination": "/docs/sandbox/volumes", + "permanent": true + }, + { + "source": "/docs/volumes/mount", + "destination": "/docs/sandbox/volumes", + "permanent": true + }, + { + "source": "/docs/volumes/read-write", + "destination": "/docs/sandbox/volumes", + "permanent": true + }, + { + "source": "/docs/volumes/info", + "destination": "/docs/sandbox/volumes", + "permanent": true + }, + { + "source": "/docs/volumes/upload", + "destination": "/docs/sandbox/volumes", + "permanent": true + }, + { + "source": "/docs/volumes/download", + "destination": 
"/docs/sandbox/volumes", + "permanent": true + }, + { + "source": "/docs/commands", + "destination": "/docs/sandbox/commands", + "permanent": true + }, + { + "source": "/docs/commands/streaming", + "destination": "/docs/sandbox/commands", + "permanent": true + }, + { + "source": "/docs/commands/background", + "destination": "/docs/sandbox/commands", + "permanent": true + }, + { + "source": "/docs/mcp", + "destination": "/docs/sandbox/mcp", + "permanent": true + }, + { + "source": "/docs/mcp/quickstart", + "destination": "/docs/sandbox/mcp", + "permanent": true + }, + { + "source": "/docs/mcp/available-servers", + "destination": "/docs/sandbox/mcp/available-servers", + "permanent": true + }, + { + "source": "/docs/mcp/custom-templates", + "destination": "/docs/sandbox/mcp", + "permanent": true + }, + { + "source": "/docs/mcp/custom-servers", + "destination": "/docs/sandbox/mcp", + "permanent": true + }, + { + "source": "/docs/mcp/examples", + "destination": "/docs/sandbox/mcp", + "permanent": true + }, { "source": "/docs/template/aliases", "destination": "/docs/template/names", diff --git a/docs/agents/claude-code.mdx b/docs/agents/claude-code.mdx index 68c3423a..cc228b7b 100644 --- a/docs/agents/claude-code.mdx +++ b/docs/agents/claude-code.mdx @@ -326,7 +326,7 @@ sandbox.kill() ## Connect MCP tools -Claude Code has built-in support for [MCP](https://modelcontextprotocol.io/). E2B provides an [MCP gateway](/docs/mcp) that gives Claude access to 200+ tools from the [Docker MCP Catalog](https://hub.docker.com/mcp). +Claude Code has built-in support for [MCP](https://modelcontextprotocol.io/). E2B provides an [MCP gateway](/docs/sandbox/mcp) that gives Claude access to 200+ tools from the [Docker MCP Catalog](https://hub.docker.com/mcp). 
```typescript JavaScript & TypeScript @@ -447,7 +447,7 @@ python build.py ## Related guides - + Connect Claude Code to 200+ MCP tools diff --git a/docs/sandbox.mdx b/docs/sandbox.mdx index 20d4b7de..699443ed 100644 --- a/docs/sandbox.mdx +++ b/docs/sandbox.mdx @@ -77,7 +77,16 @@ print(result.stdout) Pause/resume, auto-pause, auto-resume, snapshots, and state transitions. - Interactive terminal (PTY) and SSH access. + Run commands, stream output, interactive terminal (PTY), and SSH access. + + + Read, write, upload, download files and watch for changes. + + + Persistent storage that survives sandbox lifecycle. + + + Connect to 200+ tools through the Model Context Protocol. Environment variables and storage bucket integration. diff --git a/docs/sandbox/commands.mdx b/docs/sandbox/commands.mdx index 84e20b2b..ea3cfff2 100644 --- a/docs/sandbox/commands.mdx +++ b/docs/sandbox/commands.mdx @@ -5,7 +5,100 @@ icon: "terminal" description: "Run commands, use interactive terminals, and connect via SSH in E2B sandboxes." --- -E2B sandboxes provide multiple ways to run commands and interact with the terminal: the PTY module for interactive terminal sessions, and SSH access for remote connectivity. +E2B sandboxes provide multiple ways to run commands and interact with the terminal: `commands.run()` for executing commands, the PTY module for interactive terminal sessions, and SSH access for remote connectivity. + +## Running commands + +Use `commands.run()` to execute terminal commands inside the sandbox. 
+ + +```js JavaScript & TypeScript +import { Sandbox } from '@e2b/code-interpreter' + +const sandbox = await Sandbox.create() +const result = await sandbox.commands.run('ls -l') +console.log(result) +``` +```python Python +from e2b_code_interpreter import Sandbox + +sandbox = Sandbox.create() +result = sandbox.commands.run('ls -l') +print(result) +``` + + +### Streaming output + +To stream command output as it is being executed, pass `onStdout`/`onStderr` callbacks (JavaScript) or `on_stdout`/`on_stderr` callbacks (Python). + + +```js JavaScript & TypeScript +import { Sandbox } from '@e2b/code-interpreter' + +const sandbox = await Sandbox.create() + +const result = await sandbox.commands.run('echo hello; sleep 1; echo world', { + onStdout: (data) => { + console.log(data) + }, + onStderr: (data) => { + console.log(data) + }, +}) +console.log(result) +``` +```python Python +from e2b_code_interpreter import Sandbox + +sandbox = Sandbox.create() + +result = sandbox.commands.run('echo hello; sleep 1; echo world', on_stdout=lambda data: print(data), on_stderr=lambda data: print(data)) +print(result) +``` + + +### Running commands in background + +To run commands in background, pass the `background` option. This returns immediately and the command continues running in the sandbox. You can later kill the command using `commands.kill()`. 
+ + +```js JavaScript & TypeScript highlight={7} +import { Sandbox } from '@e2b/code-interpreter' + +const sandbox = await Sandbox.create() + +// Start the command in the background +const command = await sandbox.commands.run('echo hello; sleep 10; echo world', { + background: true, + onStdout: (data) => { + console.log(data) + }, +}) + +// Kill the command +await command.kill() +``` +```python Python highlight={6} +from e2b_code_interpreter import Sandbox + +sandbox = Sandbox.create() + +# Start the command in the background +command = sandbox.commands.run('echo hello; sleep 10; echo world', background=True) + +# Get stdout and stderr from the command running in the background. +# You can run this code in a separate thread or use command.wait() to wait for the command to finish. +for stdout, stderr, _ in command: + if stdout: + print(stdout) + if stderr: + print(stderr) + +# Kill the command +command.kill() +``` + ## Interactive terminal (PTY) diff --git a/docs/sandbox/filesystem.mdx b/docs/sandbox/filesystem.mdx new file mode 100644 index 00000000..d48c8988 --- /dev/null +++ b/docs/sandbox/filesystem.mdx @@ -0,0 +1,517 @@ +--- +title: "Filesystem" +sidebarTitle: Filesystem +icon: "folder" +description: "Read, write, upload, and download files in E2B sandboxes. Each sandbox has an isolated filesystem with up to 20 GB of storage." +--- + +Each E2B Sandbox has its own isolated filesystem. The [Hobby tier](https://e2b.dev/pricing) sandboxes come with 10 GB of the free disk space and [Pro tier](https://e2b.dev/pricing) sandboxes come with 20 GB. + +With E2B SDK you can read and write files, get file and directory metadata, watch directories for changes, upload data to the sandbox, and download data from the sandbox. + +## Read and write files + +### Reading files + +You can read files from the sandbox filesystem using the `files.read()` method. 
+ + +```js JavaScript & TypeScript +import { Sandbox } from '@e2b/code-interpreter' + +const sandbox = await Sandbox.create() +const fileContent = await sandbox.files.read('/path/to/file') +``` +```python Python +from e2b_code_interpreter import Sandbox + +sandbox = Sandbox.create() +file_content = sandbox.files.read('/path/to/file') +``` + + +### Writing single files + +You can write single files to the sandbox filesystem using the `files.write()` method. + + +```js JavaScript & TypeScript +import { Sandbox } from '@e2b/code-interpreter' + +const sandbox = await Sandbox.create() + +await sandbox.files.write('/path/to/file', 'file content') +``` +```python Python +from e2b_code_interpreter import Sandbox + +sandbox = Sandbox.create() + +sandbox.files.write('/path/to/file', 'file content') +``` + + +### Writing multiple files + +You can also write multiple files to the sandbox. + + +```js JavaScript & TypeScript +import { Sandbox } from '@e2b/code-interpreter' + +const sandbox = await Sandbox.create() + +await sandbox.files.write([ + { path: '/path/to/a', data: 'file content' }, + { path: '/another/path/to/b', data: 'file content' } +]) +``` +```python Python +from e2b_code_interpreter import Sandbox + +sandbox = Sandbox.create() + +sandbox.files.write_files([ + { "path": "/path/to/a", "data": "file content" }, + { "path": "another/path/to/b", "data": "file content" } +]) +``` + + +## File and directory metadata + +You can get information about a file or directory using the `files.getInfo()` / `files.get_info()` methods. Information such as file name, type, and path is returned. 
+ +### File metadata + + +```js JavaScript & TypeScript +import { Sandbox } from '@e2b/code-interpreter' + +const sandbox = await Sandbox.create() + +// Create a new file +await sandbox.files.write('test_file.txt', 'Hello, world!') + +// Get information about the file +const info = await sandbox.files.getInfo('test_file.txt') + +console.log(info) +// { +// name: 'test_file.txt', +// type: 'file', +// path: '/home/user/test_file.txt', +// size: 13, +// mode: 0o644, +// permissions: '-rw-r--r--', +// owner: 'user', +// group: 'user', +// modifiedTime: '2025-05-26T12:00:00.000Z', +// symlinkTarget: null +// } +``` +```python Python +from e2b_code_interpreter import Sandbox + +sandbox = Sandbox.create() + +# Create a new file +sandbox.files.write('test_file', 'Hello, world!') + +# Get information about the file +info = sandbox.files.get_info('test_file') + +print(info) +# EntryInfo( +# name='test_file.txt', +# type=, +# path='/home/user/test_file.txt', +# size=13, +# mode=0o644, +# permissions='-rw-r--r--', +# owner='user', +# group='user', +# modified_time='2025-05-26T12:00:00.000Z', +# symlink_target=None +# ) +``` + + +### Directory metadata + + +```js JavaScript & TypeScript +import { Sandbox } from '@e2b/code-interpreter' + +const sandbox = await Sandbox.create() + +// Create a new directory +await sandbox.files.makeDir('test_dir') + +// Get information about the directory +const info = await sandbox.files.getInfo('test_dir') + +console.log(info) +// { +// name: 'test_dir', +// type: 'dir', +// path: '/home/user/test_dir', +// size: 0, +// mode: 0o755, +// permissions: 'drwxr-xr-x', +// owner: 'user', +// group: 'user', +// modifiedTime: '2025-05-26T12:00:00.000Z', +// symlinkTarget: null +// } +``` +```python Python +from e2b_code_interpreter import Sandbox + +sandbox = Sandbox.create() + +# Create a new directory +sandbox.files.make_dir('test_dir') + +# Get information about the directory +info = sandbox.files.get_info('test_dir') + +print(info) +# EntryInfo( +# 
name='test_dir', +# type=, +# path='/home/user/test_dir', +# size=0, +# mode=0o755, +# permissions='drwxr-xr-x', +# owner='user', +# group='user', +# modified_time='2025-05-26T12:00:00.000Z', +# symlink_target=None +# ) +``` + + +## Watch directory for changes + +You can watch a directory for changes using the `files.watchDir()` method in JavaScript and `files.watch_dir()` method in Python. + + +Since events are tracked asynchronously, their delivery may be delayed. +It's recommended not to collect or close watcher immediately after making a change. + + + +```js JavaScript & TypeScript highlight={7-12} +import { Sandbox, FilesystemEventType } from '@e2b/code-interpreter' + +const sandbox = await Sandbox.create() +const dirname = '/home/user' + +// Start watching directory for changes +const handle = await sandbox.files.watchDir(dirname, async (event) => { + console.log(event) + if (event.type === FilesystemEventType.WRITE) { + console.log(`wrote to file ${event.name}`) + } +}) + +// Trigger file write event +await sandbox.files.write(`${dirname}/my-file`, 'hello') +``` +```python Python highlight={7,12-16} +from e2b_code_interpreter import Sandbox, FilesystemEventType + +sandbox = Sandbox.create() +dirname = '/home/user' + +# Watch directory for changes +handle = sandbox.files.watch_dir(dirname) +# Trigger file write event +sandbox.files.write(f"{dirname}/my-file", "hello") + +# Retrieve the latest new events since the last `get_new_events()` call +events = handle.get_new_events() +for event in events: + print(event) + if event.type == FilesystemEventType.WRITE: + print(f"wrote to file {event.name}") +``` + + +### Recursive watching + +You can enable recursive watching using the parameter `recursive`. + + +When rapidly creating new folders (e.g., deeply nested path of folders), events other than `CREATE` might not be emitted. To avoid this behavior, create the required folder structure in advance. 
+ + + +```js JavaScript & TypeScript highlight={13,17} +import { Sandbox, FilesystemEventType } from '@e2b/code-interpreter' + +const sandbox = await Sandbox.create() +const dirname = '/home/user' + +// Start watching directory for changes +const handle = await sandbox.files.watchDir(dirname, async (event) => { + console.log(event) + if (event.type === FilesystemEventType.WRITE) { + console.log(`wrote to file ${event.name}`) + } +}, { + recursive: true +}) + +// Trigger file write event +await sandbox.files.write(`${dirname}/my-folder/my-file`, 'hello') +``` +```python Python highlight={7,9} +from e2b_code_interpreter import Sandbox, FilesystemEventType + +sandbox = Sandbox.create() +dirname = '/home/user' + +# Watch directory for changes +handle = sandbox.files.watch_dir(dirname, recursive=True) +# Trigger file write event +sandbox.files.write(f"{dirname}/my-folder/my-file", "hello") + +# Retrieve the latest new events since the last `get_new_events()` call +events = handle.get_new_events() +for event in events: + print(event) + if event.type == FilesystemEventType.WRITE: + print(f"wrote to file {event.name}") +``` + + +## Upload data + +You can upload data to the sandbox using the `files.write()` method. + +### Upload single file + + +```js JavaScript & TypeScript +import fs from 'fs' +import { Sandbox } from '@e2b/code-interpreter' + +const sandbox = await Sandbox.create() + +// Read file from local filesystem +const content = fs.readFileSync('/local/path') +// Upload file to sandbox +await sandbox.files.write('/path/in/sandbox', content) +``` +```python Python +from e2b_code_interpreter import Sandbox + +sandbox = Sandbox.create() + +# Read file from local filesystem +with open("path/to/local/file", "rb") as file: + # Upload file to sandbox + sandbox.files.write("/path/in/sandbox", file) +``` + + +### Upload with pre-signed URL + +Sometimes, you may want to let users from unauthorized environments, like a browser, upload files to the sandbox. 
+For this use case, you can use pre-signed URLs to let users upload files securely. + +All you need to do is create a sandbox with the `secure: true` option. An upload URL will then be generated with a signature that allows only authorized users to upload files. +You can optionally set an expiration time for the URL so that it will be valid only for a limited time. + + +```js JavaScript & TypeScript +import { Sandbox } from '@e2b/code-interpreter' + +// Start a secured sandbox (all operations must be authorized by default) +const sandbox = await Sandbox.create(template, { secure: true }) + +// Create a pre-signed URL for file upload with a 10 second expiration +const publicUploadUrl = await sandbox.uploadUrl( + 'demo.txt', { + useSignatureExpiration: 10_000, // optional + }, +) + +// Upload a file with a pre-signed URL (this can be used in any environment, such as a browser) +const form = new FormData() +form.append('file', 'file content') + +await fetch(publicUploadUrl, { method: 'POST', body: form }) + +// File is now available in the sandbox and you can read it +const content = sandbox.files.read('/path/in/sandbox') +``` +```python Python +from e2b import Sandbox +import requests + +# Start a secured sandbox (all operations must be authorized by default) +sandbox = Sandbox.create(timeout=12_000, secure=True) + +# Create a pre-signed URL for file upload with a 10 second expiration +signed_url = sandbox.upload_url(path="demo.txt", user="user", use_signature_expiration=10_000) + +form_data = {"file":"file content"} +requests.post(signed_url, data=form_data) + +# File is now available in the sandbox and you can read it +content = sandbox.files.read('/path/in/sandbox') +``` + + +### Upload directory / multiple files + + +```js JavaScript & TypeScript +import { Sandbox } from '@e2b/code-interpreter' + +const sandbox = await Sandbox.create() + +// Read all files in the directory and store their paths and contents in an array +const readDirectoryFiles = (directoryPath) 
=> { + // Read all files in the local directory + const files = fs.readdirSync(directoryPath); + + // Map files to objects with path and data + const filesArray = files + .filter(file => { + const fullPath = path.join(directoryPath, file); + // Skip if it's a directory + return fs.statSync(fullPath).isFile(); + }) + .map(file => { + const filePath = path.join(directoryPath, file); + + // Read the content of each file + return { + path: filePath, + data: fs.readFileSync(filePath, 'utf8') + }; + }); + + return filesArray; +}; + +// Usage example +const files = readDirectoryFiles('/local/dir'); +console.log(files); +// [ +// { path: '/local/dir/file1.txt', data: 'File 1 contents...' }, +// { path: '/local/dir/file2.txt', data: 'File 2 contents...' }, +// ... +// ] + +await sandbox.files.write(files) +``` +```python Python +import os +from e2b_code_interpreter import Sandbox + +sandbox = Sandbox.create() + +def read_directory_files(directory_path): + files = [] + + # Iterate through all files in the directory + for filename in os.listdir(directory_path): + file_path = os.path.join(directory_path, filename) + + # Skip if it's a directory + if os.path.isfile(file_path): + # Read file contents in binary mode + with open(file_path, "rb") as file: + files.append({ + 'path': file_path, + 'data': file.read() + }) + + return files + +files = read_directory_files("/local/dir") +print(files) +# [ +# {"path": "/local/dir/file1.txt", "data": "File 1 contents..." }, +# { "path": "/local/dir/file2.txt", "data": "File 2 contents..." }, +# ... +# ] + +sandbox.files.write_files(files) +``` + + +## Download data + +You can download data from the sandbox using the `files.read()` method. 
+ +### Download single file + + +```js JavaScript & TypeScript +import fs from 'fs' +import { Sandbox } from '@e2b/code-interpreter' + +const sandbox = await Sandbox.create() + +// Read file from sandbox +const content = await sandbox.files.read('/path/in/sandbox') +// Write file to local filesystem +fs.writeFileSync('/local/path', content) +``` +```python Python +from e2b_code_interpreter import Sandbox + +sandbox = Sandbox.create() + +# Read file from sandbox +content = sandbox.files.read('/path/in/sandbox') +# Write file to local filesystem +with open('/local/path', 'w') as file: + file.write(content) +``` + + +### Download with pre-signed URL + +Sometimes, you may want to let users from unauthorized environments, like a browser, download files from the sandbox. +For this use case, you can use pre-signed URLs to let users download files securely. + +All you need to do is create a sandbox with the `secure: true` option. A download URL will then be generated with a signature that allows only authorized users to access files. +You can optionally set an expiration time for the URL so that it will be valid only for a limited time. 
+ + +```js JavaScript & TypeScript +import fs from 'fs' +import { Sandbox } from '@e2b/code-interpreter' + +// Start a secured sandbox (all operations must be authorized by default) +const sandbox = await Sandbox.create(template, { secure: true }) + +// Create a pre-signed URL for file download with a 10 second expiration +const publicUrl = await sandbox.downloadUrl( + 'demo.txt', { + useSignatureExpiration: 10_000, // optional + }, +) + +// Download a file with a pre-signed URL (this can be used in any environment, such as a browser) +const res = await fetch(publicUrl) +const content = await res.text() +``` +```python Python +from e2b import Sandbox + +# Start a secured sandbox (all operations must be authorized by default) +sandbox = Sandbox.create(timeout=12_000, secure=True) + +# Create a pre-signed URL for file download with a 10 second expiration +# The user only has to visit the URL to download the file, this also works in a browser. +signed_url = sbx.download_url(path="demo.txt", user="user", use_signature_expiration=10_000) +``` + diff --git a/docs/sandbox/mcp.mdx b/docs/sandbox/mcp.mdx new file mode 100644 index 00000000..d87a1c70 --- /dev/null +++ b/docs/sandbox/mcp.mdx @@ -0,0 +1,514 @@ +--- +title: "MCP gateway" +sidebarTitle: MCP gateway +icon: "plug" +description: "Connect E2B sandboxes to 200+ MCP tools — Browserbase, Exa, Notion, Stripe, GitHub, and more — through a built-in MCP gateway." +--- + +E2B provides a batteries-included MCP gateway that runs inside sandboxes, giving you type-safe access to 200+ MCP tools from the [Docker MCP Catalog](https://hub.docker.com/mcp) or [custom MCP servers](#custom-mcp-servers) through a unified interface. This integration gives developers instant access to tools like [Browserbase](https://www.browserbase.com/), [Exa](https://exa.ai/), [Notion](https://www.notion.so/), [Stripe](https://stripe.com/), or [GitHub](https://github.com/). 
+ +The [Model Context Protocol (MCP)](https://modelcontextprotocol.io/docs/getting-started/intro) is an open standard for connecting AI models to external tools and data sources. E2B sandboxes provide an ideal environment for running MCP tools, giving AI full access to an internet-connected Linux machine where it can safely install packages, write files, run terminal commands, and AI-generated code. + + + + + +## Quickstart + +Create a sandbox with MCP servers configured and connect it to an AI agent. This example sets up Browserbase, Exa, and Airtable MCP servers and uses Claude to orchestrate them. + + + +```typescript TypeScript +import Sandbox from 'e2b' + +const sbx = await Sandbox.create({ + mcp: { + browserbase: { + apiKey: process.env.BROWSERBASE_API_KEY!, + geminiApiKey: process.env.GEMINI_API_KEY!, + projectId: process.env.BROWSERBASE_PROJECT_ID!, + }, + exa: { + apiKey: process.env.EXA_API_KEY!, + }, + airtable: { + airtableApiKey: process.env.AIRTABLE_API_KEY!, + }, + }, +}); + +const mcpUrl = sbx.getMcpUrl(); +const mcpToken = await sbx.getMcpToken(); + +// You can now connect the gateway to any MCP client, for example claude: +// This also works for your local claude! +await sbx.commands.run(`claude mcp add --transport http e2b-mcp-gateway ${mcpUrl} --header "Authorization: Bearer ${mcpToken}"`, { timeoutMs: 0, onStdout: console.log, onStderr: console.log }); + +await sbx.commands.run( + `echo 'Use browserbase and exa to research open positions at e2b.dev. Collect your findings in Airtable.' 
| claude -p --dangerously-skip-permissions`, + { timeoutMs: 0, onStdout: console.log, onStderr: console.log } +) + +``` + +```python Python +import asyncio +from e2b import AsyncSandbox +import os +import dotenv + +dotenv.load_dotenv() + +async def main(): + sbx = await AsyncSandbox.create(mcp={ + "browserbase": { + "apiKey": os.getenv("BROWSERBASE_API_KEY"), + "geminiApiKey": os.getenv("GEMINI_API_KEY"), + "projectId": os.getenv("BROWSERBASE_PROJECT_ID"), + }, + "exa": { + "apiKey": os.getenv("EXA_API_KEY"), + }, + "airtable": { + "airtableApiKey": os.getenv("AIRTABLE_API_KEY"), + }, + }) + + mcp_url = sbx.get_mcp_url() + mcp_token = await sbx.get_mcp_token() + + # You can now connect the gateway to any MCP client, for example claude: + # This also works for your local claude! + await sbx.commands.run(f'claude mcp add --transport http e2b-mcp-gateway {mcp_url} --header "Authorization: Bearer {mcp_token}"', timeout=0, on_stdout=print, on_stderr=print) + + await sbx.commands.run( + "echo 'Use browserbase and exa to research open positions at e2b.dev. Collect your findings in Airtable.' | claude -p --dangerously-skip-permissions", + timeout=0, on_stdout=print, on_stderr=print + ) + +if __name__ == "__main__": + asyncio.run(main()) +``` + + + +## Connecting to the MCP gateway + +You can connect to the MCPs running inside the sandbox both from outside and inside the sandbox. + +### From outside the sandbox + +To connect to the MCPs running inside the sandbox, use the `sandbox.getMcpUrl()` in JavaScript and `sandbox.get_mcp_url()` in Python. 
+ + +```typescript TypeScript +import Sandbox from 'e2b' +import { Client } from '@modelcontextprotocol/sdk/client/index.js'; +import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js'; + +const sandbox = await Sandbox.create({ + mcp: { + browserbase: { + apiKey: process.env.BROWSERBASE_API_KEY!, + geminiApiKey: process.env.GEMINI_API_KEY!, + projectId: process.env.BROWSERBASE_PROJECT_ID!, + }, + exa: { + apiKey: process.env.EXA_API_KEY!, + }, + notion: { + internalIntegrationToken: process.env.NOTION_API_KEY!, + }, + }, +}); + +const client = new Client({ + name: 'e2b-mcp-client', + version: '1.0.0' +}); + +const transport = new StreamableHTTPClientTransport( + new URL(sandbox.getMcpUrl()), + { + requestInit: { + headers: { + 'Authorization': `Bearer ${await sandbox.getMcpToken()}` + } + } + } +); + +await client.connect(transport); + +const tools = await client.listTools(); +console.log('Available tools:', tools.tools.map(t => t.name)); + +await client.close(); +await sandbox.kill(); +``` + +```python Python +import os +import dotenv +dotenv.load_dotenv() +from e2b import AsyncSandbox +import asyncio +from datetime import timedelta +from mcp.client.session import ClientSession +from mcp.client.streamable_http import streamablehttp_client + +async def main(): + sandbox = await AsyncSandbox.create( + mcp={ + "browserbase": { + "apiKey": os.environ["BROWSERBASE_API_KEY"], + "geminiApiKey": os.environ["GEMINI_API_KEY"], + "projectId": os.environ["BROWSERBASE_PROJECT_ID"], + }, + "exa": { + "apiKey": os.environ["EXA_API_KEY"], + }, + "notion": { + "internalIntegrationToken": os.environ["NOTION_API_KEY"], + }, + } + ) + + async with streamablehttp_client( + url=sandbox.get_mcp_url(), + headers={"Authorization": f"Bearer {await sandbox.get_mcp_token()}"}, + timeout=timedelta(seconds=600) + ) as (read_stream, write_stream, _): + async with ClientSession(read_stream, write_stream) as session: + await session.initialize() + tools = 
await session.list_tools() + print(f"Available tools: {[tool.name for tool in tools.tools]}") + await sandbox.kill() + +if __name__ == "__main__": + asyncio.run(main()) +``` + + +### From inside the sandbox + +If you need to access the MCP gateway from within the sandbox itself, it's available at: +``` +http://localhost:50005/mcp +``` + +You'll need to include the Authorization header with the MCP token when making requests from inside the sandbox. How that is added depends on the MCP client you use. + +## MCP client integrations + +### Claude + +``` +claude mcp add --transport http e2b-mcp-gateway --header "Authorization: Bearer " +``` + +### OpenAI Agents + + +```typescript TypeScript +import { MCPServerStreamableHttp } from '@openai/agents'; + +const mcp = new MCPServerStreamableHttp({ + url: mcpUrl, + name: 'E2B MCP Gateway', + requestInit: { + headers: { + 'Authorization': `Bearer ${await sandbox.getMcpToken()}` + } + }, +}); +``` + +```python Python +import asyncio +import os +from e2b import AsyncSandbox +from agents.mcp import MCPServerStreamableHttp + +async def main(): + async with MCPServerStreamableHttp( + name="e2b-mcp-client", + params={ + "url": sandbox.get_mcp_url(), + "headers": {"Authorization": f"Bearer {await sandbox.get_mcp_token()}"}, + }, + ) as server: + tools = await server.list_tools() + print("Available tools:", [t.name for t in tools]) + + # Clean up + await sandbox.kill() + +asyncio.run(main()) +``` + + +### Official MCP client + + +```typescript TypeScript +import { Client } from '@modelcontextprotocol/sdk/client/index.js'; +import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js'; + +const client = new Client({ + name: 'e2b-mcp-client', + version: '1.0.0' +}); + +const transport = new StreamableHTTPClientTransport( + new URL(sandbox.getMcpUrl()), + { + requestInit: { + headers: { + 'Authorization': `Bearer ${await sandbox.getMcpToken()}` + } + } + } +); +await client.connect(transport); +``` 
+```python Python +import asyncio +from datetime import timedelta +from mcp.client.session import ClientSession +from mcp.client.streamable_http import streamablehttp_client + +async def main(): + async with streamablehttp_client( + url=sandbox.get_mcp_url(), + headers={"Authorization": f"Bearer {await sandbox.get_mcp_token()}"}, + timeout=timedelta(seconds=600) + ) as (read_stream, write_stream, _): + async with ClientSession(read_stream, write_stream) as session: + await session.initialize() + + tools = await session.list_tools() + print(f"Available tools: {[tool.name for tool in tools.tools]}") + await sandbox.kill() + +if __name__ == "__main__": + asyncio.run(main()) + +``` + + + +This list is not exhaustive. You can find more examples in the [E2B Cookbook](https://github.com/e2b-dev/e2b-cookbook). + +## Custom templates + +You can prepull MCP server Docker images during template build time to significantly improve runtime performance. + +When you build a template with prepulled MCP servers, the Docker images for those servers are downloaded and cached during the build process. This means when you create a sandbox from that template, the MCP servers are ready to use immediately without waiting for image downloads. + + +You must use the MCP gateway enabled template (`mcp-gateway`) as your base template to use this feature. + + +### Building a template with MCP servers + +Use the `addMcpServer()` method (TypeScript) or `add_mcp_server()` method (Python) to prepull MCP server images during template build. You can pass a single server or an array of servers. + +The server names (like `"browserbase"` and `"exa"`) correspond to the keys defined in the [Available Servers](/docs/sandbox/mcp/available-servers) documentation. 
+ + + +```typescript JavaScript & TypeScript +import "dotenv/config"; +import { Template, defaultBuildLogger } from 'e2b'; + +export const template = Template() + .fromTemplate("mcp-gateway") + .addMcpServer(["browserbase", "exa"]); + +await Template.build(template, 'my-mcp-gateway', { + cpuCount: 8, + memoryMB: 8192, + onBuildLogs: defaultBuildLogger(), +}); +``` + +```python Python +from dotenv import load_dotenv +from e2b import Template, default_build_logger + +load_dotenv() + +template = ( + Template() + .from_template("mcp-gateway") + .add_mcp_server(["browserbase", "exa"]) +) + +Template.build( + template, + 'my-mcp-gateway', + cpu_count=8, + memory_mb=8192, + on_build_logs=default_build_logger(), +) +``` + + + +### Using the template + +Once built, create sandboxes from your template. You still need to provide the configuration for each MCP server. + + + +```typescript JavaScript & TypeScript +import { Sandbox } from 'e2b'; + +const sandbox = await Sandbox.create({ + template: "my-mcp-gateway", + mcp: { + browserbase: { + apiKey: process.env.BROWSERBASE_API_KEY!, + geminiApiKey: process.env.GEMINI_API_KEY!, + projectId: process.env.BROWSERBASE_PROJECT_ID!, + }, + exa: { + apiKey: process.env.EXA_API_KEY!, + }, + }, +}); + +``` + +```python Python +from e2b import Sandbox +import os + +sbx = Sandbox.create( + template="my-mcp-gateway", + mcp={ + "browserbase": { + "apiKey": os.getenv("BROWSERBASE_API_KEY"), + "geminiApiKey": os.getenv("GEMINI_API_KEY"), + "projectId": os.getenv("BROWSERBASE_PROJECT_ID"), + }, + "exa": { + "apiKey": os.getenv("EXA_API_KEY"), + }, + } +) +``` + + + +For more information about working with templates, see: +- [Template Quickstart](/docs/template/quickstart) - Get started with custom templates +- [Defining Templates](/docs/template/defining-template) - Learn all template configuration options +- [Template Build](/docs/template/build) - Understand the build process + +## Custom MCP servers + +In addition to the 200+ pre-built MCP 
servers from the [Docker MCP Catalog](https://hub.docker.com/mcp), you can run custom MCP servers directly from public GitHub repositories. + +When you specify a GitHub repository, E2B will: +1. Clone the repository into the sandbox +2. Run the `installCmd` (optional) to install dependencies +3. Run the `runCmd` to start the MCP server with stdio transport + +The `runCmd` must start an MCP server that follows the [MCP specification](https://modelcontextprotocol.io/specification/2025-06-18) and communicates via stdio (standard input/output). + + + +```typescript TypeScript +import Sandbox from 'e2b' + +const sandbox = await Sandbox.create({ + mcp: { + 'github/modelcontextprotocol/servers': { + installCmd: 'npm install', + runCmd: 'sudo npx -y @modelcontextprotocol/server-filesystem /root', + }, + }, +}); +``` + +```python Python +from e2b import Sandbox +import os + +sbx = Sandbox.create( + mcp={ + "github/modelcontextprotocol/servers": { + "install_cmd": "npm install", + "run_cmd": "sudo npx -y @modelcontextprotocol/server-filesystem /root", + }, + } +) +``` + + + +### Configuration + + + Optional command to run before starting the MCP server. Use this to install dependencies (e.g., `npm install`, `pip install -r requirements.txt`). + + + + Command to start the MCP server. Must launch a stdio-enabled MCP server. + + + +**Important for npx-based servers:** Always include `installCmd: 'npm install'` (or equivalent) when using `npx` in your `runCmd`. Without installing dependencies first, npx will try to use the local repository and fail. + + +### Troubleshooting + +If your custom MCP server doesn't work as expected: + +1. Explore the sandbox either via the [dashboard](https://e2b.dev/dashboard) or by connecting to it via `e2b connect ` +2. Check the gateway log file with `sudo cat /var/log/mcp-gateway/gateway.log`. 
+ +## Debugging with MCP Inspector + +The [MCP Inspector](https://github.com/modelcontextprotocol/inspector) is a useful tool for debugging and testing your MCP server setup. Get the command to run: + +```bash +npx @modelcontextprotocol/inspector --transport http --url --header "Authorization: Bearer ${mcpToken}" +``` + +Run the command in your terminal. This will open a web interface where you can: +- Browse available tools +- Test tool calls with different parameters +- Inspect request/response payloads +- Debug connection issues + +## Examples + + + + Claude Code with MCP integration + + + Web automation agent with Browserbase + + + AI research using Groq and Exa + + + Research Agent using the OpenAI Agents framework + + + Basic MCP client connecting to an E2B Sandbox + + + Use custom MCP servers installed from GitHub + + + Create a custom E2B Sandbox with pre-installed MCP servers + + + +For the full list of available MCP servers, see [Available servers](/docs/sandbox/mcp/available-servers). diff --git a/docs/sandbox/mcp/available-servers.mdx b/docs/sandbox/mcp/available-servers.mdx new file mode 100644 index 00000000..34e36d5f --- /dev/null +++ b/docs/sandbox/mcp/available-servers.mdx @@ -0,0 +1,3547 @@ +--- +title: Available servers +description: Browse available MCP servers +--- + +E2B provides access to 200+ MCP servers from [Docker's catalog](https://hub.docker.com/mcp). You can also run [custom MCP servers](/docs/sandbox/mcp#custom-mcp-servers) inside the sandbox. + + +## Airtable + +Provides AI assistants with direct access to Airtable bases, allowing them to read schemas, query records, and interact with your Airtable data. Supports listing bases, retrieving table structures, and searching through records to help automate workflows and answer questions about your organized data. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/airtable-mcp-server/overview) + + + + + + + + + + + + +## Azure Kubernetes Service (AKS) + +Azure Kubernetes Service (AKS) official MCP server. + +[View on Docker Hub](https://hub.docker.com/mcp/server/aks/overview) + + + + + Access level for the MCP server, One of [ readonly, readwrite, admin ] + + + + Comma-separated list of additional tools, One of [ helm, cilium ] + + + + Comma-separated list of namespaces to allow access to. If not specified, all namespaces are allowed. + + + + Path to the Azure configuration directory (e.g. /home/azureuser/.azure). Used for Azure CLI authentication, you should be logged in (e.g. run `az login`) on the host before starting the MCP server. + + + + Username or UID of the container user (format ``[:``] e.g. 10000), ensuring correct permissions to access the Azure and kubeconfig files. Leave empty to use default user in the container. + + + + Path to the kubeconfig file for the AKS cluster (e.g. /home/azureuser/.kube/config). Used to connect to the AKS cluster. + + + + + +## Apify + +Apify is the world's largest marketplace of tools for web scraping, data extraction, and web automation. You can extract structured data from social media, e-commerce, search engines, maps, travel sites, or any other website. + +[View on Docker Hub](https://hub.docker.com/mcp/server/apify-mcp-server/overview) + + + + + + + + Comma-separated list of tools to enable. Can be either a tool category, a specific tool, or an Apify Actor. For example: "actors,docs,apify/rag-web-browser". For more details visit https://mcp.apify.com. + + + + + +## Api-gateway + +A universal MCP (Model Context Protocol) server to integrate any API with Claude Desktop using only Docker configurations. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/mcp-api-gateway/overview) + + + + + + + + + + + + + + + +## ArXiv +The ArXiv MCP Server provides a comprehensive bridge between AI assistants and arXiv's research repository through the Model Context Protocol (MCP). + +**Features:** +- Search arXiv papers with advanced filtering +- Download and store papers locally as markdown +- Read and analyze paper content +- Deep research analysis prompts +- Local paper management and storage +- Enhanced tool descriptions optimized for local AI models +- Docker MCP Gateway compatible with detailed context + +[View on Docker Hub](https://hub.docker.com/mcp/server/arxiv-mcp-server/overview) + + + + + Directory path where downloaded papers will be stored + + + + +The ArXiv MCP Server provides a comprehensive bridge between AI assistants and arXiv's research repository through the Model Context Protocol (MCP). Features: • Search arXiv papers with advanced filtering • Download and store papers locally as markdown • Read and analyze paper content • Deep research analysis prompts • Local paper management and storage • Enhanced tool descriptions optimized for local AI models • Docker MCP Gateway compatible with detailed context Perfect for researchers, academics, and AI assistants conducting literature reviews and research analysis. **Recent Update**: Enhanced tool descriptions specifically designed to resolve local AI model confusion and improve Docker MCP Gateway compatibility. + +## ast-grep + +ast-grep is a fast and polyglot tool for code structural search, lint, rewriting at large scale. + +[View on Docker Hub](https://hub.docker.com/mcp/server/ast-grep/overview) + + + + + + + + + +## Astra DB + +An MCP server for Astra DB workloads. + +[View on Docker Hub](https://hub.docker.com/mcp/server/astra-db/overview) + + + + + + + + + + + + +## Astro Docs + +Access the latest Astro web framework documentation, guides, and API references. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/astro-docs/overview) + + + + +## Atlan + +MCP server for interacting with Atlan services including asset search, updates, and lineage traversal for comprehensive data governance and discovery. + +[View on Docker Hub](https://hub.docker.com/mcp/server/atlan/overview) + + + + + + + + + + + + +## Atlas Docs + +Provide LLMs hosted, clean markdown documentation of libraries and frameworks. + +[View on Docker Hub](https://hub.docker.com/mcp/server/atlas-docs/overview) + + + + + + + + + +## Atlassian + +Tools for Atlassian products (Confluence and Jira). This integration supports both Atlassian Cloud and Jira Server/Data Center deployments. + +[View on Docker Hub](https://hub.docker.com/mcp/server/atlassian/overview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +## Audiense Insights + +Audiense Insights MCP Server is a server based on the Model Context Protocol (MCP) that allows Claude and other MCP-compatible clients to interact with your Audiense Insights account. + +[View on Docker Hub](https://hub.docker.com/mcp/server/audiense-insights/overview) + + + + + + + + + + + + + + + +## AWS CDK + +AWS Cloud Development Kit (CDK) best practices, infrastructure as code patterns, and security compliance with CDK Nag. + +[View on Docker Hub](https://hub.docker.com/mcp/server/aws-cdk-mcp-server/overview) + + + + +## AWS Core + +Starting point for using the awslabs MCP servers. + +[View on Docker Hub](https://hub.docker.com/mcp/server/aws-core-mcp-server/overview) + + + + +## AWS Diagram + +Seamlessly create diagrams using the Python diagrams package DSL. This server allows you to generate AWS diagrams, sequence diagrams, flow diagrams, and class diagrams using Python code. + +[View on Docker Hub](https://hub.docker.com/mcp/server/aws-diagram/overview) + + + + +## AWS Documentation + +Tools to access AWS documentation, search for content, and get recommendations. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/aws-documentation/overview) + + + + +## AWS KB Retrieval (Archived) + +An MCP server implementation for retrieving information from the AWS Knowledge Base using the Bedrock Agent Runtime. + +[View on Docker Hub](https://hub.docker.com/mcp/server/aws-kb-retrieval-server/overview) + + + + + + + + + + + + +## AWS Terraform + +Terraform on AWS best practices, infrastructure as code patterns, and security compliance with Checkov. + +[View on Docker Hub](https://hub.docker.com/mcp/server/aws-terraform/overview) + + + + +## Azure + +The Azure MCP Server, bringing the power of Azure to your agents. + +[View on Docker Hub](https://hub.docker.com/mcp/server/azure/overview) + + + + +## Beagle security + +Connects with the Beagle Security backend using a user token to manage applications, run automated security tests, track vulnerabilities across environments, and gain intelligence from Application and API vulnerability data. + +[View on Docker Hub](https://hub.docker.com/mcp/server/beagle-security/overview) + + + + + + + + + +## Bitrefill + +A Model Context Protocol Server connector for Bitrefill public API, to enable AI agents to search and shop on Bitrefill. + +[View on Docker Hub](https://hub.docker.com/mcp/server/bitrefill/overview) + + + + + + + + + + + + +## Box + +An MCP server capable of interacting with the Box API. + +[View on Docker Hub](https://hub.docker.com/mcp/server/box/overview) + + + + + + + + + + + + +## Brave Search + +Search the Web for pages, images, news, videos, and more using the Brave Search API. + +[View on Docker Hub](https://hub.docker.com/mcp/server/brave/overview) + + + + + + + + + +## Browserbase + +Allow LLMs to control a browser with Browserbase and Stagehand for AI-powered web automation, intelligent data extraction, and screenshot capture. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/browserbase/overview) + + + + + + + + + + + + + + + +## Buildkite + +Buildkite MCP lets agents interact with Buildkite Builds, Jobs, Logs, Packages and Test Suites. + +[View on Docker Hub](https://hub.docker.com/mcp/server/buildkite/overview) + + + + + + + + + +## Camunda BPM process engine + +Tools to interact with the Camunda 7 Community Edition Engine using the Model Context Protocol (MCP). Whether you're automating workflows, querying process instances, or integrating with external systems, Camunda MCP Server is your agentic solution for seamless interaction with Camunda. + +[View on Docker Hub](https://hub.docker.com/mcp/server/camunda/overview) + + + + + + + + + +## CData Connect Cloud + +This fully functional MCP Server allows you to connect to any data source in Connect Cloud from Claude Desktop. + +[View on Docker Hub](https://hub.docker.com/mcp/server/cdata-connectcloud/overview) + + + + + + + + + + + + +## CharmHealth + +An MCP server for CharmHealth EHR that allows LLMs and MCP clients to interact with patient records, encounters, and practice information. + +[View on Docker Hub](https://hub.docker.com/mcp/server/charmhealth-mcp-server/overview) + + + + + + + + + + + + + + + + + + + + + + + + + + + +## Chroma + +A Model Context Protocol (MCP) server implementation that provides database capabilities for Chroma. + +[View on Docker Hub](https://hub.docker.com/mcp/server/chroma/overview) + + + + + + + + + +## CircleCI + +A specialized server implementation for the Model Context Protocol (MCP) designed to integrate with CircleCI's development workflow. This project serves as a bridge between CircleCI's infrastructure and the Model Context Protocol, enabling enhanced AI-powered development experiences. + +[View on Docker Hub](https://hub.docker.com/mcp/server/circleci/overview) + + + + + + + + + + + + +## Official ClickHouse + +Official ClickHouse MCP Server. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/clickhouse/overview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +## Close + +Streamline sales processes with integrated calling, email, SMS, and automated workflows for small and scaling businesses. + +[View on Docker Hub](https://hub.docker.com/mcp/server/close/overview) + + + + + + + + + +## Cloudflare Docs + +Access the latest documentation on Cloudflare products such as Workers, Pages, R2, D1, KV. + +[View on Docker Hub](https://hub.docker.com/mcp/server/cloudflare-docs/overview) + + + + +## Cloud Run MCP + +MCP server to deploy apps to Cloud Run. + +[View on Docker Hub](https://hub.docker.com/mcp/server/cloud-run-mcp/overview) + + + + + path to application-default credentials (eg $HOME/.config/gcloud/application_default_credentials.json ) + + + + + +## CockroachDB + +Enable AI agents to manage, monitor, and query CockroachDB using natural language. Perform complex database operations, cluster management, and query execution seamlessly through AI-driven workflows. Integrate effortlessly with MCP clients for scalable and high-performance data operations. + +[View on Docker Hub](https://hub.docker.com/mcp/server/cockroachdb/overview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +## Python Interpreter + +A Python-based execution tool that mimics a Jupyter notebook environment. It accepts code snippets, executes them, and maintains state across sessions — preserving variables, imports, and past results. Ideal for iterative development, debugging, or code execution. + +[View on Docker Hub](https://hub.docker.com/mcp/server/mcp-code-interpreter/overview) + + + + +## Context7 + +Context7 MCP Server -- Up-to-date code documentation for LLMs and AI code editors. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/context7/overview) + + + + +## Couchbase + +Couchbase is a distributed document database with a powerful search engine and in-built operational and analytical capabilities. + +[View on Docker Hub](https://hub.docker.com/mcp/server/couchbase/overview) + + + + + Bucket in the Couchbase cluster to use for the MCP server. + + + + Connection string for the Couchbase cluster. + + + + Setting to "true" (default) enables read-only query mode while running SQL++ queries. + + + + + + + Username for the Couchbase cluster with access to the bucket. + + + + + +## The official for Cylera. + +Brings context about device inventory, threats, risks and utilization powered by the Cylera Partner API into an LLM. + +[View on Docker Hub](https://hub.docker.com/mcp/server/cylera-mcp-server/overview) + + + + + + + + + + + + + + + +## Shodan + +A Model Context Protocol server that provides access to Shodan API functionality. + +[View on Docker Hub](https://hub.docker.com/mcp/server/cyreslab-ai-shodan/overview) + + + + + + + + + +## Dappier + +Enable fast, free real-time web search and access premium data from trusted media brands—news, financial markets, sports, entertainment, weather, and more. Build powerful AI agents with Dappier. + +[View on Docker Hub](https://hub.docker.com/mcp/server/dappier/overview) + + + + + + + + + +## Dappier Remote + +Enable fast, free real-time web search and access premium data from trusted media brands—news, financial markets, sports, entertainment, weather, and more. Build powerful AI agents with Dappier. + +[View on Docker Hub](https://hub.docker.com/mcp/server/dappier-remote/overview) + + + + + + + + + +## Dart AI + +Dart AI Model Context Protocol (MCP) server. + +[View on Docker Hub](https://hub.docker.com/mcp/server/dart/overview) + + + + + + + + + + + + +## MCP Database Server + +Comprehensive database server supporting PostgreSQL, MySQL, and SQLite with natural language SQL query capabilities. 
Enables AI agents to interact with databases through both direct SQL and natural language queries. + +[View on Docker Hub](https://hub.docker.com/mcp/server/database-server/overview) + + + + + Connection string for your database. Examples: SQLite: sqlite+aiosqlite:///data/mydb.db, PostgreSQL: postgresql+asyncpg://user:password@localhost:5432/mydb, MySQL: mysql+aiomysql://user:password@localhost:3306/mydb + + + + + +## Databutton + +Databutton MCP Server. + +[View on Docker Hub](https://hub.docker.com/mcp/server/databutton/overview) + + + + +## DeepWiki + +Tools for fetching and asking questions about GitHub repositories. + +[View on Docker Hub](https://hub.docker.com/mcp/server/deepwiki/overview) + + + + +## Descope + +The Descope Model Context Protocol (MCP) server provides an interface to interact with Descope's Management APIs, enabling the search and retrieval of project-related information. + +[View on Docker Hub](https://hub.docker.com/mcp/server/descope/overview) + + + + + + + + + + + + +## Desktop Commander + +Search, update, manage files and run terminal commands with AI. + +[View on Docker Hub](https://hub.docker.com/mcp/server/desktop-commander/overview) + + + + + List of directories that Desktop Commander can access + + + + + +## DevHub CMS + +DevHub CMS LLM integration through the Model Context Protocol. + +[View on Docker Hub](https://hub.docker.com/mcp/server/devhub-cms/overview) + + + + + + + + + + + + + + + +## Discord + +Interact with the Discord platform. + +[View on Docker Hub](https://hub.docker.com/mcp/server/mcp-discord/overview) + + + + + + + + + +## Docker Hub + +Docker Hub official MCP server. + +[View on Docker Hub](https://hub.docker.com/mcp/server/dockerhub/overview) + + + + + + + + + + + + +## Dodo Payments + +Tools for cross-border payments, taxes, and compliance. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/dodo-payments/overview) + + + + + + + + + +## DreamFactory + +DreamFactory is a REST API generation platform with support for hundreds of data sources, including Microsoft SQL Server, MySQL, PostgreSQL, and MongoDB. The DreamFactory MCP Server makes it easy for users to securely interact with their data sources via an MCP client. + +[View on Docker Hub](https://hub.docker.com/mcp/server/dreamfactory-mcp/overview) + + + + + + + + + + + + +## DuckDuckGo + +A Model Context Protocol (MCP) server that provides web search capabilities through DuckDuckGo, with additional features for content fetching and parsing. + +[View on Docker Hub](https://hub.docker.com/mcp/server/duckduckgo/overview) + + + + +## Dynatrace + +This MCP Server allows interaction with the Dynatrace observability platform, brining real-time observability data directly into your development workflow. + +[View on Docker Hub](https://hub.docker.com/mcp/server/dynatrace-mcp-server/overview) + + + + + + + + + + + + + + + +## E2B + +Giving Claude ability to run code with E2B via MCP (Model Context Protocol). + +[View on Docker Hub](https://hub.docker.com/mcp/server/e2b/overview) + + + + + + + + + +## EduBase + +The EduBase MCP server enables Claude and other LLMs to interact with EduBase's comprehensive e-learning platform through the Model Context Protocol (MCP). + +[View on Docker Hub](https://hub.docker.com/mcp/server/edubase/overview) + + + + + + + + + + + + + + + +## Effect MCP + +Tools and resources for writing Effect code in Typescript. + +[View on Docker Hub](https://hub.docker.com/mcp/server/effect-mcp/overview) + + + + +## Elasticsearch + +Interact with your Elasticsearch indices through natural language conversations. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/elasticsearch/overview) + + + + + + + + + + + + +## Elevenlabs MCP + +Official ElevenLabs Model Context Protocol (MCP) server that enables interaction with powerful Text to Speech and audio processing APIs. + +[View on Docker Hub](https://hub.docker.com/mcp/server/elevenlabs/overview) + + + + + + + + + + + + +## EverArt (Archived) + +Image generation server using EverArt's API. + +[View on Docker Hub](https://hub.docker.com/mcp/server/everart/overview) + + + + + + + + + +## Exa + +Exa MCP for web search and web crawling!. + +[View on Docker Hub](https://hub.docker.com/mcp/server/exa/overview) + + + + + + + + + +## Explorium B2B Data + +Discover companies, contacts, and business insights—powered by dozens of trusted external data sources. + +[View on Docker Hub](https://hub.docker.com/mcp/server/explorium/overview) + + + + + + + + + +## Fetch (Reference) + +Fetches a URL from the internet and extracts its contents as markdown. + +[View on Docker Hub](https://hub.docker.com/mcp/server/fetch/overview) + + + + +## Fibery + +Interact with your Fibery workspace. + +[View on Docker Hub](https://hub.docker.com/mcp/server/fibery/overview) + + + + + + + + + + + + +## Filesystem (Reference) + +Local filesystem access with configurable allowed paths. + +[View on Docker Hub](https://hub.docker.com/mcp/server/filesystem/overview) + + + + + + + + + +## Find-A-Domain + +Tools for finding domain names. + +[View on Docker Hub](https://hub.docker.com/mcp/server/find-a-domain/overview) + + + + +## Firecrawl + +🔥 Official Firecrawl MCP Server - Adds powerful web scraping and search to Cursor, Claude and any other LLM clients. + +[View on Docker Hub](https://hub.docker.com/mcp/server/firecrawl/overview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +## Firewalla + +Real-time network monitoring, security analysis, and firewall management through 28 specialized tools. 
Access security alerts, network flows, device status, and firewall rules directly from your Firewalla device. + +[View on Docker Hub](https://hub.docker.com/mcp/server/firewalla-mcp-server/overview) + + + + + Your Firewalla Box Global ID + + + + + + + Your Firewalla MSP domain (e.g., yourdomain.firewalla.net) + + + + + +## FlexPrice + +Official flexprice MCP Server. + +[View on Docker Hub](https://hub.docker.com/mcp/server/flexprice/overview) + + + + + + + + + + + + +## Git (Reference) + +Git repository interaction and automation. + +[View on Docker Hub](https://hub.docker.com/mcp/server/git/overview) + + + + + + + + + +## GitHub (Archived) + +Tools for interacting with the GitHub API, enabling file operations, repository management, search functionality, and more. + +[View on Docker Hub](https://hub.docker.com/mcp/server/github/overview) + + + + + + + + + +## GitHub Chat + +A Model Context Protocol (MCP) for analyzing and querying GitHub repositories using the GitHub Chat API. + +[View on Docker Hub](https://hub.docker.com/mcp/server/github-chat/overview) + + + + + + + + + +## GitHub Official + +Official GitHub MCP Server, by GitHub. Provides seamless integration with GitHub APIs, enabling advanced automation and interaction capabilities for developers and tools. + +[View on Docker Hub](https://hub.docker.com/mcp/server/github-official/overview) + + + + + + + + + +## GitLab (Archived) + +MCP Server for the GitLab API, enabling project management, file operations, and more. + +[View on Docker Hub](https://hub.docker.com/mcp/server/gitlab/overview) + + + + + + + + api url - optional for self-hosted instances + + + + + +## GitMCP + +Tools for interacting with Git repositories. + +[View on Docker Hub](https://hub.docker.com/mcp/server/gitmcp/overview) + + + + +## glif.app + +Easily run glif.app AI workflows inside your LLM: image generators, memes, selfies, and more. Glif supports all major multimedia AI models inside one app. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/glif/overview) + + + + + + + + + + + + + + + +## Gmail + +A Model Context Protocol server for Gmail operations using IMAP/SMTP with app password authentication. Supports listing messages, searching emails, and sending messages. To create your app password, visit your Google Account settings under Security > App Passwords. Or visit the link https://myaccount.google.com/apppasswords. + +[View on Docker Hub](https://hub.docker.com/mcp/server/gmail-mcp/overview) + + + + + Your Gmail email address + + + + + + + + +## Google Maps (Archived) + +Tools for interacting with the Google Maps API. + +[View on Docker Hub](https://hub.docker.com/mcp/server/google-maps/overview) + + + + + + + + + +## Google Maps Comprehensive MCP + +Complete Google Maps integration with 8 tools including geocoding, places search, directions, elevation data, and more using Google's latest APIs. + +[View on Docker Hub](https://hub.docker.com/mcp/server/google-maps-comprehensive/overview) + + + + + + + + + +## Grafana + +MCP server for Grafana. + +[View on Docker Hub](https://hub.docker.com/mcp/server/grafana/overview) + + + + + + + + + + + + +## Gyazo + +Official Model Context Protocol server for Gyazo. + +[View on Docker Hub](https://hub.docker.com/mcp/server/gyazo/overview) + + + + + + + + + +## Hackernews mcp + +A Model Context Protocol (MCP) server that provides access to Hacker News stories, comments, and user data, with support for search and content retrieval. + +[View on Docker Hub](https://hub.docker.com/mcp/server/mcp-hackernews/overview) + + + + +## Hackle + +Model Context Protocol server for Hackle. + +[View on Docker Hub](https://hub.docker.com/mcp/server/hackle/overview) + + + + + + + + + +## Handwriting OCR + +Model Context Protocol (MCP) Server for Handwriting OCR. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/handwriting-ocr/overview) + + + + + + + + + +## Humanitarian Data Exchange + +HDX MCP Server provides access to humanitarian data through the Humanitarian Data Exchange (HDX) API - https://data.humdata.org/hapi. This server offers 33 specialized tools for retrieving humanitarian information including affected populations (refugees, IDPs, returnees), baseline demographics, food security indicators, conflict data, funding information, and operational presence across hundreds of countries and territories. See repository for instructions on getting a free HDX_APP_INDENTIFIER for access. + +[View on Docker Hub](https://hub.docker.com/mcp/server/hdx/overview) + + + + + + + + + +## Heroku + +Heroku Platform MCP Server using the Heroku CLI. + +[View on Docker Hub](https://hub.docker.com/mcp/server/heroku/overview) + + + + + + + + + +## Hostinger API + +Interact with Hostinger services over the Hostinger API. + +[View on Docker Hub](https://hub.docker.com/mcp/server/hostinger-mcp-server/overview) + + + + + + + + + +## Hoverfly + +A Model Context Protocol (MCP) server that exposes Hoverfly as a programmable tool for AI assistants like Cursor, Claude, GitHub Copilot, and others supporting MCP. It enables dynamic mocking of third-party APIs to unblock development, automate testing, and simulate unavailable services during integration. + +[View on Docker Hub](https://hub.docker.com/mcp/server/hoverfly-mcp-server/overview) + + + + + + + + + +## HubSpot + +Unite marketing, sales, and customer service with AI-powered automation, lead management, and comprehensive analytics. + +[View on Docker Hub](https://hub.docker.com/mcp/server/hubspot/overview) + + + + + + + + + +## Hugging Face + +Tools for interacting with Hugging Face models, datasets, research papers, and more. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/hugging-face/overview) + + + + +## Hummingbot MCP: Trading Agent + +Hummingbot MCP is an open-source toolset that lets you control and monitor your Hummingbot trading bots through AI-powered commands and automation. + +[View on Docker Hub](https://hub.docker.com/mcp/server/hummingbot-mcp/overview) + + + + + + + + + + + + + + + +## Husqvarna Automower + +MCP Server for huqsvarna automower. + +[View on Docker Hub](https://hub.docker.com/mcp/server/husqvarna-automower/overview) + + + + + + + + + + + + +## Hyperbrowser + +A MCP server implementation for hyperbrowser. + +[View on Docker Hub](https://hub.docker.com/mcp/server/hyperbrowser/overview) + + + + + + + + + +## Hyperspell + +Hyperspell MCP Server. + +[View on Docker Hub](https://hub.docker.com/mcp/server/hyperspell/overview) + + + + + + + + + + + + + + + +## Iaptic + +Model Context Protocol server for interacting with iaptic. + +[View on Docker Hub](https://hub.docker.com/mcp/server/iaptic/overview) + + + + + + + + + + + + +## Inspektor Gadget + +AI interface to troubleshoot and observe Kubernetes/Container workloads. + +[View on Docker Hub](https://hub.docker.com/mcp/server/inspektor-gadget/overview) + + + + + Comma-separated list of gadget images (trace_dns, trace_tcp, etc) to use, allowing control over which gadgets are available as MCP tools + + + + Path to the kubeconfig file for accessing Kubernetes clusters + + + + + +## Javadocs + +Access to Java, Kotlin, and Scala library documentation. + +[View on Docker Hub](https://hub.docker.com/mcp/server/javadocs/overview) + + + + +## JetBrains + +A model context protocol server to work with JetBrains IDEs: IntelliJ, PyCharm, WebStorm, etc. Also, works with Android Studio. + +[View on Docker Hub](https://hub.docker.com/mcp/server/jetbrains/overview) + + + + + + + + + +## Kafka Schema Registry MCP + +Comprehensive MCP server for Kafka Schema Registry operations. 
Features multi-registry support, schema contexts, migration tools, OAuth authentication, and 57+ tools for complete schema management. Supports SLIM_MODE for optimal performance. + +[View on Docker Hub](https://hub.docker.com/mcp/server/kafka-schema-reg-mcp/overview) + + + + + Schema Registry URL + + + + + + + + + + Enable SLIM_MODE for better performance + + + + Enable read-only mode + + + + + +## Kagi search + +The Official Model Context Protocol (MCP) server for Kagi search & other tools. + +[View on Docker Hub](https://hub.docker.com/mcp/server/kagisearch/overview) + + + + + + + + + + + + +## Keboola + +Keboola MCP Server is an open-source bridge between your Keboola project and modern AI tools. + +[View on Docker Hub](https://hub.docker.com/mcp/server/keboola-mcp/overview) + + + + + + + + + + + + +## Kong Konnect + +A Model Context Protocol (MCP) server for interacting with Kong Konnect APIs, allowing AI assistants to query and analyze Kong Gateway configurations, traffic, and analytics. + +[View on Docker Hub](https://hub.docker.com/mcp/server/kong/overview) + + + + + + + + + + + + +## Kubectl + +MCP Server that enables AI assistants to interact with Kubernetes clusters via kubectl operations. + +[View on Docker Hub](https://hub.docker.com/mcp/server/kubectl-mcp-server/overview) + + + + + + + + + +## Kubernetes + +Connect to a Kubernetes cluster and manage it. + +[View on Docker Hub](https://hub.docker.com/mcp/server/kubernetes/overview) + + + + + the path to the host .kube/config + + + + + +## Lara Translate + +Connect to Lara Translate API, enabling powerful translation capabilities with support for language detection and context-aware translations. + +[View on Docker Hub](https://hub.docker.com/mcp/server/lara/overview) + + + + + + + + + + + + +## LINE + +MCP server that integrates the LINE Messaging API to connect an AI Agent to the LINE Official Account. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/line/overview) + + + + + + + + + + + + +## LinkedIn + +This MCP server allows Claude and other AI assistants to access your LinkedIn. Scrape LinkedIn profiles and companies, get your recommended jobs, and perform job searches. Set your li_at LinkedIn cookie to use this server. + +[View on Docker Hub](https://hub.docker.com/mcp/server/linkedin-mcp-server/overview) + + + + + + + + Custom user agent string (optional, helps avoid detection and cookie login issues) + + + + + +## LLM Text + +Discovers and retrieves llms.txt from websites. + +[View on Docker Hub](https://hub.docker.com/mcp/server/llmtxt/overview) + + + + +## Maestro + +A Model Context Protocol (MCP) server exposing Bitcoin blockchain data through the Maestro API platform. Provides tools to explore blocks, transactions, addresses, inscriptions, runes, and other metaprotocol data. + +[View on Docker Hub](https://hub.docker.com/mcp/server/maestro-mcp-server/overview) + + + + + + + + + +## Manifold + +Tools for accessing the Manifold Markets online prediction market platform. + +[View on Docker Hub](https://hub.docker.com/mcp/server/manifold/overview) + + + + +## Mapbox + +Transform any AI agent into a geospatially-aware system with Mapbox APIs. Provides geocoding, POI search, routing, travel time matrices, isochrones, and static map generation. + +[View on Docker Hub](https://hub.docker.com/mcp/server/mapbox/overview) + + + + + + + + + +## Mapbox Developer + +Direct access to Mapbox developer APIs for AI assistants. Enables style management, token management, GeoJSON preview, and other developer tools for building Mapbox applications. + +[View on Docker Hub](https://hub.docker.com/mcp/server/mapbox-devkit/overview) + + + + + + + + + +## Markdownify + +A Model Context Protocol server for converting almost anything to Markdown. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/markdownify/overview) + + + + + + + + + +## Markitdown + +A lightweight MCP server for calling MarkItDown. + +[View on Docker Hub](https://hub.docker.com/mcp/server/markitdown/overview) + + + + + + + + + +## Maven Tools + +JVM dependency intelligence for any build tool using Maven Central Repository. Includes Context7 integration for upgrade documentation and guidance. + +[View on Docker Hub](https://hub.docker.com/mcp/server/maven-tools-mcp/overview) + + + + +## Memory (Reference) + +Knowledge graph-based persistent memory system. + +[View on Docker Hub](https://hub.docker.com/mcp/server/memory/overview) + + + + +## Mercado Libre + +Provides access to Mercado Libre E-Commerce API. + +[View on Docker Hub](https://hub.docker.com/mcp/server/mercado-libre/overview) + + + + + + + + + +## Mercado Pago + +Provides access to Mercado Pago Marketplace API. + +[View on Docker Hub](https://hub.docker.com/mcp/server/mercado-pago/overview) + + + + + + + + + +## Metabase MCP + +A comprehensive MCP server for Metabase with 70+ tools. + +[View on Docker Hub](https://hub.docker.com/mcp/server/metabase/overview) + + + + + + + + + + + + + + + + + + +## Minecraft Wiki + +A MCP Server for browsing the official Minecraft Wiki!. + +[View on Docker Hub](https://hub.docker.com/mcp/server/minecraft-wiki/overview) + + + + +## MongoDB + +A Model Context Protocol server to connect to MongoDB databases and MongoDB Atlas Clusters. + +[View on Docker Hub](https://hub.docker.com/mcp/server/mongodb/overview) + + + + + + + + + +## MultiversX + +MCP Server for MultiversX. + +[View on Docker Hub](https://hub.docker.com/mcp/server/multiversx-mx/overview) + + + + + + + + + + + + +## Nasdaq Data Link + +MCP server to interact with the data feeds provided by the Nasdaq Data Link. Developed by the community and maintained by Stefano Amorelli. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/nasdaq-data-link/overview) + + + + + + + + + +## Needle + +Production-ready RAG service to search and retrieve data from your documents. + +[View on Docker Hub](https://hub.docker.com/mcp/server/needle-mcp/overview) + + + + + + + + + +## Neo4j Cloud Aura Api + +Manage Neo4j Aura database instances through the Neo4j Aura API. + +[View on Docker Hub](https://hub.docker.com/mcp/server/neo4j-cloud-aura-api/overview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +## Neo4j Cypher + +Interact with Neo4j using Cypher graph queries. + +[View on Docker Hub](https://hub.docker.com/mcp/server/neo4j-cypher/overview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +## Neo4j Data Modeling + +MCP server that assists in creating, validating and visualizing graph data models. + +[View on Docker Hub](https://hub.docker.com/mcp/server/neo4j-data-modeling/overview) + + + + + + + + + + + + + + + + + + + + + + + + +## Neo4j Memory + +Provide persistent memory capabilities through Neo4j graph database integration. + +[View on Docker Hub](https://hub.docker.com/mcp/server/neo4j-memory/overview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +## Neon + +MCP server for interacting with Neon Management API and databases. + +[View on Docker Hub](https://hub.docker.com/mcp/server/neon/overview) + + + + + + + + + +## Node.js Sandbox + +A Node.js–based Model Context Protocol server that spins up disposable Docker containers to execute arbitrary JavaScript. + +[View on Docker Hub](https://hub.docker.com/mcp/server/node-code-sandbox/overview) + + + + +## Notion + +Official Notion MCP Server. + +[View on Docker Hub](https://hub.docker.com/mcp/server/notion/overview) + + + + + + + + + +## Novita + +Seamless interaction with Novita AI platform resources. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/novita/overview) + + + + +## NPM Sentinel + +MCP server that enables intelligent NPM package analysis powered by AI. + +[View on Docker Hub](https://hub.docker.com/mcp/server/npm-sentinel/overview) + + + + +## Obsidian + +MCP server that interacts with Obsidian via the Obsidian rest API community plugin. + +[View on Docker Hub](https://hub.docker.com/mcp/server/obsidian/overview) + + + + + + + + + +## Okta + +Secure Okta identity and access management via Model Context Protocol (MCP). Access Okta users, groups, applications, logs, and policies through AI assistants with enterprise-grade security. + +[View on Docker Hub](https://hub.docker.com/mcp/server/okta-mcp-fctr/overview) + + + + + Okta organization URL (e.g., https://dev-123456.okta.com) + + + + Maximum concurrent requests to Okta API + + + + Logging level for server output + + + + + + + + +## omi-mcp + +A Model Context Protocol server for Omi interaction and automation. This server provides tools to read, search, and manipulate Memories and Conversations. + +[View on Docker Hub](https://hub.docker.com/mcp/server/omi/overview) + + + + + + + + + +## ONLYOFFICE DocSpace + +ONLYOFFICE DocSpace is a room-based collaborative platform which allows organizing a clear file structure depending on users' needs or project goals. + +[View on Docker Hub](https://hub.docker.com/mcp/server/onlyoffice-docspace/overview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +## OpenAPI Toolkit for MCP + +Fetch, validate, and generate code or curl from any OpenAPI or Swagger spec - all from a single URL. + +[View on Docker Hub](https://hub.docker.com/mcp/server/openapi/overview) + + + + + + + + + +## OpenAPI Schema + +OpenAPI Schema Model Context Protocol Server. + +[View on Docker Hub](https://hub.docker.com/mcp/server/openapi-schema/overview) + + + + + + + + + +## Airbnb Search + +MCP Server for searching Airbnb and get listing details. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/openbnb-airbnb/overview) + + + + +## OpenMesh + +Discover and connect to a curated marketplace of MCP servers for extending AI agent capabilities. + +[View on Docker Hub](https://hub.docker.com/mcp/server/openmesh/overview) + + + + +## Openweather + +A simple MCP service that provides current weather and 5-day forecast using the free OpenWeatherMap API. + +[View on Docker Hub](https://hub.docker.com/mcp/server/openweather/overview) + + + + + + + + + +## OpenZeppelin Cairo Contracts + +Access to OpenZeppelin Cairo Contracts. + +[View on Docker Hub](https://hub.docker.com/mcp/server/openzeppelin-cairo/overview) + + + + +## OpenZeppelin Solidity Contracts + +Access to OpenZeppelin Solidity Contracts. + +[View on Docker Hub](https://hub.docker.com/mcp/server/openzeppelin-solidity/overview) + + + + +## OpenZeppelin Stellar Contracts + +Access to OpenZeppelin Stellar Contracts. + +[View on Docker Hub](https://hub.docker.com/mcp/server/openzeppelin-stellar/overview) + + + + +## OpenZeppelin Stylus Contracts + +Access to OpenZeppelin Stylus Contracts. + +[View on Docker Hub](https://hub.docker.com/mcp/server/openzeppelin-stylus/overview) + + + + +## Opik + +Model Context Protocol (MCP) implementation for Opik enabling seamless IDE integration and unified access to prompts, projects, traces, and metrics. + +[View on Docker Hub](https://hub.docker.com/mcp/server/opik/overview) + + + + + + + + + + + + + + + +## Opine + +A Model Context Protocol (MCP) server for querying deals and evaluations from the Opine CRM API. + +[View on Docker Hub](https://hub.docker.com/mcp/server/opine-mcp-server/overview) + + + + + + + + + +## Oracle Database + +Connect to Oracle databases via MCP, providing secure read-only access with support for schema exploration, query execution, and metadata inspection. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/oracle/overview) + + + + + + + + + + + + + + + +## OSP Marketing Tools + +A Model Context Protocol (MCP) server that empowers LLMs to use some of Open Srategy Partners' core writing and product marketing techniques. + +[View on Docker Hub](https://hub.docker.com/mcp/server/osp_marketing_tools/overview) + + + + +## Oxylabs + +A Model Context Protocol (MCP) server that enables AI assistants like Claude to seamlessly access web data through Oxylabs' powerful web scraping technology. + +[View on Docker Hub](https://hub.docker.com/mcp/server/oxylabs/overview) + + + + + + + + + + + + +## Paper Search + +A MCP for searching and downloading academic papers from multiple sources like arXiv, PubMed, bioRxiv, etc. + +[View on Docker Hub](https://hub.docker.com/mcp/server/paper-search/overview) + + + + +## Perplexity + +Connector for Perplexity API, to enable real-time, web-wide research. + +[View on Docker Hub](https://hub.docker.com/mcp/server/perplexity-ask/overview) + + + + + + + + + +## Program Integrity Alliance + +An MCP server to help make U.S. Government open datasets AI-friendly. + +[View on Docker Hub](https://hub.docker.com/mcp/server/pia/overview) + + + + + + + + + +## Pinecone Assistant + +Pinecone Assistant MCP server. + +[View on Docker Hub](https://hub.docker.com/mcp/server/pinecone/overview) + + + + + + + + + + + + +## ExecuteAutomation Playwright MCP + +Playwright Model Context Protocol Server - Tool to automate Browsers and APIs in Claude Desktop, Cline, Cursor IDE and More 🔌. + +[View on Docker Hub](https://hub.docker.com/mcp/server/playwright-mcp-server/overview) + + + + + + + + + +## Plugged.in MCP Proxy + +A unified MCP proxy that aggregates multiple MCP servers into one interface, enabling seamless tool discovery and management across all your AI interactions. Manage all your MCP servers from a single connection point with RAG capabilities and real-time notifications. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/pluggedin-mcp-proxy/overview) + + + + + Base URL for the Plugged.in API (optional, defaults to https://plugged.in for cloud or http://localhost:12005 for self-hosted) + + + + + + + + +## Polar Signals + +MCP server for Polar Signals Cloud continuous profiling platform, enabling AI assistants to analyze CPU performance, memory usage, and identify optimization opportunities in production systems. + +[View on Docker Hub](https://hub.docker.com/mcp/server/polar-signals/overview) + + + + + + + + + +## PomoDash + +Connect your AI assistant to PomoDash for seamless task and project management. + +[View on Docker Hub](https://hub.docker.com/mcp/server/pomodash/overview) + + + + + + + + + +## PostgreSQL readonly (Archived) + +Connect with read-only access to PostgreSQL databases. This server enables LLMs to inspect database schemas and execute read-only queries. + +[View on Docker Hub](https://hub.docker.com/mcp/server/postgres/overview) + + + + + + + + + +## Postman + +Postman's MCP server connects AI agents, assistants, and chatbots directly to your APIs on Postman. Use natural language to prompt AI to automate work across your Postman collections, environments, workspaces, and more. + +[View on Docker Hub](https://hub.docker.com/mcp/server/postman/overview) + + + + + + + + + +## Pref Editor + +Pref Editor is a tool for viewing and editing Android app preferences during development. + +[View on Docker Hub](https://hub.docker.com/mcp/server/pref-editor/overview) + + + + +## Prometheus + +A Model Context Protocol (MCP) server that enables AI assistants to query and analyze Prometheus metrics through standardized interfaces. Connect to your Prometheus instance to retrieve metrics, perform queries, and gain insights into your system's performance and health. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/prometheus/overview) + + + + + The URL of your Prometheus server + + + + + +## Puppeteer (Archived) + +Browser automation and web scraping using Puppeteer. + +[View on Docker Hub](https://hub.docker.com/mcp/server/puppeteer/overview) + + + + +## Python Refactoring Assistant + +Educational Python refactoring assistant that provides guided suggestions for AI assistants. Features: • Step-by-step refactoring instructions without modifying code • Comprehensive code analysis using professional tools (Rope, Radon, Vulture, Jedi, LibCST, Pyrefly) • Educational approach teaching refactoring patterns through guided practice • Support for both guide-only and apply-changes modes • Identifies long functions, high complexity, dead code, and type issues • Provides precise line numbers and specific refactoring instructions • Compatible with all AI assistants (Claude, GPT, Cursor, Continue, etc.) Perfect for developers learning refactoring patterns while maintaining full control over code changes. Acts as a refactoring mentor rather than an automated code modifier. + +[View on Docker Hub](https://hub.docker.com/mcp/server/mcp-python-refactoring/overview) + + + + +## QuantConnect + +The QuantConnect MCP Server is a bridge for AIs (such as Claude and OpenAI o3 Pro) to interact with our cloud platform. When equipped with our MCP, the AI can perform tasks on your behalf through our API such as updating projects, writing strategies, backtesting, and deploying strategies to production live-trading. + +[View on Docker Hub](https://hub.docker.com/mcp/server/quantconnect/overview) + + + + + + + + + + + + + + + +## Ramparts MCP Security Scanner + +A comprehensive security scanner for MCP servers with YARA rules and static analysis capabilities. + +[View on Docker Hub](https://hub.docker.com/mcp/server/ramparts/overview) + + + + +## Razorpay + +Razorpay's Official MCP Server. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/razorpay/overview) + + + + + + + + + + + + +## Mcp reddit + +A comprehensive Model Context Protocol (MCP) server for Reddit integration. This server enables AI agents to interact with Reddit programmatically through a standardized interface. + +[View on Docker Hub](https://hub.docker.com/mcp/server/mcp-reddit/overview) + + + + + + + + + + + + + + + + + + +## Redis + +Access to Redis database operations. + +[View on Docker Hub](https://hub.docker.com/mcp/server/redis/overview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +## Redis Cloud + +MCP Server for Redis Cloud's API, allowing you to manage your Redis Cloud resources using natural language. + +[View on Docker Hub](https://hub.docker.com/mcp/server/redis-cloud/overview) + + + + + + + + + + + + +## Ref - up-to-date docs + +Ref powerful search tool connects your coding tools with documentation context. It includes an up-to-date index of public documentation and it can ingest your private documentation (eg. GitHub repos, PDFs) as well. + +[View on Docker Hub](https://hub.docker.com/mcp/server/ref/overview) + + + + + + + + + +## Remote MCP + +Tools for finding remote MCP servers. + +[View on Docker Hub](https://hub.docker.com/mcp/server/remote-mcp/overview) + + + + +## Render + +Interact with your Render resources via LLMs. + +[View on Docker Hub](https://hub.docker.com/mcp/server/render/overview) + + + + + + + + + +## Send emails + +Send emails directly from Cursor with this email sending MCP server. + +[View on Docker Hub](https://hub.docker.com/mcp/server/resend/overview) + + + + + + + + comma separated list of reply to email addresses + + + + sender email address + + + + + +## RISKEN + +RISKEN's official MCP Server. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/risken/overview) + + + + + + + + + + + + +## Root.io Vulnerability Remediation MCP + +MCP server that provides container image vulnerability scanning and remediation capabilities through Root.io. + +[View on Docker Hub](https://hub.docker.com/mcp/server/root/overview) + + + + + + + + + +## WiseVision ROS2 + +Python server implementing Model Context Protocol (MCP) for ROS2. + +[View on Docker Hub](https://hub.docker.com/mcp/server/ros2/overview) + + + + +## Rube + +Access to Rube's catalog of remote MCP servers. + +[View on Docker Hub](https://hub.docker.com/mcp/server/rube/overview) + + + + + + + + + +## Blazing-fast, asynchronous for seamless filesystem operations. + +The Rust MCP Filesystem is a high-performance, asynchronous, and lightweight Model Context Protocol (MCP) server built in Rust for secure and efficient filesystem operations. Designed with security in mind, it operates in read-only mode by default and restricts clients from updating allowed directories via MCP Roots unless explicitly enabled, ensuring robust protection against unauthorized access. Leveraging asynchronous I/O, it delivers blazingly fast performance with a minimal resource footprint. Optimized for token efficiency, the Rust MCP Filesystem enables large language models (LLMs) to precisely target searches and edits within specific sections of large files and restrict operations by file size range, making it ideal for efficient file exploration, automation, and system integration. + +[View on Docker Hub](https://hub.docker.com/mcp/server/rust-mcp-filesystem/overview) + + + + + Enable read/write mode. If false, the app operates in read-only mode. + + + + List of directories that rust-mcp-filesystem can access. + + + + Enable dynamic directory access control via MCP client-side Roots. 
+ + + + + +## SchemaCrawler AI + +The SchemaCrawler AI MCP Server enables natural language interaction with your database schema using an MCP client in "Agent" mode. It allows users to explore tables, columns, foreign keys, triggers, stored procedures and more simply by asking questions like "Explain the code for the interest calculation stored procedure". You can also ask it to help with SQL, since it knows your schema. This is ideal for developers, DBAs, and data analysts who want to streamline schema comprehension and query development without diving into dense documentation. + +[View on Docker Hub](https://hub.docker.com/mcp/server/schemacrawler-ai/overview) + + + + + --info-level How much database metadata to retrieve + + + + + + + + + + + + + --database Database to connect to (optional) + + + + --host Database host (optional) + + + + --port Database port (optional) + + + + --server SchemaCrawler database plugin + + + + --url JDBC URL for database connection + + + + Host volume to map within the Docker container + + + + + +## Schogini MCP Image Border + +This adds a border to an image and returns a base64-encoded image. + +[View on Docker Hub](https://hub.docker.com/mcp/server/schogini-mcp-image-border/overview) + + + + +## ScrapeGraph + +ScrapeGraph MCP Server. + +[View on Docker Hub](https://hub.docker.com/mcp/server/scrapegraph/overview) + + + + + + + + + +## Scrapezy + +A Model Context Protocol server for Scrapezy that enables AI models to extract structured data from websites. + +[View on Docker Hub](https://hub.docker.com/mcp/server/scrapezy/overview) + + + + + + + + + +## Securenote.link + +SecureNote.link MCP Server - allowing AI agents to securely share sensitive information through end-to-end encrypted notes. + +[View on Docker Hub](https://hub.docker.com/mcp/server/securenote-link-mcp-server/overview) + + + + +## Semgrep + +MCP server for using Semgrep to scan code for security vulnerabilities. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/semgrep/overview) + + + + +## Sentry (Archived) + +A Model Context Protocol server for retrieving and analyzing issues from Sentry.io. This server provides tools to inspect error reports, stacktraces, and other debugging information from your Sentry account. + +[View on Docker Hub](https://hub.docker.com/mcp/server/sentry/overview) + + + + + + + + + +## Sequa.AI + +Stop stitching context for Copilot and Cursor. With Sequa MCP, your AI tools know your entire codebase and docs out of the box. + +[View on Docker Hub](https://hub.docker.com/mcp/server/sequa/overview) + + + + + + + + + + + + +## Sequential Thinking (Reference) + +Dynamic and reflective problem-solving through thought sequences. + +[View on Docker Hub](https://hub.docker.com/mcp/server/sequentialthinking/overview) + + + + +## Short.io + +Access to Short.io's link shortener and analytics tools. + +[View on Docker Hub](https://hub.docker.com/mcp/server/short-io/overview) + + + + + + + + + +## SimpleCheckList + +Advanced SimpleCheckList with MCP server and SQLite database for comprehensive task management. Features: • Complete project and task management system • Hierarchical organization (Projects → Groups → Task Lists → Tasks → Subtasks) • SQLite database for data persistence • RESTful API with comprehensive endpoints • MCP protocol compliance for AI assistant integration • Docker-optimized deployment with stability improvements **v1.0.1 Update**: Enhanced Docker stability with improved container lifecycle management. Default mode optimized for containerized deployment with reliable startup and shutdown processes. Perfect for AI assistants managing complex project workflows and task hierarchies. + +[View on Docker Hub](https://hub.docker.com/mcp/server/simplechecklist/overview) + + + + +## Singlestore + +MCP server for interacting with SingleStore Management API and services. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/singlestore/overview) + + + + + + + + + +## Slack (Archived) + +Interact with Slack Workspaces over the Slack API. + +[View on Docker Hub](https://hub.docker.com/mcp/server/slack/overview) + + + + + + + + + + + + + + + +## SmartBear + +MCP server for AI access to SmartBear tools, including BugSnag, Reflect, API Hub, PactFlow. + +[View on Docker Hub](https://hub.docker.com/mcp/server/smartbear/overview) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +## SonarQube + +Interact with SonarQube Cloud, Server and Community build over the web API. Analyze code to identify quality and security issues. + +[View on Docker Hub](https://hub.docker.com/mcp/server/sonarqube/overview) + + + + + Organization key for SonarQube Cloud, not required for SonarQube Server or Community Build + + + + + + + URL of the SonarQube instance, to provide only for SonarQube Server or Community Build + + + + + +## SQLite (Archived) + +Database interaction and business intelligence capabilities. + +[View on Docker Hub](https://hub.docker.com/mcp/server/SQLite/overview) + + + + +## StackGen + +AI-powered DevOps assistant for managing cloud infrastructure and applications. + +[View on Docker Hub](https://hub.docker.com/mcp/server/stackgen/overview) + + + + + + + + URL of your StackGen instance + + + + + +## StackHawk + +A Model Context Protocol (MCP) server for integrating with StackHawk's security scanning platform. Provides security analytics, YAML configuration management, sensitive data/threat surface analysis, and anti-hallucination tools for LLMs. + +[View on Docker Hub](https://hub.docker.com/mcp/server/stackhawk/overview) + + + + + + + + + +## Stripe + +Interact with Stripe services over the Stripe API. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/stripe/overview) + + + + + + + + + +## Supadata + +Official Supadata MCP Server - Adds powerful video & web scraping to Cursor, Claude and any other LLM clients. + +[View on Docker Hub](https://hub.docker.com/mcp/server/supadata/overview) + + + + + + + + + +## Suzieq MCP + +MCP Server to interact with a SuzieQ network observability instance via its REST API. + +[View on Docker Hub](https://hub.docker.com/mcp/server/suzieq/overview) + + + + + + + + + + + + +## Task orchestrator + +Model Context Protocol (MCP) server for comprehensive task and feature management, providing AI assistants with a structured, context-efficient way to interact with project data. + +[View on Docker Hub](https://hub.docker.com/mcp/server/task-orchestrator/overview) + + + + +## Tavily + +The Tavily MCP server provides seamless interaction with the tavily-search and tavily-extract tools, real-time web search capabilities through the tavily-search tool and Intelligent data extraction from web pages via the tavily-extract tool. + +[View on Docker Hub](https://hub.docker.com/mcp/server/tavily/overview) + + + + + + + + + +## Teamwork + +Tools for Teamwork.com products. + +[View on Docker Hub](https://hub.docker.com/mcp/server/teamwork/overview) + + + + + + + + + +## Telnyx + +Enables interaction with powerful telephony, messaging, and AI assistant APIs. + +[View on Docker Hub](https://hub.docker.com/mcp/server/telnyx/overview) + + + + + + + + + +## Tembo + +MCP server for Tembo Cloud's platform API. + +[View on Docker Hub](https://hub.docker.com/mcp/server/tembo/overview) + + + + + + + + + +## Hashicorp Terraform + +The Terraform MCP Server provides seamless integration with Terraform ecosystem, enabling advanced automation and interaction capabilities for Infrastructure as Code (IaC) development. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/terraform/overview) + + + + +## Text-to-GraphQL + +Transform natural language queries into GraphQL queries using an AI agent. Provides schema management, query validation, execution, and history tracking. + +[View on Docker Hub](https://hub.docker.com/mcp/server/text-to-graphql/overview) + + + + + + + + Authentication method for GraphQL API + + + + + + + OpenAI model to use + + + + Model temperature for responses + + + + + + + + +## Tigris Data + +Tigris is a globally distributed S3-compatible object storage service that provides low latency anywhere in the world, enabling developers to store and access any amount of data for a wide range of use cases. + +[View on Docker Hub](https://hub.docker.com/mcp/server/tigris/overview) + + + + + + + + + + + + + + + +## Time (Reference) + +Time and timezone conversion capabilities. + +[View on Docker Hub](https://hub.docker.com/mcp/server/time/overview) + + + + +## Triplewhale + +Triplewhale MCP Server. + +[View on Docker Hub](https://hub.docker.com/mcp/server/triplewhale/overview) + + + + + + + + + +## Unreal Engine + +A comprehensive Model Context Protocol (MCP) server that enables AI assistants to control Unreal Engine via Remote Control API. Built with TypeScript and designed for game development automation. + +[View on Docker Hub](https://hub.docker.com/mcp/server/unreal-engine-mcp-server/overview) + + + + + Logging level + + + + Unreal Engine host address. Use: host.docker.internal for local UE on Windows/Mac Docker, 127.0.0.1 for Linux without Docker, or actual IP address (e.g., 192.168.1.100) for remote UE + + + + Remote Control HTTP port + + + + Remote Control WebSocket port + + + + + +## VeyraX + +VeyraX MCP is the only connection you need to access all your tools in any MCP-compatible environment. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/veyrax/overview) + + + + + + + + + +## Vizro + +provides tools and templates to create a functioning Vizro chart or dashboard step by step. + +[View on Docker Hub](https://hub.docker.com/mcp/server/vizro/overview) + + + + +## Vuln nist + +This MCP server exposes tools to query the NVD/CVE REST API and return formatted text results suitable for LLM consumption via the MCP protocol. It includes automatic query chunking for large date ranges and parallel processing for improved performance. + +[View on Docker Hub](https://hub.docker.com/mcp/server/vuln-nist-mcp-server/overview) + + + + +## Wayfound MCP + +Wayfound’s MCP server allows business users to govern, supervise, and improve AI Agents. + +[View on Docker Hub](https://hub.docker.com/mcp/server/wayfound/overview) + + + + + + + + + +## Webflow + +Model Context Protocol (MCP) server for the Webflow Data API. + +[View on Docker Hub](https://hub.docker.com/mcp/server/webflow/overview) + + + + + + + + + +## Wikipedia + +A Model Context Protocol (MCP) server that retrieves information from Wikipedia to provide context to LLMs. + +[View on Docker Hub](https://hub.docker.com/mcp/server/wikipedia-mcp/overview) + + + + +## WolframAlpha + +Connect your chat repl to wolfram alpha computational intelligence. + +[View on Docker Hub](https://hub.docker.com/mcp/server/wolfram-alpha/overview) + + + + + + + + + +## YouTube transcripts + +Retrieves transcripts for given YouTube video URLs. + +[View on Docker Hub](https://hub.docker.com/mcp/server/youtube_transcript/overview) + + + + +## Zerodha Kite Connect + +MCP server for Zerodha Kite Connect API - India's leading stock broker trading platform. Execute trades, manage portfolios, and access real-time market data for NSE, BSE, and other Indian exchanges. 
+ +[View on Docker Hub](https://hub.docker.com/mcp/server/zerodha-kite/overview) + + + + + Access token obtained after OAuth authentication (optional - can be generated at runtime) + + + + Your Kite Connect API key from the developer console + + + + + + + OAuth redirect URL configured in your Kite Connect app + + + + diff --git a/docs/sandbox/security.mdx b/docs/sandbox/security.mdx index 9976aa06..3785c5bc 100644 --- a/docs/sandbox/security.mdx +++ b/docs/sandbox/security.mdx @@ -39,7 +39,7 @@ When secure access is enabled, you must provide an authentication token that was Each call to the sandbox controller must include an additional header `X-Access-Token` with the access token value returned during sandbox creation. -For sandbox [upload](/docs/filesystem/upload#upload-with-pre-signed-url) and [download](/docs/filesystem/download#download-with-pre-signed-url) URLs, you need to generate pre-signed URLs. We are advising to use SDK for generating presigned URLs. +For sandbox [upload](/docs/sandbox/filesystem#upload-with-pre-signed-url) and [download](/docs/sandbox/filesystem#download-with-pre-signed-url) URLs, you need to generate pre-signed URLs. We are advising to use SDK for generating presigned URLs. ### Disable secure access diff --git a/docs/sandbox/volumes.mdx b/docs/sandbox/volumes.mdx new file mode 100644 index 00000000..efa8be9c --- /dev/null +++ b/docs/sandbox/volumes.mdx @@ -0,0 +1,583 @@ +--- +title: "Volumes" +sidebarTitle: Volumes +icon: "hard-drive" +description: "Persistent storage volumes for E2B sandboxes — create, mount, read, write, and manage files that persist across sandbox sessions." +--- + +Volumes is currently in private beta. + +Volumes provide persistent storage that exists independently of any single sandbox. Data written to a volume survives sandbox shutdowns and can be shared across multiple sandbox sessions. Use volumes when you need files, datasets, or other artifacts to persist beyond the lifecycle of an individual sandbox. 
+ +## Managing volumes + +### Create a volume + + +Volume names can only contain letters, numbers, and hyphens. + + + +```js JavaScript & TypeScript +import { Volume } from 'e2b' + +const volume = await Volume.create('my-volume') +console.log(volume.volumeId) // Volume ID +console.log(volume.name) // 'my-volume' +``` +```python Python +from e2b import Volume + +volume = Volume.create('my-volume') +print(volume.volume_id) # Volume ID +print(volume.name) # 'my-volume' +``` + + +### Connect to an existing volume + +You can connect to an existing volume by its ID using the `connect()` method. + + +```js JavaScript & TypeScript +import { Volume } from 'e2b' + +const volume = await Volume.connect('volume-id') +console.log(volume.volumeId) // Volume ID +console.log(volume.name) // Volume name +``` +```python Python +from e2b import Volume + +volume = Volume.connect('volume-id') +print(volume.volume_id) # Volume ID +print(volume.name) # Volume name +``` + + +### List volumes + + +```js JavaScript & TypeScript +import { Volume } from 'e2b' + +const volumes = await Volume.list() +console.log(volumes) +// [{ volumeId: '...', name: 'my-volume' }, ...] +``` +```python Python +from e2b import Volume + +volumes = Volume.list() +print(volumes) +# [VolumeInfo(volume_id='...', name='my-volume'), ...] 
+``` + + +### Get volume info + + +```js JavaScript & TypeScript +import { Volume } from 'e2b' + +const info = await Volume.getInfo('volume-id') +console.log(info) +// { volumeId: '...', name: 'my-volume' } +``` +```python Python +from e2b import Volume + +info = Volume.get_info('volume-id') +print(info) +# VolumeInfo(volume_id='...', name='my-volume') +``` + + +### Destroy a volume + + +```js JavaScript & TypeScript +import { Volume } from 'e2b' + +const success = await Volume.destroy('volume-id') +console.log(success) // true +``` +```python Python +from e2b import Volume + +success = Volume.destroy('volume-id') +print(success) # True +``` + + +## Mounting volumes + +You can mount one or more volumes to a sandbox when creating it. The keys of the `volumeMounts` / `volume_mounts` object are the mount paths inside the sandbox. + + +```js JavaScript & TypeScript +import { Volume, Sandbox } from 'e2b' + +const volume = await Volume.create('my-volume') + +// You can pass a Volume object +const sandbox = await Sandbox.create({ + volumeMounts: { + '/mnt/my-data': volume, + }, +}) + +// Or pass the volume name directly +const sandbox = await Sandbox.create({ + volumeMounts: { + '/mnt/my-data': 'my-volume', + }, +}) + +// Files written to /mnt/my-data inside the sandbox are persisted in the volume +await sandbox.files.write('/mnt/my-data/hello.txt', 'Hello, world!') +``` +```python Python +from e2b import Volume, Sandbox + +volume = Volume.create('my-volume') + +# You can pass a Volume object +sandbox = Sandbox.create( + volume_mounts={ + '/mnt/my-data': volume, + }, +) + +# Or pass the volume name directly +sandbox = Sandbox.create( + volume_mounts={ + '/mnt/my-data': 'my-volume', + }, +) + +# Files written to /mnt/my-data inside the sandbox are persisted in the volume +sandbox.files.write('/mnt/my-data/hello.txt', 'Hello, world!') +``` + + +## Read and write files + +### Reading files + +You can read files from a volume using the `readFile()` / `read_file()` method. 
+ + +```js JavaScript & TypeScript +import { Volume } from 'e2b' + +const volume = await Volume.create('my-volume') + +const content = await volume.readFile('/path/to/file') +console.log(content) +``` +```python Python +from e2b import Volume + +volume = Volume.create('my-volume') + +content = volume.read_file('/path/to/file') +print(content) +``` + + +### Writing files + +You can write files to a volume using the `writeFile()` / `write_file()` method. + + +```js JavaScript & TypeScript +import { Volume } from 'e2b' + +const volume = await Volume.create('my-volume') + +await volume.writeFile('/path/to/file', 'file content') +``` +```python Python +from e2b import Volume + +volume = Volume.create('my-volume') + +volume.write_file('/path/to/file', 'file content') +``` + + +### Creating directories + +You can create directories in a volume using the `makeDir()` / `make_dir()` method. + + +```js JavaScript & TypeScript +import { Volume } from 'e2b' + +const volume = await Volume.create('my-volume') + +await volume.makeDir('/path/to/dir') + +// Create nested directories with force option +await volume.makeDir('/path/to/nested/dir', { force: true }) +``` +```python Python +from e2b import Volume + +volume = Volume.create('my-volume') + +volume.make_dir('/path/to/dir') + +# Create nested directories with force option +volume.make_dir('/path/to/nested/dir', force=True) +``` + + +### Listing directory contents + +You can list the contents of a directory in a volume using the `list()` method. + + +```js JavaScript & TypeScript +import { Volume } from 'e2b' + +const volume = await Volume.create('my-volume') + +const entries = await volume.list('/path/to/dir') +console.log(entries) +// [ +// { name: 'file.txt', type: 'file', path: '/path/to/dir/file.txt', size: 13, ... }, +// { name: 'subdir', type: 'directory', path: '/path/to/dir/subdir', size: 0, ... 
}, +// ] +``` +```python Python +from e2b import Volume + +volume = Volume.create('my-volume') + +entries = volume.list('/path/to/dir') +print(entries) +# [ +# VolumeEntryStat(name='file.txt', type_='file', path='/path/to/dir/file.txt', size=13, ...), +# VolumeEntryStat(name='subdir', type_='directory', path='/path/to/dir/subdir', size=0, ...), +# ] +``` + + +### Removing files or directories + +You can remove files or directories from a volume using the `remove()` method. + + +```js JavaScript & TypeScript +import { Volume } from 'e2b' + +const volume = await Volume.create('my-volume') + +// Remove a file +await volume.remove('/path/to/file') + +// Remove a directory recursively +await volume.remove('/path/to/dir', { recursive: true }) +``` +```python Python +from e2b import Volume + +volume = Volume.create('my-volume') + +# Remove a file +volume.remove('/path/to/file') + +# Remove a directory recursively +volume.remove('/path/to/dir', recursive=True) +``` + + +## File and directory metadata + +You can get information about a file or directory in a volume using the `getInfo()` / `get_info()` method. 
+ +### File metadata + + +```js JavaScript & TypeScript +import { Volume } from 'e2b' + +const volume = await Volume.create('my-volume') + +// Create a new file +await volume.writeFile('/test_file.txt', 'Hello, world!') + +// Get information about the file +const info = await volume.getInfo('/test_file.txt') + +console.log(info) +// { +// name: 'test_file.txt', +// type: 'file', +// path: '/test_file.txt', +// size: 13, +// mode: 0o644, +// uid: 0, +// gid: 0, +// atime: 2025-05-26T12:00:00.000Z, +// mtime: 2025-05-26T12:00:00.000Z, +// ctime: 2025-05-26T12:00:00.000Z, +// } +``` +```python Python +from e2b import Volume + +volume = Volume.create('my-volume') + +# Create a new file +volume.write_file('/test_file.txt', 'Hello, world!') + +# Get information about the file +info = volume.get_info('/test_file.txt') + +print(info) +# VolumeEntryStat( +# name='test_file.txt', +# type_='file', +# path='/test_file.txt', +# size=13, +# mode=0o644, +# uid=0, +# gid=0, +# atime=datetime(2025, 5, 26, 12, 0, 0), +# mtime=datetime(2025, 5, 26, 12, 0, 0), +# ctime=datetime(2025, 5, 26, 12, 0, 0), +# ) +``` + + +### Directory metadata + + +```js JavaScript & TypeScript +import { Volume } from 'e2b' + +const volume = await Volume.create('my-volume') + +// Create a new directory +await volume.makeDir('/test_dir') + +// Get information about the directory +const info = await volume.getInfo('/test_dir') + +console.log(info) +// { +// name: 'test_dir', +// type: 'directory', +// path: '/test_dir', +// size: 0, +// mode: 0o755, +// uid: 0, +// gid: 0, +// atime: 2025-05-26T12:00:00.000Z, +// mtime: 2025-05-26T12:00:00.000Z, +// ctime: 2025-05-26T12:00:00.000Z, +// } +``` +```python Python +from e2b import Volume + +volume = Volume.create('my-volume') + +# Create a new directory +volume.make_dir('/test_dir') + +# Get information about the directory +info = volume.get_info('/test_dir') + +print(info) +# VolumeEntryStat( +# name='test_dir', +# type_='directory', +# path='/test_dir', +# 
size=0, +# mode=0o755, +# uid=0, +# gid=0, +# atime=datetime(2025, 5, 26, 12, 0, 0), +# mtime=datetime(2025, 5, 26, 12, 0, 0), +# ctime=datetime(2025, 5, 26, 12, 0, 0), +# ) +``` + + +### Checking if a path exists + +You can check whether a file or directory exists in a volume using the `exists()` method. + + +```js JavaScript & TypeScript +import { Volume } from 'e2b' + +const volume = await Volume.create('my-volume') + +const fileExists = await volume.exists('/test_file.txt') +console.log(fileExists) // true or false +``` +```python Python +from e2b import Volume + +volume = Volume.create('my-volume') + +file_exists = volume.exists('/test_file.txt') +print(file_exists) # True or False +``` + + +### Updating metadata + +You can update file or directory metadata such as user ID, group ID, and permissions mode using the `updateMetadata()` / `update_metadata()` method. + + +```js JavaScript & TypeScript +import { Volume } from 'e2b' + +const volume = await Volume.create('my-volume') + +await volume.writeFile('/test_file.txt', 'Hello, world!') + +const updated = await volume.updateMetadata('/test_file.txt', { uid: 1000, gid: 1000, mode: 0o600 }) + +console.log(updated) +// { +// name: 'test_file.txt', +// type: 'file', +// path: '/test_file.txt', +// size: 13, +// mode: 0o600, +// uid: 1000, +// gid: 1000, +// ... +// } +``` +```python Python +from e2b import Volume + +volume = Volume.create('my-volume') + +volume.write_file('/test_file.txt', 'Hello, world!') + +updated = volume.update_metadata('/test_file.txt', uid=1000, gid=1000, mode=0o600) + +print(updated) +# VolumeEntryStat( +# name='test_file.txt', +# type_='file', +# path='/test_file.txt', +# size=13, +# mode=0o600, +# uid=1000, +# gid=1000, +# ... +# ) +``` + + +## Upload data + +You can upload data to a volume using the `writeFile()` / `write_file()` method. 
+ +### Upload single file + + +```js JavaScript & TypeScript +import fs from 'fs' +import { Volume } from 'e2b' + +const volume = await Volume.create('my-volume') + +// Read file from local filesystem +const content = fs.readFileSync('/local/path') +// Upload file to volume +await volume.writeFile('/path/in/volume', content) +``` +```python Python +from e2b import Volume + +volume = Volume.create('my-volume') + +# Read file from local filesystem +with open('/local/path', 'rb') as file: + # Upload file to volume + volume.write_file('/path/in/volume', file) +``` + + +### Upload directory / multiple files + + +```js JavaScript & TypeScript +import fs from 'fs' +import path from 'path' +import { Volume } from 'e2b' + +const volume = await Volume.create('my-volume') + +const directoryPath = '/local/dir' +const files = fs.readdirSync(directoryPath) + +for (const file of files) { + const fullPath = path.join(directoryPath, file) + + // Skip directories + if (!fs.statSync(fullPath).isFile()) continue + + const content = fs.readFileSync(fullPath) + await volume.writeFile(`/upload/${file}`, content) +} +``` +```python Python +import os +from e2b import Volume + +volume = Volume.create('my-volume') + +directory_path = '/local/dir' + +for filename in os.listdir(directory_path): + file_path = os.path.join(directory_path, filename) + + # Skip directories + if not os.path.isfile(file_path): + continue + + with open(file_path, 'rb') as file: + volume.write_file(f'/upload/{filename}', file) +``` + + +## Download data + +You can download data from a volume using the `readFile()` / `read_file()` method. 
+ + +```js JavaScript & TypeScript +import fs from 'fs' +import { Volume } from 'e2b' + +const volume = await Volume.create('my-volume') + +// Read file from volume +const content = await volume.readFile('/path/in/volume') +// Write file to local filesystem +fs.writeFileSync('/local/path', content) +``` +```python Python +from e2b import Volume + +volume = Volume.create('my-volume') + +# Read file from volume +content = volume.read_file('/path/in/volume') +# Write file to local filesystem +with open('/local/path', 'w') as file: + file.write(content) +``` + From 8fe73257dec81bef6867fe3e148f243656045b6c Mon Sep 17 00:00:00 2001 From: Tomas Beran Date: Mon, 30 Mar 2026 11:52:54 +0200 Subject: [PATCH 14/22] docs: remove old Filesystem, Volumes, Commands, and MCP source files Old pages are now consolidated into sandbox section pages. Redirects preserve all old URLs. --- docs/commands.mdx | 23 - docs/commands/background.mdx | 45 - docs/commands/streaming.mdx | 33 - docs/filesystem.mdx | 13 - docs/filesystem/download.mdx | 70 - docs/filesystem/info.mdx | 116 -- docs/filesystem/read-write.mdx | 71 - docs/filesystem/upload.mdx | 160 -- docs/filesystem/watch.mdx | 98 - docs/mcp.mdx | 108 - docs/mcp/available-servers.mdx | 3547 -------------------------------- docs/mcp/custom-servers.mdx | 69 - docs/mcp/custom-templates.mdx | 112 - docs/mcp/examples.mdx | 28 - docs/mcp/quickstart.mdx | 222 -- docs/volumes.mdx | 73 - docs/volumes/download.mdx | 31 - docs/volumes/info.mdx | 188 -- docs/volumes/manage.mdx | 102 - docs/volumes/mount.mdx | 52 - docs/volumes/read-write.mdx | 135 -- docs/volumes/upload.mdx | 75 - 22 files changed, 5371 deletions(-) delete mode 100644 docs/commands.mdx delete mode 100644 docs/commands/background.mdx delete mode 100644 docs/commands/streaming.mdx delete mode 100644 docs/filesystem.mdx delete mode 100644 docs/filesystem/download.mdx delete mode 100644 docs/filesystem/info.mdx delete mode 100644 docs/filesystem/read-write.mdx delete mode 100644 
docs/filesystem/upload.mdx delete mode 100644 docs/filesystem/watch.mdx delete mode 100644 docs/mcp.mdx delete mode 100644 docs/mcp/available-servers.mdx delete mode 100644 docs/mcp/custom-servers.mdx delete mode 100644 docs/mcp/custom-templates.mdx delete mode 100644 docs/mcp/examples.mdx delete mode 100644 docs/mcp/quickstart.mdx delete mode 100644 docs/volumes.mdx delete mode 100644 docs/volumes/download.mdx delete mode 100644 docs/volumes/info.mdx delete mode 100644 docs/volumes/manage.mdx delete mode 100644 docs/volumes/mount.mdx delete mode 100644 docs/volumes/read-write.mdx delete mode 100644 docs/volumes/upload.mdx diff --git a/docs/commands.mdx b/docs/commands.mdx deleted file mode 100644 index ffd1c222..00000000 --- a/docs/commands.mdx +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: "Running commands in sandbox" -sidebarTitle: Overview ---- - -You can run terminal commands inside the sandbox using the `commands.run()` method. - - -```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' - -const sandbox = await Sandbox.create() -const result = await sandbox.commands.run('ls -l') -console.log(result) -``` -```python Python -from e2b_code_interpreter import Sandbox - -sandbox = Sandbox.create() -result = sandbox.commands.run('ls -l') -print(result) -``` - diff --git a/docs/commands/background.mdx b/docs/commands/background.mdx deleted file mode 100644 index 66e40b71..00000000 --- a/docs/commands/background.mdx +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: "Running commands in background" -sidebarTitle: Run commands in background ---- - -To run commands in background, pass the `background` option to the `commands.run()` method. This will return immediately and the command will continue to run in the sandbox. -You can then later kill the command using the `commands.kill()` method. 
- - -```js JavaScript & TypeScript highlight={7} -import { Sandbox } from '@e2b/code-interpreter' - -const sandbox = await Sandbox.create() - -// Start the command in the background -const command = await sandbox.commands.run('echo hello; sleep 10; echo world', { - background: true, - onStdout: (data) => { - console.log(data) - }, -}) - -// Kill the command -await command.kill() -``` -```python Python highlight={6} -from e2b_code_interpreter import Sandbox - -sandbox = Sandbox.create() - -# Start the command in the background -command = sandbox.commands.run('echo hello; sleep 10; echo world', background=True) - -# Get stdout and stderr from the command running in the background. -# You can run this code in a separate thread or use command.wait() to wait for the command to finish. -for stdout, stderr, _ in command: - if stdout: - print(stdout) - if stderr: - print(stderr) - -# Kill the command -command.kill() -``` - diff --git a/docs/commands/streaming.mdx b/docs/commands/streaming.mdx deleted file mode 100644 index 39bb2531..00000000 --- a/docs/commands/streaming.mdx +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: "Streaming command output" -sidebarTitle: Streaming ---- - -To stream command output as it is being executed, pass the `onStdout`, `onStderr` callbacks to the `commands.run()` method in JavaScript -or the `on_stdout`, `on_stderr` callbacks to the `commands.run()` method in Python. 
- - -```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' - -const sandbox = await Sandbox.create() - -const result = await sandbox.commands.run('echo hello; sleep 1; echo world', { - onStdout: (data) => { - console.log(data) - }, - onStderr: (data) => { - console.log(data) - }, -}) -console.log(result) -``` -```python Python -from e2b_code_interpreter import Sandbox - -sandbox = Sandbox.create() - -result = sandbox.commands.run('echo hello; sleep 1; echo world', on_stdout=lambda data: print(data), on_stderr=lambda data: print(data)) -print(result) -``` - diff --git a/docs/filesystem.mdx b/docs/filesystem.mdx deleted file mode 100644 index ad4e1810..00000000 --- a/docs/filesystem.mdx +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: "Filesystem" -sidebarTitle: Overview ---- - -Each E2B Sandbox has its own isolated filesystem. The [Hobby tier](https://e2b.dev/pricing) sandboxes come with 10 GB of the free disk space and [Pro tier](https://e2b.dev/pricing) sandboxes come with 20 GB. - -With E2B SDK you can: -- [Read and write files to the sandbox.](/docs/filesystem/read-write) -- [Watch directory for changes.](/docs/filesystem/watch) -- [Upload data to the sandbox.](/docs/filesystem/upload) -- [Download data from the sandbox.](/docs/filesystem/download) - diff --git a/docs/filesystem/download.mdx b/docs/filesystem/download.mdx deleted file mode 100644 index bdd178e8..00000000 --- a/docs/filesystem/download.mdx +++ /dev/null @@ -1,70 +0,0 @@ ---- -sidebarTitle: Download data -title: "Download data from sandbox" ---- - -You can download data from the sandbox using the `files.read()` method. 
- - -```js JavaScript & TypeScript -import fs from 'fs' -import { Sandbox } from '@e2b/code-interpreter' - -const sandbox = await Sandbox.create() - -// Read file from sandbox -const content = await sandbox.files.read('/path/in/sandbox') -// Write file to local filesystem -fs.writeFileSync('/local/path', content) -``` -```python Python -from e2b_code_interpreter import Sandbox - -sandbox = Sandbox.create() - -# Read file from sandbox -content = sandbox.files.read('/path/in/sandbox') -# Write file to local filesystem -with open('/local/path', 'w') as file: - file.write(content) -``` - - - -## Download with pre-signed URL - -Sometimes, you may want to let users from unauthorized environments, like a browser, download files from the sandbox. -For this use case, you can use pre-signed URLs to let users download files securely. - -All you need to do is create a sandbox with the `secure: true` option. A download URL will then be generated with a signature that allows only authorized users to access files. -You can optionally set an expiration time for the URL so that it will be valid only for a limited time. 
- -```js JavaScript & TypeScript -import fs from 'fs' -import { Sandbox } from '@e2b/code-interpreter' - -// Start a secured sandbox (all operations must be authorized by default) -const sandbox = await Sandbox.create(template, { secure: true }) - -// Create a pre-signed URL for file download with a 10 second expiration -const publicUrl = await sandbox.downloadUrl( - 'demo.txt', { - useSignatureExpiration: 10_000, // optional - }, -) - -// Download a file with a pre-signed URL (this can be used in any environment, such as a browser) -const res = await fetch(publicUrl) -const content = await res.text() -``` -```python Python -from e2b import Sandbox - -# Start a secured sandbox (all operations must be authorized by default) -sandbox = Sandbox.create(timeout=12_000, secure=True) - -# Create a pre-signed URL for file download with a 10 second expiration -# The user only has to visit the URL to download the file, this also works in a browser. -signed_url = sbx.download_url(path="demo.txt", user="user", use_signature_expiration=10_000) -``` - diff --git a/docs/filesystem/info.mdx b/docs/filesystem/info.mdx deleted file mode 100644 index 47d8858e..00000000 --- a/docs/filesystem/info.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: "Get information about a file or directory" -sidebarTitle: File & directory metadata ---- - -You can get information about a file or directory using the `files.getInfo()` / `files.get_info()` methods. Information such as file name, type, and path is returned. 
- -### Getting information about a file - - -```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' - -const sandbox = await Sandbox.create() - -// Create a new file -await sandbox.files.write('test_file.txt', 'Hello, world!') - -// Get information about the file -const info = await sandbox.files.getInfo('test_file.txt') - -console.log(info) -// { -// name: 'test_file.txt', -// type: 'file', -// path: '/home/user/test_file.txt', -// size: 13, -// mode: 0o644, -// permissions: '-rw-r--r--', -// owner: 'user', -// group: 'user', -// modifiedTime: '2025-05-26T12:00:00.000Z', -// symlinkTarget: null -// } -``` -```python Python -from e2b_code_interpreter import Sandbox - -sandbox = Sandbox.create() - -# Create a new file -sandbox.files.write('test_file', 'Hello, world!') - -# Get information about the file -info = sandbox.files.get_info('test_file') - -print(info) -# EntryInfo( -# name='test_file.txt', -# type=, -# path='/home/user/test_file.txt', -# size=13, -# mode=0o644, -# permissions='-rw-r--r--', -# owner='user', -# group='user', -# modified_time='2025-05-26T12:00:00.000Z', -# symlink_target=None -# ) -``` - - -### Getting information about a directory - - -```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' - -const sandbox = await Sandbox.create() - -// Create a new directory -await sandbox.files.makeDir('test_dir') - -// Get information about the directory -const info = await sandbox.files.getInfo('test_dir') - -console.log(info) -// { -// name: 'test_dir', -// type: 'dir', -// path: '/home/user/test_dir', -// size: 0, -// mode: 0o755, -// permissions: 'drwxr-xr-x', -// owner: 'user', -// group: 'user', -// modifiedTime: '2025-05-26T12:00:00.000Z', -// symlinkTarget: null -// } -``` -```python Python -from e2b_code_interpreter import Sandbox - -sandbox = Sandbox.create() - -# Create a new directory -sandbox.files.make_dir('test_dir') - -# Get information about the directory -info = 
sandbox.files.get_info('test_dir') - -print(info) -# EntryInfo( -# name='test_dir', -# type=, -# path='/home/user/test_dir', -# size=0, -# mode=0o755, -# permissions='drwxr-xr-x', -# owner='user', -# group='user', -# modified_time='2025-05-26T12:00:00.000Z', -# symlink_target=None -# ) -``` - diff --git a/docs/filesystem/read-write.mdx b/docs/filesystem/read-write.mdx deleted file mode 100644 index 187b1503..00000000 --- a/docs/filesystem/read-write.mdx +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: "Read & write files" -sidebarTitle: Read & write ---- - -## Reading files - -You can read files from the sandbox filesystem using the `files.read()` method. - - -```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' - -const sandbox = await Sandbox.create() -const fileContent = await sandbox.files.read('/path/to/file') -``` -```python Python -from e2b_code_interpreter import Sandbox - -sandbox = Sandbox.create() -file_content = sandbox.files.read('/path/to/file') -``` - - -## Writing single files - -You can write single files to the sandbox filesystem using the `files.write()` method. - - -```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' - -const sandbox = await Sandbox.create() - -await sandbox.files.write('/path/to/file', 'file content') -``` -```python Python -from e2b_code_interpreter import Sandbox - -sandbox = Sandbox.create() - -sandbox.files.write('/path/to/file', 'file content') -``` - - -## Writing multiple files - -You can also write multiple files to the sandbox. 
- - -```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' - -const sandbox = await Sandbox.create() - -await sandbox.files.write([ - { path: '/path/to/a', data: 'file content' }, - { path: '/another/path/to/b', data: 'file content' } -]) -``` -```python Python -from e2b_code_interpreter import Sandbox - -sandbox = Sandbox.create() - -sandbox.files.write_files([ - { "path": "/path/to/a", "data": "file content" }, - { "path": "another/path/to/b", "data": "file content" } -]) -``` - \ No newline at end of file diff --git a/docs/filesystem/upload.mdx b/docs/filesystem/upload.mdx deleted file mode 100644 index befeb989..00000000 --- a/docs/filesystem/upload.mdx +++ /dev/null @@ -1,160 +0,0 @@ ---- -title: "Upload data to sandbox" -sidebarTitle: Upload data ---- - -You can upload data to the sandbox using the `files.write()` method. - -## Upload single file - - -```js JavaScript & TypeScript -import fs from 'fs' -import { Sandbox } from '@e2b/code-interpreter' - -const sandbox = await Sandbox.create() - -// Read file from local filesystem -const content = fs.readFileSync('/local/path') -// Upload file to sandbox -await sandbox.files.write('/path/in/sandbox', content) -``` -```python Python -from e2b_code_interpreter import Sandbox - -sandbox = Sandbox.create() - -# Read file from local filesystem -with open("path/to/local/file", "rb") as file: - # Upload file to sandbox - sandbox.files.write("/path/in/sandbox", file) -``` - - -## Upload with pre-signed URL - -Sometimes, you may want to let users from unauthorized environments, like a browser, upload files to the sandbox. -For this use case, you can use pre-signed URLs to let users upload files securely. - -All you need to do is create a sandbox with the `secure: true` option. An upload URL will then be generated with a signature that allows only authorized users to upload files. -You can optionally set an expiration time for the URL so that it will be valid only for a limited time. 
- -```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' - -// Start a secured sandbox (all operations must be authorized by default) -const sandbox = await Sandbox.create(template, { secure: true }) - -// Create a pre-signed URL for file upload with a 10 second expiration -const publicUploadUrl = await sandbox.uploadUrl( - 'demo.txt', { - useSignatureExpiration: 10_000, // optional - }, -) - -// Upload a file with a pre-signed URL (this can be used in any environment, such as a browser) -const form = new FormData() -form.append('file', 'file content') - -await fetch(publicUploadUrl, { method: 'POST', body: form }) - -// File is now available in the sandbox and you can read it -const content = sandbox.files.read('/path/in/sandbox') -``` -```python Python -from e2b import Sandbox -import requests - -# Start a secured sandbox (all operations must be authorized by default) -sandbox = Sandbox.create(timeout=12_000, secure=True) - -# Create a pre-signed URL for file upload with a 10 second expiration -signed_url = sandbox.upload_url(path="demo.txt", user="user", use_signature_expiration=10_000) - -form_data = {"file":"file content"} -requests.post(signed_url, data=form_data) - -# File is now available in the sandbox and you can read it -content = sandbox.files.read('/path/in/sandbox') -``` - - -## Upload directory / multiple files - - -```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' - -const sandbox = await Sandbox.create() - -// Read all files in the directory and store their paths and contents in an array -const readDirectoryFiles = (directoryPath) => { - // Read all files in the local directory - const files = fs.readdirSync(directoryPath); - - // Map files to objects with path and data - const filesArray = files - .filter(file => { - const fullPath = path.join(directoryPath, file); - // Skip if it's a directory - return fs.statSync(fullPath).isFile(); - }) - .map(file => { - const filePath = 
path.join(directoryPath, file); - - // Read the content of each file - return { - path: filePath, - data: fs.readFileSync(filePath, 'utf8') - }; - }); - - return filesArray; -}; - -// Usage example -const files = readDirectoryFiles('/local/dir'); -console.log(files); -// [ -// { path: '/local/dir/file1.txt', data: 'File 1 contents...' }, -// { path: '/local/dir/file2.txt', data: 'File 2 contents...' }, -// ... -// ] - -await sandbox.files.write(files) -``` -```python Python -import os -from e2b_code_interpreter import Sandbox - -sandbox = Sandbox.create() - -def read_directory_files(directory_path): - files = [] - - # Iterate through all files in the directory - for filename in os.listdir(directory_path): - file_path = os.path.join(directory_path, filename) - - # Skip if it's a directory - if os.path.isfile(file_path): - # Read file contents in binary mode - with open(file_path, "rb") as file: - files.append({ - 'path': file_path, - 'data': file.read() - }) - - return files - -files = read_directory_files("/local/dir") -print(files) -# [ -# {"path": "/local/dir/file1.txt", "data": "File 1 contents..." }, -# { "path": "/local/dir/file2.txt", "data": "File 2 contents..." }, -# ... -# ] - -sandbox.files.write_files(files) -``` - \ No newline at end of file diff --git a/docs/filesystem/watch.mdx b/docs/filesystem/watch.mdx deleted file mode 100644 index 55fe18f4..00000000 --- a/docs/filesystem/watch.mdx +++ /dev/null @@ -1,98 +0,0 @@ ---- -title: "Watch sandbox directory for changes" -sidebarTitle: Watch directory for changes ---- - -You can watch a directory for changes using the `files.watchDir()` method in JavaScript and `files.watch_dir()` method in Python. - - -Since events are tracked asynchronously, their delivery may be delayed. -It's recommended not to collect or close watcher immediately after making a change. 
- - - -```js JavaScript & TypeScript highlight={7-12} -import { Sandbox, FilesystemEventType } from '@e2b/code-interpreter' - -const sandbox = await Sandbox.create() -const dirname = '/home/user' - -// Start watching directory for changes -const handle = await sandbox.files.watchDir(dirname, async (event) => { - console.log(event) - if (event.type === FilesystemEventType.WRITE) { - console.log(`wrote to file ${event.name}`) - } -}) - -// Trigger file write event -await sandbox.files.write(`${dirname}/my-file`, 'hello') -``` -```python Python highlight={7,12-16} -from e2b_code_interpreter import Sandbox, FilesystemEventType - -sandbox = Sandbox.create() -dirname = '/home/user' - -# Watch directory for changes -handle = sandbox.files.watch_dir(dirname) -# Trigger file write event -sandbox.files.write(f"{dirname}/my-file", "hello") - -# Retrieve the latest new events since the last `get_new_events()` call -events = handle.get_new_events() -for event in events: - print(event) - if event.type == FilesystemEventType.WRITE: - print(f"wrote to file {event.name}") -``` - - - -## Recursive watching - -You can enable recursive watching using the parameter `recursive`. - - -When rapidly creating new folders (e.g., deeply nested path of folders), events other than `CREATE` might not be emitted. To avoid this behavior, create the required folder structure in advance. 
- - - -```js JavaScript & TypeScript highlight={13,17} -import { Sandbox, FilesystemEventType } from '@e2b/code-interpreter' - -const sandbox = await Sandbox.create() -const dirname = '/home/user' - -// Start watching directory for changes -const handle = await sandbox.files.watchDir(dirname, async (event) => { - console.log(event) - if (event.type === FilesystemEventType.WRITE) { - console.log(`wrote to file ${event.name}`) - } -}, { - recursive: true -}) - -// Trigger file write event -await sandbox.files.write(`${dirname}/my-folder/my-file`, 'hello') -``` -```python Python highlight={7,9} -from e2b_code_interpreter import Sandbox, FilesystemEventType - -sandbox = Sandbox.create() -dirname = '/home/user' - -# Watch directory for changes -handle = sandbox.files.watch_dir(dirname, recursive=True) -# Trigger file write event -sandbox.files.write(f"{dirname}/my-folder/my-file", "hello") - -# Retrieve the latest new events since the last `get_new_events()` call -events = handle.get_new_events() -for event in events: - print(event) - if event.type == FilesystemEventType.WRITE: - print(f"wrote to file {event.name}") -``` - diff --git a/docs/mcp.mdx b/docs/mcp.mdx deleted file mode 100644 index 8c0b969c..00000000 --- a/docs/mcp.mdx +++ /dev/null @@ -1,108 +0,0 @@ ---- -title: Overview -description: Connect to 200+ tools through the Model Context Protocol ---- - -E2B provides a batteries-included MCP gateway that runs inside sandboxes, giving you type-safe access to 200+ MCP tools from the [Docker MCP Catalog](https://hub.docker.com/mcp) or [custom MCPs](/docs/mcp/custom-servers) through a unified interface. This integration gives developers instant access to tools like [Browserbase](https://www.browserbase.com/), [Exa](https://exa.ai/), [Notion](https://www.notion.so/), [Stripe](https://stripe.com/), or [GitHub](https://github.com/). 
- -The [Model Context Protocol (MCP)](https://modelcontextprotocol.io/docs/getting-started/intro) is an open standard for connecting AI models to external tools and data sources. E2B sandboxes provide an ideal environment for running MCP tools, giving AI full access to an internet-connected Linux machine where it can safely install packages, write files, run terminal commands, and AI-generated code. - - - - - - - -```typescript TypeScript -import Sandbox from 'e2b' - -const sbx = await Sandbox.create({ - mcp: { - browserbase: { - apiKey: process.env.BROWSERBASE_API_KEY!, - geminiApiKey: process.env.GEMINI_API_KEY!, - projectId: process.env.BROWSERBASE_PROJECT_ID!, - }, - exa: { - apiKey: process.env.EXA_API_KEY!, - }, - airtable: { - airtableApiKey: process.env.AIRTABLE_API_KEY!, - }, - }, -}); - -const mcpUrl = sbx.getMcpUrl(); -const mcpToken = await sbx.getMcpToken(); - -// You can now connect the gateway to any MCP client, for example claude: -// This also works for your local claude! -await sbx.commands.run(`claude mcp add --transport http e2b-mcp-gateway ${mcpUrl} --header "Authorization: Bearer ${mcpToken}"`, { timeoutMs: 0, onStdout: console.log, onStderr: console.log }); - -await sbx.commands.run( - `echo 'Use browserbase and exa to research open positions at e2b.dev. Collect your findings in Airtable.' 
| claude -p --dangerously-skip-permissions`, - { timeoutMs: 0, onStdout: console.log, onStderr: console.log } -) - -``` - -```python Python -import asyncio -from e2b import AsyncSandbox -import os -import dotenv - -dotenv.load_dotenv() - -async def main(): - sbx = await AsyncSandbox.create(mcp={ - "browserbase": { - "apiKey": os.getenv("BROWSERBASE_API_KEY"), - "geminiApiKey": os.getenv("GEMINI_API_KEY"), - "projectId": os.getenv("BROWSERBASE_PROJECT_ID"), - }, - "exa": { - "apiKey": os.getenv("EXA_API_KEY"), - }, - "airtable": { - "airtableApiKey": os.getenv("AIRTABLE_API_KEY"), - }, - }) - - mcp_url = sbx.get_mcp_url() - mcp_token = await sbx.get_mcp_token() - - # You can now connect the gateway to any MCP client, for example claude: - # This also works for your local claude! - await sbx.commands.run(f'claude mcp add --transport http e2b-mcp-gateway {mcp_url} --header "Authorization: Bearer {mcp_token}"', timeout=0, on_stdout=print, on_stderr=print) - - await sbx.commands.run( - "echo 'Use browserbase and exa to research open positions at e2b.dev. Collect your findings in Airtable.' | claude -p --dangerously-skip-permissions", - timeout=0, on_stdout=print, on_stderr=print - ) - -if __name__ == "__main__": - asyncio.run(main()) -``` - - - -## Documentation - - - - Get started with MCP - - - Browse 200+ pre-built MCP servers - - - Prepull MCP servers for faster runtime - - - Use custom MCP servers from GitHub - - - See examples - - diff --git a/docs/mcp/available-servers.mdx b/docs/mcp/available-servers.mdx deleted file mode 100644 index 7cf25ad5..00000000 --- a/docs/mcp/available-servers.mdx +++ /dev/null @@ -1,3547 +0,0 @@ ---- -title: Available servers -description: Browse available MCP servers ---- - -E2B provides access to 200+ MCP servers from [Docker's catalog](https://hub.docker.com/mcp). You can also run [custom MCP servers](/docs/mcp/custom-servers) inside the sandbox. 
- - -## Airtable - -Provides AI assistants with direct access to Airtable bases, allowing them to read schemas, query records, and interact with your Airtable data. Supports listing bases, retrieving table structures, and searching through records to help automate workflows and answer questions about your organized data. - -[View on Docker Hub](https://hub.docker.com/mcp/server/airtable-mcp-server/overview) - - - - - - - - - - - - -## Azure Kubernetes Service (AKS) - -Azure Kubernetes Service (AKS) official MCP server. - -[View on Docker Hub](https://hub.docker.com/mcp/server/aks/overview) - - - - - Access level for the MCP server, One of [ readonly, readwrite, admin ] - - - - Comma-separated list of additional tools, One of [ helm, cilium ] - - - - Comma-separated list of namespaces to allow access to. If not specified, all namespaces are allowed. - - - - Path to the Azure configuration directory (e.g. /home/azureuser/.azure). Used for Azure CLI authentication, you should be logged in (e.g. run `az login`) on the host before starting the MCP server. - - - - Username or UID of the container user (format ``[:``] e.g. 10000), ensuring correct permissions to access the Azure and kubeconfig files. Leave empty to use default user in the container. - - - - Path to the kubeconfig file for the AKS cluster (e.g. /home/azureuser/.kube/config). Used to connect to the AKS cluster. - - - - - -## Apify - -Apify is the world's largest marketplace of tools for web scraping, data extraction, and web automation. You can extract structured data from social media, e-commerce, search engines, maps, travel sites, or any other website. - -[View on Docker Hub](https://hub.docker.com/mcp/server/apify-mcp-server/overview) - - - - - - - - Comma-separated list of tools to enable. Can be either a tool category, a specific tool, or an Apify Actor. For example: "actors,docs,apify/rag-web-browser". For more details visit https://mcp.apify.com. 
- - - - - -## Api-gateway - -A universal MCP (Model Context Protocol) server to integrate any API with Claude Desktop using only Docker configurations. - -[View on Docker Hub](https://hub.docker.com/mcp/server/mcp-api-gateway/overview) - - - - - - - - - - - - - - - -## ArXiv -The ArXiv MCP Server provides a comprehensive bridge between AI assistants and arXiv's research repository through the Model Context Protocol (MCP). - -**Features:** -- Search arXiv papers with advanced filtering -- Download and store papers locally as markdown -- Read and analyze paper content -- Deep research analysis prompts -- Local paper management and storage -- Enhanced tool descriptions optimized for local AI models -- Docker MCP Gateway compatible with detailed context - -[View on Docker Hub](https://hub.docker.com/mcp/server/arxiv-mcp-server/overview) - - - - - Directory path where downloaded papers will be stored - - - - -The ArXiv MCP Server provides a comprehensive bridge between AI assistants and arXiv's research repository through the Model Context Protocol (MCP). Features: • Search arXiv papers with advanced filtering • Download and store papers locally as markdown • Read and analyze paper content • Deep research analysis prompts • Local paper management and storage • Enhanced tool descriptions optimized for local AI models • Docker MCP Gateway compatible with detailed context Perfect for researchers, academics, and AI assistants conducting literature reviews and research analysis. **Recent Update**: Enhanced tool descriptions specifically designed to resolve local AI model confusion and improve Docker MCP Gateway compatibility. - -## ast-grep - -ast-grep is a fast and polyglot tool for code structural search, lint, rewriting at large scale. - -[View on Docker Hub](https://hub.docker.com/mcp/server/ast-grep/overview) - - - - - - - - - -## Astra DB - -An MCP server for Astra DB workloads. 
- -[View on Docker Hub](https://hub.docker.com/mcp/server/astra-db/overview) - - - - - - - - - - - - -## Astro Docs - -Access the latest Astro web framework documentation, guides, and API references. - -[View on Docker Hub](https://hub.docker.com/mcp/server/astro-docs/overview) - - - - -## Atlan - -MCP server for interacting with Atlan services including asset search, updates, and lineage traversal for comprehensive data governance and discovery. - -[View on Docker Hub](https://hub.docker.com/mcp/server/atlan/overview) - - - - - - - - - - - - -## Atlas Docs - -Provide LLMs hosted, clean markdown documentation of libraries and frameworks. - -[View on Docker Hub](https://hub.docker.com/mcp/server/atlas-docs/overview) - - - - - - - - - -## Atlassian - -Tools for Atlassian products (Confluence and Jira). This integration supports both Atlassian Cloud and Jira Server/Data Center deployments. - -[View on Docker Hub](https://hub.docker.com/mcp/server/atlassian/overview) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -## Audiense Insights - -Audiense Insights MCP Server is a server based on the Model Context Protocol (MCP) that allows Claude and other MCP-compatible clients to interact with your Audiense Insights account. - -[View on Docker Hub](https://hub.docker.com/mcp/server/audiense-insights/overview) - - - - - - - - - - - - - - - -## AWS CDK - -AWS Cloud Development Kit (CDK) best practices, infrastructure as code patterns, and security compliance with CDK Nag. - -[View on Docker Hub](https://hub.docker.com/mcp/server/aws-cdk-mcp-server/overview) - - - - -## AWS Core - -Starting point for using the awslabs MCP servers. - -[View on Docker Hub](https://hub.docker.com/mcp/server/aws-core-mcp-server/overview) - - - - -## AWS Diagram - -Seamlessly create diagrams using the Python diagrams package DSL. This server allows you to generate AWS diagrams, sequence diagrams, flow diagrams, and class diagrams using Python code. 
- -[View on Docker Hub](https://hub.docker.com/mcp/server/aws-diagram/overview) - - - - -## AWS Documentation - -Tools to access AWS documentation, search for content, and get recommendations. - -[View on Docker Hub](https://hub.docker.com/mcp/server/aws-documentation/overview) - - - - -## AWS KB Retrieval (Archived) - -An MCP server implementation for retrieving information from the AWS Knowledge Base using the Bedrock Agent Runtime. - -[View on Docker Hub](https://hub.docker.com/mcp/server/aws-kb-retrieval-server/overview) - - - - - - - - - - - - -## AWS Terraform - -Terraform on AWS best practices, infrastructure as code patterns, and security compliance with Checkov. - -[View on Docker Hub](https://hub.docker.com/mcp/server/aws-terraform/overview) - - - - -## Azure - -The Azure MCP Server, bringing the power of Azure to your agents. - -[View on Docker Hub](https://hub.docker.com/mcp/server/azure/overview) - - - - -## Beagle security - -Connects with the Beagle Security backend using a user token to manage applications, run automated security tests, track vulnerabilities across environments, and gain intelligence from Application and API vulnerability data. - -[View on Docker Hub](https://hub.docker.com/mcp/server/beagle-security/overview) - - - - - - - - - -## Bitrefill - -A Model Context Protocol Server connector for Bitrefill public API, to enable AI agents to search and shop on Bitrefill. - -[View on Docker Hub](https://hub.docker.com/mcp/server/bitrefill/overview) - - - - - - - - - - - - -## Box - -An MCP server capable of interacting with the Box API. - -[View on Docker Hub](https://hub.docker.com/mcp/server/box/overview) - - - - - - - - - - - - -## Brave Search - -Search the Web for pages, images, news, videos, and more using the Brave Search API. 
- -[View on Docker Hub](https://hub.docker.com/mcp/server/brave/overview) - - - - - - - - - -## Browserbase - -Allow LLMs to control a browser with Browserbase and Stagehand for AI-powered web automation, intelligent data extraction, and screenshot capture. - -[View on Docker Hub](https://hub.docker.com/mcp/server/browserbase/overview) - - - - - - - - - - - - - - - -## Buildkite - -Buildkite MCP lets agents interact with Buildkite Builds, Jobs, Logs, Packages and Test Suites. - -[View on Docker Hub](https://hub.docker.com/mcp/server/buildkite/overview) - - - - - - - - - -## Camunda BPM process engine - -Tools to interact with the Camunda 7 Community Edition Engine using the Model Context Protocol (MCP). Whether you're automating workflows, querying process instances, or integrating with external systems, Camunda MCP Server is your agentic solution for seamless interaction with Camunda. - -[View on Docker Hub](https://hub.docker.com/mcp/server/camunda/overview) - - - - - - - - - -## CData Connect Cloud - -This fully functional MCP Server allows you to connect to any data source in Connect Cloud from Claude Desktop. - -[View on Docker Hub](https://hub.docker.com/mcp/server/cdata-connectcloud/overview) - - - - - - - - - - - - -## CharmHealth - -An MCP server for CharmHealth EHR that allows LLMs and MCP clients to interact with patient records, encounters, and practice information. - -[View on Docker Hub](https://hub.docker.com/mcp/server/charmhealth-mcp-server/overview) - - - - - - - - - - - - - - - - - - - - - - - - - - - -## Chroma - -A Model Context Protocol (MCP) server implementation that provides database capabilities for Chroma. - -[View on Docker Hub](https://hub.docker.com/mcp/server/chroma/overview) - - - - - - - - - -## CircleCI - -A specialized server implementation for the Model Context Protocol (MCP) designed to integrate with CircleCI's development workflow. 
This project serves as a bridge between CircleCI's infrastructure and the Model Context Protocol, enabling enhanced AI-powered development experiences. - -[View on Docker Hub](https://hub.docker.com/mcp/server/circleci/overview) - - - - - - - - - - - - -## Official ClickHouse - -Official ClickHouse MCP Server. - -[View on Docker Hub](https://hub.docker.com/mcp/server/clickhouse/overview) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -## Close - -Streamline sales processes with integrated calling, email, SMS, and automated workflows for small and scaling businesses. - -[View on Docker Hub](https://hub.docker.com/mcp/server/close/overview) - - - - - - - - - -## Cloudflare Docs - -Access the latest documentation on Cloudflare products such as Workers, Pages, R2, D1, KV. - -[View on Docker Hub](https://hub.docker.com/mcp/server/cloudflare-docs/overview) - - - - -## Cloud Run MCP - -MCP server to deploy apps to Cloud Run. - -[View on Docker Hub](https://hub.docker.com/mcp/server/cloud-run-mcp/overview) - - - - - path to application-default credentials (eg $HOME/.config/gcloud/application_default_credentials.json ) - - - - - -## CockroachDB - -Enable AI agents to manage, monitor, and query CockroachDB using natural language. Perform complex database operations, cluster management, and query execution seamlessly through AI-driven workflows. Integrate effortlessly with MCP clients for scalable and high-performance data operations. - -[View on Docker Hub](https://hub.docker.com/mcp/server/cockroachdb/overview) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -## Python Interpreter - -A Python-based execution tool that mimics a Jupyter notebook environment. It accepts code snippets, executes them, and maintains state across sessions — preserving variables, imports, and past results. Ideal for iterative development, debugging, or code execution. 
- -[View on Docker Hub](https://hub.docker.com/mcp/server/mcp-code-interpreter/overview) - - - - -## Context7 - -Context7 MCP Server -- Up-to-date code documentation for LLMs and AI code editors. - -[View on Docker Hub](https://hub.docker.com/mcp/server/context7/overview) - - - - -## Couchbase - -Couchbase is a distributed document database with a powerful search engine and in-built operational and analytical capabilities. - -[View on Docker Hub](https://hub.docker.com/mcp/server/couchbase/overview) - - - - - Bucket in the Couchbase cluster to use for the MCP server. - - - - Connection string for the Couchbase cluster. - - - - Setting to "true" (default) enables read-only query mode while running SQL++ queries. - - - - - - - Username for the Couchbase cluster with access to the bucket. - - - - - -## The official for Cylera. - -Brings context about device inventory, threats, risks and utilization powered by the Cylera Partner API into an LLM. - -[View on Docker Hub](https://hub.docker.com/mcp/server/cylera-mcp-server/overview) - - - - - - - - - - - - - - - -## Shodan - -A Model Context Protocol server that provides access to Shodan API functionality. - -[View on Docker Hub](https://hub.docker.com/mcp/server/cyreslab-ai-shodan/overview) - - - - - - - - - -## Dappier - -Enable fast, free real-time web search and access premium data from trusted media brands—news, financial markets, sports, entertainment, weather, and more. Build powerful AI agents with Dappier. - -[View on Docker Hub](https://hub.docker.com/mcp/server/dappier/overview) - - - - - - - - - -## Dappier Remote - -Enable fast, free real-time web search and access premium data from trusted media brands—news, financial markets, sports, entertainment, weather, and more. Build powerful AI agents with Dappier. - -[View on Docker Hub](https://hub.docker.com/mcp/server/dappier-remote/overview) - - - - - - - - - -## Dart AI - -Dart AI Model Context Protocol (MCP) server. 
- -[View on Docker Hub](https://hub.docker.com/mcp/server/dart/overview) - - - - - - - - - - - - -## MCP Database Server - -Comprehensive database server supporting PostgreSQL, MySQL, and SQLite with natural language SQL query capabilities. Enables AI agents to interact with databases through both direct SQL and natural language queries. - -[View on Docker Hub](https://hub.docker.com/mcp/server/database-server/overview) - - - - - Connection string for your database. Examples: SQLite: sqlite+aiosqlite:///data/mydb.db, PostgreSQL: postgresql+asyncpg://user:password@localhost:5432/mydb, MySQL: mysql+aiomysql://user:password@localhost:3306/mydb - - - - - -## Databutton - -Databutton MCP Server. - -[View on Docker Hub](https://hub.docker.com/mcp/server/databutton/overview) - - - - -## DeepWiki - -Tools for fetching and asking questions about GitHub repositories. - -[View on Docker Hub](https://hub.docker.com/mcp/server/deepwiki/overview) - - - - -## Descope - -The Descope Model Context Protocol (MCP) server provides an interface to interact with Descope's Management APIs, enabling the search and retrieval of project-related information. - -[View on Docker Hub](https://hub.docker.com/mcp/server/descope/overview) - - - - - - - - - - - - -## Desktop Commander - -Search, update, manage files and run terminal commands with AI. - -[View on Docker Hub](https://hub.docker.com/mcp/server/desktop-commander/overview) - - - - - List of directories that Desktop Commander can access - - - - - -## DevHub CMS - -DevHub CMS LLM integration through the Model Context Protocol. - -[View on Docker Hub](https://hub.docker.com/mcp/server/devhub-cms/overview) - - - - - - - - - - - - - - - -## Discord - -Interact with the Discord platform. - -[View on Docker Hub](https://hub.docker.com/mcp/server/mcp-discord/overview) - - - - - - - - - -## Docker Hub - -Docker Hub official MCP server. 
- -[View on Docker Hub](https://hub.docker.com/mcp/server/dockerhub/overview) - - - - - - - - - - - - -## Dodo Payments - -Tools for cross-border payments, taxes, and compliance. - -[View on Docker Hub](https://hub.docker.com/mcp/server/dodo-payments/overview) - - - - - - - - - -## DreamFactory - -DreamFactory is a REST API generation platform with support for hundreds of data sources, including Microsoft SQL Server, MySQL, PostgreSQL, and MongoDB. The DreamFactory MCP Server makes it easy for users to securely interact with their data sources via an MCP client. - -[View on Docker Hub](https://hub.docker.com/mcp/server/dreamfactory-mcp/overview) - - - - - - - - - - - - -## DuckDuckGo - -A Model Context Protocol (MCP) server that provides web search capabilities through DuckDuckGo, with additional features for content fetching and parsing. - -[View on Docker Hub](https://hub.docker.com/mcp/server/duckduckgo/overview) - - - - -## Dynatrace - -This MCP Server allows interaction with the Dynatrace observability platform, brining real-time observability data directly into your development workflow. - -[View on Docker Hub](https://hub.docker.com/mcp/server/dynatrace-mcp-server/overview) - - - - - - - - - - - - - - - -## E2B - -Giving Claude ability to run code with E2B via MCP (Model Context Protocol). - -[View on Docker Hub](https://hub.docker.com/mcp/server/e2b/overview) - - - - - - - - - -## EduBase - -The EduBase MCP server enables Claude and other LLMs to interact with EduBase's comprehensive e-learning platform through the Model Context Protocol (MCP). - -[View on Docker Hub](https://hub.docker.com/mcp/server/edubase/overview) - - - - - - - - - - - - - - - -## Effect MCP - -Tools and resources for writing Effect code in Typescript. - -[View on Docker Hub](https://hub.docker.com/mcp/server/effect-mcp/overview) - - - - -## Elasticsearch - -Interact with your Elasticsearch indices through natural language conversations. 
- -[View on Docker Hub](https://hub.docker.com/mcp/server/elasticsearch/overview) - - - - - - - - - - - - -## Elevenlabs MCP - -Official ElevenLabs Model Context Protocol (MCP) server that enables interaction with powerful Text to Speech and audio processing APIs. - -[View on Docker Hub](https://hub.docker.com/mcp/server/elevenlabs/overview) - - - - - - - - - - - - -## EverArt (Archived) - -Image generation server using EverArt's API. - -[View on Docker Hub](https://hub.docker.com/mcp/server/everart/overview) - - - - - - - - - -## Exa - -Exa MCP for web search and web crawling!. - -[View on Docker Hub](https://hub.docker.com/mcp/server/exa/overview) - - - - - - - - - -## Explorium B2B Data - -Discover companies, contacts, and business insights—powered by dozens of trusted external data sources. - -[View on Docker Hub](https://hub.docker.com/mcp/server/explorium/overview) - - - - - - - - - -## Fetch (Reference) - -Fetches a URL from the internet and extracts its contents as markdown. - -[View on Docker Hub](https://hub.docker.com/mcp/server/fetch/overview) - - - - -## Fibery - -Interact with your Fibery workspace. - -[View on Docker Hub](https://hub.docker.com/mcp/server/fibery/overview) - - - - - - - - - - - - -## Filesystem (Reference) - -Local filesystem access with configurable allowed paths. - -[View on Docker Hub](https://hub.docker.com/mcp/server/filesystem/overview) - - - - - - - - - -## Find-A-Domain - -Tools for finding domain names. - -[View on Docker Hub](https://hub.docker.com/mcp/server/find-a-domain/overview) - - - - -## Firecrawl - -🔥 Official Firecrawl MCP Server - Adds powerful web scraping and search to Cursor, Claude and any other LLM clients. - -[View on Docker Hub](https://hub.docker.com/mcp/server/firecrawl/overview) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -## Firewalla - -Real-time network monitoring, security analysis, and firewall management through 28 specialized tools. 
Access security alerts, network flows, device status, and firewall rules directly from your Firewalla device. - -[View on Docker Hub](https://hub.docker.com/mcp/server/firewalla-mcp-server/overview) - - - - - Your Firewalla Box Global ID - - - - - - - Your Firewalla MSP domain (e.g., yourdomain.firewalla.net) - - - - - -## FlexPrice - -Official flexprice MCP Server. - -[View on Docker Hub](https://hub.docker.com/mcp/server/flexprice/overview) - - - - - - - - - - - - -## Git (Reference) - -Git repository interaction and automation. - -[View on Docker Hub](https://hub.docker.com/mcp/server/git/overview) - - - - - - - - - -## GitHub (Archived) - -Tools for interacting with the GitHub API, enabling file operations, repository management, search functionality, and more. - -[View on Docker Hub](https://hub.docker.com/mcp/server/github/overview) - - - - - - - - - -## GitHub Chat - -A Model Context Protocol (MCP) for analyzing and querying GitHub repositories using the GitHub Chat API. - -[View on Docker Hub](https://hub.docker.com/mcp/server/github-chat/overview) - - - - - - - - - -## GitHub Official - -Official GitHub MCP Server, by GitHub. Provides seamless integration with GitHub APIs, enabling advanced automation and interaction capabilities for developers and tools. - -[View on Docker Hub](https://hub.docker.com/mcp/server/github-official/overview) - - - - - - - - - -## GitLab (Archived) - -MCP Server for the GitLab API, enabling project management, file operations, and more. - -[View on Docker Hub](https://hub.docker.com/mcp/server/gitlab/overview) - - - - - - - - api url - optional for self-hosted instances - - - - - -## GitMCP - -Tools for interacting with Git repositories. - -[View on Docker Hub](https://hub.docker.com/mcp/server/gitmcp/overview) - - - - -## glif.app - -Easily run glif.app AI workflows inside your LLM: image generators, memes, selfies, and more. Glif supports all major multimedia AI models inside one app. 
- -[View on Docker Hub](https://hub.docker.com/mcp/server/glif/overview) - - - - - - - - - - - - - - - -## Gmail - -A Model Context Protocol server for Gmail operations using IMAP/SMTP with app password authentication. Supports listing messages, searching emails, and sending messages. To create your app password, visit your Google Account settings under Security > App Passwords. Or visit the link https://myaccount.google.com/apppasswords. - -[View on Docker Hub](https://hub.docker.com/mcp/server/gmail-mcp/overview) - - - - - Your Gmail email address - - - - - - - - -## Google Maps (Archived) - -Tools for interacting with the Google Maps API. - -[View on Docker Hub](https://hub.docker.com/mcp/server/google-maps/overview) - - - - - - - - - -## Google Maps Comprehensive MCP - -Complete Google Maps integration with 8 tools including geocoding, places search, directions, elevation data, and more using Google's latest APIs. - -[View on Docker Hub](https://hub.docker.com/mcp/server/google-maps-comprehensive/overview) - - - - - - - - - -## Grafana - -MCP server for Grafana. - -[View on Docker Hub](https://hub.docker.com/mcp/server/grafana/overview) - - - - - - - - - - - - -## Gyazo - -Official Model Context Protocol server for Gyazo. - -[View on Docker Hub](https://hub.docker.com/mcp/server/gyazo/overview) - - - - - - - - - -## Hackernews mcp - -A Model Context Protocol (MCP) server that provides access to Hacker News stories, comments, and user data, with support for search and content retrieval. - -[View on Docker Hub](https://hub.docker.com/mcp/server/mcp-hackernews/overview) - - - - -## Hackle - -Model Context Protocol server for Hackle. - -[View on Docker Hub](https://hub.docker.com/mcp/server/hackle/overview) - - - - - - - - - -## Handwriting OCR - -Model Context Protocol (MCP) Server for Handwriting OCR. 
- -[View on Docker Hub](https://hub.docker.com/mcp/server/handwriting-ocr/overview) - - - - - - - - - -## Humanitarian Data Exchange - -HDX MCP Server provides access to humanitarian data through the Humanitarian Data Exchange (HDX) API - https://data.humdata.org/hapi. This server offers 33 specialized tools for retrieving humanitarian information including affected populations (refugees, IDPs, returnees), baseline demographics, food security indicators, conflict data, funding information, and operational presence across hundreds of countries and territories. See repository for instructions on getting a free HDX_APP_INDENTIFIER for access. - -[View on Docker Hub](https://hub.docker.com/mcp/server/hdx/overview) - - - - - - - - - -## Heroku - -Heroku Platform MCP Server using the Heroku CLI. - -[View on Docker Hub](https://hub.docker.com/mcp/server/heroku/overview) - - - - - - - - - -## Hostinger API - -Interact with Hostinger services over the Hostinger API. - -[View on Docker Hub](https://hub.docker.com/mcp/server/hostinger-mcp-server/overview) - - - - - - - - - -## Hoverfly - -A Model Context Protocol (MCP) server that exposes Hoverfly as a programmable tool for AI assistants like Cursor, Claude, GitHub Copilot, and others supporting MCP. It enables dynamic mocking of third-party APIs to unblock development, automate testing, and simulate unavailable services during integration. - -[View on Docker Hub](https://hub.docker.com/mcp/server/hoverfly-mcp-server/overview) - - - - - - - - - -## HubSpot - -Unite marketing, sales, and customer service with AI-powered automation, lead management, and comprehensive analytics. - -[View on Docker Hub](https://hub.docker.com/mcp/server/hubspot/overview) - - - - - - - - - -## Hugging Face - -Tools for interacting with Hugging Face models, datasets, research papers, and more. 
- -[View on Docker Hub](https://hub.docker.com/mcp/server/hugging-face/overview) - - - - -## Hummingbot MCP: Trading Agent - -Hummingbot MCP is an open-source toolset that lets you control and monitor your Hummingbot trading bots through AI-powered commands and automation. - -[View on Docker Hub](https://hub.docker.com/mcp/server/hummingbot-mcp/overview) - - - - - - - - - - - - - - - -## Husqvarna Automower - -MCP Server for huqsvarna automower. - -[View on Docker Hub](https://hub.docker.com/mcp/server/husqvarna-automower/overview) - - - - - - - - - - - - -## Hyperbrowser - -A MCP server implementation for hyperbrowser. - -[View on Docker Hub](https://hub.docker.com/mcp/server/hyperbrowser/overview) - - - - - - - - - -## Hyperspell - -Hyperspell MCP Server. - -[View on Docker Hub](https://hub.docker.com/mcp/server/hyperspell/overview) - - - - - - - - - - - - - - - -## Iaptic - -Model Context Protocol server for interacting with iaptic. - -[View on Docker Hub](https://hub.docker.com/mcp/server/iaptic/overview) - - - - - - - - - - - - -## Inspektor Gadget - -AI interface to troubleshoot and observe Kubernetes/Container workloads. - -[View on Docker Hub](https://hub.docker.com/mcp/server/inspektor-gadget/overview) - - - - - Comma-separated list of gadget images (trace_dns, trace_tcp, etc) to use, allowing control over which gadgets are available as MCP tools - - - - Path to the kubeconfig file for accessing Kubernetes clusters - - - - - -## Javadocs - -Access to Java, Kotlin, and Scala library documentation. - -[View on Docker Hub](https://hub.docker.com/mcp/server/javadocs/overview) - - - - -## JetBrains - -A model context protocol server to work with JetBrains IDEs: IntelliJ, PyCharm, WebStorm, etc. Also, works with Android Studio. - -[View on Docker Hub](https://hub.docker.com/mcp/server/jetbrains/overview) - - - - - - - - - -## Kafka Schema Registry MCP - -Comprehensive MCP server for Kafka Schema Registry operations. 
Features multi-registry support, schema contexts, migration tools, OAuth authentication, and 57+ tools for complete schema management. Supports SLIM_MODE for optimal performance. - -[View on Docker Hub](https://hub.docker.com/mcp/server/kafka-schema-reg-mcp/overview) - - - - - Schema Registry URL - - - - - - - - - - Enable SLIM_MODE for better performance - - - - Enable read-only mode - - - - - -## Kagi search - -The Official Model Context Protocol (MCP) server for Kagi search & other tools. - -[View on Docker Hub](https://hub.docker.com/mcp/server/kagisearch/overview) - - - - - - - - - - - - -## Keboola - -Keboola MCP Server is an open-source bridge between your Keboola project and modern AI tools. - -[View on Docker Hub](https://hub.docker.com/mcp/server/keboola-mcp/overview) - - - - - - - - - - - - -## Kong Konnect - -A Model Context Protocol (MCP) server for interacting with Kong Konnect APIs, allowing AI assistants to query and analyze Kong Gateway configurations, traffic, and analytics. - -[View on Docker Hub](https://hub.docker.com/mcp/server/kong/overview) - - - - - - - - - - - - -## Kubectl - -MCP Server that enables AI assistants to interact with Kubernetes clusters via kubectl operations. - -[View on Docker Hub](https://hub.docker.com/mcp/server/kubectl-mcp-server/overview) - - - - - - - - - -## Kubernetes - -Connect to a Kubernetes cluster and manage it. - -[View on Docker Hub](https://hub.docker.com/mcp/server/kubernetes/overview) - - - - - the path to the host .kube/config - - - - - -## Lara Translate - -Connect to Lara Translate API, enabling powerful translation capabilities with support for language detection and context-aware translations. - -[View on Docker Hub](https://hub.docker.com/mcp/server/lara/overview) - - - - - - - - - - - - -## LINE - -MCP server that integrates the LINE Messaging API to connect an AI Agent to the LINE Official Account. 
- -[View on Docker Hub](https://hub.docker.com/mcp/server/line/overview) - - - - - - - - - - - - -## LinkedIn - -This MCP server allows Claude and other AI assistants to access your LinkedIn. Scrape LinkedIn profiles and companies, get your recommended jobs, and perform job searches. Set your li_at LinkedIn cookie to use this server. - -[View on Docker Hub](https://hub.docker.com/mcp/server/linkedin-mcp-server/overview) - - - - - - - - Custom user agent string (optional, helps avoid detection and cookie login issues) - - - - - -## LLM Text - -Discovers and retrieves llms.txt from websites. - -[View on Docker Hub](https://hub.docker.com/mcp/server/llmtxt/overview) - - - - -## Maestro - -A Model Context Protocol (MCP) server exposing Bitcoin blockchain data through the Maestro API platform. Provides tools to explore blocks, transactions, addresses, inscriptions, runes, and other metaprotocol data. - -[View on Docker Hub](https://hub.docker.com/mcp/server/maestro-mcp-server/overview) - - - - - - - - - -## Manifold - -Tools for accessing the Manifold Markets online prediction market platform. - -[View on Docker Hub](https://hub.docker.com/mcp/server/manifold/overview) - - - - -## Mapbox - -Transform any AI agent into a geospatially-aware system with Mapbox APIs. Provides geocoding, POI search, routing, travel time matrices, isochrones, and static map generation. - -[View on Docker Hub](https://hub.docker.com/mcp/server/mapbox/overview) - - - - - - - - - -## Mapbox Developer - -Direct access to Mapbox developer APIs for AI assistants. Enables style management, token management, GeoJSON preview, and other developer tools for building Mapbox applications. - -[View on Docker Hub](https://hub.docker.com/mcp/server/mapbox-devkit/overview) - - - - - - - - - -## Markdownify - -A Model Context Protocol server for converting almost anything to Markdown. 
- -[View on Docker Hub](https://hub.docker.com/mcp/server/markdownify/overview) - - - - - - - - - -## Markitdown - -A lightweight MCP server for calling MarkItDown. - -[View on Docker Hub](https://hub.docker.com/mcp/server/markitdown/overview) - - - - - - - - - -## Maven Tools - -JVM dependency intelligence for any build tool using Maven Central Repository. Includes Context7 integration for upgrade documentation and guidance. - -[View on Docker Hub](https://hub.docker.com/mcp/server/maven-tools-mcp/overview) - - - - -## Memory (Reference) - -Knowledge graph-based persistent memory system. - -[View on Docker Hub](https://hub.docker.com/mcp/server/memory/overview) - - - - -## Mercado Libre - -Provides access to Mercado Libre E-Commerce API. - -[View on Docker Hub](https://hub.docker.com/mcp/server/mercado-libre/overview) - - - - - - - - - -## Mercado Pago - -Provides access to Mercado Pago Marketplace API. - -[View on Docker Hub](https://hub.docker.com/mcp/server/mercado-pago/overview) - - - - - - - - - -## Metabase MCP - -A comprehensive MCP server for Metabase with 70+ tools. - -[View on Docker Hub](https://hub.docker.com/mcp/server/metabase/overview) - - - - - - - - - - - - - - - - - - -## Minecraft Wiki - -A MCP Server for browsing the official Minecraft Wiki!. - -[View on Docker Hub](https://hub.docker.com/mcp/server/minecraft-wiki/overview) - - - - -## MongoDB - -A Model Context Protocol server to connect to MongoDB databases and MongoDB Atlas Clusters. - -[View on Docker Hub](https://hub.docker.com/mcp/server/mongodb/overview) - - - - - - - - - -## MultiversX - -MCP Server for MultiversX. - -[View on Docker Hub](https://hub.docker.com/mcp/server/multiversx-mx/overview) - - - - - - - - - - - - -## Nasdaq Data Link - -MCP server to interact with the data feeds provided by the Nasdaq Data Link. Developed by the community and maintained by Stefano Amorelli. 
- -[View on Docker Hub](https://hub.docker.com/mcp/server/nasdaq-data-link/overview) - - - - - - - - - -## Needle - -Production-ready RAG service to search and retrieve data from your documents. - -[View on Docker Hub](https://hub.docker.com/mcp/server/needle-mcp/overview) - - - - - - - - - -## Neo4j Cloud Aura Api - -Manage Neo4j Aura database instances through the Neo4j Aura API. - -[View on Docker Hub](https://hub.docker.com/mcp/server/neo4j-cloud-aura-api/overview) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -## Neo4j Cypher - -Interact with Neo4j using Cypher graph queries. - -[View on Docker Hub](https://hub.docker.com/mcp/server/neo4j-cypher/overview) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -## Neo4j Data Modeling - -MCP server that assists in creating, validating and visualizing graph data models. - -[View on Docker Hub](https://hub.docker.com/mcp/server/neo4j-data-modeling/overview) - - - - - - - - - - - - - - - - - - - - - - - - -## Neo4j Memory - -Provide persistent memory capabilities through Neo4j graph database integration. - -[View on Docker Hub](https://hub.docker.com/mcp/server/neo4j-memory/overview) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -## Neon - -MCP server for interacting with Neon Management API and databases. - -[View on Docker Hub](https://hub.docker.com/mcp/server/neon/overview) - - - - - - - - - -## Node.js Sandbox - -A Node.js–based Model Context Protocol server that spins up disposable Docker containers to execute arbitrary JavaScript. - -[View on Docker Hub](https://hub.docker.com/mcp/server/node-code-sandbox/overview) - - - - -## Notion - -Official Notion MCP Server. - -[View on Docker Hub](https://hub.docker.com/mcp/server/notion/overview) - - - - - - - - - -## Novita - -Seamless interaction with Novita AI platform resources. 
- -[View on Docker Hub](https://hub.docker.com/mcp/server/novita/overview) - - - - -## NPM Sentinel - -MCP server that enables intelligent NPM package analysis powered by AI. - -[View on Docker Hub](https://hub.docker.com/mcp/server/npm-sentinel/overview) - - - - -## Obsidian - -MCP server that interacts with Obsidian via the Obsidian rest API community plugin. - -[View on Docker Hub](https://hub.docker.com/mcp/server/obsidian/overview) - - - - - - - - - -## Okta - -Secure Okta identity and access management via Model Context Protocol (MCP). Access Okta users, groups, applications, logs, and policies through AI assistants with enterprise-grade security. - -[View on Docker Hub](https://hub.docker.com/mcp/server/okta-mcp-fctr/overview) - - - - - Okta organization URL (e.g., https://dev-123456.okta.com) - - - - Maximum concurrent requests to Okta API - - - - Logging level for server output - - - - - - - - -## omi-mcp - -A Model Context Protocol server for Omi interaction and automation. This server provides tools to read, search, and manipulate Memories and Conversations. - -[View on Docker Hub](https://hub.docker.com/mcp/server/omi/overview) - - - - - - - - - -## ONLYOFFICE DocSpace - -ONLYOFFICE DocSpace is a room-based collaborative platform which allows organizing a clear file structure depending on users' needs or project goals. - -[View on Docker Hub](https://hub.docker.com/mcp/server/onlyoffice-docspace/overview) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -## OpenAPI Toolkit for MCP - -Fetch, validate, and generate code or curl from any OpenAPI or Swagger spec - all from a single URL. - -[View on Docker Hub](https://hub.docker.com/mcp/server/openapi/overview) - - - - - - - - - -## OpenAPI Schema - -OpenAPI Schema Model Context Protocol Server. - -[View on Docker Hub](https://hub.docker.com/mcp/server/openapi-schema/overview) - - - - - - - - - -## Airbnb Search - -MCP Server for searching Airbnb and get listing details. 
- -[View on Docker Hub](https://hub.docker.com/mcp/server/openbnb-airbnb/overview) - - - - -## OpenMesh - -Discover and connect to a curated marketplace of MCP servers for extending AI agent capabilities. - -[View on Docker Hub](https://hub.docker.com/mcp/server/openmesh/overview) - - - - -## Openweather - -A simple MCP service that provides current weather and 5-day forecast using the free OpenWeatherMap API. - -[View on Docker Hub](https://hub.docker.com/mcp/server/openweather/overview) - - - - - - - - - -## OpenZeppelin Cairo Contracts - -Access to OpenZeppelin Cairo Contracts. - -[View on Docker Hub](https://hub.docker.com/mcp/server/openzeppelin-cairo/overview) - - - - -## OpenZeppelin Solidity Contracts - -Access to OpenZeppelin Solidity Contracts. - -[View on Docker Hub](https://hub.docker.com/mcp/server/openzeppelin-solidity/overview) - - - - -## OpenZeppelin Stellar Contracts - -Access to OpenZeppelin Stellar Contracts. - -[View on Docker Hub](https://hub.docker.com/mcp/server/openzeppelin-stellar/overview) - - - - -## OpenZeppelin Stylus Contracts - -Access to OpenZeppelin Stylus Contracts. - -[View on Docker Hub](https://hub.docker.com/mcp/server/openzeppelin-stylus/overview) - - - - -## Opik - -Model Context Protocol (MCP) implementation for Opik enabling seamless IDE integration and unified access to prompts, projects, traces, and metrics. - -[View on Docker Hub](https://hub.docker.com/mcp/server/opik/overview) - - - - - - - - - - - - - - - -## Opine - -A Model Context Protocol (MCP) server for querying deals and evaluations from the Opine CRM API. - -[View on Docker Hub](https://hub.docker.com/mcp/server/opine-mcp-server/overview) - - - - - - - - - -## Oracle Database - -Connect to Oracle databases via MCP, providing secure read-only access with support for schema exploration, query execution, and metadata inspection. 
- -[View on Docker Hub](https://hub.docker.com/mcp/server/oracle/overview) - - - - - - - - - - - - - - - -## OSP Marketing Tools - -A Model Context Protocol (MCP) server that empowers LLMs to use some of Open Srategy Partners' core writing and product marketing techniques. - -[View on Docker Hub](https://hub.docker.com/mcp/server/osp_marketing_tools/overview) - - - - -## Oxylabs - -A Model Context Protocol (MCP) server that enables AI assistants like Claude to seamlessly access web data through Oxylabs' powerful web scraping technology. - -[View on Docker Hub](https://hub.docker.com/mcp/server/oxylabs/overview) - - - - - - - - - - - - -## Paper Search - -A MCP for searching and downloading academic papers from multiple sources like arXiv, PubMed, bioRxiv, etc. - -[View on Docker Hub](https://hub.docker.com/mcp/server/paper-search/overview) - - - - -## Perplexity - -Connector for Perplexity API, to enable real-time, web-wide research. - -[View on Docker Hub](https://hub.docker.com/mcp/server/perplexity-ask/overview) - - - - - - - - - -## Program Integrity Alliance - -An MCP server to help make U.S. Government open datasets AI-friendly. - -[View on Docker Hub](https://hub.docker.com/mcp/server/pia/overview) - - - - - - - - - -## Pinecone Assistant - -Pinecone Assistant MCP server. - -[View on Docker Hub](https://hub.docker.com/mcp/server/pinecone/overview) - - - - - - - - - - - - -## ExecuteAutomation Playwright MCP - -Playwright Model Context Protocol Server - Tool to automate Browsers and APIs in Claude Desktop, Cline, Cursor IDE and More 🔌. - -[View on Docker Hub](https://hub.docker.com/mcp/server/playwright-mcp-server/overview) - - - - - - - - - -## Plugged.in MCP Proxy - -A unified MCP proxy that aggregates multiple MCP servers into one interface, enabling seamless tool discovery and management across all your AI interactions. Manage all your MCP servers from a single connection point with RAG capabilities and real-time notifications. 
- -[View on Docker Hub](https://hub.docker.com/mcp/server/pluggedin-mcp-proxy/overview) - - - - - Base URL for the Plugged.in API (optional, defaults to https://plugged.in for cloud or http://localhost:12005 for self-hosted) - - - - - - - - -## Polar Signals - -MCP server for Polar Signals Cloud continuous profiling platform, enabling AI assistants to analyze CPU performance, memory usage, and identify optimization opportunities in production systems. - -[View on Docker Hub](https://hub.docker.com/mcp/server/polar-signals/overview) - - - - - - - - - -## PomoDash - -Connect your AI assistant to PomoDash for seamless task and project management. - -[View on Docker Hub](https://hub.docker.com/mcp/server/pomodash/overview) - - - - - - - - - -## PostgreSQL readonly (Archived) - -Connect with read-only access to PostgreSQL databases. This server enables LLMs to inspect database schemas and execute read-only queries. - -[View on Docker Hub](https://hub.docker.com/mcp/server/postgres/overview) - - - - - - - - - -## Postman - -Postman's MCP server connects AI agents, assistants, and chatbots directly to your APIs on Postman. Use natural language to prompt AI to automate work across your Postman collections, environments, workspaces, and more. - -[View on Docker Hub](https://hub.docker.com/mcp/server/postman/overview) - - - - - - - - - -## Pref Editor - -Pref Editor is a tool for viewing and editing Android app preferences during development. - -[View on Docker Hub](https://hub.docker.com/mcp/server/pref-editor/overview) - - - - -## Prometheus - -A Model Context Protocol (MCP) server that enables AI assistants to query and analyze Prometheus metrics through standardized interfaces. Connect to your Prometheus instance to retrieve metrics, perform queries, and gain insights into your system's performance and health. 
- -[View on Docker Hub](https://hub.docker.com/mcp/server/prometheus/overview) - - - - - The URL of your Prometheus server - - - - - -## Puppeteer (Archived) - -Browser automation and web scraping using Puppeteer. - -[View on Docker Hub](https://hub.docker.com/mcp/server/puppeteer/overview) - - - - -## Python Refactoring Assistant - -Educational Python refactoring assistant that provides guided suggestions for AI assistants. Features: • Step-by-step refactoring instructions without modifying code • Comprehensive code analysis using professional tools (Rope, Radon, Vulture, Jedi, LibCST, Pyrefly) • Educational approach teaching refactoring patterns through guided practice • Support for both guide-only and apply-changes modes • Identifies long functions, high complexity, dead code, and type issues • Provides precise line numbers and specific refactoring instructions • Compatible with all AI assistants (Claude, GPT, Cursor, Continue, etc.) Perfect for developers learning refactoring patterns while maintaining full control over code changes. Acts as a refactoring mentor rather than an automated code modifier. - -[View on Docker Hub](https://hub.docker.com/mcp/server/mcp-python-refactoring/overview) - - - - -## QuantConnect - -The QuantConnect MCP Server is a bridge for AIs (such as Claude and OpenAI o3 Pro) to interact with our cloud platform. When equipped with our MCP, the AI can perform tasks on your behalf through our API such as updating projects, writing strategies, backtesting, and deploying strategies to production live-trading. - -[View on Docker Hub](https://hub.docker.com/mcp/server/quantconnect/overview) - - - - - - - - - - - - - - - -## Ramparts MCP Security Scanner - -A comprehensive security scanner for MCP servers with YARA rules and static analysis capabilities. - -[View on Docker Hub](https://hub.docker.com/mcp/server/ramparts/overview) - - - - -## Razorpay - -Razorpay's Official MCP Server. 
- -[View on Docker Hub](https://hub.docker.com/mcp/server/razorpay/overview) - - - - - - - - - - - - -## Mcp reddit - -A comprehensive Model Context Protocol (MCP) server for Reddit integration. This server enables AI agents to interact with Reddit programmatically through a standardized interface. - -[View on Docker Hub](https://hub.docker.com/mcp/server/mcp-reddit/overview) - - - - - - - - - - - - - - - - - - -## Redis - -Access to Redis database operations. - -[View on Docker Hub](https://hub.docker.com/mcp/server/redis/overview) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -## Redis Cloud - -MCP Server for Redis Cloud's API, allowing you to manage your Redis Cloud resources using natural language. - -[View on Docker Hub](https://hub.docker.com/mcp/server/redis-cloud/overview) - - - - - - - - - - - - -## Ref - up-to-date docs - -Ref powerful search tool connets your coding tools with documentation context. It includes an up-to-date index of public documentation and it can ingest your private documentation (eg. GitHub repos, PDFs) as well. - -[View on Docker Hub](https://hub.docker.com/mcp/server/ref/overview) - - - - - - - - - -## Remote MCP - -Tools for finding remote MCP servers. - -[View on Docker Hub](https://hub.docker.com/mcp/server/remote-mcp/overview) - - - - -## Render - -Interact with your Render resources via LLMs. - -[View on Docker Hub](https://hub.docker.com/mcp/server/render/overview) - - - - - - - - - -## Send emails - -Send emails directly from Cursor with this email sending MCP server. - -[View on Docker Hub](https://hub.docker.com/mcp/server/resend/overview) - - - - - - - - comma separated list of reply to email addresses - - - - sender email address - - - - - -## RISKEN - -RISKEN's official MCP Server. 
- -[View on Docker Hub](https://hub.docker.com/mcp/server/risken/overview) - - - - - - - - - - - - -## Root.io Vulnerability Remediation MCP - -MCP server that provides container image vulnerability scanning and remediation capabilities through Root.io. - -[View on Docker Hub](https://hub.docker.com/mcp/server/root/overview) - - - - - - - - - -## WiseVision ROS2 - -Python server implementing Model Context Protocol (MCP) for ROS2. - -[View on Docker Hub](https://hub.docker.com/mcp/server/ros2/overview) - - - - -## Rube - -Access to Rube's catalog of remote MCP servers. - -[View on Docker Hub](https://hub.docker.com/mcp/server/rube/overview) - - - - - - - - - -## Blazing-fast, asynchronous for seamless filesystem operations. - -The Rust MCP Filesystem is a high-performance, asynchronous, and lightweight Model Context Protocol (MCP) server built in Rust for secure and efficient filesystem operations. Designed with security in mind, it operates in read-only mode by default and restricts clients from updating allowed directories via MCP Roots unless explicitly enabled, ensuring robust protection against unauthorized access. Leveraging asynchronous I/O, it delivers blazingly fast performance with a minimal resource footprint. Optimized for token efficiency, the Rust MCP Filesystem enables large language models (LLMs) to precisely target searches and edits within specific sections of large files and restrict operations by file size range, making it ideal for efficient file exploration, automation, and system integration. - -[View on Docker Hub](https://hub.docker.com/mcp/server/rust-mcp-filesystem/overview) - - - - - Enable read/write mode. If false, the app operates in read-only mode. - - - - List of directories that rust-mcp-filesystem can access. - - - - Enable dynamic directory access control via MCP client-side Roots. 
- - - - - -## SchemaCrawler AI - -The SchemaCrawler AI MCP Server enables natural language interaction with your database schema using an MCP client in "Agent" mode. It allows users to explore tables, columns, foreign keys, triggers, stored procedures and more simply by asking questions like "Explain the code for the interest calculation stored procedure". You can also ask it to help with SQL, since it knows your schema. This is ideal for developers, DBAs, and data analysts who want to streamline schema comprehension and query development without diving into dense documentation. - -[View on Docker Hub](https://hub.docker.com/mcp/server/schemacrawler-ai/overview) - - - - - --info-level How much database metadata to retrieve - - - - - - - - - - - - - --database Database to connect to (optional) - - - - --host Database host (optional) - - - - --port Database port (optional) - - - - --server SchemaCrawler database plugin - - - - --url JDBC URL for database connection - - - - Host volume to map within the Docker container - - - - - -## Schogini MCP Image Border - -This adds a border to an image and returns base64 encoded image. - -[View on Docker Hub](https://hub.docker.com/mcp/server/schogini-mcp-image-border/overview) - - - - -## ScrapeGraph - -ScapeGraph MCP Server. - -[View on Docker Hub](https://hub.docker.com/mcp/server/scrapegraph/overview) - - - - - - - - - -## Scrapezy - -A Model Context Protocol server for Scrapezy that enables AI models to extract structured data from websites. - -[View on Docker Hub](https://hub.docker.com/mcp/server/scrapezy/overview) - - - - - - - - - -## Securenote.link - -SecureNote.link MCP Server - allowing AI agents to securely share sensitive information through end-to-end encrypted notes. - -[View on Docker Hub](https://hub.docker.com/mcp/server/securenote-link-mcp-server/overview) - - - - -## Semgrep - -MCP server for using Semgrep to scan code for security vulnerabilities. 
- -[View on Docker Hub](https://hub.docker.com/mcp/server/semgrep/overview) - - - - -## Sentry (Archived) - -A Model Context Protocol server for retrieving and analyzing issues from Sentry.io. This server provides tools to inspect error reports, stacktraces, and other debugging information from your Sentry account. - -[View on Docker Hub](https://hub.docker.com/mcp/server/sentry/overview) - - - - - - - - - -## Sequa.AI - -Stop stitching context for Copilot and Cursor. With Sequa MCP, your AI tools know your entire codebase and docs out of the box. - -[View on Docker Hub](https://hub.docker.com/mcp/server/sequa/overview) - - - - - - - - - - - - -## Sequential Thinking (Reference) - -Dynamic and reflective problem-solving through thought sequences. - -[View on Docker Hub](https://hub.docker.com/mcp/server/sequentialthinking/overview) - - - - -## Short.io - -Access to Short.io's link shortener and analytics tools. - -[View on Docker Hub](https://hub.docker.com/mcp/server/short-io/overview) - - - - - - - - - -## SimpleCheckList - -Advanced SimpleCheckList with MCP server and SQLite database for comprehensive task management. Features: • Complete project and task management system • Hierarchical organization (Projects → Groups → Task Lists → Tasks → Subtasks) • SQLite database for data persistence • RESTful API with comprehensive endpoints • MCP protocol compliance for AI assistant integration • Docker-optimized deployment with stability improvements **v1.0.1 Update**: Enhanced Docker stability with improved container lifecycle management. Default mode optimized for containerized deployment with reliable startup and shutdown processes. Perfect for AI assistants managing complex project workflows and task hierarchies. - -[View on Docker Hub](https://hub.docker.com/mcp/server/simplechecklist/overview) - - - - -## Singlestore - -MCP server for interacting with SingleStore Management API and services. 
- -[View on Docker Hub](https://hub.docker.com/mcp/server/singlestore/overview) - - - - - - - - - -## Slack (Archived) - -Interact with Slack Workspaces over the Slack API. - -[View on Docker Hub](https://hub.docker.com/mcp/server/slack/overview) - - - - - - - - - - - - - - - -## SmartBear - -MCP server for AI access to SmartBear tools, including BugSnag, Reflect, API Hub, PactFlow. - -[View on Docker Hub](https://hub.docker.com/mcp/server/smartbear/overview) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -## SonarQube - -Interact with SonarQube Cloud, Server and Community build over the web API. Analyze code to identify quality and security issues. - -[View on Docker Hub](https://hub.docker.com/mcp/server/sonarqube/overview) - - - - - Organization key for SonarQube Cloud, not required for SonarQube Server or Community Build - - - - - - - URL of the SonarQube instance, to provide only for SonarQube Server or Community Build - - - - - -## SQLite (Archived) - -Database interaction and business intelligence capabilities. - -[View on Docker Hub](https://hub.docker.com/mcp/server/SQLite/overview) - - - - -## StackGen - -AI-powered DevOps assistant for managing cloud infrastructure and applications. - -[View on Docker Hub](https://hub.docker.com/mcp/server/stackgen/overview) - - - - - - - - URL of your StackGen instance - - - - - -## StackHawk - -A Model Context Protocol (MCP) server for integrating with StackHawk's security scanning platform. Provides security analytics, YAML configuration management, sensitive data/threat surface analysis, and anti-hallucination tools for LLMs. - -[View on Docker Hub](https://hub.docker.com/mcp/server/stackhawk/overview) - - - - - - - - - -## Stripe - -Interact with Stripe services over the Stripe API. 
- -[View on Docker Hub](https://hub.docker.com/mcp/server/stripe/overview) - - - - - - - - - -## Supadata - -Official Supadata MCP Server - Adds powerful video & web scraping to Cursor, Claude and any other LLM clients. - -[View on Docker Hub](https://hub.docker.com/mcp/server/supadata/overview) - - - - - - - - - -## Suzieq MCP - -MCP Server to interact with a SuzieQ network observability instance via its REST API. - -[View on Docker Hub](https://hub.docker.com/mcp/server/suzieq/overview) - - - - - - - - - - - - -## Task orchestrator - -Model Context Protocol (MCP) server for comprehensive task and feature management, providing AI assistants with a structured, context-efficient way to interact with project data. - -[View on Docker Hub](https://hub.docker.com/mcp/server/task-orchestrator/overview) - - - - -## Tavily - -The Tavily MCP server provides seamless interaction with the tavily-search and tavily-extract tools, real-time web search capabilities through the tavily-search tool and Intelligent data extraction from web pages via the tavily-extract tool. - -[View on Docker Hub](https://hub.docker.com/mcp/server/tavily/overview) - - - - - - - - - -## Teamwork - -Tools for Teamwork.com products. - -[View on Docker Hub](https://hub.docker.com/mcp/server/teamwork/overview) - - - - - - - - - -## Telnyx - -Enables interaction with powerful telephony, messaging, and AI assistant APIs. - -[View on Docker Hub](https://hub.docker.com/mcp/server/telnyx/overview) - - - - - - - - - -## Tembo - -MCP server for Tembo Cloud's platform API. - -[View on Docker Hub](https://hub.docker.com/mcp/server/tembo/overview) - - - - - - - - - -## Hashicorp Terraform - -The Terraform MCP Server provides seamless integration with Terraform ecosystem, enabling advanced automation and interaction capabilities for Infrastructure as Code (IaC) development. 
- -[View on Docker Hub](https://hub.docker.com/mcp/server/terraform/overview) - - - - -## Text-to-GraphQL - -Transform natural language queries into GraphQL queries using an AI agent. Provides schema management, query validation, execution, and history tracking. - -[View on Docker Hub](https://hub.docker.com/mcp/server/text-to-graphql/overview) - - - - - - - - Authentication method for GraphQL API - - - - - - - OpenAI model to use - - - - Model temperature for responses - - - - - - - - -## Tigris Data - -Tigris is a globally distributed S3-compatible object storage service that provides low latency anywhere in the world, enabling developers to store and access any amount of data for a wide range of use cases. - -[View on Docker Hub](https://hub.docker.com/mcp/server/tigris/overview) - - - - - - - - - - - - - - - -## Time (Reference) - -Time and timezone conversion capabilities. - -[View on Docker Hub](https://hub.docker.com/mcp/server/time/overview) - - - - -## Triplewhale - -Triplewhale MCP Server. - -[View on Docker Hub](https://hub.docker.com/mcp/server/triplewhale/overview) - - - - - - - - - -## Unreal Engine - -A comprehensive Model Context Protocol (MCP) server that enables AI assistants to control Unreal Engine via Remote Control API. Built with TypeScript and designed for game development automation. - -[View on Docker Hub](https://hub.docker.com/mcp/server/unreal-engine-mcp-server/overview) - - - - - Logging level - - - - Unreal Engine host address. Use: host.docker.internal for local UE on Windows/Mac Docker, 127.0.0.1 for Linux without Docker, or actual IP address (e.g., 192.168.1.100) for remote UE - - - - Remote Control HTTP port - - - - Remote Control WebSocket port - - - - - -## VeyraX - -VeyraX MCP is the only connection you need to access all your tools in any MCP-compatible environment. 
- -[View on Docker Hub](https://hub.docker.com/mcp/server/veyrax/overview) - - - - - - - - - -## Vizro - -provides tools and templates to create a functioning Vizro chart or dashboard step by step. - -[View on Docker Hub](https://hub.docker.com/mcp/server/vizro/overview) - - - - -## Vuln nist - -This MCP server exposes tools to query the NVD/CVE REST API and return formatted text results suitable for LLM consumption via the MCP protocol. It includes automatic query chunking for large date ranges and parallel processing for improved performance. - -[View on Docker Hub](https://hub.docker.com/mcp/server/vuln-nist-mcp-server/overview) - - - - -## Wayfound MCP - -Wayfound’s MCP server allows business users to govern, supervise, and improve AI Agents. - -[View on Docker Hub](https://hub.docker.com/mcp/server/wayfound/overview) - - - - - - - - - -## Webflow - -Model Context Protocol (MCP) server for the Webflow Data API. - -[View on Docker Hub](https://hub.docker.com/mcp/server/webflow/overview) - - - - - - - - - -## Wikipedia - -A Model Context Protocol (MCP) server that retrieves information from Wikipedia to provide context to LLMs. - -[View on Docker Hub](https://hub.docker.com/mcp/server/wikipedia-mcp/overview) - - - - -## WolframAlpha - -Connect your chat repl to wolfram alpha computational intelligence. - -[View on Docker Hub](https://hub.docker.com/mcp/server/wolfram-alpha/overview) - - - - - - - - - -## YouTube transcripts - -Retrieves transcripts for given YouTube video URLs. - -[View on Docker Hub](https://hub.docker.com/mcp/server/youtube_transcript/overview) - - - - -## Zerodha Kite Connect - -MCP server for Zerodha Kite Connect API - India's leading stock broker trading platform. Execute trades, manage portfolios, and access real-time market data for NSE, BSE, and other Indian exchanges. 
- -[View on Docker Hub](https://hub.docker.com/mcp/server/zerodha-kite/overview) - - - - - Access token obtained after OAuth authentication (optional - can be generated at runtime) - - - - Your Kite Connect API key from the developer console - - - - - - - OAuth redirect URL configured in your Kite Connect app - - - - diff --git a/docs/mcp/custom-servers.mdx b/docs/mcp/custom-servers.mdx deleted file mode 100644 index 6549761e..00000000 --- a/docs/mcp/custom-servers.mdx +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: Custom servers -description: Use custom MCP servers from GitHub repositories ---- - -In addition to the 200+ pre-built MCP servers from the [Docker MCP Catalog](https://hub.docker.com/mcp), you can run custom MCP servers directly from public GitHub repositories. - -## How it works - -When you specify a GitHub repository, E2B will: -1. Clone the repository into the sandbox -2. Run the `installCmd` (optional) to install dependencies -3. Run the `runCmd` to start the MCP server with stdio transport - -The `runCmd` must start an MCP server that follows the [MCP specification](https://modelcontextprotocol.io/specification/2025-06-18) and communicates via stdio (standard input/output). - -## Using a custom MCP server - - - -```typescript TypeScript -import Sandbox from 'e2b' - -const sandbox = await Sandbox.create({ - mcp: { - 'github/modelcontextprotocol/servers': { - installCmd: 'npm install', - runCmd: 'sudo npx -y @modelcontextprotocol/server-filesystem /root', - }, - }, -}); -``` - -```python Python -from e2b import Sandbox -import os - -sbx = Sandbox.create( - mcp={ - "github/modelcontextprotocol/servers": { - "install_cmd": "npm install", - "run_cmd": "sudo npx -y @modelcontextprotocol/server-filesystem /root", - }, - } -) -``` - - - -## Configuration - - - Optional command to run before starting the MCP server. Use this to install dependencies (e.g., `npm install`, `pip install -r requirements.txt`). - - - - Command to start the MCP server. 
Must launch a stdio-enabled MCP server. - - - -**Important for npx-based servers:** Always include `installCmd: 'npm install'` (or equivalent) when using `npx` in your `runCmd`. Without installing dependencies first, npx will try to use the local repository and fail. - - -## Troubleshooting - -If your custom MCP server doesn't work as expected: - -1. Explore the sandbox either via the [dashboard](https://e2b.dev/dashboard) or by connecting to it via `e2b connect ` -2. Check the gateway log file with `sudo cat /var/log/mcp-gateway/gateway.log`. \ No newline at end of file diff --git a/docs/mcp/custom-templates.mdx b/docs/mcp/custom-templates.mdx deleted file mode 100644 index d23ca86e..00000000 --- a/docs/mcp/custom-templates.mdx +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: Custom templates -description: Use MCP servers with custom sandbox templates ---- - -You can prepull MCP server Docker images during template build time to significantly improve runtime performance. -## How it works - -When you build a template with prepulled MCP servers, the Docker images for those servers are downloaded and cached during the build process. This means when you create a sandbox from that template, the MCP servers are ready to use immediately without waiting for image downloads. - - -You must use the MCP gateway enabled template (`mcp-gateway`) as your base template to use this feature. - - -## Building a template with MCP servers - -Use the `addMcpServer()` method (TypeScript) or `add_mcp_server()` method (Python) to prepull MCP server images during template build. You can pass a single server or an array of servers. - -The server names (like `"browserbase"` and `"exa"`) correspond to the keys defined in the [Available Servers](/docs/mcp/available-servers) documentation. 
- - - -```typescript JavaScript & TypeScript -import "dotenv/config"; -import { Template, defaultBuildLogger } from 'e2b'; - -export const template = Template() - .fromTemplate("mcp-gateway") - .addMcpServer(["browserbase", "exa"]); - -await Template.build(template, 'my-mcp-gateway', { - cpuCount: 8, - memoryMB: 8192, - onBuildLogs: defaultBuildLogger(), -}); -``` - -```python Python -from dotenv import load_dotenv -from e2b import Template, default_build_logger - -load_dotenv() - -template = ( - Template() - .from_template("mcp-gateway") - .add_mcp_server(["browserbase", "exa"]) -) - -Template.build( - template, - 'my-mcp-gateway', - cpu_count=8, - memory_mb=8192, - on_build_logs=default_build_logger(), -) -``` - - - -## Using the template - -Once built, create sandboxes from your template. You still need to provide the configuration for each MCP server. - - - -```typescript JavaScript & TypeScript -import { Sandbox } from 'e2b'; - -const sandbox = await Sandbox.create({ - template: "my-mcp-gateway", - mcp: { - browserbase: { - apiKey: process.env.BROWSERBASE_API_KEY!, - geminiApiKey: process.env.GEMINI_API_KEY!, - projectId: process.env.BROWSERBASE_PROJECT_ID!, - }, - exa: { - apiKey: process.env.EXA_API_KEY!, - }, - }, -}); - -``` - -```python Python -from e2b import Sandbox -import os - -sbx = Sandbox.create( - template="my-mcp-gateway", - mcp={ - "browserbase": { - "apiKey": os.getenv("BROWSERBASE_API_KEY"), - "geminiApiKey": os.getenv("GEMINI_API_KEY"), - "projectId": os.getenv("BROWSERBASE_PROJECT_ID"), - }, - "exa": { - "apiKey": os.getenv("EXA_API_KEY"), - }, - } -) -``` - - - -## Learn more - -For more information about working with templates, see: -- [Template Quickstart](/docs/template/quickstart) - Get started with custom templates -- [Defining Templates](/docs/template/defining-template) - Learn all template configuration options -- [Template Build](/docs/template/build) - Understand the build process diff --git a/docs/mcp/examples.mdx 
b/docs/mcp/examples.mdx deleted file mode 100644 index a13f35d5..00000000 --- a/docs/mcp/examples.mdx +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: Examples -description: Example projects using MCP servers in E2B sandboxes ---- - - - - Claude Code with MCP integration - - - Web automation agent with Browserbase - - - AI research using Groq and Exa - - - Research Agent using the OpenAI Agents framework - - - Basic MCP client connecting to an E2B Sandbox - - - Use custom MCP servers installed from GitHub - - - Create a custom E2B Sandbox with pre-installed MCP servers - - diff --git a/docs/mcp/quickstart.mdx b/docs/mcp/quickstart.mdx deleted file mode 100644 index 5cb03677..00000000 --- a/docs/mcp/quickstart.mdx +++ /dev/null @@ -1,222 +0,0 @@ ---- -title: Quickstart -description: Get started with MCP integration ---- - -You can connect to the MCPs running inside the sandbox both from outside and inside the sandbox. - -## From outside the sandbox - -To connect to the MCPs running inside the sandbox, use the `sandbox.getMcpUrl()` in JavaScript and `sandbox.get_mcp_url()` in Python. 
- -```typescript TypeScript -import Sandbox from 'e2b' -import { Client } from '@modelcontextprotocol/sdk/client/index.js'; -import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js'; - -const sandbox = await Sandbox.create({ - mcp: { - browserbase: { - apiKey: process.env.BROWSERBASE_API_KEY!, - geminiApiKey: process.env.GEMINI_API_KEY!, - projectId: process.env.BROWSERBASE_PROJECT_ID!, - }, - exa: { - apiKey: process.env.EXA_API_KEY!, - }, - notion: { - internalIntegrationToken: process.env.NOTION_API_KEY!, - }, - }, -}); - -const client = new Client({ - name: 'e2b-mcp-client', - version: '1.0.0' -}); - -const transport = new StreamableHTTPClientTransport( - new URL(sandbox.getMcpUrl()), - { - requestInit: { - headers: { - 'Authorization': `Bearer ${await sandbox.getMcpToken()}` - } - } - } -); - -await client.connect(transport); - -const tools = await client.listTools(); -console.log('Available tools:', tools.tools.map(t => t.name)); - -await client.close(); -await sandbox.kill(); -``` - -```python Python -import os -import dotenv -dotenv.load_dotenv() -from e2b import AsyncSandbox -import asyncio -from datetime import timedelta -from mcp.client.session import ClientSession -from mcp.client.streamable_http import streamablehttp_client - -async def main(): - sandbox = await AsyncSandbox.create( - mcp={ - "browserbase": { - "apiKey": os.environ["BROWSERBASE_API_KEY"], - "geminiApiKey": os.environ["GEMINI_API_KEY"], - "projectId": os.environ["BROWSERBASE_PROJECT_ID"], - }, - "exa": { - "apiKey": os.environ["EXA_API_KEY"], - }, - "notion": { - "internalIntegrationToken": os.environ["NOTION_API_KEY"], - }, - } - ) - - async with streamablehttp_client( - url=sandbox.get_mcp_url(), - headers={"Authorization": f"Bearer {await sandbox.get_mcp_token()}"}, - timeout=timedelta(seconds=600) - ) as (read_stream, write_stream, _): - async with ClientSession(read_stream, write_stream) as session: - await session.initialize() - tools = 
await session.list_tools() - print(f"Available tools: {[tool.name for tool in tools.tools]}") - await sandbox.kill() - -if __name__ == "__main__": - asyncio.run(main()) -``` - - -## From inside the sandbox - -If you need to access the MCP gateway from within the sandbox itself, it's available at: -``` -http://localhost:50005/mcp -``` - -You'll need to include the Authorization header with the MCP token when making requests from inside the sandbox. How that is added depends on the MCP client you use: - -## Claude - -``` -claude mcp add --transport http e2b-mcp-gateway --header "Authorization: Bearer " -``` - -## OpenAI Agents - - -```typescript TypeScript -import { MCPServerStreamableHttp } from '@openai/agents'; - -const mcp = new MCPServerStreamableHttp({ - url: mcpUrl, - name: 'E2B MCP Gateway', - requestInit: { - headers: { - 'Authorization': `Bearer ${await sandbox.getMcpToken()}` - } - }, -}); -``` - -```python Python -import asyncio -import os -from e2b import AsyncSandbox -from agents.mcp import MCPServerStreamableHttp - -async def main(): - async with MCPServerStreamableHttp( - name="e2b-mcp-client", - params={ - "url": sandbox.get_mcp_url(), - "headers": {"Authorization": f"Bearer {await sandbox.get_mcp_token()}"}, - }, - ) as server: - tools = await server.list_tools() - print("Available tools:", [t.name for t in tools]) - - # Clean up - await sandbox.kill() - -asyncio.run(main()) -``` - - -## Official MCP client - - -```typescript Typescript -import { Client } from '@modelcontextprotocol/sdk/client/index.js'; -import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js'; - -const client = new Client({ - name: 'e2b-mcp-client', - version: '1.0.0' -}); - -const transport = new StreamableHTTPClientTransport( - new URL(sandbox.getMcpUrl()), - { - requestInit: { - headers: { - 'Authorization': `Bearer ${await sandbox.getMcpToken()}` - } - } - } -); -await client.connect(transport); -``` -```python Python -import asyncio 
-from datetime import timedelta -from mcp.client.session import ClientSession -from mcp.client.streamable_http import streamablehttp_client - -async def main(): - async with streamablehttp_client( - url=sandbox.get_mcp_url(), - headers={"Authorization": f"Bearer {await sandbox.get_mcp_token()}"}, - timeout=timedelta(seconds=600) - ) as (read_stream, write_stream, _): - async with ClientSession(read_stream, write_stream) as session: - await session.initialize() - - tools = await session.list_tools() - print(f"Available tools: {[tool.name for tool in tools.tools]}") - await sandbox.kill() - -if __name__ == "__main__": - asyncio.run(main()) - -``` - - - -This list is not exhaustive. You can find more examples in the [E2B Cookbook](https://github.com/e2b-dev/e2b-cookbook). - - -## Debugging with MCP Inspector - -The [MCP Inspector](https://github.com/modelcontextprotocol/inspector) is a useful tool for debugging and testing your MCP server setup. Get the command to run: - -```bash -npx @modelcontextprotocol/inspector --transport http --url --header "Authorization: Bearer ${mcpToken}" -``` - -Run the command in your terminal. This will open a web interface where you can: -- Browse available tools -- Test tool calls with different parameters -- Inspect request/response payloads -- Debug connection issues diff --git a/docs/volumes.mdx b/docs/volumes.mdx deleted file mode 100644 index 8f98d378..00000000 --- a/docs/volumes.mdx +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: "Volumes" -sidebarTitle: Overview ---- - - -Volumes are currently in private beta. -If you'd like access, please reach out to us at [support@e2b.dev](mailto:support@e2b.dev). - - -Volumes provide persistent storage that exists independently of sandbox lifecycles. Data written to a volume persists even after a sandbox is shut down, and volumes can be mounted to multiple sandboxes over time. 
- -**One volume shared across multiple sandboxes** - -```mermaid actions={false} -graph LR - V1[Volume A] --- S1[Sandbox 1] - V1 --- S2[Sandbox 2] - V1 --- S3[Sandbox 3] -``` - -**Each sandbox with its own volume** - -```mermaid actions={false} -graph LR - V2[Volume A] --- S4[Sandbox 1] - V3[Volume B] --- S5[Sandbox 2] -``` - -**Standalone usage via SDK** - -```mermaid actions={false} -graph LR - SDK[SDK] --- V4[Volume A] - SDK --- V5[Volume B] -``` - -When a volume is mounted to a sandbox, files can be read and written directly at the mount path. The SDK methods are meant to be used when the volume is not mounted to any sandbox. - -With E2B SDK you can: -- [Manage volumes.](/docs/volumes/manage) -- [Mount volumes to sandboxes.](/docs/volumes/mount) -- [Read and write files to a volume.](/docs/volumes/read-write) -- [Get file and directory metadata.](/docs/volumes/info) -- [Upload data to a volume.](/docs/volumes/upload) -- [Download data from a volume.](/docs/volumes/download) - -## Example - - -```js JavaScript & TypeScript -import { Volume, Sandbox } from 'e2b' - -const volume = await Volume.create('my-volume') - -const sandbox = await Sandbox.create({ - volumeMounts: { - '/mnt/my-data': volume, - }, -}) -``` -```python Python -from e2b import Volume, Sandbox - -volume = Volume.create('my-volume') - -sandbox = Sandbox.create( - volume_mounts={ - '/mnt/my-data': volume, - }, -) -``` - diff --git a/docs/volumes/download.mdx b/docs/volumes/download.mdx deleted file mode 100644 index e2f0ef2a..00000000 --- a/docs/volumes/download.mdx +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: "Download data from volume" -sidebarTitle: Download data ---- - -You can download data from a volume using the `readFile()` / `read_file()` method. 
- - -```js JavaScript & TypeScript -import fs from 'fs' -import { Volume } from 'e2b' - -const volume = await Volume.create('my-volume') - -// Read file from volume -const content = await volume.readFile('/path/in/volume') -// Write file to local filesystem -fs.writeFileSync('/local/path', content) -``` -```python Python -from e2b import Volume - -volume = Volume.create('my-volume') - -# Read file from volume -content = volume.read_file('/path/in/volume') -# Write file to local filesystem -with open('/local/path', 'w') as file: - file.write(content) -``` - diff --git a/docs/volumes/info.mdx b/docs/volumes/info.mdx deleted file mode 100644 index 91c3c7ef..00000000 --- a/docs/volumes/info.mdx +++ /dev/null @@ -1,188 +0,0 @@ ---- -title: "Get information about a file or directory" -sidebarTitle: File & directory metadata ---- - -You can get information about a file or directory in a volume using the `getInfo()` / `get_info()` method. - -### Getting information about a file - - -```js JavaScript & TypeScript -import { Volume } from 'e2b' - -const volume = await Volume.create('my-volume') - -// Create a new file -await volume.writeFile('/test_file.txt', 'Hello, world!') - -// Get information about the file -const info = await volume.getInfo('/test_file.txt') - -console.log(info) -// { -// name: 'test_file.txt', -// type: 'file', -// path: '/test_file.txt', -// size: 13, -// mode: 0o644, -// uid: 0, -// gid: 0, -// atime: 2025-05-26T12:00:00.000Z, -// mtime: 2025-05-26T12:00:00.000Z, -// ctime: 2025-05-26T12:00:00.000Z, -// } -``` -```python Python -from e2b import Volume - -volume = Volume.create('my-volume') - -# Create a new file -volume.write_file('/test_file.txt', 'Hello, world!') - -# Get information about the file -info = volume.get_info('/test_file.txt') - -print(info) -# VolumeEntryStat( -# name='test_file.txt', -# type_='file', -# path='/test_file.txt', -# size=13, -# mode=0o644, -# uid=0, -# gid=0, -# atime=datetime(2025, 5, 26, 12, 0, 0), -# 
mtime=datetime(2025, 5, 26, 12, 0, 0), -# ctime=datetime(2025, 5, 26, 12, 0, 0), -# ) -``` - - -### Getting information about a directory - - -```js JavaScript & TypeScript -import { Volume } from 'e2b' - -const volume = await Volume.create('my-volume') - -// Create a new directory -await volume.makeDir('/test_dir') - -// Get information about the directory -const info = await volume.getInfo('/test_dir') - -console.log(info) -// { -// name: 'test_dir', -// type: 'directory', -// path: '/test_dir', -// size: 0, -// mode: 0o755, -// uid: 0, -// gid: 0, -// atime: 2025-05-26T12:00:00.000Z, -// mtime: 2025-05-26T12:00:00.000Z, -// ctime: 2025-05-26T12:00:00.000Z, -// } -``` -```python Python -from e2b import Volume - -volume = Volume.create('my-volume') - -# Create a new directory -volume.make_dir('/test_dir') - -# Get information about the directory -info = volume.get_info('/test_dir') - -print(info) -# VolumeEntryStat( -# name='test_dir', -# type_='directory', -# path='/test_dir', -# size=0, -# mode=0o755, -# uid=0, -# gid=0, -# atime=datetime(2025, 5, 26, 12, 0, 0), -# mtime=datetime(2025, 5, 26, 12, 0, 0), -# ctime=datetime(2025, 5, 26, 12, 0, 0), -# ) -``` - - -### Checking if a path exists - -You can check whether a file or directory exists in a volume using the `exists()` method. - - -```js JavaScript & TypeScript -import { Volume } from 'e2b' - -const volume = await Volume.create('my-volume') - -const fileExists = await volume.exists('/test_file.txt') -console.log(fileExists) // true or false -``` -```python Python -from e2b import Volume - -volume = Volume.create('my-volume') - -file_exists = volume.exists('/test_file.txt') -print(file_exists) # True or False -``` - - -### Updating metadata - -You can update file or directory metadata such as user ID, group ID, and permissions mode using the `updateMetadata()` / `update_metadata()` method. 
- - -```js JavaScript & TypeScript -import { Volume } from 'e2b' - -const volume = await Volume.create('my-volume') - -await volume.writeFile('/test_file.txt', 'Hello, world!') - -const updated = await volume.updateMetadata('/test_file.txt', { uid: 1000, gid: 1000, mode: 0o600 }) - -console.log(updated) -// { -// name: 'test_file.txt', -// type: 'file', -// path: '/test_file.txt', -// size: 13, -// mode: 0o600, -// uid: 1000, -// gid: 1000, -// ... -// } -``` -```python Python -from e2b import Volume - -volume = Volume.create('my-volume') - -volume.write_file('/test_file.txt', 'Hello, world!') - -updated = volume.update_metadata('/test_file.txt', uid=1000, gid=1000, mode=0o600) - -print(updated) -# VolumeEntryStat( -# name='test_file.txt', -# type_='file', -# path='/test_file.txt', -# size=13, -# mode=0o600, -# uid=1000, -# gid=1000, -# ... -# ) -``` - diff --git a/docs/volumes/manage.mdx b/docs/volumes/manage.mdx deleted file mode 100644 index 08bbd6c0..00000000 --- a/docs/volumes/manage.mdx +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: "Managing volumes" ---- - -## Create a volume - - -Volume names can only contain letters, numbers, and hyphens. - - - -```js JavaScript & TypeScript -import { Volume } from 'e2b' - -const volume = await Volume.create('my-volume') -console.log(volume.volumeId) // Volume ID -console.log(volume.name) // 'my-volume' -``` -```python Python -from e2b import Volume - -volume = Volume.create('my-volume') -print(volume.volume_id) # Volume ID -print(volume.name) # 'my-volume' -``` - - -## Connect to an existing volume - -You can connect to an existing volume by its ID using the `connect()` method. 
- - -```js JavaScript & TypeScript -import { Volume } from 'e2b' - -const volume = await Volume.connect('volume-id') -console.log(volume.volumeId) // Volume ID -console.log(volume.name) // Volume name -``` -```python Python -from e2b import Volume - -volume = Volume.connect('volume-id') -print(volume.volume_id) # Volume ID -print(volume.name) # Volume name -``` - - -## List volumes - - -```js JavaScript & TypeScript -import { Volume } from 'e2b' - -const volumes = await Volume.list() -console.log(volumes) -// [{ volumeId: '...', name: 'my-volume' }, ...] -``` -```python Python -from e2b import Volume - -volumes = Volume.list() -print(volumes) -# [VolumeInfo(volume_id='...', name='my-volume'), ...] -``` - - -## Get volume info - - -```js JavaScript & TypeScript -import { Volume } from 'e2b' - -const info = await Volume.getInfo('volume-id') -console.log(info) -// { volumeId: '...', name: 'my-volume' } -``` -```python Python -from e2b import Volume - -info = Volume.get_info('volume-id') -print(info) -# VolumeInfo(volume_id='...', name='my-volume') -``` - - -## Destroy a volume - - -```js JavaScript & TypeScript -import { Volume } from 'e2b' - -const success = await Volume.destroy('volume-id') -console.log(success) // true -``` -```python Python -from e2b import Volume - -success = Volume.destroy('volume-id') -print(success) # True -``` - diff --git a/docs/volumes/mount.mdx b/docs/volumes/mount.mdx deleted file mode 100644 index ab02a12d..00000000 --- a/docs/volumes/mount.mdx +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: "Mounting volumes" ---- - -You can mount one or more volumes to a sandbox when creating it. The keys of the `volumeMounts` / `volume_mounts` object are the mount paths inside the sandbox. 
- - -```js JavaScript & TypeScript -import { Volume, Sandbox } from 'e2b' - -const volume = await Volume.create('my-volume') - -// You can pass a Volume object -const sandbox = await Sandbox.create({ - volumeMounts: { - '/mnt/my-data': volume, - }, -}) - -// Or pass the volume name directly -const sandbox = await Sandbox.create({ - volumeMounts: { - '/mnt/my-data': 'my-volume', - }, -}) - -// Files written to /mnt/my-data inside the sandbox are persisted in the volume -await sandbox.files.write('/mnt/my-data/hello.txt', 'Hello, world!') -``` -```python Python -from e2b import Volume, Sandbox - -volume = Volume.create('my-volume') - -# You can pass a Volume object -sandbox = Sandbox.create( - volume_mounts={ - '/mnt/my-data': volume, - }, -) - -# Or pass the volume name directly -sandbox = Sandbox.create( - volume_mounts={ - '/mnt/my-data': 'my-volume', - }, -) - -# Files written to /mnt/my-data inside the sandbox are persisted in the volume -sandbox.files.write('/mnt/my-data/hello.txt', 'Hello, world!') -``` - diff --git a/docs/volumes/read-write.mdx b/docs/volumes/read-write.mdx deleted file mode 100644 index 43838b6c..00000000 --- a/docs/volumes/read-write.mdx +++ /dev/null @@ -1,135 +0,0 @@ ---- -title: "Read & write files" -sidebarTitle: Read & write ---- - -## Reading files - -You can read files from a volume using the `readFile()` / `read_file()` method. - - -```js JavaScript & TypeScript -import { Volume } from 'e2b' - -const volume = await Volume.create('my-volume') - -const content = await volume.readFile('/path/to/file') -console.log(content) -``` -```python Python -from e2b import Volume - -volume = Volume.create('my-volume') - -content = volume.read_file('/path/to/file') -print(content) -``` - - -## Writing files - -You can write files to a volume using the `writeFile()` / `write_file()` method. 
- - -```js JavaScript & TypeScript -import { Volume } from 'e2b' - -const volume = await Volume.create('my-volume') - -await volume.writeFile('/path/to/file', 'file content') -``` -```python Python -from e2b import Volume - -volume = Volume.create('my-volume') - -volume.write_file('/path/to/file', 'file content') -``` - - -## Creating directories - -You can create directories in a volume using the `makeDir()` / `make_dir()` method. - - -```js JavaScript & TypeScript -import { Volume } from 'e2b' - -const volume = await Volume.create('my-volume') - -await volume.makeDir('/path/to/dir') - -// Create nested directories with force option -await volume.makeDir('/path/to/nested/dir', { force: true }) -``` -```python Python -from e2b import Volume - -volume = Volume.create('my-volume') - -volume.make_dir('/path/to/dir') - -# Create nested directories with force option -volume.make_dir('/path/to/nested/dir', force=True) -``` - - -## Listing directory contents - -You can list the contents of a directory in a volume using the `list()` method. - - -```js JavaScript & TypeScript -import { Volume } from 'e2b' - -const volume = await Volume.create('my-volume') - -const entries = await volume.list('/path/to/dir') -console.log(entries) -// [ -// { name: 'file.txt', type: 'file', path: '/path/to/dir/file.txt', size: 13, ... }, -// { name: 'subdir', type: 'directory', path: '/path/to/dir/subdir', size: 0, ... }, -// ] -``` -```python Python -from e2b import Volume - -volume = Volume.create('my-volume') - -entries = volume.list('/path/to/dir') -print(entries) -# [ -# VolumeEntryStat(name='file.txt', type_='file', path='/path/to/dir/file.txt', size=13, ...), -# VolumeEntryStat(name='subdir', type_='directory', path='/path/to/dir/subdir', size=0, ...), -# ] -``` - - -## Removing files or directories - -You can remove files or directories from a volume using the `remove()` method. 
- - -```js JavaScript & TypeScript -import { Volume } from 'e2b' - -const volume = await Volume.create('my-volume') - -// Remove a file -await volume.remove('/path/to/file') - -// Remove a directory recursively -await volume.remove('/path/to/dir', { recursive: true }) -``` -```python Python -from e2b import Volume - -volume = Volume.create('my-volume') - -# Remove a file -volume.remove('/path/to/file') - -# Remove a directory recursively -volume.remove('/path/to/dir', recursive=True) -``` - diff --git a/docs/volumes/upload.mdx b/docs/volumes/upload.mdx deleted file mode 100644 index dee64c68..00000000 --- a/docs/volumes/upload.mdx +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: "Upload data to volume" -sidebarTitle: Upload data ---- - -You can upload data to a volume using the `writeFile()` / `write_file()` method. - -## Upload single file - - -```js JavaScript & TypeScript -import fs from 'fs' -import { Volume } from 'e2b' - -const volume = await Volume.create('my-volume') - -// Read file from local filesystem -const content = fs.readFileSync('/local/path') -// Upload file to volume -await volume.writeFile('/path/in/volume', content) -``` -```python Python -from e2b import Volume - -volume = Volume.create('my-volume') - -# Read file from local filesystem -with open('/local/path', 'rb') as file: - # Upload file to volume - volume.write_file('/path/in/volume', file) -``` - - -## Upload directory / multiple files - - -```js JavaScript & TypeScript -import fs from 'fs' -import path from 'path' -import { Volume } from 'e2b' - -const volume = await Volume.create('my-volume') - -const directoryPath = '/local/dir' -const files = fs.readdirSync(directoryPath) - -for (const file of files) { - const fullPath = path.join(directoryPath, file) - - // Skip directories - if (!fs.statSync(fullPath).isFile()) continue - - const content = fs.readFileSync(fullPath) - await volume.writeFile(`/upload/${file}`, content) -} -``` -```python Python -import os -from e2b import Volume - -volume 
= Volume.create('my-volume') - -directory_path = '/local/dir' - -for filename in os.listdir(directory_path): - file_path = os.path.join(directory_path, filename) - - # Skip directories - if not os.path.isfile(file_path): - continue - - with open(file_path, 'rb') as file: - volume.write_file(f'/upload/{filename}', file) -``` - From e0711ea49848eb0ad4fe459069e9c6fe485fa08a Mon Sep 17 00:00:00 2001 From: Tomas Beran Date: Mon, 30 Mar 2026 11:53:45 +0200 Subject: [PATCH 15/22] docs: move filesystem directly after commands in sidebar --- docs.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs.json b/docs.json index c6473f0d..5f0b073a 100644 --- a/docs.json +++ b/docs.json @@ -74,6 +74,7 @@ "docs/sandbox/lifecycle", "docs/sandbox/persistence", "docs/sandbox/commands", + "docs/sandbox/filesystem", "docs/sandbox/configuration", "docs/sandbox/security", "docs/sandbox/observability", @@ -81,7 +82,6 @@ "docs/sandbox/git-integration", "docs/sandbox/proxy-tunneling", "docs/sandbox/custom-domain", - "docs/sandbox/filesystem", "docs/sandbox/volumes", "docs/sandbox/mcp", "docs/sandbox/mcp/available-servers" From 30ad99353c1ad3d3d4210388cb236cb935e0cd37 Mon Sep 17 00:00:00 2001 From: Tomas Beran Date: Mon, 30 Mar 2026 11:55:32 +0200 Subject: [PATCH 16/22] docs: restore support contact info and volume diagrams on volumes page --- docs/sandbox/volumes.mdx | 32 +++++++++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/docs/sandbox/volumes.mdx b/docs/sandbox/volumes.mdx index efa8be9c..7c1690fc 100644 --- a/docs/sandbox/volumes.mdx +++ b/docs/sandbox/volumes.mdx @@ -5,10 +5,40 @@ icon: "hard-drive" description: "Persistent storage volumes for E2B sandboxes — create, mount, read, write, and manage files that persist across sandbox sessions." --- -Volumes is currently in private beta. + +Volumes are currently in private beta. +If you'd like access, please reach out to us at [support@e2b.dev](mailto:support@e2b.dev). 
+ Volumes provide persistent storage that exists independently of any single sandbox. Data written to a volume survives sandbox shutdowns and can be shared across multiple sandbox sessions. Use volumes when you need files, datasets, or other artifacts to persist beyond the lifecycle of an individual sandbox. +**One volume shared across multiple sandboxes** + +```mermaid actions={false} +graph LR + V1[Volume A] --- S1[Sandbox 1] + V1 --- S2[Sandbox 2] + V1 --- S3[Sandbox 3] +``` + +**Each sandbox with its own volume** + +```mermaid actions={false} +graph LR + V2[Volume A] --- S4[Sandbox 1] + V3[Volume B] --- S5[Sandbox 2] +``` + +**Standalone usage via SDK** + +```mermaid actions={false} +graph LR + SDK[SDK] --- V4[Volume A] + SDK --- V5[Volume B] +``` + +When a volume is mounted to a sandbox, files can be read and written directly at the mount path. The SDK methods below are meant to be used when the volume is not mounted to any sandbox. + ## Managing volumes ### Create a volume From 657dc1195e13481def102658da981711204c8250 Mon Sep 17 00:00:00 2001 From: Tomas Beran Date: Mon, 30 Mar 2026 11:56:28 +0200 Subject: [PATCH 17/22] docs: clarify that sandbox filesystem is already persistent, volumes are for cross-sandbox storage --- docs/sandbox/volumes.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sandbox/volumes.mdx b/docs/sandbox/volumes.mdx index 7c1690fc..583d122f 100644 --- a/docs/sandbox/volumes.mdx +++ b/docs/sandbox/volumes.mdx @@ -10,7 +10,7 @@ Volumes are currently in private beta. If you'd like access, please reach out to us at [support@e2b.dev](mailto:support@e2b.dev). -Volumes provide persistent storage that exists independently of any single sandbox. Data written to a volume survives sandbox shutdowns and can be shared across multiple sandbox sessions. Use volumes when you need files, datasets, or other artifacts to persist beyond the lifecycle of an individual sandbox. 
+Every sandbox already has its own persistent filesystem — when you [pause and resume](/docs/sandbox/persistence) a sandbox, all files are preserved. Volumes solve a different problem: storage that exists independently of any single sandbox. Data written to a volume survives sandbox shutdowns and can be shared across multiple sandbox sessions. Use volumes when you need to share files between sandboxes or keep data accessible even after a sandbox is killed. **One volume shared across multiple sandboxes** From c1c68e792c61ddaad0bfa15cfcd7933d1f9f76c5 Mon Sep 17 00:00:00 2001 From: Tomas Beran Date: Mon, 30 Mar 2026 11:57:39 +0200 Subject: [PATCH 18/22] docs: make sandbox filesystem note more visible with Info callout on volumes page --- docs/sandbox/volumes.mdx | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/sandbox/volumes.mdx b/docs/sandbox/volumes.mdx index 583d122f..e8f7234f 100644 --- a/docs/sandbox/volumes.mdx +++ b/docs/sandbox/volumes.mdx @@ -10,7 +10,11 @@ Volumes are currently in private beta. If you'd like access, please reach out to us at [support@e2b.dev](mailto:support@e2b.dev). -Every sandbox already has its own persistent filesystem — when you [pause and resume](/docs/sandbox/persistence) a sandbox, all files are preserved. Volumes solve a different problem: storage that exists independently of any single sandbox. Data written to a volume survives sandbox shutdowns and can be shared across multiple sandbox sessions. Use volumes when you need to share files between sandboxes or keep data accessible even after a sandbox is killed. + +**Most use cases don't need volumes.** Every sandbox already has its own persistent filesystem — when you [pause and resume](/docs/sandbox/persistence) a sandbox, all files are preserved automatically. Volumes are for when you need storage that is **shared across multiple sandboxes** or persists independently of any single sandbox. 
+ + +Volumes provide storage that exists independently of any single sandbox. Data written to a volume survives sandbox shutdowns and can be mounted to different sandboxes over time. **One volume shared across multiple sandboxes** From 99f80d2f517eaeb50c45e954604dfc38fc1cf59c Mon Sep 17 00:00:00 2001 From: Tomas Beran Date: Mon, 30 Mar 2026 12:00:04 +0200 Subject: [PATCH 19/22] docs: use Note component for volumes filesystem callout --- docs/sandbox/volumes.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sandbox/volumes.mdx b/docs/sandbox/volumes.mdx index e8f7234f..9fddb313 100644 --- a/docs/sandbox/volumes.mdx +++ b/docs/sandbox/volumes.mdx @@ -10,9 +10,9 @@ Volumes are currently in private beta. If you'd like access, please reach out to us at [support@e2b.dev](mailto:support@e2b.dev). - + **Most use cases don't need volumes.** Every sandbox already has its own persistent filesystem — when you [pause and resume](/docs/sandbox/persistence) a sandbox, all files are preserved automatically. Volumes are for when you need storage that is **shared across multiple sandboxes** or persists independently of any single sandbox. - + Volumes provide storage that exists independently of any single sandbox. Data written to a volume survives sandbox shutdowns and can be mounted to different sandboxes over time. 
From 49cffb6778e9d3bb696967aea17cfb49af641898 Mon Sep 17 00:00:00 2001 From: Tomas Beran Date: Mon, 30 Mar 2026 12:19:06 +0200 Subject: [PATCH 20/22] docs: add missing redirects for deleted use-cases pages --- docs.json | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs.json b/docs.json index 5f0b073a..c4d3889e 100644 --- a/docs.json +++ b/docs.json @@ -4104,6 +4104,16 @@ "source": "/docs/sandbox/ip-tunneling", "destination": "/docs/sandbox/proxy-tunneling", "permanent": true + }, + { + "source": "/docs/use-cases/agent-browser", + "destination": "/docs/use-cases/computer-use", + "permanent": true + }, + { + "source": "/docs/use-cases/browser-use", + "destination": "/docs/use-cases/computer-use", + "permanent": true } ] } From 439dea14681be27dd811db8e9a8a09c3efca75dd Mon Sep 17 00:00:00 2001 From: Tomas Beran Date: Mon, 30 Mar 2026 12:27:46 +0200 Subject: [PATCH 21/22] docs: rename Lifecycle page to Managing sandboxes The page content (timeouts, metadata, list/filter, connect, shutdown) is about sandbox management, not lifecycle per se. The actual lifecycle story is on overview + persistence pages. --- docs/sandbox.mdx | 4 ++-- docs/sandbox/lifecycle.mdx | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/sandbox.mdx b/docs/sandbox.mdx index 699443ed..19150a43 100644 --- a/docs/sandbox.mdx +++ b/docs/sandbox.mdx @@ -70,8 +70,8 @@ print(result.stdout) ## Core - - Timeouts, sandbox info, metadata, listing, connecting, and shutdown. + + Timeouts, sandbox info, metadata, listing, filtering, and shutdown. Pause/resume, auto-pause, auto-resume, snapshots, and state transitions. diff --git a/docs/sandbox/lifecycle.mdx b/docs/sandbox/lifecycle.mdx index 4d73ff76..4d66ba2a 100644 --- a/docs/sandbox/lifecycle.mdx +++ b/docs/sandbox/lifecycle.mdx @@ -1,8 +1,8 @@ --- -title: "Sandbox lifecycle" -sidebarTitle: Lifecycle -icon: "rotate" -description: "Manage E2B sandbox lifecycle — timeouts, metadata, listing, connecting, and shutdown. 
Sandboxes are persistent cloud environments for AI agents." +title: "Managing sandboxes" +sidebarTitle: Managing sandboxes +icon: "list-check" +description: "Manage E2B sandboxes — timeouts, metadata, listing, filtering, connecting, and shutdown. Sandboxes are persistent cloud environments for AI agents." --- Sandboxes stay running as long as you need them. When their timeout expires, they can automatically [pause to save resources](/docs/sandbox/persistence) — preserving their full state so you can resume at any time. You can also configure an explicit timeout or shut down a sandbox manually. From f758c0e395ab8d3c4e990c4e608e1a4b53012715 Mon Sep 17 00:00:00 2001 From: Tomas Beran Date: Mon, 30 Mar 2026 12:41:14 +0200 Subject: [PATCH 22/22] docs: replace code-interpreter imports with base e2b SDK in sandbox pages None of the sandbox section examples use run_code or code interpreter features, so they should import from 'e2b' / 'from e2b import' instead of the code-interpreter package. --- docs/sandbox/commands.mdx | 40 ++++++++++++++-------------- docs/sandbox/filesystem.mdx | 44 +++++++++++++++---------------- docs/sandbox/lifecycle.mdx | 48 +++++++++++++++++----------------- docs/sandbox/observability.mdx | 8 +++--- docs/sandbox/persistence.mdx | 24 ++++++++--------- docs/sandbox/security.mdx | 48 +++++++++++++++++----------------- 6 files changed, 106 insertions(+), 106 deletions(-) diff --git a/docs/sandbox/commands.mdx b/docs/sandbox/commands.mdx index ea3cfff2..c867b4a8 100644 --- a/docs/sandbox/commands.mdx +++ b/docs/sandbox/commands.mdx @@ -13,14 +13,14 @@ Use `commands.run()` to execute terminal commands inside the sandbox. 
```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sandbox = await Sandbox.create() const result = await sandbox.commands.run('ls -l') console.log(result) ``` ```python Python -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sandbox = Sandbox.create() result = sandbox.commands.run('ls -l') @@ -34,7 +34,7 @@ To stream command output as it is being executed, pass `onStdout`/`onStderr` cal ```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sandbox = await Sandbox.create() @@ -49,7 +49,7 @@ const result = await sandbox.commands.run('echo hello; sleep 1; echo world', { console.log(result) ``` ```python Python -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sandbox = Sandbox.create() @@ -64,7 +64,7 @@ To run commands in background, pass the `background` option. This returns immedi ```js JavaScript & TypeScript highlight={7} -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sandbox = await Sandbox.create() @@ -80,7 +80,7 @@ const command = await sandbox.commands.run('echo hello; sleep 10; echo world', { await command.kill() ``` ```python Python highlight={6} -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sandbox = Sandbox.create() @@ -116,7 +116,7 @@ Use `sandbox.pty.create()` to start an interactive bash shell. ```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sandbox = await Sandbox.create() @@ -137,7 +137,7 @@ console.log('Terminal PID:', terminal.pid) ``` ```python Python -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sandbox = Sandbox() @@ -165,7 +165,7 @@ PTY sessions have a configurable timeout that controls the session duration. 
The ```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sandbox = await Sandbox.create() @@ -178,7 +178,7 @@ const terminal = await sandbox.pty.create({ ``` ```python Python -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sandbox = Sandbox() @@ -199,7 +199,7 @@ Use `sendInput()` in JavaScript or `send_stdin()` in Python to send data to the ```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sandbox = await Sandbox.create() @@ -217,7 +217,7 @@ await sandbox.pty.sendInput( ``` ```python Python -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sandbox = Sandbox() @@ -239,7 +239,7 @@ When the user's terminal window changes size, notify the PTY with `resize()`. Th ```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sandbox = await Sandbox.create() @@ -257,7 +257,7 @@ await sandbox.pty.resize(terminal.pid, { ``` ```python Python -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sandbox = Sandbox() @@ -278,7 +278,7 @@ You can disconnect from a PTY session while keeping it running, then reconnect l ```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sandbox = await Sandbox.create() @@ -311,7 +311,7 @@ await reconnected.wait() ```python Python import time -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sandbox = Sandbox() @@ -351,7 +351,7 @@ Terminate the PTY session with `kill()`. 
```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sandbox = await Sandbox.create() @@ -370,7 +370,7 @@ console.log('Killed:', killed) // true if successful ``` ```python Python -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sandbox = Sandbox() @@ -395,7 +395,7 @@ Use `wait()` to wait for the terminal session to end (e.g., when the user types ```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sandbox = await Sandbox.create() @@ -414,7 +414,7 @@ console.log('Exit code:', result.exitCode) ``` ```python Python -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sandbox = Sandbox() diff --git a/docs/sandbox/filesystem.mdx b/docs/sandbox/filesystem.mdx index d48c8988..d540f13e 100644 --- a/docs/sandbox/filesystem.mdx +++ b/docs/sandbox/filesystem.mdx @@ -17,13 +17,13 @@ You can read files from the sandbox filesystem using the `files.read()` method. ```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sandbox = await Sandbox.create() const fileContent = await sandbox.files.read('/path/to/file') ``` ```python Python -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sandbox = Sandbox.create() file_content = sandbox.files.read('/path/to/file') @@ -36,14 +36,14 @@ You can write single files to the sandbox filesystem using the `files.write()` m ```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sandbox = await Sandbox.create() await sandbox.files.write('/path/to/file', 'file content') ``` ```python Python -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sandbox = Sandbox.create() @@ -57,7 +57,7 @@ You can also write multiple files to the sandbox. 
```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sandbox = await Sandbox.create() @@ -67,7 +67,7 @@ await sandbox.files.write([ ]) ``` ```python Python -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sandbox = Sandbox.create() @@ -86,7 +86,7 @@ You can get information about a file or directory using the `files.getInfo()` / ```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sandbox = await Sandbox.create() @@ -111,7 +111,7 @@ console.log(info) // } ``` ```python Python -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sandbox = Sandbox.create() @@ -141,7 +141,7 @@ print(info) ```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sandbox = await Sandbox.create() @@ -166,7 +166,7 @@ console.log(info) // } ``` ```python Python -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sandbox = Sandbox.create() @@ -203,7 +203,7 @@ It's recommended not to collect or close watcher immediately after making a chan ```js JavaScript & TypeScript highlight={7-12} -import { Sandbox, FilesystemEventType } from '@e2b/code-interpreter' +import { Sandbox, FilesystemEventType } from 'e2b' const sandbox = await Sandbox.create() const dirname = '/home/user' @@ -220,7 +220,7 @@ const handle = await sandbox.files.watchDir(dirname, async (event) => { await sandbox.files.write(`${dirname}/my-file`, 'hello') ``` ```python Python highlight={7,12-16} -from e2b_code_interpreter import Sandbox, FilesystemEventType +from e2b import Sandbox, FilesystemEventType sandbox = Sandbox.create() dirname = '/home/user' @@ -249,7 +249,7 @@ When rapidly creating new folders (e.g., deeply nested path of folders), events ```js JavaScript & TypeScript highlight={13,17} -import { Sandbox, FilesystemEventType } from '@e2b/code-interpreter' +import { Sandbox, 
FilesystemEventType } from 'e2b' const sandbox = await Sandbox.create() const dirname = '/home/user' @@ -268,7 +268,7 @@ const handle = await sandbox.files.watchDir(dirname, async (event) => { await sandbox.files.write(`${dirname}/my-folder/my-file`, 'hello') ``` ```python Python highlight={7,9} -from e2b_code_interpreter import Sandbox, FilesystemEventType +from e2b import Sandbox, FilesystemEventType sandbox = Sandbox.create() dirname = '/home/user' @@ -296,7 +296,7 @@ You can upload data to the sandbox using the `files.write()` method. ```js JavaScript & TypeScript import fs from 'fs' -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sandbox = await Sandbox.create() @@ -306,7 +306,7 @@ const content = fs.readFileSync('/local/path') await sandbox.files.write('/path/in/sandbox', content) ``` ```python Python -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sandbox = Sandbox.create() @@ -327,7 +327,7 @@ You can optionally set an expiration time for the URL so that it will be valid o ```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' // Start a secured sandbox (all operations must be authorized by default) const sandbox = await Sandbox.create(template, { secure: true }) @@ -370,7 +370,7 @@ content = sandbox.files.read('/path/in/sandbox') ```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sandbox = await Sandbox.create() @@ -412,7 +412,7 @@ await sandbox.files.write(files) ``` ```python Python import os -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sandbox = Sandbox.create() @@ -455,7 +455,7 @@ You can download data from the sandbox using the `files.read()` method. 
```js JavaScript & TypeScript import fs from 'fs' -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sandbox = await Sandbox.create() @@ -465,7 +465,7 @@ const content = await sandbox.files.read('/path/in/sandbox') fs.writeFileSync('/local/path', content) ``` ```python Python -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sandbox = Sandbox.create() @@ -488,7 +488,7 @@ You can optionally set an expiration time for the URL so that it will be valid o ```js JavaScript & TypeScript import fs from 'fs' -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' // Start a secured sandbox (all operations must be authorized by default) const sandbox = await Sandbox.create(template, { secure: true }) diff --git a/docs/sandbox/lifecycle.mdx b/docs/sandbox/lifecycle.mdx index 4d66ba2a..04c0728a 100644 --- a/docs/sandbox/lifecycle.mdx +++ b/docs/sandbox/lifecycle.mdx @@ -17,7 +17,7 @@ Every sandbox has a configurable timeout that determines how long it stays runni ```js JavaScript & TypeScript highlight={6} -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' // Create a sandbox and keep it running for 60 seconds. // 🚨 Note: The units are milliseconds. @@ -26,7 +26,7 @@ const sandbox = await Sandbox.create({ }) ``` ```python Python highlight={6} -from e2b_code_interpreter import Sandbox +from e2b import Sandbox # Create a sandbox and keep it running for 60 seconds. # 🚨 Note: The units are seconds. @@ -47,7 +47,7 @@ You can for example start with a sandbox with 1 minute timeout and then periodic ```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' // Create a sandbox and keep it running for 60 seconds. 
const sandbox = await Sandbox.create({ timeoutMs: 60_000 }) @@ -57,7 +57,7 @@ const sandbox = await Sandbox.create({ timeoutMs: 60_000 }) await sandbox.setTimeout(30_000) ``` ```python Python -from e2b_code_interpreter import Sandbox +from e2b import Sandbox # Create a sandbox and keep it running for 60 seconds. sandbox = Sandbox.create(timeout=60) @@ -74,7 +74,7 @@ You can retrieve sandbox information like sandbox ID, template, metadata, starte ```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' // Create a sandbox and keep it running for 60 seconds. const sandbox = await Sandbox.create({ timeoutMs: 60_000 }) @@ -95,7 +95,7 @@ console.log(info) ``` ```python Python -from e2b_code_interpreter import Sandbox +from e2b import Sandbox # Create a sandbox and keep it running for 60 seconds. sandbox = Sandbox.create(timeout=60) @@ -123,7 +123,7 @@ You specify metadata when creating a sandbox and can access it later through [li ```js JavaScript & TypeScript highlight={6} -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' // Create sandbox with metadata. const sandbox = await Sandbox.create({ @@ -142,7 +142,7 @@ const runningSandboxes = await paginator.nextItems() console.log(runningSandboxes[0].metadata) ``` ```python Python highlight={6} -from e2b_code_interpreter import Sandbox +from e2b import Sandbox # Create sandbox with metadata. sandbox = Sandbox.create( @@ -174,7 +174,7 @@ You can list sandboxes using the `Sandbox.list()` method. The method supports pa ```js JavaScript & TypeScript highlight={6,11,14,24} - import { Sandbox, SandboxInfo } from '@e2b/code-interpreter' + import { Sandbox, SandboxInfo } from 'e2b' const sandbox = await Sandbox.create( { @@ -200,7 +200,7 @@ You can list sandboxes using the `Sandbox.list()` method. 
The method supports pa const nextPage = await paginator.nextItems() ``` ```python Python highlight={5,9,12,22} - from e2b_code_interpreter import Sandbox, SandboxInfo + from e2b import Sandbox, SandboxInfo sandbox = Sandbox.create( metadata={ @@ -231,7 +231,7 @@ Filter sandboxes by their current state. The state parameter can contain either ```js JavaScript & TypeScript highlight={9,13} - import { Sandbox } from '@e2b/code-interpreter' + import { Sandbox } from 'e2b' // Create a sandbox. const sandbox = await Sandbox.create() @@ -246,7 +246,7 @@ Filter sandboxes by their current state. The state parameter can contain either const sandboxes = await paginator.nextItems() ``` ```python Python highlight={9,14} - from e2b_code_interpreter import Sandbox, SandboxQuery, SandboxState + from e2b import Sandbox, SandboxQuery, SandboxState # Create a sandbox with metadata. sandbox = Sandbox.create() @@ -267,7 +267,7 @@ You can also filter sandboxes by metadata key-value pairs specified during creat ```js JavaScript & TypeScript highlight={6-8,15,18} - import { Sandbox } from '@e2b/code-interpreter' + import { Sandbox } from 'e2b' // Create sandbox with metadata. const sandbox = await Sandbox.create({ @@ -288,7 +288,7 @@ You can also filter sandboxes by metadata key-value pairs specified during creat const sandboxes = await paginator.nextItems() ``` ```python Python highlight={6-8,16-17} - from e2b_code_interpreter import Sandbox, SandboxQuery, SandboxState + from e2b import Sandbox, SandboxQuery, SandboxState # Create sandbox with metadata. 
sandbox = Sandbox.create( @@ -320,7 +320,7 @@ For more granular pagination, you can set custom per-page item limit (default an ```js JavaScript & TypeScript highlight={4-5,16} - import { Sandbox } from '@e2b/code-interpreter' + import { Sandbox } from 'e2b' const paginator = Sandbox.list({ limit: 100, @@ -338,7 +338,7 @@ For more granular pagination, you can set custom per-page item limit (default an await paginator.nextItems() ``` ```python Python highlight={5-6,13} - from e2b_code_interpreter import Sandbox + from e2b import Sandbox paginator = Sandbox.list( limit=100, @@ -357,7 +357,7 @@ You can fetch all pages by looping through the paginator while checking if there ```js JavaScript & TypeScript highlight={7} - import { Sandbox } from '@e2b/code-interpreter' + import { Sandbox } from 'e2b' const paginator = Sandbox.list() @@ -369,7 +369,7 @@ You can fetch all pages by looping through the paginator while checking if there } ``` ```python Python highlight={7} - from e2b_code_interpreter import Sandbox, SandboxQuery + from e2b import Sandbox, SandboxQuery paginator = Sandbox.list() @@ -391,7 +391,7 @@ To connect to a running sandbox, you first need to retrieve its ID. 
You can do t ```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sbx = await Sandbox.create() @@ -410,7 +410,7 @@ const sandboxId = runningSandboxes[0].sandboxId ``` ```python Python -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sbx = Sandbox.create() @@ -433,7 +433,7 @@ Now that you have the sandbox ID, you can connect to the sandbox using the `Sand ```js JavaScript & TypeScript highlight={3} -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sandbox = await Sandbox.connect(sandboxId) @@ -444,7 +444,7 @@ console.log(`Running in sandbox ${sandbox.sandboxId} as "${result.stdout.trim()} ``` ```python Python highlight={3} -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sandbox = Sandbox.connect(sandbox_id) @@ -461,7 +461,7 @@ You can shutdown the sandbox any time even before the timeout is up by calling t ```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' // Create a sandbox and keep it running for 60 seconds. const sandbox = await Sandbox.create({ timeoutMs: 60_000 }) @@ -470,7 +470,7 @@ const sandbox = await Sandbox.create({ timeoutMs: 60_000 }) await sandbox.kill() ``` ```python Python -from e2b_code_interpreter import Sandbox +from e2b import Sandbox # Create a sandbox and keep it running for 60 seconds. 
sandbox = Sandbox.create(timeout=60) diff --git a/docs/sandbox/observability.mdx b/docs/sandbox/observability.mdx index 8149927d..1b6bb168 100644 --- a/docs/sandbox/observability.mdx +++ b/docs/sandbox/observability.mdx @@ -15,7 +15,7 @@ Sandbox metrics let you monitor CPU, memory, and disk usage of running sandboxes ```js JavaScript & TypeScript highlight={9} -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sbx = await Sandbox.create() console.log('Sandbox created', sbx.sandboxId) @@ -54,7 +54,7 @@ console.log('Sandbox metrics:', metrics) ``` ```python Python highlight={10} from time import sleep -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sbx = Sandbox.create() print('Sandbox created', sbx.sandbox_id) @@ -124,7 +124,7 @@ Query Parameters: ```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sbx = await Sandbox.create() @@ -198,7 +198,7 @@ console.log(teamSandboxEvents) ``` ```python Python import requests -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sbx = Sandbox.create() diff --git a/docs/sandbox/persistence.mdx b/docs/sandbox/persistence.mdx index e7e1de86..fb2fb5b7 100644 --- a/docs/sandbox/persistence.mdx +++ b/docs/sandbox/persistence.mdx @@ -39,7 +39,7 @@ flowchart TD ```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sandbox = await Sandbox.create() // Starts in Running state @@ -54,7 +54,7 @@ await sandbox.kill() // Running/Paused → Killed ``` ```python Python -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sandbox = Sandbox.create() # Starts in Running state @@ -77,7 +77,7 @@ When you pause a sandbox, both the sandbox's filesystem and memory state will be ```js JavaScript & TypeScript highlight={8-9} -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sbx = await Sandbox.create() 
console.log('Sandbox created', sbx.sandboxId) @@ -88,7 +88,7 @@ await sbx.pause() console.log('Sandbox paused', sbx.sandboxId) ``` ```python Python highlight={8-9} -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sbx = Sandbox.create() print('Sandbox created', sbx.sandbox_id) @@ -107,7 +107,7 @@ This means that all the files in the sandbox's filesystem will be restored and a ```js JavaScript & TypeScript highlight={12-13} -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sbx = await Sandbox.create() console.log('Sandbox created', sbx.sandboxId) @@ -122,7 +122,7 @@ const sameSbx = await sbx.connect() console.log('Connected to the sandbox', sameSbx.sandboxId) ``` ```python Python highlight={12-13} -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sbx = Sandbox.create() print('Sandbox created', sbx.sandbox_id) @@ -144,7 +144,7 @@ You can list all paused sandboxes by calling the `Sandbox.list` method and suppl ```js JavaScript & TypeScript highlight={4,7} -import { Sandbox, SandboxInfo } from '@e2b/code-interpreter' +import { Sandbox, SandboxInfo } from 'e2b' // List all paused sandboxes const paginator = Sandbox.list({ query: { state: ['paused'] } }) @@ -160,7 +160,7 @@ while (paginator.hasNext) { ``` ```python Python highlight={4,7} # List all paused sandboxes -from e2b_code_interpreter import Sandbox, SandboxQuery, SandboxState +from e2b import Sandbox, SandboxQuery, SandboxState paginator = Sandbox.list(SandboxQuery(state=[SandboxState.PAUSED])) @@ -180,7 +180,7 @@ You can remove paused sandboxes by calling the `kill` method on the Sandbox inst ```js JavaScript & TypeScript highlight={11,14} -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sbx = await Sandbox.create() console.log('Sandbox created', sbx.sandboxId) @@ -196,7 +196,7 @@ await sbx.kill() await Sandbox.kill(sbx.sandboxId) ``` ```python Python highlight={9,12} -from e2b_code_interpreter import 
Sandbox +from e2b import Sandbox sbx = Sandbox.create() @@ -217,12 +217,12 @@ When you connect to a sandbox, the inactivity timeout resets. The default is 5 m ```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sbx = await Sandbox.connect(sandboxId, { timeoutMs: 60 * 1000 }) // 60 seconds ``` ```python Python -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sbx = Sandbox.connect(sandbox_id, timeout=60) # 60 seconds ``` diff --git a/docs/sandbox/security.mdx b/docs/sandbox/security.mdx index 3785c5bc..627d4d07 100644 --- a/docs/sandbox/security.mdx +++ b/docs/sandbox/security.mdx @@ -47,13 +47,13 @@ Disabling secured access is discouraged because it creates security vulnerabilit ```js JavaScript & TypeScript - import { Sandbox } from '@e2b/code-interpreter' + import { Sandbox } from 'e2b' const sandbox = await Sandbox.create({ secure: false }) // Explicitly disable ``` ```python Python - from e2b_code_interpreter import Sandbox + from e2b import Sandbox sandbox = Sandbox.create(secure=False) # Explicitly disable ``` @@ -69,7 +69,7 @@ You can control whether a sandbox has access to the internet by using the `allow ```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' // Create sandbox with internet access enabled (default) const sandbox = await Sandbox.create({ allowInternetAccess: true }) @@ -78,7 +78,7 @@ const sandbox = await Sandbox.create({ allowInternetAccess: true }) const isolatedSandbox = await Sandbox.create({ allowInternetAccess: false }) ``` ```python Python -from e2b_code_interpreter import Sandbox +from e2b import Sandbox # Create sandbox with internet access enabled (default) sandbox = Sandbox.create(allow_internet_access=True) @@ -104,7 +104,7 @@ You can specify IP addresses, CIDR blocks, or domain names that the sandbox is a ```js JavaScript & TypeScript -import { Sandbox, ALL_TRAFFIC } from '@e2b/code-interpreter' 
+import { Sandbox, ALL_TRAFFIC } from 'e2b' // Deny all traffic except specific IPs const sandbox = await Sandbox.create({ @@ -122,7 +122,7 @@ const restrictedSandbox = await Sandbox.create({ }) ``` ```python Python -from e2b_code_interpreter import Sandbox, ALL_TRAFFIC +from e2b import Sandbox, ALL_TRAFFIC # Deny all traffic except specific IPs sandbox = Sandbox.create( @@ -147,7 +147,7 @@ You can allow traffic to specific domains by specifying hostnames in `allow out` ```js JavaScript & TypeScript -import { Sandbox, ALL_TRAFFIC } from '@e2b/code-interpreter' +import { Sandbox, ALL_TRAFFIC } from 'e2b' // Allow only traffic to google.com const sandbox = await Sandbox.create({ @@ -158,7 +158,7 @@ const sandbox = await Sandbox.create({ }) ``` ```python Python -from e2b_code_interpreter import Sandbox, ALL_TRAFFIC +from e2b import Sandbox, ALL_TRAFFIC # Allow only traffic to google.com sandbox = Sandbox.create( @@ -178,7 +178,7 @@ You can also use wildcards to allow all subdomains of a domain: ```js JavaScript & TypeScript -import { Sandbox, ALL_TRAFFIC } from '@e2b/code-interpreter' +import { Sandbox, ALL_TRAFFIC } from 'e2b' // Allow traffic to any subdomain of mydomain.com const sandbox = await Sandbox.create({ @@ -189,7 +189,7 @@ const sandbox = await Sandbox.create({ }) ``` ```python Python -from e2b_code_interpreter import Sandbox, ALL_TRAFFIC +from e2b import Sandbox, ALL_TRAFFIC # Allow traffic to any subdomain of mydomain.com sandbox = Sandbox.create( @@ -205,7 +205,7 @@ You can combine domain names with IP addresses and CIDR blocks: ```js JavaScript & TypeScript -import { Sandbox, ALL_TRAFFIC } from '@e2b/code-interpreter' +import { Sandbox, ALL_TRAFFIC } from 'e2b' // Allow traffic to specific domains and IPs const sandbox = await Sandbox.create({ @@ -216,7 +216,7 @@ const sandbox = await Sandbox.create({ }) ``` ```python Python -from e2b_code_interpreter import Sandbox, ALL_TRAFFIC +from e2b import Sandbox, ALL_TRAFFIC # Allow traffic to specific domains 
and IPs sandbox = Sandbox.create( @@ -238,7 +238,7 @@ When both `allow out` and `deny out` are specified, **allow rules always take pr ```js JavaScript & TypeScript -import { Sandbox, ALL_TRAFFIC } from '@e2b/code-interpreter' +import { Sandbox, ALL_TRAFFIC } from 'e2b' // Even though ALL_TRAFFIC is denied, 1.1.1.1 and 8.8.8.8 are explicitly allowed const sandbox = await Sandbox.create({ @@ -249,7 +249,7 @@ const sandbox = await Sandbox.create({ }) ``` ```python Python -from e2b_code_interpreter import Sandbox, ALL_TRAFFIC +from e2b import Sandbox, ALL_TRAFFIC # Even though ALL_TRAFFIC is denied, 1.1.1.1 and 8.8.8.8 are explicitly allowed sandbox = Sandbox.create( @@ -267,7 +267,7 @@ The `ALL_TRAFFIC` constant represents the CIDR range `0.0.0.0/0`, which matches ```js JavaScript & TypeScript -import { Sandbox, ALL_TRAFFIC } from '@e2b/code-interpreter' +import { Sandbox, ALL_TRAFFIC } from 'e2b' // Deny all outbound traffic const sandbox = await Sandbox.create({ @@ -277,7 +277,7 @@ const sandbox = await Sandbox.create({ }) ``` ```python Python -from e2b_code_interpreter import Sandbox, ALL_TRAFFIC +from e2b import Sandbox, ALL_TRAFFIC # Deny all outbound traffic sandbox = Sandbox.create( @@ -294,7 +294,7 @@ Every sandbox has a public URL that can be used to access running services insid ```js JavaScript & TypeScript highlight={6} -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sandbox = await Sandbox.create() @@ -303,7 +303,7 @@ const host = sandbox.getHost(3000) console.log(`https://${host}`) ``` ```python Python highlight={6} -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sandbox = Sandbox.create() @@ -332,7 +332,7 @@ By default, sandbox URLs are publicly accessible. 
You can restrict access to req ```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' // Create sandbox with restricted public access const sandbox = await Sandbox.create({ @@ -364,7 +364,7 @@ console.log(response2.status) // 200 ``` ```python Python import requests -from e2b_code_interpreter import Sandbox +from e2b import Sandbox # Create sandbox with restricted public access sandbox = Sandbox.create( @@ -404,7 +404,7 @@ In this example we will start a simple HTTP server that listens on port 3000 and ```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' const sandbox = await Sandbox.create() @@ -424,7 +424,7 @@ await process.kill() ``` ```python Python import requests -from e2b_code_interpreter import Sandbox +from e2b import Sandbox sandbox = Sandbox.create() @@ -450,7 +450,7 @@ You can customize the `Host` header that gets sent to services running inside th ```js JavaScript & TypeScript -import { Sandbox } from '@e2b/code-interpreter' +import { Sandbox } from 'e2b' // Create sandbox with custom host masking const sandbox = await Sandbox.create({ @@ -463,7 +463,7 @@ const sandbox = await Sandbox.create({ // Requests to the sandbox will have Host header set to for example: localhost:8080 ``` ```python Python -from e2b_code_interpreter import Sandbox +from e2b import Sandbox # Create sandbox with custom host masking sandbox = Sandbox.create(